diff --git "a/1090.jsonl" "b/1090.jsonl" new file mode 100644--- /dev/null +++ "b/1090.jsonl" @@ -0,0 +1,2326 @@ +{"seq_id":"4568514363","text":"import re\n\nfrom lark.lark import Tree\nfrom lark.lexer import Token\n\nfrom humanized_opening_hours.temporal_objects import WEEKDAYS\n\n# flake8: noqa\n\nFREQUENT_FIELDS = {\n \"24/7\": Tree(\"time_domain\", [Tree(\"always_open_rule\", [Token(\"ALWAYS_OPEN\", '24/7')])]),\n \"sunrise-sunset\": Tree(\"time_domain\", [Tree(\"rule_sequence\", [Tree(\"time_selector\", [Tree(\"timespan\", [Tree(\"time\", [Tree(\"variable_time\", [Token(\"EVENT\", 'sunrise')])]), Tree(\"time\", [Tree(\"variable_time\", [Token(\"EVENT\", 'sunset')])])])])])]),\n \"sunset-sunrise\": Tree(\"time_domain\", [Tree(\"rule_sequence\", [Tree(\"time_selector\", [Tree(\"timespan\", [Tree(\"time\", [Tree(\"variable_time\", [Token(\"EVENT\", 'sunset')])]), Tree(\"time\", [Tree(\"variable_time\", [Token(\"EVENT\", 'sunrise')])])])])])]),\n}\n\n\nRE_WDAY_OFF = re.compile(\"^[A-Z][a-z] off$\")\nRE_WDAY_TIMESPAN = re.compile(\"^[A-Z][a-z] [0-9]{2}:[0-9]{2}-[0-9]{2}:[0-9]{2}$\")\nRE_WDAY_WDAY_TIMESPAN = re.compile(\"^[A-Z][a-z]-[A-Z][a-z] [0-9]{2}:[0-9]{2}-[0-9]{2}:[0-9]{2}$\")\nRE_TIMESPAN = re.compile(\"^[0-9]{2}:[0-9]{2}-[0-9]{2}:[0-9]{2}$\")\nRE_TIMESPANS = re.compile(\"([0-9]{2}):([0-9]{2})-([0-9]{2}):([0-9]{2})\")\n\n\ndef parse_simple_field(field):\n \"\"\"Returns None or a tree if the field is simple enough.\n \n Simple field example: \"Mo-Fr 08:00-20:00; Sa 08:00-12:00\"\n \"\"\"\n # It's about 12 times faster than with Lark.\n # Effective for a bit more than 35% of OSM fields.\n splited_field = [\n part.strip() for part in field.strip(' \\n\\t;').split(';')\n ]\n parsed_parts = []\n for part in splited_field:\n if RE_WDAY_OFF.match(part):\n wday = part[:2]\n if wday not in WEEKDAYS:\n return None\n parsed_parts.append(\n Tree(\"range_modifier_rule\", [Tree(\"range_selectors\", [Tree(\"weekday_or_holiday_sequence_selector\", [Tree(\"weekday_sequence\", [Tree(\"weekday_range\", [Token(\"WDAY\", wday)])])])]), Tree(\"rule_modifier_closed\", [Token(\"CLOSED\", ' off')])])\n )\n elif RE_WDAY_TIMESPAN.match(part):\n wday = part[:2]\n if wday not in WEEKDAYS:\n return None\n timespans = []\n for timespan in RE_TIMESPANS.findall(part):\n from_h, from_m, to_h, to_m = timespan\n timespans.append(\n Tree(\"timespan\", [Tree(\"time\", [Tree(\"hour_minutes\", [Token(\"TWO_DIGITS\", from_h), Token(\"TWO_DIGITS\", from_m)])]), Tree(\"time\", [Tree(\"hour_minutes\", [Token(\"TWO_DIGITS\", to_h), Token(\"TWO_DIGITS\", to_m)])])])\n )\n parsed_parts.append(\n Tree(\"rule_sequence\", [Tree(\"range_selectors\", [Tree(\"weekday_or_holiday_sequence_selector\", [Tree(\"weekday_sequence\", [Tree(\"weekday_range\", [Token(\"WDAY\", wday)])])])]), Tree(\"time_selector\", timespans)])\n )\n elif RE_WDAY_WDAY_TIMESPAN.match(part):\n wday_from, wday_to = part[:5].split('-')\n if wday_from not in WEEKDAYS or wday_to not in WEEKDAYS:\n return None\n timespans = []\n for timespan in RE_TIMESPANS.findall(part):\n from_h, from_m, to_h, to_m = timespan\n timespans.append(\n Tree(\"timespan\", [Tree(\"time\", [Tree(\"hour_minutes\", [Token(\"TWO_DIGITS\", from_h), Token(\"TWO_DIGITS\", from_m)])]), Tree(\"time\", [Tree(\"hour_minutes\", [Token(\"TWO_DIGITS\", to_h), Token(\"TWO_DIGITS\", to_m)])])])\n )\n parsed_parts.append(\n Tree(\"rule_sequence\", [Tree(\"range_selectors\", [Tree(\"weekday_or_holiday_sequence_selector\", [Tree(\"weekday_sequence\", [Tree(\"weekday_range\", [Token(\"WDAY\", wday_from), 
Token(\"WDAY\", wday_to)])])])]), Tree(\"time_selector\", timespans)])\n )\n elif RE_TIMESPAN.match(part):\n from_h, from_m = part[:5].split(':')\n to_h, to_m = part[6:].split(':')\n parsed_parts.append(\n Tree(\"rule_sequence\", [Tree(\"time_selector\", [Tree(\"timespan\", [Tree(\"time\", [Tree(\"hour_minutes\", [Token(\"TWO_DIGITS\", from_h), Token(\"TWO_DIGITS\", from_m)])]), Tree(\"time\", [Tree(\"hour_minutes\", [Token(\"TWO_DIGITS\", to_h), Token(\"TWO_DIGITS\", to_m)])])])])])\n )\n else:\n return None\n return Tree(\"time_domain\", parsed_parts)\n","repo_name":"rezemika/humanized_opening_hours","sub_path":"humanized_opening_hours/frequent_fields.py","file_name":"frequent_fields.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"20089924695","text":"from datetime import datetime\nfrom typing import Dict, List\nfrom uuid import UUID\n\nfrom pydantic import BaseModel, Field\n\nfrom src.models.general import (\n RestrictionType,\n TracingType,\n TracingVariables,\n UnitTime\n)\n\n\nclass QuarantineVariable(BaseModel):\n name: str = Field(None)\n delay: int = Field(None)\n delay_units: UnitTime = Field(None)\n length: int = Field(None)\n length_units: UnitTime = Field(None)\n unrestricted_time: int = Field(None)\n unrestricted_time_units: UnitTime = Field(None)\n\n class Config:\n use_enum_values = True\n\n\nclass TracingRestriction(BaseModel):\n start_percentage: float = Field(...)\n stop_mode: TracingType = Field(...)\n stop_percentage: float = Field(None)\n stop_length: int = Field(None)\n stop_length_units: UnitTime = Field(None)\n variables: Dict[UUID, bool] = Field(None)\n\n class Config:\n use_enum_values = True\n\n\nclass CyclicRestriction(BaseModel):\n grace_time: datetime = Field(...)\n global_quarantine: int = Field(...)\n global_quarantine_units: UnitTime = Field(...)\n restriction_mode: RestrictionType = Field(...)\n time_without_restrictions: int = Field(None)\n time_without_restrictions_units: UnitTime = Field(None)\n variables: Dict[str, QuarantineVariable] = Field(None)\n\n\nTracing = Dict[TracingVariables, TracingRestriction]\n\n\nclass UpdateQuarantine(BaseModel):\n has_cyclic_restrictions: bool = Field(None)\n has_tracing_restrictions: bool = Field(None)\n cyclic_restrictions: CyclicRestriction = Field(None)\n tracing_restrictions: Tracing = Field(None)\n\n class Config:\n use_enum_values = True\n\n\nclass QuarantineGroup(BaseModel):\n name: str = Field(...)\n\n\nclass NewQuarantine(UpdateQuarantine):\n quarantine_groups: List[QuarantineGroup] = Field(...)\n","repo_name":"fenfisdi/cdslab_agents_config_api","sub_path":"src/models/route_models/quarantine_group.py","file_name":"quarantine_group.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27035518203","text":"\nfrom efficientnet.tfkeras import EfficientNetB0\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Dense, GlobalAveragePooling2D\nfrom tensorflow.keras.layers import Flatten, Conv2D, Dropout, MaxPooling2D\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.losses import binary_crossentropy\nfrom tensorflow.keras.utils import to_categorical\nfrom 
tensorflow.keras.models import Sequential\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint,CSVLogger,TensorBoard\n\nfrom sklearn.model_selection import train_test_split\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\nimport os\n\n# W&B Imports\n#import wandb\n#from wandb.keras import WandbCallback\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n# This is secret and shouldn't be checked into version control\n#WANDB_API_KEY=\"c329d6367775bffc964f667cec47f1b912d97508\"\n\n# Name and notes optional\n#WANDB_NAME=\"DeepVsion\"\n#WANDB_NOTES=\"Smaller learning rate, more regularization.\"\n\n#wandb.init(project=\"deepvision-final-work\",config={\"hyper\": \"parameter\"})\n\nprint(tf.__version__)\nprint(\"GPU AVAILABLE: \",tf.test.is_gpu_available())\n\n#config = tf.ConfigProto(allow_soft_placement=True)\n#config.gpu_options.allow_growth = True\n#config.gpu_options.per_process_gpu_memory_fraction = 0.9 # making sure Tensorflow doesn't overflow the GPU\n\n# Some constants\nIMG_ROWS = 256\nIMG_COLS = 256\nNUM_CLASSES = 17\nTEST_SIZE = 0.2\nRANDOM_STATE = 137\n\n#Model\nNO_EPOCHS = 30\nBATCH_SIZE = 32\n\n##################################\n## THE DATA ######################\n##################################\n\nfilename = \"train-jpg-labels.pkl\" \netiqueta = pd.read_pickle(filename)\n\netiqueta.dtypes\n\nNimages = etiqueta[\"image_name\"].size\nprint(Nimages)\n\ntrain, test = train_test_split(etiqueta, test_size = TEST_SIZE, random_state = RANDOM_STATE)\nprint(train[\"image_name\"].size)\nprint(test[\"image_name\"].size)\n\n# Training data generator\ndatagen_train = ImageDataGenerator(\n rescale=1./255, \n horizontal_flip=True,\n vertical_flip=True,\n rotation_range = 45,\n width_shift_range=0.2,\n height_shift_range=0.2)\n\n# Validation data generator\ndatagen_val = ImageDataGenerator(\n rescale=1./255)\n\n\n##################################\n## MODEL #########################\n##################################\n\ninput_shape = (IMG_ROWS,IMG_COLS,3)\n\n#### Efficient ####\neffnet = EfficientNetB0(weights='imagenet', include_top=False, input_shape=input_shape)\n\neffnet.trainable = True\n\nx = effnet.output\nx = Flatten()(x)\nx = Dense(2048, activation='relu')(x)\nx = Dropout(0.5)(x)\nx = Dense(1024, activation='relu')(x)\nx = Dropout(0.5)(x)\n\npredictions = Dense(NUM_CLASSES, activation='sigmoid')(x)\nmodel = Model(inputs = effnet.input, outputs = predictions)\n###################\n\n\n#### ResNet ####\n#restnet = ResNet50(include_top=False, weights='imagenet', input_shape=input_shape)\n#\n#output = restnet.layers[-1].output\n#output = Flatten()(output)\n#restnet = Model(inputs=restnet.input,outputs=output)\n#\n#for i,layer in enumerate(restnet.layers):\n# layer.trainable = False\n#\n#model = Sequential()\n#\n#model.add(restnet)\n#\n#model.add(Dense(1024, activation='relu'))\n#model.add(Dropout(0.5))\n#\n#model.add(Dense(512, activation='relu'))\n#model.add(Dropout(0.5))\n#\n#model.add(Dense(NUM_CLASSES, activation='sigmoid'))\n################\n\nMETRICS = [keras.metrics.TruePositives(name=\"tp\"),\n keras.metrics.FalsePositives(name=\"fp\"),\n keras.metrics.TrueNegatives(name=\"tn\"),\n keras.metrics.FalseNegatives(name=\"fn\"),\n keras.metrics.BinaryAccuracy(name=\"accuracy\"),\n keras.metrics.Precision(name=\"precision\"),\n keras.metrics.Recall(name=\"recall\"),\n 
keras.metrics.AUC(name=\"auc\")]\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-5,decay=1.0e-6),\n metrics=METRICS)\n\n#model.load_weights(\"best_model_with_resnet_checkpoint.hdf5\")\n\n##################################\n## CALLBACKS #####################\n##################################\n\nearlystopping = EarlyStopping(monitor='val_loss', \n min_delta = 0, \n patience = 5, \n verbose = 1, \n mode = 'auto', \n baseline = None, \n restore_best_weights = False)\n\ncheck_point_file = \"checkpoint_\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\") + \".hdf5\"\ncheckpoint = ModelCheckpoint(check_point_file,\n monitor = 'loss',\n verbose = 0,\n save_best_only = True,\n mode = 'auto',\n save_freq = 'epoch')\n\ncsvlogger = CSVLogger(\"training.log\")\n\n#wandbcallback = WandbCallback(save_model=False,\n# monitor=\"val_loss\",\n# mode='auto',\n# data_type=None,\n# validation_data=None,\n# predictions=8,\n# generator=None)\n\nlog_dir=\"logs/fit/\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard = TensorBoard(log_dir=log_dir, \n histogram_freq = 1,\n write_graph = True,\n write_images = False,\n update_freq = 'epoch')\n\ncallbacks = [checkpoint,csvlogger,tensorboard]#wandbcallback]\n\n##################################\n## TRAINING ######################\n##################################\n\ndirectory = \"\"\nx_col = \"image_name\"\ny_col = \"tags\"\n\nkeras.backend.clear_session() # For easy reset of notebook state.\n\n# Train!\ntrain_model = model.fit_generator(\n datagen_train.flow_from_dataframe(train,\n directory=directory,\n x_col=x_col, \n y_col=y_col, \n weight_col=None, \n target_size=(IMG_ROWS, IMG_COLS), \n color_mode='rgb', \n classes=None, \n class_mode='categorical', \n batch_size=BATCH_SIZE, \n shuffle=True, \n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='jpg',\n subset=None,\n interpolation='nearest',\n validate_filenames=False),\n validation_data=datagen_val.flow_from_dataframe(test,\n directory=directory,\n x_col=x_col, \n y_col=y_col, \n weight_col=None, \n target_size=(IMG_ROWS, IMG_COLS), \n color_mode='rgb', \n classes=None, \n class_mode='categorical', \n batch_size=BATCH_SIZE, \n shuffle=True, \n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='jpg',\n subset=None,\n interpolation='nearest',\n validate_filenames=False),\n epochs = NO_EPOCHS,\n steps_per_epoch = train.size // BATCH_SIZE,\n validation_steps = test.size // BATCH_SIZE,\n callbacks = callbacks)\n\nmodel.save_weights(\"last_weights.hdf5\",overwrite=True,save_format=\"h5\")\n","repo_name":"masgro/DeepVision","sub_path":"trabajo_final.py","file_name":"trabajo_final.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38594189858","text":"import os\nfrom setuptools import setup, find_packages\n\nREADME = 'PYPI_README.md'\nPACKAGENAME = 'YOUR_PROJECT_NAME'\nMAIN_ENTRYPOINT = \"YOUR_PROJECT_ENTRYPOINT\"\n\n# __version__ = x.x.x\nwith open(os.path.join(os.path.dirname(__file__),\n PACKAGENAME,\n 'version.py')) as f:\n exec(f.read())\n\n\nwith open('requirements.txt') as f:\n # support hash required requirements as well\n requirements = [req.split(\" --hash=sha256\")[0] for req in f.readlines()]\n\n\ndef long_desc():\n with open(README) as f:\n return f.read()\n\nsetup(\n name=PACKAGENAME,\n version=__version__,\n description='Sample package description',\n license='MIT',\n long_description=long_desc(),\n 
long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n test_suite='tests',\n install_requires=requirements,\n setup_requires=[\"setuptools\"],\n entry_points={\n 'console_scripts': ['{}={}.main:main'.format(MAIN_ENTRYPOINT,\n PACKAGENAME)]\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ]\n)\n","repo_name":"accssharma/pypi-bootstrap","sub_path":"pypi-project/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4805266643","text":"import os\nimport pandas as pd\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\nclass LongtermFunction:\n def __init__(self, Target_Path , Start_Day , End_Day , Insample_Length_Month , Outsample_Length_Month):\n\n self.Target_Path = Target_Path\n self.Start_Day = Start_Day\n self.End_Day = End_Day\n self.Insample_Length_Month = Insample_Length_Month\n self.Outsample_Length_Month = Outsample_Length_Month\n\n def CutTime(self, Start_Day, End_Day):\n\n Insample_Length_Month = self.Insample_Length_Month\n Outsample_Length_Month = self.Outsample_Length_Month\n All = []\n\n # datetime\n Start_Day = datetime.datetime.strptime(Start_Day, '%Y-%m-%d').date()\n End_Day = datetime.datetime.strptime(End_Day, '%Y-%m-%d').date()\n\n # 初始 InSample Time\n InSampleStart = Start_Day\n InSampleEnd = InSampleStart\n InSampleEnd += relativedelta(months=+Insample_Length_Month, days=-1)\n InSampleStartNum = 1\n InSampleEndNum = Insample_Length_Month\n\n OutSampleStart = InSampleEnd\n OutSampleStart += relativedelta(days=1)\n OutSampleEnd = OutSampleStart\n OutSampleEnd += relativedelta(months=+(Outsample_Length_Month), days=-1)\n OutSampleStartNum = InSampleEndNum + 1\n OutSampleEndNum = InSampleEndNum + Outsample_Length_Month\n\n while (True):\n\n InSamplePeriod = []\n OutSamplePeriod = []\n InSampleAddMonth = round((Insample_Length_Month/(Insample_Length_Month+Outsample_Length_Month))*Outsample_Length_Month)\n OutSampleAddMonth = Outsample_Length_Month-InSampleAddMonth\n\n if (OutSampleEnd >= End_Day):\n diffDay = int((OutSampleEnd - End_Day).days)\n OutSampleEndNum -= (round(diffDay / 30.4))\n\n InSamplePeriod.append(str(InSampleStart))\n InSamplePeriod.append(str(InSampleEnd))\n InSamplePeriod.append(InSampleEndNum - InSampleStartNum + 1)\n\n OutSamplePeriod.append(str(OutSampleStart))\n OutSamplePeriod.append(str(End_Day))\n OutSamplePeriod.append(OutSampleEndNum - OutSampleStartNum + 1)\n\n Temp = [InSamplePeriod, OutSamplePeriod]\n All.append(Temp)\n break\n\n else:\n InSamplePeriod.append(str(InSampleStart))\n InSamplePeriod.append(str(InSampleEnd))\n InSamplePeriod.append(InSampleEndNum - InSampleStartNum + 1)\n\n OutSamplePeriod.append(str(OutSampleStart))\n OutSamplePeriod.append(str(OutSampleEnd))\n OutSamplePeriod.append(OutSampleEndNum - OutSampleStartNum + 1)\n\n Temp = [InSamplePeriod, OutSamplePeriod]\n All.append(Temp)\n\n InSampleEnd += relativedelta(months=+InSampleAddMonth)\n InSampleEndNum += InSampleAddMonth\n OutSampleStart += relativedelta(months=+InSampleAddMonth)\n OutSampleStartNum += InSampleAddMonth\n OutSampleEnd += relativedelta(months=+(Outsample_Length_Month))\n OutSampleEndNum += 
(Outsample_Length_Month)\n\n return All\n\n def StartDayChange(self, Window_Start_Day):\n\n Target_Path = self.Target_Path\n Day = datetime.datetime.strptime(Window_Start_Day, '%Y-%m-%d').date()\n File = pd.read_csv(os.path.join(Target_Path, os.listdir(Target_Path)[0]), engine='python')\n File['Day'] = pd.to_datetime(File['Day'], format='%Y/%m/%d')\n if (len(File[File['Day'] == Day]) == 1):\n index = File[File['Day'] == Day].index.tolist()[0]\n else:\n Day += relativedelta(days=1)\n return self.StartDayChange(str(Day))\n return index\n\n def EndDayChange(self, Window_End_Day):\n Target_Path = self.Target_Path\n Day = datetime.datetime.strptime(Window_End_Day, '%Y-%m-%d').date()\n File = pd.read_csv(os.path.join(Target_Path, os.listdir(Target_Path)[0]), engine='python')\n File['Day'] = pd.to_datetime(File['Day'], format='%Y/%m/%d')\n if (len(File[File['Day'] == Day]) == 1):\n index = File[File['Day'] == Day].index.tolist()[0]\n else:\n Day -= relativedelta(days=1)\n return self.EndDayChange(str(Day))\n return index","repo_name":"amosricky/StrategyAnalysisPlatform","sub_path":"LongtermModule/LongtermFunction.py","file_name":"LongtermFunction.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6050481131","text":"import re\n\ncedict = {}\nwith open('sources/cedict_ts.u8', 'r') as f:\n i = 0\n for line in f:\n if line.startswith('#'):\n continue\n z = re.match(r\"(\\S*) (\\S*) \\[(.*)\\] \\/(.*)\\/\", line)\n print(z.groups())\n if i > 100:\n break\n i+=1\n\n\n","repo_name":"rememberberry/rememberberry-anki","sub_path":"corpus/generate_cedict.py","file_name":"generate_cedict.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16482408794","text":"import threading\nimport time\nimport cv2\n\nclass NicoCameras:\n\n def __init__(self):\n self.ids = [0,2]\n self.frames = {}\n self.fpss = {}\n for id in self.ids:\n self.frames[id] = None\n self.fpss[id] = 0\n print('starting camera threads')\n self.threads = []\n for i in range(len(self.ids)):\n thread = threading.Thread(name=\"camera\"+str(i), target=self.grabbing, args=(self.ids[i],))\n thread.start()\n self.threads.append(thread)\n self.stopped = False\n\n def grabbing(self,id):\n print(f'grabbing thread {id} started')\n camera = cv2.VideoCapture(id,cv2.CAP_DSHOW)\n fps = 30 \n camera.set(cv2.CAP_PROP_FPS,fps)\n fps = 0\n t0 = time.time()\n while True:\n hasFrame, self.frames[id] = camera.read()\n if not hasFrame or self.stopped:\n break\n t1 = time.time()\n if int(t1) != int(t0):\n self.fpss[id] = fps\n fps = 0\n t0 = t1\n fps += 1\n cv2.waitKey(1)\n\n def read(self):\n return ( self.frames[id] for id in self.ids )\n\n def fps(self):\n return ( self.fpss[id] for id in self.ids )\n \n def close(self):\n self.stopped = True\n ","repo_name":"andylucny/nico","sub_path":"nicogui/nicocameras.py","file_name":"nicocameras.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15274076828","text":"from Internship_Project_Ctrl import MainWinCtrl \nfrom PySide6.QtWidgets import QApplication, QCheckBox, QComboBox, QDateTimeEdit, QDialogButtonBox, QGroupBox, QLabel, QMainWindow, QRadioButton, QTimeEdit\nfrom PySide6.QtWidgets import QGridLayout, QPushButton, QTabWidget, QVBoxLayout, QWidget, QLineEdit, QPlainTextEdit\nfrom PySide6.QtCore import QDate, Qt 
\nimport sys\n\n\nclass myPlainTextEdit(QPlainTextEdit):\n def __init__(self, parent):\n super().__init__(parent)\n def keyPressEvent(self, e):\n '''This class is designed considering the limitations of merged images. '''\n\n if len(self.toPlainText()) < 100 and str(self.toPlainText()).count('\\n') < 3 :\n if str(self.toPlainText()).count('\\n') == 2 and e.key() == 16777220: # 16777220 is Numeric representaion of Qt.Key_Enter.\n return None\n else:\n return super().keyPressEvent(e)\n # print(str(self.toPlainText()).count('\\n'))\n else:\n if e.key() in [Qt.Key_Delete, Qt.Key_Backspace, Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right, \n Qt.Key_PageDown, Qt.Key_PageDown, Qt.Key_Home, Qt.Key_End]:\n return super().keyPressEvent(e)\n\nclass MainWin(QMainWindow): \n def __init__(self): \n super().__init__() \n #----------------------------General Setup Start--------------------------#\n self.setWindowTitle('New Photo Editor') \n self.setFixedSize(440, 460) \n self.centralWidget = QWidget(self) \n self.setCentralWidget(self.centralWidget) \n \n self.general_Layout = QVBoxLayout(parent = self) \n self.innerlayout_1 = QGridLayout() \n self.innerlayout_1.setAlignment(Qt.AlignTop) \n self.colorNames_List = ['Alice Blue', 'Antique White', 'Aqua', 'Aqua Marine', 'Azure', 'Beige', 'Bisque', 'Black', 'Blanched Almond', 'Blue', 'Blue Violet', 'Brown', 'Burlywood', 'Cadet Blue'\n , 'Chartreuse', 'Chocolate', 'Coral', 'Cornflower Blue', 'Cornsilk', 'Crimson', 'Cyan', 'Dark Blue', 'Dark Cyan', 'Dark Goldenrod', 'Dark Gray', 'Dark Green', 'Dark Khaki'\n , 'Dark Magenta', 'Dark Olive Green', 'Dark Orange', 'Dark Orchid', 'Dark Red', 'Dark Salmon', 'Dark Sea Green', 'Dark Slate Blue', 'Dark Slate Gray', 'Dark Turquoise', 'Dark Violet'\n , 'Deep Pink', 'Deep Sky Blue', 'Dim Gray', 'Dodger Blue', 'Fire Brick', 'Floral White', 'Forest Green', 'Fuchsia', 'Gainsboro', 'Ghost White', 'Gold', 'Goldenrod', 'gray'\n , 'Green', 'Green Yellow', 'Honeydew', 'Hot Pink', 'Indian Red', 'Indigo', 'Ivory', 'Khaki', 'Lavender', 'Lavender Blush', 'Lawn Green', 'Lemon Chiffon', 'Light Blue', 'Light Coral'\n , 'Light Cyan', 'Light Goldenrod Yellow', 'Light Green', 'Light Gray', 'Light Pink', 'Light Salmon', 'Light Sea Green', 'Light Sky Blue', 'Light Slate Gray', 'Light Steel Blue'\n , 'Light Yellow', 'Lime', 'Lime Green', 'Linen', 'Magenta', 'Maroon', 'Medium Aquamarine', 'Medium Blue', 'Medium Orchid', 'Medium Purple', 'Medium Sea Green', 'Medium Slate Blue'\n , 'Medium Spring Green', 'Medium Turquoise', 'Medium Violet Red', 'Midnight Blue', 'Mint Cream', 'Misty Rose', 'Moccasin', 'Navajo White', 'Navy', 'Old Lace', 'Olive', 'Olive Drab'\n , 'Orange', 'Orange Red', 'Orchid', 'Pale Goldenrod', 'Pale Green', 'Pale Turquoise', 'Pale Violet Red', 'Papayawhip', 'Peach Puff', 'Peru', 'Pink', 'Plum', 'Powder Blue', 'Purple'\n , 'Rebecca Purple', 'Red', 'Rosy Brown', 'Royal Blue', 'Saddle Brown', 'Salmon', 'Sandy Brown', 'Sea Green', 'Seashell', 'Sienna', 'Silver', 'Sky Blue', 'Slate Blue', 'Slate Gray', 'Snow' \n , 'Spring Green', 'Steel Blue', 'Tan', 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White', 'White Smoke', 'Yellow', 'Yellow Green']\n #----------------------------Mutual Setup Start----------------------------#\n self.addSourceWidgets() \n self.addDestinationWidgets() \n self.addFileWidgets() \n self.general_Layout.addLayout(self.innerlayout_1)\n\n self.createTabsWidgets() #-----> Creating Tabs\n\n #----------------------------Merge Tab Start-------------------------------#\n self.mergeTabMain_Layout = 
QVBoxLayout(parent=self.mergeTab)\n self.mergeTabMain_Layout.setAlignment(Qt.AlignTop)\n self.addMergeCalendar()\n self.addMergeIntervalOptions()\n self.mergeTab.setLayout(self.mergeTabMain_Layout)\n\n #----------------------------Annotate Tab Start----------------------------#\n self.annotateTabMain_Layout = QVBoxLayout(self.annotateTab)\n self.addAnnotateTimeWidgets()\n self.addAnnotateCommentWidget()\n self.annotateTab.setLayout(self.annotateTabMain_Layout)\n \n #----------------------------Close Button Start----------------------------#\n self.closeButton = QDialogButtonBox(QDialogButtonBox.Close, parent = self)\n self.general_Layout.addWidget(self.closeButton)\n\n\n self.centralWidget.setLayout(self.general_Layout)\n\n #-------------------------------Display and Label Functions Start-----------------------------#\n def addSourceWidgets(self): \n self.sourceDisplay_Label = QLabel(parent = self.centralWidget, text = 'Source Path:') \n self.innerlayout_1.addWidget(self.sourceDisplay_Label, 0, 0) # --> Label \n \n self.sourceDisplay = QLineEdit(parent = self.centralWidget) \n self.sourceDisplay.setAlignment(Qt.AlignCenter) \n self.sourceDisplay.setReadOnly(True) \n self.innerlayout_1.addWidget(self.sourceDisplay, 1, 0, 1, 5) # --> Display \n \n self.sourceBrowseButton = QPushButton(parent = self.centralWidget, text= 'Browse') \n self.innerlayout_1.addWidget(self.sourceBrowseButton, 1, 5, 1, 1) # --> Button \n \n def addDestinationWidgets(self): \n self.destinationDisplay_Label = QLabel(parent = self.centralWidget, text = 'Destination Path:') \n self.innerlayout_1.addWidget(self.destinationDisplay_Label, 2, 0) # --> Destination Label \n \n self.destinationDisplay = QLineEdit(parent = self.centralWidget) \n self.destinationDisplay.setAlignment(Qt.AlignCenter) \n self.destinationDisplay.setReadOnly(True) \n self.innerlayout_1.addWidget(self.destinationDisplay, 3, 0, 1, 5 )# --> Destination Display \n \n self.destinationBrowseButton = QPushButton(parent = self.centralWidget, text= 'Browse') \n self.innerlayout_1.addWidget(self.destinationBrowseButton, 3, 5, 1, 1) # --> Destination Browse Button \n \n def addFileWidgets(self): \n self.fileName_Label = QLabel(parent = self.centralWidget, text ='File Name:') \n self.innerlayout_1.addWidget(self.fileName_Label, 4, 0, 1, 3) # --> File Name Label \n \n self.fileType_Label = QLabel(parent = self.centralWidget, text ='File Type:') \n self.innerlayout_1.addWidget(self.fileType_Label, 4, 3, 1, 3) # --> File Type Label \n \n self.fileName = QLineEdit(parent = self.centralWidget) \n self.fileName.setMaxLength(30)\n self.fileName.setPlaceholderText('Enter the File Name') \n self.fileName.setAlignment(Qt.AlignCenter) \n self.innerlayout_1.addWidget(self.fileName, 5, 0, 1, 3) # --> File Name Widget \n \n self.fileType = QComboBox(parent = self.centralWidget) \n self.fileType.addItems(['JPEG', 'PNG', 'BMP']) \n self.innerlayout_1.addWidget(self.fileType, 5, 3, 1, 3) # # --> File Type Widget \n \n #-----------------------------------------TABS Start------------------------------------------#\n def createTabsWidgets(self):\n self.tabs = QTabWidget(self.centralWidget)\n self.mergeTab = QWidget()\n self.annotateTab = QWidget()\n self.tabs.addTab(self.mergeTab, 'Merge')\n self.tabs.addTab(self.annotateTab, 'Annotate')\n self.general_Layout.addWidget(self.tabs)\n \n #-------------------------------Merge Tab Functions Start---------------------------#\n def addMergeCalendar(self):\n self.recordStartTime_Label = QLabel(parent = self.mergeTab , text = 'Record Start 
Time:')\n self.mergeTabMain_Layout.addWidget(self.recordStartTime_Label) # --> Calendar Label\n\n #---------------------------Calendar Start---------------------------------#\n self.recordStartTime = QDateTimeEdit(QDate.currentDate(), parent = self.mergeTab)\n self.recordStartTime.setMaximumDate(QDate.currentDate().addDays(365))\n self.recordStartTime.setMinimumDate(QDate.currentDate().addDays(-365))\n self.recordStartTime.setCalendarPopup(True)\n self.recordStartTime.setDisplayFormat('yyyy/MM/dd HH:mm')\n self.recordStartTime.setAlignment(Qt.AlignCenter)\n self.mergeTabMain_Layout.addWidget(self.recordStartTime)\n #---------------------------Calendar End---------------------------------#\n \n def addMergeIntervalOptions(self):\n #---------------------------Display Interval Start-------------------------#\n self.dateTimeDiplayInterval = QGroupBox(parent= self.mergeTab, title = 'Display Interval:')\n self.thirtyMin = QRadioButton(parent = self.dateTimeDiplayInterval, text = '30 Minutes')\n self.oneHour = QRadioButton(parent = self.dateTimeDiplayInterval, text = '1 Hour')\n self.ninetyMin = QRadioButton(parent = self.dateTimeDiplayInterval, text = '90 Minutes')\n self.twoHours = QRadioButton(parent = self.dateTimeDiplayInterval, text = '2 Hours')\n self.thirtyMin.setChecked(True)\n\n self.dateTimeDiplayInterval_Layout = QVBoxLayout(self.dateTimeDiplayInterval)\n self.dateTimeDiplayInterval_Layout.addWidget(self.thirtyMin)\n self.dateTimeDiplayInterval_Layout.addWidget(self.oneHour)\n self.dateTimeDiplayInterval_Layout.addWidget(self.ninetyMin)\n self.dateTimeDiplayInterval_Layout.addWidget(self.twoHours)\n\n self.dateTimeDiplayInterval.setLayout(self.dateTimeDiplayInterval_Layout)\n self.mergeTabMain_Layout.addWidget(self.dateTimeDiplayInterval)\n #---------------------------Display Interval End-------------------------#\n\n self.mergeTabMain_Layout.addSpacing(17) # --> Spacing for Aesthtic Reasons. 
\n # If the Window size changes the command above must be adjusted accordingly as well.\n \n #---------------------------Merge Button Start-----------------------------#\n self.mergeButton = QPushButton(parent = self.mergeTab, text = 'Merge')\n self.mergeTabMain_Layout.addWidget(self.mergeButton, alignment = Qt.AlignBottom)\n #---------------------------Merge Button Start---------------------------#\n\n #-------------------------------Annotate Tab Functions Start------------------------#\n def addAnnotateTimeWidgets(self):\n self.annotateTab_Layout = QGridLayout()\n self.annotateTab_Layout.setAlignment(Qt.AlignTop)\n\n #---------------------------Start Time Start-------------------------------#\n self.startTime_Label = QLabel(parent = self.annotateTab, text = 'Start Time:')\n self.annotateTab_Layout.addWidget(self.startTime_Label, 0, 0, 1, 1)\n\n # self.color_Label = QLabel(parent = self.annotateTab, text = 'Color:')\n # self.annotateTab_Layout.addWidget(self.color_Label, 0, 5, 1, 1)\n\n self.startTime = QTimeEdit(parent = self.annotateTab)\n self.startTime.setDisplayFormat('HH:mm:ss')\n self.startTime.setAlignment(Qt.AlignCenter)\n self.annotateTab_Layout.addWidget(self.startTime, 0, 1, 1, 3)\n\n #-----------------------------Color Start----------------------------------#\n self.color = QComboBox(parent = self.annotateTab)\n self.color.addItems(self.colorNames_List)\n self.annotateTab_Layout.addWidget(self.color, 0, 4, 1, 2)\n \n #---------------------------End Time or Time Length Start------------------#\n self.endTimeOrTimeLength_Label = QLabel(parent = self.annotateTab, text = 'Finish Time: ')\n self.annotateTab_Layout.addWidget(self.endTimeOrTimeLength_Label, 1, 0, 1, 1)\n\n self.endTimeOrTimeLength = QTimeEdit(parent = self.annotateTab)\n self.endTimeOrTimeLength.setDisplayFormat('HH:mm:ss')\n self.endTimeOrTimeLength.setAlignment(Qt.AlignCenter)\n self.annotateTab_Layout.addWidget(self.endTimeOrTimeLength, 1, 1, 1, 3)\n\n # self.endTimeRadio = QRadioButton(parent = self.annotateTab)\n # self.endTimeRadio.setChecked(True)\n # self.annotateTab_Layout.addWidget(self.endTimeRadio, 1, 5, 1, 1, Qt.AlignRight)\n \n self.endTimeOrTimeLengthCheck = QCheckBox(parent = self.annotateTab, text = 'Finish Time/Time Length')\n self.annotateTab_Layout.addWidget(self.endTimeOrTimeLengthCheck, 1, 4, 1, 2, Qt.AlignRight)\n\n #---------------------------UNUSED RADIO BUTTONS Start---------------------#\n # self.timeLength_Label = QLabel(self.annotateTab, text = 'Time Length:')\n # self.annotateTab_Layout.addWidget(self.timeLength_Label, 2, 0, 1, 1)\n\n # readOnlyPalette = QtGui.QPalette()\n # readOnlyPalette.setColor(QtGui.QPalette.Text, Qt.darkGray)\n\n # self.timeLength = QTimeEdit(parent = self.annotateTab)\n # self.timeLength.setDisplayFormat('HH:mm:ss')\n # self.timeLength.setAlignment(Qt.AlignCenter)\n # self.timeLength.setReadOnly(True)\n # self.timeLength.setPalette(readOnlyPalette)\n # self.annotateTab_Layout.addWidget(self.timeLength, 2, 1, 1, 4)\n\n # self.timeLengthRadio = QRadioButton(parent=self.annotateTab)\n # self.annotateTab_Layout.addWidget(self.timeLengthRadio, 2, 5, 1, 1, Qt.AlignRight)\n\n def addAnnotateCommentWidget(self):\n #-----------------------------Comment Start--------------------------------#\n # self.comment_Label = QLabel(parent = self.annotateTab, text = 'Comment:')\n # self.annotateTab_Layout.addWidget(self.comment_Label, 2, 0, 1, 1)\n self.comment = myPlainTextEdit(parent = self.annotateTab)\n self.comment.setUndoRedoEnabled(True)\n 
self.comment.setPlaceholderText('Enter Comment')\n self.annotateTab_Layout.addWidget(self.comment, 2, 0, 1, 6)\n\n self.characterCount_Label = QLabel(parent = self.annotateTab, text = '0/100')\n self.annotateTab_Layout.addWidget(self.characterCount_Label, 3, 5, 1, 1, Qt.AlignRight)\n\n #-----------------------------Buttons Start------------------------------# \n self.addButton = QPushButton(parent =self.annotateTab, text = '+Add')\n self.annotateTab_Layout.addWidget(self.addButton, 4, 0, 1, 3)\n self.annotateButton = QPushButton(parent = self.annotateTab, text = 'Annotate')\n self.annotateTab_Layout.addWidget(self.annotateButton, 4, 3, 1, 3)\n\n self.annotateTabMain_Layout.addLayout(self.annotateTab_Layout)\n\ndef main():\n app = QApplication()\n GUI = MainWin()\n ctrl = MainWinCtrl(GUI)\n GUI.show()\n sys.exit(app.exec())\n \nif __name__ == '__main__':\n main()","repo_name":"Ata-Shaker/Internship_Project","sub_path":"Internship_Project_Main.py","file_name":"Internship_Project_Main.py","file_ext":"py","file_size_in_byte":14936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41523871552","text":"import os\nimport time\nimport subprocess\nimport multiprocessing as mp\n\ndef epoch2str(epoch):\n return time.strftime('%H:%M:%S', time.localtime(epoch))\n\ndef load(period, q):\n count = 0\n last_bucket = int(time.time()) // period\n\n try:\n while True:\n count += 1\n bucket = int(time.time()) // period\n if bucket > last_bucket:\n q.put((bucket*period, count))\n count = 0\n last_bucket = bucket\n except KeyboardInterrupt:\n pass\n\ndef format_vals(vals):\n return '%d(%s)' % (sum(vals), '+'.join(str(x) for x in vals))\n\ndef get_pcpu(pids):\n pids_arg = ','.join(str(x) for x in pids)\n cmd = 'ps -o pcpu --no-headers --pid %s' % pids_arg\n output = subprocess.check_output(cmd.split(), universal_newlines=True)\n\n pcpu_list = [int(float(x)) for x in output.splitlines()]\n return 'pcpu ' + format_vals(pcpu_list)\n\nexec_times = dict()\n\ndef get_sched_info(pids):\n pids = [str(x) for x in pids]\n exec_deltas = []\n with open('/proc/sched_debug') as f:\n for line in f:\n fields = line.split()\n if len(fields) >= 9 and fields[2] in pids:\n pid = fields[2]\n exec_time = int(float(fields[7]))\n delta = exec_time - exec_times.get(pid, 0)\n exec_times[pid] = exec_time\n exec_deltas.append(delta)\n return 'exec_time ' + format_vals(exec_deltas)\n\ndef print_info(ts, counts, pids):\n info = [\n '[%s]' % epoch2str(ts),\n 'count %s' % format(sum(counts), ','),\n get_pcpu(pids),\n get_sched_info(pids),\n ]\n print(' '.join(info))\n\ndef gen_load(args):\n q = mp.Queue()\n pids = []\n for _ in range(args.nprocs):\n p = mp.Process(target=load, args=(args.period, q))\n p.start()\n pids.append(p.pid)\n\n last_ts = 0\n counts = []\n while True:\n (ts, count) = q.get()\n if ts != last_ts and len(counts) > 0:\n print(\"[%s] skipped\" % epoch2str(last_ts))\n last_ts = ts\n counts = []\n\n last_ts = ts\n counts.append(count)\n\n if len(counts) == args.nprocs:\n print_info(last_ts, counts, pids)\n counts = []\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--nprocs', type=int, default='1')\n parser.add_argument('--period', type=int, default='1')\n args = parser.parse_args()\n\n try:\n gen_load(args)\n except KeyboardInterrupt:\n 
pass\n","repo_name":"mechpen/misc-tools","sub_path":"load/cpu/genload.py","file_name":"genload.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22619963794","text":"from setuptools import setup\n\nfrom social_login import VERSION\n\npackages = [\n 'social_login',\n]\n\ninstall_requires = [\n 'socialoauth',\n]\n\n\nsetup(\n name='django-social-login',\n version = VERSION,\n license = 'BSD',\n description = 'A Django APP for Social account login via OAuth2 Service',\n long_description = open('README.txt').read(),\n author = 'Wang Chao',\n author_email = 'yueyoum@gmail.com',\n url = 'https://github.com/yueyoum/django-social-login',\n keywords = 'social, oauth, oauth2, django, login',\n packages = packages,\n install_requires = install_requires,\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'Topic :: Internet',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n)\n\n","repo_name":"yueyoum/django-social-login","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"53"} +{"seq_id":"11993829069","text":"################################################################################\n# BOTAI\n################################################################################\n# https://github.com/RafaelCartenet/BOTAI\n# This simulator predicts the efficiency of a given strategy. According to a\n# period of time, it will use the strategy policy in order to simulate bets,\n# compute the results, and gives statistics, in order to have an idea about the\n# efficiency of the model.\n\n\nclass Engine:\n\n def __init__(self, data, policy, stats):\n self.data = data\n self.policy = policy\n self.SPs = data.SPs\n self.dates = data.dates\n self.nbtimesteps = self.data.N\n self.stats = stats\n\n def result_action(self, action, timestep):\n action.isEqual = False\n if action.type == \"Put\":\n if self.SPs[timestep + 1] < self.SPs[timestep]:\n result = action.ratio*action.amount\n action.isRight = True\n elif self.SPs[timestep +1] == self.SPs[timestep]:\n result = 0\n action.isRight = True\n action.isEqual = True\n else:\n result = -action.amount\n action.isRight = False\n elif action.type == \"Call\":\n if self.SPs[timestep + 1] > self.SPs[timestep]:\n result = action.ratio*action.amount\n action.isRight = True\n elif self.SPs[timestep + 1] == self.SPs[timestep]:\n result = 0\n action.isRight = True\n action.isEqual = True\n else:\n result = -action.amount\n action.isRight = False\n else:\n result = 0\n return result\n\n def run(self):\n for ts in range(self.nbtimesteps-1):\n seenSPs = self.SPs[:ts]\n seendates = self.dates[:ts]\n\n action = self.policy.ChooseAction(seenSPs, ts)\n\n result = self.result_action(action, ts)\n\n self.stats.update(ts, action, result)\n","repo_name":"RafaelCartenet/BOTAI","sub_path":"Simulator/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"29972290629","text":"# Script calculates the Kullback Liebler divergence value for all\n# daily hashtag counts files in the given directory.\n# It takes as input a directory containing daily hashtag counts files\n# and a specified output file name. 
It then calculates the kl value for\n# each hashtag and then write the results as space separated txt file.\n\nimport lib.an_util as util\nimport sys\nfrom os import listdir\nfrom os.path import join\nimport math\nimport numpy as np\n\nMSG = (\"Calculate the Kullback-Liebler divergence \"\n \"for all hashtags in given directory \"\n \"containing files of daily counts.\\n\"\n \"usage: {} hashtag_directory output_file_name\").format(\n sys.argv[0])\n\ndef cal_kl(directory, ht_file, ht_kl):\n \"\"\"kl is the expectation of the log difference between\n probabilities p and q.\n The formula for calculating kl is:\n kl = sum[ p(i) * ln(p(i)/q(i)) ]\n where p(i) is the normalized counts of i and q(i) is the\n uniform counts\"\"\"\n\n ht_set, ht_dict = util.load_name_val_pairs(\n\tjoin(directory, ht_file))\n days = len(ht_dict)\n kl = 0\n uniform = float(1)/days\n total_count = 0\n for ht in ht_dict:\n total_count += ht_dict[ht]\n for ht in ht_dict:\n norm = float(ht_dict[ht])/total_count\n if norm == 0:\n continue # log(0) is undef, set to 0\n kl += norm * math.log(norm / uniform)\n ht_kl[ht_file] = kl\n\nif __name__ == '__main__':\n directory, out_fn = util.check_input(sys.argv, 2, MSG)\n\n ht_kl = {}\n for ht_file in listdir(directory):\n cal_kl(directory, ht_file, ht_kl)\n sorted_tup_ls = util.sort_dict(ht_kl)\n util.write_tup_ls(sorted_tup_ls, out_fn)\n","repo_name":"hanxlin/twitter-research","sub_path":"cal_kl.py","file_name":"cal_kl.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29979131339","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, render_to_response\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext, loader\nfrom frontend.models import Subscribe\n\n\ndef index(request):\n # return HttpResponse(\"Hello, world. You're at the polls index.\")\n context = RequestContext(request, {\n })\n response = render_to_response(\n \"frontend/index.html\", {}, context)\n return response\n\n\ndef subscribe(request):\n # return HttpResponse(\"Hello, world. You're at the polls index.\")\n message = {\"id\": None, \"message\": \"\"}\n if request.GET:\n name = request.GET[\"name\"] if \"name\" in request.GET else None\n email = request.GET[\"email\"] if \"email\" in request.GET else None\n status = Subscribe(name=name, email=email)\n status.save()\n pk = status.id\n message = {\n \"id\": pk, \"message\": \"Thank you for subscribing to the newsletter. 
Have a nice day!\"}\n print(message)\n #context = RequestContext(request, {\"customer_message\": message})\n context = {\"customer_message\": message}\n response = render_to_response(\n \"frontend/index.html\", context=context)\n return response\n","repo_name":"pawank/analytixjobs","sub_path":"analytixworld/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20019608093","text":"import asyncio\nimport itertools\nfrom typing import Any, Dict, List\n\nimport pytest\n\nfrom application import mongo\n\n\nclass MockedBackend:\n def __init__(self, books: List[Dict[str, Any]]):\n self.books = books\n self.authors = set()\n self.genres = set()\n\n async def get_total_number_of_books(self, authors, genres, published_year):\n await asyncio.sleep(0.1) # Just to make the function an async function\n return len(self._filter_books(authors, genres, published_year))\n\n def _filter_books(self, authors, genres, published_year):\n if authors is None and genres is None and published_year is None:\n return self.books\n books = []\n if authors is not None:\n for book in self.books:\n if book[\"author\"] in authors:\n books.append(book)\n if genres is not None:\n book_genres = set(book[\"genres\"])\n if book_genres.intersection(set(genres)):\n books.append(book)\n if published_year is not None and book[\"published_year\"] == published_year:\n books.append(book)\n return books\n\n async def get_all_books(\n self,\n skips,\n number_of_documents,\n authors=None,\n genres=None,\n published_year=None,\n ):\n await asyncio.sleep(0.1) # Just to make the function an async function\n books = self._filter_books(authors, genres, published_year)\n return books[skips : skips + number_of_documents]\n\n async def get_all_authors(self):\n await asyncio.sleep(0.1) # Just to make the function an async function\n all_authors = set([book[\"author\"] for book in self.books])\n return [{\"name\": author} for author in sorted(all_authors)]\n\n async def get_all_genres(self):\n await asyncio.sleep(0.1) # Just to make the function an async function\n all_genres = sorted(\n list(\n set(\n itertools.chain.from_iterable(\n [book[\"genres\"] for book in self.books]\n )\n )\n )\n )\n return [{\"name\": genre} for genre in all_genres]\n\n async def get_single_book_by_id(self, book_id: str):\n await asyncio.sleep(0.1) # Just to make the function an async function\n for book in self.books:\n if book[\"book_id\"] == book_id:\n return book\n\n async def update_one_book(self, book_id: str, data: Dict[str, Any]):\n await asyncio.sleep(0.1) # Just to make the function an async function\n book_to_change = None\n for book in self.books:\n if book[\"book_id\"] == book_id:\n book_to_change = book\n book_to_change.update(data)\n return book_to_change\n\n @staticmethod\n async def delete_one_book(book_id: str):\n await asyncio.sleep(0.1) # Just to make the function an async function\n\n async def insert_authors_in_db(self, author: str):\n await asyncio.sleep(0.1) # Just to make the function an async function\n self.authors.add(author)\n\n async def get_single_book_by_name(self, name: str):\n await asyncio.sleep(0.1) # Just to make the function an async function\n for book in self.books:\n if book[\"name\"] == name:\n return book\n\n async def insert_genres_in_db(self, genres: List[str]):\n await asyncio.sleep(0.1) # Just to make the function an async function\n for genre in genres:\n self.genres.add(genre)\n\n 
async def insert_one_book(self, data: Dict[str, Any]):\n await asyncio.sleep(0.1) # Just to make the function an async function\n name = data.get(\"name\")\n for book in self.books:\n if name == book.get(\"name\"):\n raise mongo.BookExistsException()\n self.authors.add(data.get(\"author\"))\n for genre in data.get(\"genres\"):\n self.genres.add(genre)\n self.books.append(data)\n\n\n@pytest.fixture\ndef backend():\n def _backend(books):\n return MockedBackend(books=books)\n\n return _backend\n","repo_name":"subhayan-bhattacharya/mongo-based-book-store-fastapi","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74582572007","text":"import pytest\n\nfrom app.application import App\nfrom app.workers.match_check.worker import MatchCheckWorker\n\nfrom sage_utils.amqp import AmqpExtension\n\n\n@pytest.fixture(scope=\"function\")\ndef test_app(event_loop):\n app = App()\n app.amqp = AmqpExtension(app)\n app.amqp.register_worker(MatchCheckWorker(app))\n\n app.loop = event_loop\n app.init_workers(event_loop)\n yield app\n app.deinit_workers(event_loop)\n app.loop = None\n","repo_name":"OpenMatchmaking/microservice-strategist","sub_path":"strategist/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31076557146","text":"import logging\n\nfrom django.db.models import Count\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom novaclient.exceptions import NotFound\nfrom rest_framework import filters\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom fleio.core.drf import SuperUserOnly\nfrom fleio.core.exceptions import APIBadRequest\nfrom fleio.core.exceptions import ForbiddenException\nfrom fleio.core.features import staff_active_features\nfrom fleio.core.filters import CustomFilter\nfrom fleio.core.models import ClientGroup\nfrom fleio.core.permissions.permissions_cache import permissions_cache\nfrom fleio.openstack.api.identity import IdentityAdminApi\nfrom fleio.openstack.exceptions import handle\nfrom fleio.openstack.flavor import Flavors\nfrom fleio.openstack.instances.instance_status import InstanceStatus\nfrom fleio.openstack.models import FlavorGroup\nfrom fleio.openstack.models import Image\nfrom fleio.openstack.models import Instance\nfrom fleio.openstack.models import OpenstackInstanceFlavor\nfrom fleio.openstack.views.regions import get_regions\nfrom fleiostaff.core.clientgroups.serializers import ClientGroupsMinSerializer\nfrom fleiostaff.openstack.flavors.filters import OpenStackFlavorFilter\nfrom fleiostaff.openstack.flavors.serializers import FlavorSerializer\nfrom fleiostaff.openstack.flavors.serializers import FlavorUpdateSerializer\nfrom fleiostaff.openstack.signals import staff_delete_flavor\n\nLOG = logging.getLogger(__name__)\n\n\nclass AdminFlavorViewSet(viewsets.ModelViewSet):\n permission_classes = (SuperUserOnly,)\n serializer_class = FlavorSerializer\n filter_backends = (filters.OrderingFilter, DjangoFilterBackend, CustomFilter, filters.SearchFilter)\n search_fields = (\n 'name',\n 'description',\n 'memory_mb',\n 'root_gb',\n 'region__id',\n 'vcpus',\n 'flavor_group__name',\n )\n ordering_fields = ('name', 'id', 'vcpus', 
'memory_mb', 'root_gb', 'flavor_group')\n filter_class = OpenStackFlavorFilter\n\n def list(self, request, *args, **kwargs):\n response = super().list(request=request, *args, **kwargs)\n response.data['permissions'] = permissions_cache.get_view_permissions(request.user, self.basename)\n return response\n\n @property\n def identity_admin_api(self):\n if hasattr(self, 'request'):\n return IdentityAdminApi(request_session=self.request.session)\n else:\n return IdentityAdminApi()\n\n @staticmethod\n def update_or_create(key, count, dictionary):\n val = dictionary.get(key, None)\n if val:\n dictionary[key] = dictionary[key] + count\n else:\n dictionary[key] = count\n return dictionary\n\n def get_queryset(self):\n return OpenstackInstanceFlavor.objects.filter(deleted=False)\n\n def get_serializer_class(self):\n if self.action in ['update', 'partial_update']:\n return FlavorUpdateSerializer\n else:\n return FlavorSerializer\n\n def perform_update(self, serializer):\n if serializer.partial:\n serializer.save()\n else:\n preserve_id = serializer.validated_data.get('preserve_id', False)\n flavor_id = serializer.instance.id if preserve_id else None\n\n self.perform_destroy(serializer.instance)\n self.perform_create(serializer, region=serializer.instance.region.id, flavor_id=flavor_id)\n\n @action(detail=True, methods=['post'])\n def set_properties(self, request, pk):\n del pk # unused\n\n db_flavor = self.get_object()\n new_properties = request.data.get('new_properties', {})\n flavors_api = Flavors(api_session=self.identity_admin_api.session)\n flavor_api = flavors_api.get(flavor=db_flavor)\n flavor_api.set_properties(new_properties=new_properties)\n return Response({'detail': _('Properties set')})\n\n @action(detail=True, methods=['post'])\n def unset_property(self, request, pk):\n del pk # unused\n db_flavor = self.get_object()\n property_key = request.data.get('property_key', {})\n flavors_api = Flavors(api_session=self.identity_admin_api.session)\n flavor_api = flavors_api.get(flavor=db_flavor)\n try:\n flavor_api.unset_property(property_key=property_key)\n except NotFound:\n handle(self.request, message=_('Unable to remove flavor property'))\n return Response({'detail': _('Property unset')})\n\n @action(detail=False, methods=['get'])\n def create_options(self, request):\n selected_region, regions = get_regions(request)\n return Response({'regions': regions, 'selected_region': selected_region})\n\n def perform_create(self, serializer, region=None, flavor_id=None):\n try:\n flavor_api = Flavors(api_session=self.identity_admin_api.session)\n create_parameters = {\n 'name': serializer.validated_data['name'],\n 'ram': serializer.validated_data['memory_mb'],\n 'vcpus': serializer.validated_data['vcpus'],\n 'disk': serializer.validated_data['root_gb'],\n 'flavorid': serializer.validated_data.get('id', 'auto'),\n 'ephemeral': serializer.validated_data['ephemeral_gb'],\n 'swap': serializer.validated_data['swap'],\n 'is_public': serializer.validated_data['is_public'],\n 'region': region or serializer.validated_data['region'].id,\n }\n\n if flavor_id:\n create_parameters['flavorid'] = flavor_id\n\n flavor = flavor_api.create(**create_parameters)\n except Exception as e:\n LOG.error(e)\n handle(self.request, message=_('Unable to create the flavor'))\n else:\n serializer.save(id=flavor.id, rxtx_factor=flavor.rxtx_factor,\n disabled=getattr(flavor, \"OS-FLV-DISABLED:disabled\", False))\n\n def perform_destroy(self, db_flavor):\n \"\"\"Delete flavor from nova and mark as deleted in Fleio db.\"\"\"\n if 
staff_active_features.is_enabled('demo'):\n raise ForbiddenException(detail=_('Operation not allowed in demo mode'))\n\n flavor_api = Flavors(api_session=self.identity_admin_api.session)\n flavor = flavor_api.get(flavor=db_flavor)\n try:\n pk = db_flavor.id\n flavor.delete()\n user = self.request.user\n staff_delete_flavor.send(sender=__name__, user=user, user_id=user.id,\n flavor_name=db_flavor.name, flavor_id=pk,\n username=user.username, request=self.request)\n except Exception as e:\n LOG.error(e)\n handle(self.request, message=e)\n\n @staticmethod\n def get_used_flavors_summary_queryset(user):\n return Instance.objects.values('flavor').annotate(\n count=Count('id')\n ).exclude(\n status=InstanceStatus.DELETED\n ).exclude(\n terminated_at__isnull=False\n ).order_by()\n\n @action(detail=False, methods=['get'])\n def summary(self, request):\n del request # unused\n used_flavors = self.get_used_flavors_summary_queryset(user=self.request.user)\n flavors = OpenstackInstanceFlavor.objects.all()\n flavor_info = {}\n flavor_data = []\n flavor_labels = []\n for flavor in used_flavors:\n try:\n db_flavor = flavors.get(id=flavor['flavor'])\n flavor_info = self.update_or_create(db_flavor.name, flavor['count'], flavor_info)\n except OpenstackInstanceFlavor.DoesNotExist:\n flavor_info = self.update_or_create(flavor['flavor'], flavor['count'], flavor_info)\n\n for key, value in iter(flavor_info.items()):\n flavor_data.append(value)\n flavor_labels.append(key)\n return Response({'flavor_data': flavor_data, 'flavor_labels': flavor_labels})\n\n @action(detail=False, methods=['get'])\n def permissions(self, request):\n view_permissions = permissions_cache.get_view_permissions(request.user, self.basename)\n return Response(data=view_permissions)\n\n @action(detail=False, methods=['get'])\n def get_available_flavors_for_group(self, request):\n group_id = request.query_params.get('group')\n search = request.query_params.get('search')\n if not group_id:\n raise APIBadRequest(_('Missing group id to filter flavors against it'))\n try:\n FlavorGroup.objects.get(id=group_id)\n except FlavorGroup.DoesNotExist:\n raise APIBadRequest(_('No group to filter against'))\n queryset = OpenstackInstanceFlavor.objects.all()\n queryset = queryset.exclude(flavor_group__isnull=False)\n if search:\n queryset = queryset.filter(name__icontains=search)\n objects = FlavorSerializer(instance=queryset, many=True, read_only=True).data\n return Response({'objects': objects})\n\n @action(detail=False, methods=['get'])\n def get_flavors_in_group(self, request):\n group_id = request.query_params.get('group_id')\n if not group_id:\n raise APIBadRequest(_('Missing group id to filter flavors against it'))\n try:\n group = FlavorGroup.objects.get(id=group_id)\n except FlavorGroup.DoesNotExist:\n raise APIBadRequest(_('No group to filter against'))\n queryset = OpenstackInstanceFlavor.objects.filter(flavor_group=group)\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = FlavorSerializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = FlavorSerializer(queryset, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def add_to_group(self, request, pk):\n del pk # unused\n\n flavor = self.get_object() # type: OpenstackInstanceFlavor\n\n group_id = request.data.get('group_id')\n if not group_id:\n raise APIBadRequest(_('Missing group id to filter flavors against it'))\n try:\n group = FlavorGroup.objects.get(id=group_id)\n except 
FlavorGroup.DoesNotExist:\n raise APIBadRequest(_('No group to filter against'))\n\n flavor.flavor_group = group\n flavor.save()\n\n return Response({'detail': _('Flavor added to group')})\n\n @action(detail=True, methods=['post'])\n def remove_from_group(self, request, pk):\n del pk # unused\n\n flavor = self.get_object() # type: OpenstackInstanceFlavor\n\n group_id = request.data.get('group_id')\n if not group_id:\n raise APIBadRequest(_('Missing group id to filter flavors against it'))\n try:\n group = FlavorGroup.objects.get(id=group_id)\n del group\n except FlavorGroup.DoesNotExist:\n raise APIBadRequest(_('No group to filter against'))\n\n flavor.flavor_group = None\n flavor.save()\n\n return Response({'detail': _('Flavor removed from group')})\n\n @action(detail=False, methods=['get'])\n def get_available_flavors_for_image(self, request):\n image_id = request.query_params.get('image_id')\n search = request.query_params.get('search')\n if not image_id:\n raise APIBadRequest(_('Missing image id to filter flavors against it'))\n\n try:\n image = Image.objects.get(id=image_id)\n except Image.DoesNotExist:\n raise APIBadRequest(_('No image to filter against'))\n queryset = OpenstackInstanceFlavor.objects.filter(region=image.region)\n queryset = queryset.exclude(images__id=image_id)\n if search:\n queryset = queryset.filter(name__icontains=search)\n objects = FlavorSerializer(instance=queryset, many=True, read_only=True).data\n return Response({'objects': objects})\n\n @action(detail=False, methods=['get'])\n def get_flavors_assigned_to_image(self, request):\n image_id = request.query_params.get('image_id')\n if not image_id:\n raise APIBadRequest(_('Missing image id to filter flavors against it'))\n try:\n Image.objects.get(id=image_id)\n except Image.DoesNotExist:\n raise APIBadRequest(_('No image to filter against'))\n queryset = OpenstackInstanceFlavor.objects.filter(images__id=image_id)\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = FlavorSerializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = FlavorSerializer(queryset, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def add_to_image(self, request, pk):\n del pk # unused\n\n flavor = self.get_object() # type: OpenstackInstanceFlavor\n\n image_id = request.data.get('image_id')\n if not image_id:\n raise APIBadRequest(_('Missing image id to filter flavors against it'))\n try:\n image = Image.objects.get(id=image_id)\n except Image.DoesNotExist:\n raise APIBadRequest(_('No image to filter against'))\n\n flavor.images.add(image)\n flavor.save()\n\n return Response({'detail': _('Flavor added to image')})\n\n @action(detail=True, methods=['post'])\n def remove_from_image(self, request, pk):\n del pk # unused\n\n flavor = self.get_object() # type: OpenstackInstanceFlavor\n\n image_id = request.data.get('image_id')\n if not image_id:\n raise APIBadRequest(_('Missing image id to filter flavors against it'))\n try:\n image = Image.objects.get(id=image_id)\n except Image.DoesNotExist:\n raise APIBadRequest(_('No image to filter against'))\n\n flavor.images.remove(image)\n flavor.save()\n\n return Response({'detail': _('Flavor removed from image')})\n\n @action(detail=False, methods=['get'])\n def get_available_client_groups_to_assign(self, request):\n \"\"\"gets client groups not yet assigned to flavor\"\"\"\n flavor_id = request.query_params.get('flavor_id', None)\n if not flavor_id:\n raise APIBadRequest(_('Flavor id 
required.'))\n flavor = OpenstackInstanceFlavor.objects.filter(id=flavor_id).first()\n if not flavor:\n raise APIBadRequest(_('No flavor found for given id.'))\n search = request.query_params.get('search', None)\n qs = ClientGroup.objects.all().exclude(id__in=flavor.show_to_groups.values('id'))\n if search is not None:\n qs = qs.filter(name__icontains=search)\n page = self.paginate_queryset(qs)\n if page is not None:\n serializer = ClientGroupsMinSerializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = ClientGroupsMinSerializer(qs, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['get'])\n def get_client_groups_related_to_flavor(self, request, pk):\n \"\"\"gets client groups assigned to flavor\"\"\"\n flavor = self.get_object()\n qs = ClientGroup.objects.filter(id__in=flavor.show_to_groups.values('id'))\n page = self.paginate_queryset(qs)\n if page is not None:\n serializer = ClientGroupsMinSerializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = ClientGroupsMinSerializer(qs, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def assign_client_group_to_flavor(self, request, pk):\n \"\"\"assigns a client group to a flavor\"\"\"\n flavor = self.get_object()\n client_group_id = request.data.get('client_group', None)\n if not client_group_id:\n raise APIBadRequest(_('No client group id specified.'))\n client_group = ClientGroup.objects.filter(id=client_group_id).first()\n if not client_group:\n raise APIBadRequest(_('No client group found for assignment.'))\n if client_group in flavor.show_to_groups.all():\n raise APIBadRequest(_('Client group already assigned to this flavor.'))\n flavor.show_to_groups.add(client_group)\n return Response({'detail': _('Successfully assigned client group to flavor.')})\n\n @action(detail=True, methods=['post'])\n def remove_client_group_from_flavor(self, request, pk):\n \"\"\"remove client group from flavor\"\"\"\n flavor = self.get_object()\n client_group_id = request.data.get('client_group', None)\n if not client_group_id:\n raise APIBadRequest(_('No client group id specified.'))\n client_group = ClientGroup.objects.filter(id=client_group_id).first()\n if not client_group:\n raise APIBadRequest(_('No client group found from given id.'))\n if client_group in flavor.show_to_groups.all():\n flavor.show_to_groups.remove(client_group)\n return Response({'detail': _('Successfully removed client group from flavor.')})\n else:\n raise APIBadRequest(_('Cannot remove client group from flavor as it is not assigned to it.'))\n","repo_name":"pizzhub/backendfleio-test","sub_path":"project/common_admin/openstack/flavors/views/flavor.py","file_name":"flavor.py","file_ext":"py","file_size_in_byte":17413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31312717668","text":"from sendMessage import *\nfrom isAdmin import *\nfrom deleteMessage import *\n\n\ndef antiSpam(chat_id, message_id , user_id , message_date ) :\n\n difference = message_date - last_spam_messageDate[0]\n\n isAdmin = isAdmin(chat_id , user_id)\n\n if difference < 10800 and isAdmin == 0 :\n\n deleteMessage(chat_id , message_id)\n\n elif isAdmin == 0 :\n\n message = \"Sorry , in order to not spam this group please do not send the same command multiple times . 
\\n try again later .\"\n\n sendMessage(chat_id , message_id , message)\n\n last_spam_messageDate.insert(0 , message_date)\n\n return \"OK\"\n","repo_name":"shadoowC1/Crypto-Assistant-Bot","sub_path":"antiSpam.py","file_name":"antiSpam.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20616691249","text":"# coding=utf-8\nimport requests\nimport json\n\nclass BaiduTrans:\n def __init__(self,query_string):\n self.headers = {\"User_Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\"}\n self.url = \"http://fanyi.baidu.com/v2transapi\"\n self.post_data = {\n \"from\":\"zh\",\n \"to\": \"en\",\n \"query\": query_string,\n \"simple_means_flag\": 3\n }\n\n\n def parse_url(self): # 发送请求获取响应\n response = requests.post(self.url,data=self.post_data,headers=self.headers)\n return response.content.decode()\n\n def get_result(self,temp_string):#转化为字典,提取数据\n temp_dict = json.loads(temp_string)\n result = temp_dict[\"trans_result\"][\"data\"][0][\"dst\"]\n return result\n\n def run(self):\n #1、找到post的地址和需要post的数据\n #2、发送url请求,拿到响应\n temp_string = self.parse_url()\n #3、转化为python类型,提取数据\n result = self.get_result(temp_string)\n print(\"翻译结果为:\",result)\n\n \nbaidu_trans = BaiduTrans(\"我爱你\")\nbaidu_trans.run()\n\n","repo_name":"ziuluolan/Python-","sub_path":"百度翻译/aaa.py","file_name":"aaa.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8829234994","text":"from django.db import models\n\n\nclass NonDeletedManager(models.Manager):\n def has_deleted_relation_field(self, field_name, model_name):\n model = self.model\n if hasattr(model, field_name):\n related_field = model._meta.get_field(field_name)\n if related_field and hasattr(related_field, 'related_model'):\n related_model = related_field.related_model\n if related_model and getattr(related_model, '__name__') == model_name:\n return hasattr(related_model, 'deleted')\n return False\n\n def get_queryset(self):\n model = self.model\n queryset = super().get_queryset()\n if hasattr(model, 'deleted'):\n queryset = queryset.filter(deleted=False)\n\n if self.has_deleted_relation_field('user', 'RetailUser'):\n queryset = queryset.exclude(user__deleted=True)\n\n if self.has_deleted_relation_field('document', 'Document'):\n queryset = queryset.exclude(document__deleted=True)\n\n return queryset\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/core/managers/non_deleted_manager.py","file_name":"non_deleted_manager.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72182015528","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nif __name__ == \"__main__\":\r\n n, k = map(int, input().split())\r\n coins = [int(input()) for _ in range(n)]\r\n\r\n coins.reverse()\r\n answer = 0\r\n for coin in coins:\r\n if k >= coin:\r\n answer += k // coin\r\n k %= coin\r\n \r\n print(answer)","repo_name":"yerim10044001/ProblemSolving","sub_path":"백준/Silver/11047. 
동전 0/동전 0.py","file_name":"동전 0.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31586573081","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport itchat\n\n@itchat.msg_register([itchat.content.ATTACHMENT], isGroupChat=True)\ndef auto_download(msg):\n print(msg)\n msg.download(msg.fileName)\n sender = itchat.search_friends(userName=msg.fromUserName)\n print('get {} from {} sucessfully'.format(msg.fileName, sender.nickName))\n return\n\nitchat.auto_login(hotReload=True)\nitchat.run()\n ","repo_name":"tenkeyseven/wechatGroupAnalyzer","sub_path":"autoDownload.py","file_name":"autoDownload.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"1964373783","text":"import unittest\n\nfrom modi_plus.module.input_module.joystick import Joystick\nfrom modi_plus.util.message_util import parse_get_property_message\nfrom modi_plus.util.unittest_util import MockConnection, MockJoystick\n\n\nclass TestJoystick(unittest.TestCase):\n \"\"\"Tests for 'Joystick' package.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test fixtures, if any.\"\"\"\n\n self.connection = MockConnection()\n mock_args = (-1, -1, self.connection)\n self.joystick = MockJoystick(*mock_args)\n\n def tearDown(self):\n \"\"\"Tear down test fixtures, if any.\"\"\"\n\n del self.joystick\n\n def test_get_x(self):\n \"\"\"Test get_x method.\"\"\"\n\n _ = self.joystick.x\n self.assertEqual(\n self.connection.send_list[0],\n parse_get_property_message(-1, Joystick.PROPERTY_POSITION_STATE, self.joystick.prop_samp_freq)\n )\n self.assertEqual(_, 0)\n\n def test_get_y(self):\n \"\"\"Test get_y method.\"\"\"\n\n _ = self.joystick.y\n self.assertEqual(\n self.connection.send_list[0],\n parse_get_property_message(-1, Joystick.PROPERTY_POSITION_STATE, self.joystick.prop_samp_freq)\n )\n self.assertEqual(_, 0)\n\n def test_get_dirction(self):\n \"\"\"Test get_dirction method.\"\"\"\n\n _ = self.joystick.direction\n self.assertEqual(\n self.connection.send_list[0],\n parse_get_property_message(-1, Joystick.PROPERTY_DIRECTION_STATE, self.joystick.prop_samp_freq)\n )\n self.assertEqual(_, \"origin\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"LUXROBO/pymodi-plus","sub_path":"tests/module/input_module/test_joystick.py","file_name":"test_joystick.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72280255208","text":"#genome.py\n\nfrom .node_gene import node_gene\nfrom .connect_gene import connect_gene\nfrom random import random\n\nclass Genome:\n \n def __init__(self, inputs, outputs, index, genes = None):\n self.nbr_input = inputs\n self.nbr_output = outputs\n self.nbr_nodes = 0\n self.nodes = {}\n self.outputs = []\n self.connections = []\n self.innov = 0\n self.fitness = 0\n self.index = index\n\n if genes == None:\n for _ in range(self.nbr_input):\n self.nodes[self.nbr_nodes] = node_gene(self.nbr_nodes,'Input')\n self.nbr_nodes += 1\n for _ in range(self.nbr_output):\n new_node = node_gene(self.nbr_nodes,'Output')\n self.nodes[self.nbr_nodes] = new_node\n self.outputs.append(new_node)\n self.nbr_nodes += 1\n for i in range(self.nbr_input):\n for j in range(self.nbr_output):\n weight = self.generate_weight(1) \n self.connections.append(connect_gene(self.nodes[i], self.outputs[j], weight, True, self.innov))\n 
self.innov += 1\n else:\n pass\n \n def generate_weight(self,range):\n return unform(-1*range,range) # Random weight between -range and range\n\n def propagate(self, input):\n #Check len(input == nbr_input)\n for j in range(self.nbr_input):\n self.nodes[j].add_to_output(input[j])\n \n for i in range(len(self.connections)):\n self.process_connection(self.connections[i],i)\n \n for conn in self.connections:\n conn.accessed = False\n\n output = []\n for out in self.outputs:\n output.append(out.calc_output())\n return output\n\n def process_connection(self, connection, index):\n if not connection.accessed or connection.enabled:\n self.check_input(connection,index)\n connection.output_node.add_to_output(connection.input_node.calc_output()*connection.weight)\n connection.accessed = True\n\n def check_input(self, connection, index):\n if connection.input_node.name == 'Input' or connection.input_node.is_recurrent:\n return\n else:\n for i in range(index,len(self.connections)):\n if self.connections(i).output_node == connection.input_node:\n self.process_connection(self.connections(i), i) \n\n def crossover(self, other):\n #Find matching genes\n connects = {}\n new_connections = []\n\n for conn in self.connections:\n connects[conn.innov] = 1\n for conn in other.connections:\n if conn.innov in connects:\n connects[conn.innov] += 1\n else:\n connects[conn.innov] = 3\n\n for k,v in connects:\n if v > 1:\n if random() < 0.5:\n new_connections.append(self.connections[k])\n else:\n new_connections.append(other.connections[k])\n del connects[k]\n \n if self.fitness > other.fitness:\n find_value = 1\n else:\n find_value = 3\n \n for k,v in connects:\n if v == find_value:\n new_connections.append(self.connections[k])\n\n #Create new genome with genes from self and other\n\n \n #Adds a new node, splitting an old, random connection\n def mutate_add_node(self,innov):\n mutate_connection = self.connections[random.randrange(len(self.connections))]\n mutate_connection.enabled = False\n\n self.nodes[self.nbr_nodes] = node_gene(self.nbr_nodes,\"Hidden\") \n #Connection between previous input and new hidden \n self.connections.apped(connect_gene(mutate_connection.input_node, self.nodes[self.nbr_nodes],1,innov))\n #Connection between hidden and previous output\n self.connections.append(connect_gene(self.nodes[self.nbr_nodes],mutate_connection.output_node, mutate_connection.weight,innov+1))\n\n self.nbr_nodes += 1\n\n #Adds a new connection between two random nodes\n def mutate_add_connection(self,innov):\n node1 = self.nodes[random.randrange(self.nbr_nodes)]\n node2 = self.nodes[random.randrange(self.nbr_nodes)]\n \n if node1 == node2:\n node1.is_recurrent = True\n \n weight = self.generate_weight(1)\n self.connections.append(connect_gene(node1,node2,weight,innov))\n\n #Changes the weights on the connections in the genome\n def mutate_change_weights(self,mutation_chance):\n for conn in self.connections:\n if conn.enabled:\n if random() < mutation_chance:\n conn.weight += self.generate_weight(0.25)\n else:\n conn.weight = self.generate_weight(1)\n","repo_name":"Flourish3/NEAT","sub_path":"genome/genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21549217409","text":"import urllib.request\nimport json\nfrom .models import Sources, Articles\nfrom datetime import datetime\n\napi_key = None\nbase_url = None\narticles_url = None\n\n\ndef configure_request(app):\n global api_key, base_url, 
articles_url\n api_key = app.config['NEWS_API_KEY']\n base_url = app.config['NEWS_SOURCES_BASE_URL']\n articles_url = app.config['ARTICLES_BASE_URL']\n\n\ndef getSource(category):\n '''\n Function that gets the json response to our url request\n '''\n getSource_url = base_url.format(category, api_key)\n\n with urllib.request.urlopen(getSource_url) as url:\n getSource_data = url.read()\n getSource_response = json.loads(getSource_data)\n\n sources_results = None\n\n if getSource_response['sources']:\n sources_results_list = getSource_response['sources']\n sources_results = process_sources(sources_results_list)\n\n return sources_results\n\n\ndef process_sources(sources_list):\n '''\n Function that processes the news sources results and turns them into a list of objects\n Args:\n sources_list: A list of dictionaries that contain sources details\n Returns:\n sources_results: A list of sources objects\n '''\n sources_results = []\n\n for source_item in sources_list:\n id = source_item.get('id')\n name = source_item.get('name')\n description = source_item.get('description')\n url = source_item.get('url')\n category = source_item.get('category')\n language = source_item.get('language')\n country = source_item.get('country')\n\n sources_object = Sources(\n id, name, description, url, category, country, language)\n sources_results.append(sources_object)\n\n return sources_results\n\n\ndef getArticle(id):\n '''\n Function that processes the articles and returns a list of articles objects\n '''\n getArticle_url = articles_url.format(id, api_key)\n\n with urllib.request.urlopen(getArticle_url) as url:\n articles_results = json.loads(url.read())\n\n articles_object = None\n if articles_results['articles']:\n articles_object = process_articles(articles_results['articles'])\n\n return articles_object\n\n\ndef process_articles(articles_list):\n '''\n '''\n articles_object = []\n for article_item in articles_list:\n id = article_item.get('id')\n author = article_item.get('author')\n title = article_item.get('title')\n description = article_item.get('description')\n url = article_item.get('url')\n image = article_item.get('urlToImage')\n dateAt = article_item.get('publishedAt')\n\n # convert date from json to string and backto my specific format\n dates = datetime.strptime(dateAt, '%Y-%m-%dT%H:%M:%SZ')\n date = dates.strftime('%d.%m.%Y')\n\t\t\n if image:\n articles_result = Articles(\n id, author, title, description, url, image, date)\n articles_object.append(articles_result)\n\n return articles_object","repo_name":"AnnabelNkir/News_bulletin","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70439857767","text":"import os\nfrom typing import Callable, Tuple\n\nimport torch\nimport torch.utils.data as data\n\nimport torchaudio\n\nfrom avreader import load_audio\nfrom downloader import download_and_extract_archive\n\n__all__ = [\"SpeechCommands\", \"SpeechCommandsV2\", \"SpeechCommandsV1\"]\n\n\nclass SpeechCommands(data.Dataset):\n \"\"\"[summary]\n\n Parameters\n ----------\n root : str\n Root directory of the Speech Commands Dataset.\n split : str, optional (default=\"train\")\n The dataset split, supports ``train`` or ``val``\n\n transform : Callable[[torch.Tensor], torch.Tensor], optional (default=None)\n [description]\n target_transform : Callable[[torch.Tensor], torch.Tensor], optional (default=None)\n [description]\n download : bool, optional 
(default=False)\n [description]\n\n Attributes\n ----------\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n\n \"\"\"\n\n base_folder = \"speech_commands/v0.02\"\n url = \"http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz\"\n md5 = \"6b74f3901214cb2c2934e98196829835\"\n filename = url.rpartition(\"/\")[2]\n\n splits = (\"train\", \"val\", \"test\")\n\n def __init__(\n self,\n root: str,\n split: str = \"train\",\n transform: Callable[[torch.Tensor], torch.Tensor] = None,\n target_transform: Callable[[torch.Tensor], torch.Tensor] = None,\n download: bool = False,\n ) -> None:\n self.root = os.path.expanduser(root)\n self.split = split\n self.transform = transform\n self.target_transform = target_transform\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError(\n \"Dataset not found or corrupted.\"\n + \" You can use download=True to download it\"\n )\n\n self.classes, self.class_to_idx = self._find_classes(os.path.join(self.root, self.base_folder))\n\n self.data, self.targets = self._make_dataset(\n os.path.join(os.path.abspath(self.root), self.base_folder), split, self.classes, self.class_to_idx\n )\n\n # @property\n # def class_to_idx(self) -> Dict[str, int]:\n # return {_class: i for i, _class in enumerate(self.classes)}\n\n\n def _find_classes(self, path: str) -> Tuple[list, dict]:\n classes = [\n d.name\n for d in os.scandir(path)\n if not d.name.startswith(\".\") and not d.name.startswith(\"_\") and d.is_dir()\n ]\n classes.sort()\n class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}\n return classes, class_to_idx\n\n def _make_dataset(\n self, path: str, split: str, classes: str, class_to_idx: dict\n ) -> Tuple[list, list]:\n # load the test and validation lists\n with open(os.path.join(path, \"testing_list.txt\")) as fd:\n test_files = [os.path.join(path, fpath) for fpath in fd.read().strip().splitlines()]\n with open(os.path.join(path, \"validation_list.txt\")) as fd:\n eval_files = [os.path.join(path, fpath) for fpath in fd.read().strip().splitlines()]\n\n # load data and labels depending on the split part\n if split == \"test\":\n data = test_files\n targets = [class_to_idx[os.path.dirname(fpath).rpartition(\"/\")[2]] for fpath in test_files]\n elif split == \"val\":\n data = eval_files\n targets = [class_to_idx[os.path.dirname(fpath).rpartition(\"/\")[2]] for fpath in eval_files]\n else:\n data = []\n targets = []\n test_eval_files = test_files + eval_files\n for classe in self.classes:\n for entry in os.scandir(os.path.join(path, classe)):\n if (\n not entry.name.startswith(\".\")\n and entry.is_file()\n and entry.name.endswith(\".wav\")\n ):\n data.append(entry.path)\n # removes the test and validation file paths to keep only the\n # training file paths\n data = list(set(data) - set(test_eval_files))\n targets = [class_to_idx[os.path.dirname(fpath).rpartition(\"/\")[2]] for fpath in data]\n\n return data, torch.tensor(targets)\n\n def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:\n audio_path, target = self.data[index], int(self.targets[index])\n audio, fs = load_audio(audio_path)\n\n if self.transform is not None:\n audio = self.transform(audio)\n\n if self.target_transform is not None:\n targer = self.target_transform(target)\n\n return audio, target\n\n def __len__(self) -> int:\n return len(self.data)\n\n def _check_exists(self) -> bool:\n # can be more robust\n return 
os.path.exists(os.path.join(self.root, self.base_folder))\n\n def download(self) -> None:\n if self._check_exists():\n print(\"Files already downloaded and verified\")\n\n download_and_extract_archive(\n self.url,\n self.root,\n filename=self.base_folder,\n remove_finished=True,\n )\n\n def __repr__(self):\n head = \"Dataset \" + self.__class__.__name__\n body = [f\"Number od datapoints: {self.__len__()}\"]\n body.append(f\"Root location: {self.root}\")\n body.append(\"Transforms:\")\n body.append(\"Target transforms:\")\n lines = [head] + [\" \" * 4 + line for line in body]\n return \"\\n\".join(lines)\n\n\n\nclass SpeechCommandsV1(SpeechCommands):\n url = \"https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz\"\n md5 = \"3cd23799cb2bbdec517f1cc028f8d43c\"\n\nSpeechCommandsV2 = SpeechCommands\n","repo_name":"bchamand/datasets","sub_path":"datasets/speech_commands.py","file_name":"speech_commands.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20543547414","text":"class PeriodEvent:\r\n def __init__(self, callable2, interval, exitEvent):\r\n self.callable = callable2\r\n self.interval = interval\r\n self.exitEvent = exitEvent\r\n from threading import Thread, Event\r\n self.request = Event()\r\n Thread(target = self.loopThread).start()\r\n Thread(target = self.listenExit).start()\r\n \r\n def loopThread(self):\r\n while True:\r\n self.request.wait(self.interval)\r\n if self.exitEvent.is_set():\r\n break\r\n self.request.clear()\r\n self.callable()\r\n \r\n def listenExit(self):\r\n self.exitEvent.wait()\r\n self.set()\r\n \r\n def set(self):\r\n self.request.set()\r\n","repo_name":"whitejava/Histo","sub_path":"Histo/histo/bundle/periodevent.py","file_name":"periodevent.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9383307701","text":"import telebot\nfrom telebot import types\nimport sqlite3\nimport datetime\nfrom datetime import date\n\nname = surname = \"\"\n\nday=month=year=0\n\n\n\nbot = telebot.TeleBot('5375695745:AAETNH7ETCPxcaESCy6I3HmweyNbd7I3BPY')\n\nconnect = sqlite3.connect('users.db', check_same_thread=False)\ncursor = connect.cursor()\ncursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS users_bot(\n id INTEGER PRIMARY KEY AUTOINCREMENT, id_numb INTEGER, name TEXT, surname TEXT, age INTEGER, day INTEGER, month INTEGER, year INTEGER\n)\"\"\")\nconnect.commit()\n\n@bot.message_handler(commands=['start','help'])\ndef start_message(message):\n connect = sqlite3.connect('users.db')\n cursor = connect.cursor()\n pipl_id = message.chat.id\n cursor.execute(f\"SELECT id FROM users_bot WHERE id_numb={pipl_id}\")\n data = cursor.fetchone()\n if data is None:\n bot.send_message(message.chat.id, 'Вас приветствует бот-администратор\\n ')\n markup = types.InlineKeyboardMarkup(row_width=2)\n item1 = types.InlineKeyboardButton('регистрация', callback_data='request1')\n item2 = types.InlineKeyboardButton('Инфо', callback_data='request2')\n markup.add(item1, item2)\n bot.send_message(message.chat.id, 'Чем могу помочь ?', reply_markup=markup)\n else:\n cursor.execute(f\"SELECT name,surname FROM users_bot WHERE id_numb={pipl_id}\")\n bot.send_message(message.from_user.id, 'Приветствую '+name+' '+surname)\n@bot.message_handler(content_types=['text'])\ndef get_text(message):\n if message.text.lower() == 'да' or message.text.lower() == 'готов':\n 
bot.send_message(message.from_user.id, 'Здорово, как тебя зовут?')\n bot.register_next_step_handler(message, get_name)\n\n else:\n bot.send_message(message.from_user.id, 'Я тебя не понимаю =(... Напиши /help')\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_worker(call):\n if call.data == \"request1\":\n bot.send_message(call.message.chat.id, \"Отлично, начинаем регистрацию \\nготов ?\")\n elif call.data=='request2':\n markup = types.InlineKeyboardMarkup(row_width=2)\n item1 = types.InlineKeyboardButton('Мне есть 18', callback_data='request1')\n item2 = types.InlineKeyboardButton('Мне нет 18', callback_data='request3')\n markup.add(item1,item2)\n bot.send_message(call.message.chat.id,\"ВНИМАНИЕ !!!\\nУ данного ресурса есть возрастное ограничение 18+\\nПредоставленные данные будут использованы только для работы с ботом\", reply_markup=markup)\n elif call.data =='request3':\n bot.send_message(call.message.from_user.id, 'Ваш возраст не удовлетворяет возрастным критериям, /help')\n\ndef get_name(message):\n global name\n name = message.text\n bot.send_message(message.from_user.id, 'Фамилия ?')\n bot.register_next_step_handler(message, reg_surname)\n\ndef reg_surname(message):\n global surname\n surname = message.text\n bot.send_message(message.from_user.id, \"Год твоего рождения?\")\n bot.register_next_step_handler(message, reg_year)\n\ndef reg_year(message):\n global year\n while year==0:\n try:\n year= int(message.text)\n except Exception:\n bot.send_message(message.from_user.id, \"Ошибка в году, пиши цифрами\")\n bot.register_next_step_handler(message, reg_year)\n break\n bot.send_message(message.from_user.id, \"Месяц твоего рождения?\")\n bot.register_next_step_handler(message, reg_month)\n\ndef reg_month(message):\n global month\n while month==0:\n try:\n month=int(message.text)\n except Exception:\n bot.send_message(message.from_user.id, \"Ошибка в месяце, пиши цифрами\")\n bot.register_next_step_handler(message, reg_month)\n break\n bot.send_message(message.from_user.id, \"Число твоего рождения?\")\n bot.register_next_step_handler(message, reg_day)\n\ndef reg_day(message):\n global day\n while day==0:\n try:\n day=int(message.text)\n except Exception:\n bot.send_message(message.from_user.id, \"Ошибка в числе, пиши цифрами\")\n bot.register_next_step_handler(message, reg_day)\n break\n bot.send_message(message.from_user.id, \"Тебя зовут \"+name+' '+surname+\", дата рождения \"+str(day)+'.'+str(month)+'.'+str(year)+' , верно ?')\n bot.register_next_step_handler(message, reg_age)\n\ndef reg_age(message):\n global age\n td=datetime.datetime.now().date()\n bd = date(int(year), int(month), int(day))\n age= int((td-bd).days/365)\n pipl_id = message.chat.id\n if age<18:\n bot.send_message(message.from_user.id, 'Ваш возраст не удовлетворяет возрастным критериям, /help')\n else:\n cursor.execute(\"INSERT INTO users_bot(id_numb,name,surname,age,day,month,year) VALUES(?,?,?,?,?,?,?);\",(pipl_id, name, surname, int(age),int(day),int(month),int(year)))\n connect.commit()\n bot.send_message(message.from_user.id, \"Регистрация прошла успешно\\nМожно пользоваться сервисом\")\n print ('зарегился '+name+' '+surname+' '+str(message.chat.id))\n\n\n\nbot.polling()\n\n\n","repo_name":"agvardeitsev/bot_shmelkov","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39883245927","text":"# LinkedList class object\n\n# Define node object\nclass 
_ListNode(object):\n def __init__(self, val):\n self.val = val\n self.next = None # I don't know which next to point to, so use None\n\n\n# MyLinkedList is a linked list\nclass MyLinkedList(object):\n def __init__(self):\n '''\n Initialize your data structure here.\n Here, we will construct an empty linked list.\n '''\n self._head = None # always maintain a reference that points to the head\n self._tail = None # always maintain a reference that points to the tail\n self._size = 0 # always represents the current number of nodes\n\n def _get(self, index):\n '''\n Find node that corresponds to index.\n Assume index is within [0, self._size]\n '''\n node = self._head\n for _ in xrange(index):\n node = node.next\n return node\n\n def get(self, index):\n '''\n get the value of the index node in the linked list. If the index is invalid, return -1 (actually not good because -1 might be a data value).\n :type index: Int\n :rtype: int\n '''\n # check index is valid or not\n if index < 0 or index >= self._size:\n return -1\n return self._get(index).value\n\n def addAtHead(self, val):\n '''\n Add a node of value before the first element of the linked list\n :type val: int\n :rtype: None\n '''\n if self._head is None:\n self._head = self._tail = _ListNode(val)\n else:\n new_head = _ListNode(val) # add\n new_head.next = self._head\n self._head = new_head # redefine head\n self._size += 1\n\n def addAtTail(self, val):\n '''\n Add a node of value after the last element of the linked list\n :type val: int\n :rtype: None\n '''\n if self._size == 0:\n self._head = self._tail = _ListNode(val)\n else:\n self._tail.next = _ListNode(val) # add\n self._tail = self._tail.next # redefine tail\n self._size += 1\n\n def addAtIndex(self, index, val):\n '''\n Add a node of value val before the index node in the linked list. If index equals the length of linked list, the node will be appended to the end. If index is greater than the length, the node will not be inserted.\n :type index: int\n :tyep val: int\n :rtype: None\n '''\n # Index validation\n if index < 0 or index > self._size: # note len = size - 1\n return # do nothing\n if index == 0:\n self.addAtHead(val) # note size already added\n elif index == self._size:\n self.addAtTail(val)\n else:\n prev_node = self._get(index - 1)\n new_node = _ListNode(val)\n new_node.next = prev_node.next\n prev_node.next = new_node\n self._size += 1\n\n # AddAtIndex better solution with dummy head\n def add_to_index(head, index, val):\n fake_head = ListNode(None)\n fake_head.next = head\n insert_place = search_by_index(fake_head, index)\n if insert_place is None:\n return fake_head.next\n new_node = ListNode(val)\n new_node.next = insert_place.next\n insert_place.next = new_node\n return fake_head.next\n\n def deleteAtIndex(self, index):\n '''\n Delete the index node in the linked list, if the index is valid.\n :type index: int\n :rtype: None\n '''\n if index < 0 or index >= self._size:\n return\n if index == 0:\n #delete old head\n #deletion means two things:\n #1. most important: from linked list perspective, you can not see this node\n #2. 
optional:from this node perspective, there is no other nodes after it\n # if delete head\n new_head = self._head.next\n self._size -= 1\n self._head.next = None #\n self._head = new_head\n # if I remove the last and first node\n if self._head is None:\n self._tail = None\n else:\n prev_node = self._get(index - 1)\n remove_node = prev_node.next\n prev_node.next = remove_node.next\n remove_node.next = None\n # what if I remove the original tail node?\n if index == self._size - 1:\n self._Tail = prev_node\n self._size -= 1\n\n def __str__(self):\n '''\n Get the string representation of the internal singly list\n '''\n strs = []\n node = self._head\n while node is not None:\n strs.append(str(node.val))\n node = node.next\n return ' -> '.join(strs)","repo_name":"zoelzw/MachineLearning-practice","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75293406248","text":"'''While loop'''\n# The syntax of a while loop looks like this:\n# ''' while condition:\n #Body of the loop '''\n\n# The block keeps executing until the condition is true/false\n# In while loops, the condition is checked first. If it evaluates to true, the body of the loop is executed, otherwise not!\n# If the loop is entered, the process of condition check and execution is continued until the condition becomes false.\n\n'''exmaple'''\ni = 0\nwhile i<5:\n print(\"Harry\",str(i))\n i = i+1 #this kept increasing i every time in the loop so that i=1 i=2 i=3 i=4 aate rahe varna ye loop kabhi khatam nahi hota\nprint(\"done\")\n# (Above program will print Harry 5 times)\n# Note: if the condition i<5 never becomes false, the loop keeps getting executed!\n\n#again\n# i = 0\n# while i<5:\n# print(\"Harry\",str(i))\n# i = i+1 #this kept increasing i every time in the loop so that i=1 i=2 i=3 i=4 aate rahe varna ye loop kabhi khatam nahi hota\n# print(\"done\")\n#ye jo code hai isme print(done) loop ke andar aaraha hai \n\n'''Quick Quiz: Write a program to print 1 to 50 using a while loop'''\n# i=1\n# while i<=50:\n# print(i)\n# i=i+1\n\n\n# wE Generally use 'False while' jaha condition ek point pr false ho jati hai\n# sometimes 'true while' is used jahaa condition kabhi false nahi hoti isliye infinite print hote rehta hai.\n\n\n'''example'''\n# write a program to print the content of a list using while loop\nfrom types import FrameType\n\n\n# i= 0\n# fruit = [\"apple\" ,\"anana\" ,\"chappal\" ,\"hi\"]\n# while i most_sales['sales']:\n most_sales['car'] = car\n most_sales['sales'] = item['total_sales']\n\n if car['car_year'] not in years:\n years[car['car_year']] = 0\n years[car['car_year']] += 1\n\n popular_year = max(years, key=years.get)\n\n summary = [\"\"]\n summary.append(\"The {car_model} had the most sales: {total_sales}\"\n .format(car_model=format_car(most_sales['car']),\n total_sales=most_sales['sales']))\n summary.append(\"The most popular year was {year} with {total_sales} sales.\"\n .format(year=popular_year, total_sales=years[popular_year]))\n\n \nmsg = process_data()\nparagraph = \"
<br/>
\".join(msg)\nreport.generate('/tmp/cars.pdf', 'Sales summary for last month', process_data(), cars_dict_to_table())\nemails.generate(\"automation@example.com\", \"student@example.com\", \"Sales summary for last month\", \"\\n\".join(msg), \"/tmp/cars.pdf\")\n\n","repo_name":"mehwishh247/coursera-it-automation","sub_path":"automating-real-world-tasks-with-python/practice-module/module-4-practice.py","file_name":"module-4-practice.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35325767332","text":"import os, sys\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, datasets, models\nfrom torchsummary import summary\nimport pytorch_unet\nfrom Unets import transition_UNet, UNet, transition_UNet_large, UNet_large, encoder_decoer\nfrom CLSTM import BDCLSTM\n\nimport torch.nn as nn\n\nfrom collections import defaultdict\nimport torch.nn.functional as F\n\n# my code\nfrom loss import calc_loss\nfrom utils import *\nfrom DSC_pred import DSC_pred\n\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport time\nimport copy\nfrom tqdm import tqdm\nimport argparse\nimport SimpleITK as sitk\nfrom dataloader import LSTMDataset, UnetDataset, CV_Unet_Dataset, CV_LSTMDataset\n\n# unet_device = torch.device('cuda:{}'.format(3) if torch.cuda.is_available() else 'cpu')\n\ndef set_subject_result(GT_dir, test_subjects):\n\tsubject_seg = {}\n\tfor i, subject in enumerate(test_subjects):\n\t\tsubject = os.path.basename(subject)\n\t\tprint(subject)\n\t\tlabel_path = os.path.join(GT_dir, subject + '.npy')\n\t\t# print(label_path)\n\t\tlabel = np.load(label_path)\n\t\tlabel_arr = np.transpose(label, (2, 0, 1))\n\n\t\tsubject_seg[subject] = np.zeros(label_arr.shape, dtype=np.float16)\n\n\treturn subject_seg\n\ndef test_unet(model, dataloaders, root_dir, test_subjects, save_dir, unet_device):\n\tmodel.eval()\n\tepoch_samples = 0\n\tmetrics = defaultdict(float)\n\n\tresult_path = os.path.join(save_dir, 'testing_result.txt')\n\tfile = open(result_path, 'w')\n\n\t# load original shape\n\tGT_dir = os.path.join(root_dir, 'npy_labels')\n\tsubject_seg = set_subject_result(GT_dir, test_subjects)\n\twith torch.no_grad():\n\t\tfor inputs, labels, subject, name in tqdm(dataloaders):\n\t\t\tsubject = subject[0]\n\t\t\tname = name[0].replace('.npy', '')\n\n\t\t\tinputs = inputs.to(unet_device)\n\t\t\tmean = inputs.mean()\n\t\t\tstd = inputs.std()\n\t\t\tlabels = labels.to(unet_device)\n\t\t\t\n\t\t\toutputs = model(inputs)\n\t\t\tthresh_outputs = F.sigmoid(outputs)\n\t\t\tthresh_outputs[thresh_outputs >= 0.5] = 1.0\n\t\t\tthresh_outputs[thresh_outputs < 0.5] = 0.0\n\n\t\t\tloss, dice = calc_loss(outputs, labels, metrics)\n\t\t\tpred = np.squeeze(thresh_outputs.cpu().data[0].numpy()).astype(np.float16)\n\t\t\t\n\t\t\tp_z, p_x, p_y = pred.shape\n\t\t\t_, z,x,y = name.split('_')\n\t\t\tz, x, y = int(z), int(x), int(y)\n\t\t\tsubject_seg[subject][z:z+p_z,x:x+p_x,y:y+p_y] = (subject_seg[subject][z:z+p_z,x:x+p_x,y:y+p_y] + pred)/2\n\n\t\t\tepoch_samples += inputs.size(0)\n\t\n\tsource_dir = '/media/NAS/nas_187/datasets/junghwan/experience/CT/TCIA/Labels'\n\tfor key in subject_seg:\n\t\toriginal_path = os.path.join(source_dir, key)\n\n\t\torigin_label = os.path.join(source_dir, 'label{}.nii.gz'.format(key))\n\t\torigin_3D = sitk.ReadImage(origin_label, sitk.sitkInt16)\n\n\t\tsubject_dir = os.path.join(save_dir, 
key)\n\t\tif not os.path.exists(subject_dir):\n\t\t\tos.mkdir(subject_dir)\n\t\tsave_path = os.path.join(subject_dir, 'pred.nii')\n\n\t\tsubject_seg[key][subject_seg[key] >= 0.4] = 1\n\t\tsubject_seg[key][subject_seg[key] < 0.4] = 0\n\n\t\tresult_3D = sitk.GetImageFromArray(subject_seg[key].astype(np.uint8))\n\n\t\tresult_3D.SetDirection(origin_3D.GetDirection())\n\t\tresult_3D.SetOrigin(origin_3D.GetOrigin())\n\t\tresult_3D.SetSpacing(origin_3D.GetSpacing())\n\n\t\tsitk.WriteImage(result_3D, save_path)\n\t\tdel result_3D\n\t\n\tfor k in metrics.keys():\n\t\tfile.write('{}: {:.4f}\\n'.format(k, metrics[k] / epoch_samples))\n\tfile.close()\n\tprint_metrics(metrics, epoch_samples, 'test')\n\n\ndef Arg():\n\tparser = argparse.ArgumentParser(description='CT image train')\n\tparser.add_argument('-d', '--root_dir', dest='root_dir', default='/media/NAS/nas_187/datasets/junghwan/experience/CT/TCIA',\n\t\t\t\t\t\thelp='set root_dir, default is \"/media/NAS/nas_187/datasets/junghwan/experience/CT/TCIA\"')\n\tparser.add_argument('-t', '--target', dest='target', default='pancreas',\n\t\t\t\t\t\thelp='choose target, liver or pancreas, default is pancreas')\n\tparser.add_argument('-nf', '--n_fold', dest='n_fold', default=4, type=int,\n\t\t\t\t\t\thelp='set number of folds, default is 4')\n\tparser.add_argument('-g', '--unet_gpu', dest='unet_gpu', default='0',\n\t\t\t\t\t\thelp='choose gpu id, 0~7, default=0, if you want use multi-gpu write \"0,1,2,3\"')\n\tparser.add_argument('-e', '--unet_epochs', dest='unet_epochs', default=30, type=int,\n\t\t\t\t\t\thelp=\"choose num epoch, default is 30\")\n\tparser.add_argument('-p', '--patch_size', dest='patch_size', default=64, type=int,\n\t\t\t\t\t\thelp='set patch size')\n\tparser.add_argument('-v', dest='version', default='old',\n\t\t\t\t\t\thelp='old version means clip the intensity value [-300, 350], new version is [-100, 240], default is old')\n\n\tparser.add_argument('--padding_size', dest='padding_size', default='stride',\n\t\t\t\t\t\thelp='set padding_size. stride or patch, default is stride')\n\tparser.add_argument('--stride_size', dest='stride_size', default='half',\n\t\t\t\t\t\thelp='set stride_size. half or triple, default is half')\n\n\tparser.add_argument('--network_size', dest='network_size', default='normal',\n\t\t\t\t\t\thelp='set network_size. normal or small, default is normal')\n\tparser.add_argument('--optim', dest='optim', default='Adam',\n\t\t\t\t\t\thelp='set optim. SGD or Adam, default is Adam')\n\tparser.add_argument('--input_channels', dest='input_channels', default=64, type=int,\n\t\t\t\t\t\thelp='set input_channels. default is 64')\n\tparser.add_argument('--bce_weight', dest='bce_weight', default=0.25, type=float,\n\t\t\t\t\t\thelp='set bce_weight. 
default is 0.25, 0 means only use dice loss')\n\tparser.add_argument('--unet_type', dest='unet_type', default='transition',\n\t\t\t\t\t\thelp='choose what Unet network training(for using bdclstm), Unet or transition, defulat is transition')\n\t\n\treturn parser.parse_args()\n\n\ndef main():\n\targs = Arg()\n\troot_dir = args.root_dir\n\tdata_root = '../datasets'\n\tunet_device = torch.device('cuda:{}'.format(args.unet_gpu) if torch.cuda.is_available() else 'cpu') # 'cuda:4,5'\n\n\t# split dataset to N_fold\n\tpre_dir = os.path.join(data_root, 'preprocessed')\n\tif args.stride_size == 'half':\n\t\tdata_path = os.path.join(pre_dir, '{}_patch_{}_padding:{}_{}_version'.format(args.target, args.patch_size, args.padding_size, args.version))\n\t\t\n\telse:\n\t\tdata_path = os.path.join(pre_dir, '{}_patch_{}_padding:{}_stride:{}'.format(args.target, args.patch_size, args.padding_size, args.stride_size))\n\tfold_list = split_subject_for_cv(data_path, n_fold=args.n_fold)\n\n\tmodel_dir = os.path.join(root_dir, 'models')\n\tcur_model_dir = make_dir(model_dir, '3D_lstm')\t\n\tUnets_model_dir = make_dir(cur_model_dir, 'Unets_{}'.format(args.optim))\n\n\tresult_dir = os.path.join(root_dir, 'results')\n\tcur_result_dir = make_dir(result_dir, '3D_lstm')\n\tUnets_result_dir = make_dir(cur_result_dir, 'Unets_{}'.format(args.optim))\n\n\tunet_device_ids = []\n\tfor id in args.unet_gpu.split(','):\n\t\tunet_device_ids.append(int(id))\n\ttorch.cuda.set_device(unet_device_ids[0])\n\n\tif args.stride_size == 'half':\n\t\thyper_name = 'network:{}_unettype:{}_unetsize:{}_patch_size:{}_epochs:{}_padding:{}_input:{}_version:{}_185'.format('Unet', \n\t\t\t\t\t\targs.unet_type, args.network_size, args.patch_size, args.unet_epochs, args.padding_size, args.input_channels, args.version)\n\telse:\n\t\thyper_name = 'network:{}_unettype:{}_unetsize:{}_patch_size:{}_epochs:{}_padding:{}_stride:{}_input:{}'.format('Unet', \n\t\t\t\t\t\targs.unet_type, args.network_size, args.patch_size, args.unet_epochs, args.padding_size, args.stride_size, args.input_channels)\n\tprint(hyper_name)\n\tfor i in range(3, args.n_fold):\n\t\tsubject_paths = split_fold_to_train(fold_list, i)\n\t\t\n\t\tprint('[{}] fold testing start!'.format(i))\n\n\t\tif args.unet_type == 'transition':\n\t\t\tif args.network_size == 'large':\n\t\t\t\tunet = transition_UNet_large(1, args.input_channels).to(unet_device)\n\t\t\telse:\n\t\t\t\tunet = transition_UNet(1, args.input_channels)\n\n\t\t\tunetType_model_dir = make_dir(Unets_model_dir, 'transition')\n\t\t\tunetType_result_dir = make_dir(Unets_result_dir, 'transition')\n\t\telif args.unet_type == 'unet':\n\t\t\tif args.network_size == 'large':\n\t\t\t\tunet = UNet_large(1, args.input_channels).to(unet_device)\n\t\t\telse:\n\t\t\t\tunet = UNet(1, args.input_channels).to(unet_device)\n\t\t\tunetType_model_dir = make_dir(Unets_model_dir, 'unet')\n\t\t\tunetType_result_dir = make_dir(Unets_result_dir, 'unet')\n\t\telse:\n\t\t\tunet = encoder_decoer(1, args.input_channels).to(unet_device)\n\t\t\tunetType_model_dir = make_dir(Unets_model_dir, 'encoder_decoder')\n\t\t\tunetType_result_dir = make_dir(Unets_result_dir, 'encoder_decoder')\n\t\t\t\n\t\thyper_dir = os.path.join(unetType_model_dir, hyper_name)\n\t\tUnet_name = 'fold:{}.ckpt'.format(i)\n\t\t\n\t\tunet = nn.DataParallel(unet, device_ids=unet_device_ids)\n\t\tmodel_path = os.path.join(hyper_dir, Unet_name)\n\t\tprint(model_path)\n\t\tunet.load_state_dict(torch.load(model_path))\n\n\t\tdatasets = CV_Unet_Dataset(subject_paths['val'])\n\t\tdataloader = 
DataLoader(datasets, batch_size=1, shuffle=True)\n\n\t\tpatch_epoch_dir = make_dir(unetType_result_dir, hyper_name)\n\t\tfold_result_dir = make_dir(patch_epoch_dir, '{}_fold'.format(i), remove=True)\n\t\ttest_unet(unet, dataloader, root_dir, subject_paths['val'], fold_result_dir, unet_device)\n\n\t\t# calculate 3D result\n\t\tDSC_pred(root_dir, fold_result_dir)\nif __name__ == '__main__':\n\tmain()\n","repo_name":"rlawjdghks7/Multi-Dimensional-Convolutional-LSTM","sub_path":"test_unet.py","file_name":"test_unet.py","file_ext":"py","file_size_in_byte":9045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21174766725","text":"import webapp2\nimport urllib\nfrom google.appengine.ext import db\nimport json\n\nclass JsonUniquePost(webapp2.RequestHandler):\n def get(self, resource):\n self.response.headers['Content-Type'] = 'application/json'\n urlTitle = str(urllib.unquote(resource))\n \n posts = list(db.GqlQuery('SELECT * FROM BlogPost '\n 'WHERE url = :1', urlTitle))\n if len(posts) == 0: #if the URL isn't found in the db\n self.response.out.write('Sorry that post does not exist')\n else:\n output = {\"content\": str(posts[0].title), \n \"created\": posts[0].created.strftime('%a %B %d %H:%M:%S %Y'), \n \"subject\": str(posts[0].title)}\n jsonOutput = json.dumps(output)\n self.response.out.write(jsonOutput)\n\nclass JsonBlog(webapp2.RequestHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'application/json'\n posts = list(db.GqlQuery('SELECT * FROM BlogPost '))\n \n jsonOutput = []\n for entry in posts: #add each db item into a list formated for json\n output = {\"content\": str(entry.title), \n \"created\": entry.created.strftime('%a %B %d %H:%M:%S %Y'), \n \"subject\": str(entry.title)}\n jsonOutput.append(output)\n self.response.out.write(json.dumps(jsonOutput))","repo_name":"kevinmarsh/cs253-blog","sub_path":"jsonParse.py","file_name":"jsonParse.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12286342356","text":"import numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n\n #parameters\n Lx = 10.0 #nm\n Ly = 10.0 #nm\n Lz = 21.9 #nm\n\n max_len = max(Lx, Ly, Lz)\n\n #D = 33.328 #Density [nm^-3]\n D = 36.0\n d = D**(1.0/3.0) #Density in 1D [nm^-1]\n\n print(\"Number of particles: \", Lx*Ly*Lz*D)\n\n x = np.linspace(0.0, max_len, int(d*max_len))\n\n x_len = x.shape[0]\n\n particle = np.empty((x_len*x_len*x_len, 3))\n for i in range(x_len):\n for j in range(x_len):\n for k in range(x_len):\n particle[i*x_len*x_len + j*x_len + k, :] = np.array([x[i], x[j], x[k]])\n\n \n #delete the ones that are out of range\n particle_temp = particle[particle[:,0] <= Lx,:]\n particle_temp = particle_temp[particle_temp[:,1] <= Ly,:]\n particle = particle_temp[particle_temp[:,2] <= Lz,:]\n\n N = particle.shape[0]\n print(\"actual Number of particles: \", particle.shape[0])\n \n particle *= 10.0 #nm -> angstrom\n \n np.savetxt(\"water_cube_{:d}.txt\".format(N), np.c_[particle[:,0], particle[:,1], particle[:,2]], fmt=['%10.3f', '%10.3f', '%10.3f'])\n\n \"\"\"\n #plot for inspection\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(particle[:,0],particle[:,1],particle[:,2])\n max_len *= 10.0\n ax.set_xlim(0.0,max_len)\n ax.set_ylim(0.0,max_len)\n ax.set_zlim(0.0,max_len)\n plt.show()\n 
\"\"\"","repo_name":"noabauma/semester-thesis","sub_path":"particle_generators/water_cube.py","file_name":"water_cube.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10081152926","text":"import numpy as np\nimport pandas as pd\nimport datetime\nfrom typing import List, Tuple, Iterator, Dict, Any, Callable, Iterable\n\n\n\nclass TimeSeriesSplit():\n def __init__(self, data: pd.DataFrame, sort_data=False):\n self.data = data\n self.sort_data = sort_data\n\n if self.sort_data:\n if sorted(self.data.index) is not self.data.index:\n self.data.sort_index(ascending=True, inplace=True)\n\n def date_range_split(self, split) -> pd.DataFrame:\n \"\"\"\n :param split: a tuple in datetime format that contains the start date and the end date of the range. Year-month-day\n :return: dataframe that contains only the data within the date range\n \"\"\"\n\n date_range = self.data.copy()\n\n nearest_start = date_range.iloc[\n date_range.index.get_loc(datetime.datetime.strptime(split[0], '%m-%d-%Y'), method='nearest')].name\n nearest_end = date_range.iloc[\n date_range.index.get_loc(datetime.datetime.strptime(split[1], '%m-%d-%Y'), method='nearest')].name\n date_range = date_range.truncate(before=nearest_start, after=nearest_end)\n return date_range\n\n def exclude_date_range(self, split: Tuple[str, str]) -> pd.DataFrame:\n \"\"\"\n :param split: a tuple in datetime format that contains the start date and the end date of the range to be excluded. Year-month-day\n :return: dataframe that removes the dates passed in and returns the resulting dataframe.\n \"\"\"\n ex_date_range = self.data.copy()\n nearest_start = ex_date_range.iloc[ex_date_range.index.get_loc(datetime.datetime.strptime(split[0], '%m-%d-%Y'),\n method='nearest')].name\n nearest_end = ex_date_range.iloc[ex_date_range.index.get_loc(datetime.datetime.strptime(split[1], '%m-%d-%Y')\n , method='nearest')].name\n ex_date_range = ex_date_range.reset_index()\n ex_date_range = ex_date_range.where(np.logical_or(ex_date_range['Date_Time'] < nearest_start,\n ex_date_range['Date_Time'] > nearest_end))\n ex_date_range = ex_date_range.set_index('Date_Time').dropna()\n return ex_date_range\n\n def step_size(self, split: Tuple[int, int, int]) -> int:\n \"\"\"\n calculates the step size used in the walk forward test\n :param split: Tuple containing 3 integers. first is total number of windows desired, second is initial\n window multiple, third is the window number who's indices are to be returned\n :return: integer that is the size of the step used for each window in walk forward test\n \"\"\"\n no_splits = split[0]\n initial_window_multiple = split[1]\n step_size = int(np.floor(self.data.shape[0] / (initial_window_multiple + no_splits)))\n return step_size\n\n def initial_window_size(self, split: Tuple[int, int, int]) -> int:\n \"\"\"\n calculates the initial window size that will be used for the initial optimization. uses the multiple in the 2nd\n position of the split tuple\n :param split: Tuple containing 3 integers. 
first is total number of windows desired, second is initial\n window multiple, third is the window number who's indices are to be returned\n :return: integer that is the size of the initial optimization window\n \"\"\"\n initial_window_multiple = split[1]\n initial_window_size = int(np.floor(self.step_size(split) * initial_window_multiple))\n return initial_window_size\n\n def optimization_indices(self, split: Tuple[int, int, int]) -> Tuple[int, int]:\n \"\"\"\n returns the optimization indices of each of the optimization windows\n :param split: Tuple containing 3 integers. first is total number of windows desired, second is initial\n window multiple, third is the window number who's indices are to be returned\n :return: tuple with start index number and end index number\n \"\"\"\n no_splits = split[0]\n return_split = split[2]\n start = 0\n if no_splits == return_split:\n end = self.data.shape[0]\n else:\n end = (return_split-1 * self.step_size(split)) + self.initial_window_size(split)\n return start, end-1\n\n def optimization_window(self, split: Tuple[int, int, int]) -> pd.DataFrame:\n \"\"\"\n :param split: Tuple containing 3 integers. first is total number of windows desired, second is initial\n window multiple, third is the window number who's indices are to be returned\n :return: dataframe containing the desired optimization window\n \"\"\"\n opt_df = self.data.copy()\n opt_df = opt_df[self.optimization_indices(split)[0]: self.optimization_indices(split)[1]+1]\n return opt_df\n\n def rolling_optimization_indices(self, split: Tuple[int, int, int]) -> Tuple[int, int]:\n \"\"\"\n returns the optimization indices of each of the optimization windows\n :param split: Tuple containing 3 integers. first is total number of windows desired, second is initial\n window multiple, third is the window number who's indices are to be returned\n :return: tuple with start index number and end index number\n \"\"\"\n\n return_split = split[2]\n step_size = self.step_size(split)\n initial_window_size = self.initial_window_size(split)\n\n start = max((return_split - 1) * step_size - 1, 0)\n end = ((return_split-1) * step_size) + initial_window_size\n\n\n return start, end-1\n\n def rolling_optimization_window(self, split: Tuple[int, int, int]) -> pd.DataFrame:\n \"\"\"\n moves the optimzization window forward in each window vs starting at 0\n :param split: Tuple containing 3 integers. first is total number of windows desired, second is initial\n window multiple, third is the window number who's indices are to be returned\n :return: dataframe containing the desired optimization window\n \"\"\"\n opt_df = self.data.copy()\n opt_df = opt_df[self.rolling_optimization_indices(split)[0]: self.rolling_optimization_indices(split)[1]+1]\n return opt_df\n\n\n def walk_forward_indices(self, split: Tuple[int, int, int])-> Tuple[int, int]:\n \"\"\"\n :param split: Tuple containing 3 integers. 
first is total number of windows desired, second is initial\n window multiple, third is the window number who's indices are to be returned\n :return: tuple with start index number and end index number\n \"\"\"\n no_splits = split[0]\n step_size = self.step_size(split)\n initial_window_size = self.initial_window_size(split)\n return_split = split[2]\n\n if return_split > no_splits:\n raise ValueError('Return Split Greater than Total Number of Splits')\n\n if return_split == 0:\n start = initial_window_size\n else:\n start = ((return_split - 1) * step_size) + initial_window_size + 1\n\n if no_splits == return_split:\n end = self.data.shape[0]\n else:\n end = start - 1 + step_size\n return start-1, end-1 # -1 at the end is b/c it indeces starting at 0. this is based off the shape which counts the 0 index position as a number.\n\n def walk_forward_window(self, split):\n walk_forward_df = self.data.copy()\n walk_forward_df = walk_forward_df[self.walk_forward_indices(split)[0]:self.walk_forward_indices(split)[1]+1]\n return walk_forward_df\n\n def walk_forward_dates(self, split):\n indices = self.walk_forward_indices(split)\n start_date = self.data.iloc[indices[0], ].name\n end_date = self.data.iloc[indices[1], ].name\n return start_date, end_date\n\n def optimization_window_dates(self, split, optimization_type='Rolling'):\n\n if optimization_type == 'Rolling':\n indices = self.rolling_optimization_indices(split)\n start_date = self.data.iloc[indices[0], ].name\n end_date = self.data.iloc[indices[1], ].name\n elif optimization_type == 'Standard':\n indices = self.optimization_indices(split)\n start_date = self.data.iloc[indices[0], ].name\n end_date = self.data.iloc[indices[1], ].name\n\n\n return start_date, end_date\n\n def split_selector(self, split, split_type):\n if split_type == 'DateRange':\n return self.date_range_split(split)\n elif split_type == 'FixedOptimizationWindow':\n return self.optimization_window(split)\n elif split_type == 'RollingOptimizationWindow':\n return self.rolling_optimization_window(split)\n elif split_type == 'WalkForwardWindow':\n return self.walk_forward_window(split)\n elif split_type == 'ExcludeDateRange':\n return self.exclude_date_range(split)\n elif split is None:\n return self.data.copy()","repo_name":"waleed-aly1/backtest_platform","sub_path":"backtest/time_series_split.py","file_name":"time_series_split.py","file_ext":"py","file_size_in_byte":9133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22755383822","text":"import torch\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nclass Trainer:\n\n def __init__(self, optimizer, criterion, model, epoch, loader, use_cuda):\n self.optimizer = optimizer\n self.criterion = criterion\n self.model = model\n self.epoch = epoch\n self.loader = loader\n self.use_cuda = use_cuda\n\n def run(self):\n self.average_losses = []\n losses = torch.Tensor([0])\n for e in range(1, self.epoch + 1):\n for i, (inputs, labels) in enumerate(self.loader):\n if self.use_cuda:\n inputs = inputs.cuda()\n labels = labels.cuda()\n inputs = Variable(inputs)\n labels = Variable(labels)\n self.optimizer.zero_grad()\n outputs = self.model(inputs)\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n print('Epoch: {0}/{1} [{2}/{3}] Loss: {4}'.format(e, self.epoch, i, len(self.loader), loss.data[0]))\n losses.add(loss)\n average_loss = losses.div(len(self.loader))\n 
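            # NOTE: torch.Tensor.add() is out-of-place, so `losses` above keeps its initial
            # value of 0 and `average_loss` does not reflect the accumulated epoch loss.
            # An in-place form such as `losses.add_(loss.data)` (or `losses += loss.item()`
            # on newer PyTorch) would likely be needed for the running average to accumulate.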
self.average_losses.append(average_loss.data[0])\n self.save_graph()\n return self.model\n\n def save_graph(self):\n print(self.losses)\n losses = pd.DataFrame(self.losses)\n plt.plot(losses)\n plt.xlabel(u\"Epoch\")\n plt.ylabel(u\"Loss\")\n plt.savefig('reslut.jpg')\n\n","repo_name":"yamad07/VGG","sub_path":"modules/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23084446203","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\nclass DnCNN(nn.Module):\n def __init__(self, depth=17, n_channels=64, image_channels=3, use_bnorm=True, kernel_size=3):\n super(DnCNN, self).__init__()\n kernel_size = 3\n padding = 1\n layers = []\n\n layers.append(nn.Conv2d(in_channels=image_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding, bias=True))\n layers.append(nn.ReLU(inplace=True))\n for _ in range(depth-2):\n layers.append(nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding, bias=False))\n layers.append(nn.BatchNorm2d(n_channels, eps=0.0001, momentum = 0.95))\n layers.append(nn.ReLU(inplace=True))\n layers.append(nn.Conv2d(in_channels=n_channels, out_channels=image_channels, kernel_size=kernel_size, padding=padding, bias=False))\n self.dncnn = nn.Sequential(*layers)\n self._initialize_weights()\n\n def forward(self, x):\n y = x\n out = self.dncnn(x)\n return y-out\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.orthogonal_(m.weight)\n print('init weight')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n \n \nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass MemNet(nn.Module):\n def __init__(self, in_channels=3, channels=64, num_memblock=6, num_resblock=6):\n super(MemNet, self).__init__()\n self.feature_extractor = BNReLUConv(in_channels, channels)\n self.reconstructor = BNReLUConv(channels, in_channels)\n self.dense_memory = nn.ModuleList(\n [MemoryBlock(channels, num_resblock, i+1) for i in range(num_memblock)]\n )\n\n def forward(self, x):\n # x = x.contiguous()\n residual = x\n out = self.feature_extractor(x)\n ys = [out]\n for memory_block in self.dense_memory:\n out = memory_block(out, ys)\n out = self.reconstructor(out)\n out = out + residual\n \n return out\n\n\nclass MemoryBlock(nn.Module):\n \"\"\"Note: num_memblock denotes the number of MemoryBlock currently\"\"\"\n def __init__(self, channels, num_resblock, num_memblock):\n super(MemoryBlock, self).__init__()\n self.recursive_unit = nn.ModuleList(\n [ResidualBlock(channels) for i in range(num_resblock)]\n )\n self.gate_unit = BNReLUConv((num_resblock+num_memblock) * channels, channels, 1, 1, 0)\n\n def forward(self, x, ys):\n \"\"\"ys is a list which contains long-term memory coming from previous memory block\n xs denotes the short-term memory coming from recursive unit\n \"\"\"\n xs = []\n residual = x\n for layer in self.recursive_unit:\n x = layer(x)\n xs.append(x)\n \n gate_out = self.gate_unit(torch.cat(xs+ys, 1))\n ys.append(gate_out)\n return gate_out\n\n\nclass ResidualBlock(torch.nn.Module):\n \"\"\"ResidualBlock\n introduced in: https://arxiv.org/abs/1512.03385\n x - Relu - Conv - Relu - Conv - x\n \"\"\"\n\n def __init__(self, channels, k=3, s=1, p=1):\n super(ResidualBlock, self).__init__()\n self.relu_conv1 = 
BNReLUConv(channels, channels, k, s, p)\n self.relu_conv2 = BNReLUConv(channels, channels, k, s, p)\n \n def forward(self, x):\n residual = x\n out = self.relu_conv1(x)\n out = self.relu_conv2(out)\n out = out + residual\n return out\n\n\nclass BNReLUConv(nn.Sequential):\n def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=True):\n super(BNReLUConv, self).__init__()\n self.add_module('bn', nn.BatchNorm2d(in_channels))\n self.add_module('relu', nn.ReLU(inplace=inplace))\n self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=False))\n \n \n \n \n \n \n\nclass _DCR_block(nn.Module):\n def __init__(self, channel_in):\n super(_DCR_block, self).__init__()\n\n self.conv_1 = nn.Conv2d(in_channels=channel_in, out_channels=int(channel_in/2.), kernel_size=3, stride=1, padding=1)\n self.relu1 = nn.PReLU()\n self.conv_2 = nn.Conv2d(in_channels=int(channel_in*3/2.), out_channels=int(channel_in/2.), kernel_size=3, stride=1, padding=1)\n self.relu2 = nn.PReLU()\n self.conv_3 = nn.Conv2d(in_channels=channel_in*2, out_channels=channel_in, kernel_size=3, stride=1, padding=1)\n self.relu3 = nn.PReLU()\n\n def forward(self, x):\n residual = x\n\n out = self.relu1(self.conv_1(x))\n\n conc = torch.cat([x, out], 1)\n\n out = self.relu2(self.conv_2(conc))\n\n conc = torch.cat([conc, out], 1)\n\n out = self.relu3(self.conv_3(conc))\n\n out = torch.add(out, residual)\n\n return out\n\n\nclass _down(nn.Module):\n def __init__(self, channel_in):\n super(_down, self).__init__()\n\n self.relu = nn.PReLU()\n self.maxpool = nn.MaxPool2d(2)\n self.conv = nn.Conv2d(in_channels=channel_in, out_channels=2*channel_in, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n out = self.maxpool(x)\n\n out = self.relu(self.conv(out))\n\n return out\n\n\nclass _up(nn.Module):\n def __init__(self, channel_in):\n super(_up, self).__init__()\n\n self.relu = nn.PReLU()\n self.subpixel = nn.PixelShuffle(2)\n self.conv = nn.Conv2d(in_channels=channel_in, out_channels=channel_in, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n out = self.relu(self.conv(x))\n\n out = self.subpixel(out)\n\n return out\n\nclass DHDN(nn.Module):\n def __init__(self):\n super(DHDN, self).__init__()\n\n self.conv_i = nn.Conv2d(in_channels=3, out_channels=128, kernel_size=1, stride=1, padding=0)\n self.relu1 = nn.PReLU()\n self.DCR_block11 = self.make_layer(_DCR_block, 128)\n self.DCR_block12 = self.make_layer(_DCR_block, 128)\n self.down1 = self.make_layer(_down, 128)\n self.DCR_block21 = self.make_layer(_DCR_block, 256)\n self.DCR_block22 = self.make_layer(_DCR_block, 256)\n self.down2 = self.make_layer(_down, 256)\n self.DCR_block31 = self.make_layer(_DCR_block, 512)\n self.DCR_block32 = self.make_layer(_DCR_block, 512)\n self.down3 = self.make_layer(_down, 512)\n self.DCR_block41 = self.make_layer(_DCR_block, 1024)\n self.DCR_block42 = self.make_layer(_DCR_block, 1024)\n self.up3 = self.make_layer(_up, 2048)\n self.DCR_block33 = self.make_layer(_DCR_block, 1024)\n self.DCR_block34 = self.make_layer(_DCR_block, 1024)\n self.up2 = self.make_layer(_up, 1024)\n self.DCR_block23 = self.make_layer(_DCR_block, 512)\n self.DCR_block24 = self.make_layer(_DCR_block, 512)\n self.up1 = self.make_layer(_up, 512)\n self.DCR_block13 = self.make_layer(_DCR_block, 256)\n self.DCR_block14 = self.make_layer(_DCR_block, 256)\n self.conv_f = nn.Conv2d(in_channels=256, out_channels=3, kernel_size=1, stride=1, padding=0)\n self.relu2 = nn.PReLU()\n\n def make_layer(self, block, channel_in):\n layers = []\n 
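        # make_layer wraps exactly one `block(channel_in)` instance in nn.Sequential;
        # the intermediate list is kept even though only a single module is appended.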
layers.append(block(channel_in))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n residual = x\n\n out = self.relu1(self.conv_i(x))\n\n out = self.DCR_block11(out)\n\n conc1 = self.DCR_block12(out)\n\n out = self.down1(conc1)\n\n out = self.DCR_block21(out)\n\n conc2 = self.DCR_block22(out)\n\n out = self.down2(conc2)\n\n out = self.DCR_block31(out)\n\n conc3 = self.DCR_block32(out)\n\n conc4 = self.down3(conc3)\n\n out = self.DCR_block41(conc4)\n\n out = self.DCR_block42(out)\n\n out = torch.cat([conc4, out], 1)\n\n out = self.up3(out)\n\n out = torch.cat([conc3, out], 1)\n\n out = self.DCR_block33(out)\n\n out = self.DCR_block34(out)\n\n out = self.up2(out)\n\n out = torch.cat([conc2, out], 1)\n\n out = self.DCR_block23(out)\n\n out = self.DCR_block24(out)\n\n out = self.up1(out)\n\n out = torch.cat([conc1, out], 1)\n\n out = self.DCR_block13(out)\n\n out = self.DCR_block14(out)\n\n out = self.relu2(self.conv_f(out))\n\n out = torch.add(residual, out)\n\n return out\n \n \ndef FFDNet_downsample(x):\n \"\"\"\n :param x: (C, H, W)\n :param noise_sigma: (C, H/2, W/2)\n :return: (4, C, H/2, W/2)\n \"\"\"\n # x = x[:, :, :x.shape[2] // 2 * 2, :x.shape[3] // 2 * 2]\n N, C, W, H = x.size()\n idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]\n\n Cout = 4 * C\n Wout = W // 2\n Hout = H // 2\n\n if 'cuda' in x.type():\n down_features = torch.cuda.FloatTensor(N, Cout, Wout, Hout).fill_(0)\n else:\n down_features = torch.FloatTensor(N, Cout, Wout, Hout).fill_(0)\n \n for idx in range(4):\n down_features[:, idx:Cout:4, :, :] = x[:, :, idxL[idx][0]::2, idxL[idx][1]::2]\n\n return down_features\n\ndef FFDNet_upsample(x):\n \"\"\"\n :param x: (n, C, W, H)\n :return: (n, C/4, W*2, H*2)\n \"\"\"\n N, Cin, Win, Hin = x.size()\n idxL = [[0, 0], [0, 1], [1, 0], [1, 1]]\n \n Cout = Cin // 4\n Wout = Win * 2\n Hout = Hin * 2\n\n up_feature = torch.zeros((N, Cout, Wout, Hout)).type(x.type())\n for idx in range(4):\n up_feature[:, :, idxL[idx][0]::2, idxL[idx][1]::2] = x[:, idx:Cin:4, :, :]\n\n return up_feature\n\nimport numpy as np\nfrom torch.autograd import Variable\nclass FFDNet(nn.Module):\n\n def __init__(self, is_gray=False):\n super(FFDNet, self).__init__()\n\n if is_gray:\n self.num_conv_layers = 15 # all layers number\n self.downsampled_channels = 5 # Conv_Relu in\n self.num_feature_maps = 64 # Conv_Bn_Relu in\n self.output_features = 4 # Conv out\n else:\n self.num_conv_layers = 12\n self.downsampled_channels = 15\n self.num_feature_maps = 96\n self.output_features = 12\n \n self.kernel_size = 3\n self.padding = 1\n \n layers = []\n # Conv + Relu\n layers.append(nn.Conv2d(in_channels=self.downsampled_channels, out_channels=self.num_feature_maps, \\\n kernel_size=self.kernel_size, padding=self.padding, bias=False))\n layers.append(nn.ReLU(inplace=True))\n\n # Conv + BN + Relu\n for _ in range(self.num_conv_layers - 2):\n layers.append(nn.Conv2d(in_channels=self.num_feature_maps, out_channels=self.num_feature_maps, \\\n kernel_size=self.kernel_size, padding=self.padding, bias=False))\n layers.append(nn.BatchNorm2d(self.num_feature_maps))\n layers.append(nn.ReLU(inplace=True))\n \n # Conv\n layers.append(nn.Conv2d(in_channels=self.num_feature_maps, out_channels=self.output_features, \\\n kernel_size=self.kernel_size, padding=self.padding, bias=False))\n\n self.intermediate_dncnn = nn.Sequential(*layers)\n\n def forward(self, x):\n noise_sigma = torch.FloatTensor(np.array([30/255 for idx in range(x.shape[0])]))\n noise_map = noise_sigma.view(x.shape[0], 1, 1, 1).repeat(1, x.shape[1], x.shape[2] // 2, 
x.shape[3] // 2).to(x.device)\n\n x_up = FFDNet_downsample(x.data) # 4 * C * H/2 * W/2\n x_cat = torch.cat((noise_map.data, x_up), 1) # 4 * (C + 1) * H/2 * W/2\n x_cat = Variable(x_cat)\n\n h_dncnn = self.intermediate_dncnn(x_cat)\n y_pred = FFDNet_upsample(h_dncnn)\n return y_pred\n","repo_name":"hyungminr/PyTorch_SISR","sub_path":"models/ref_denoise.py","file_name":"ref_denoise.py","file_ext":"py","file_size_in_byte":11859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14780927244","text":"### implement fifo-random amount - demeaned by fed rates\n\nimport pandas as pd\nimport random\nimport numpy as np\nfrom dotenv import load_dotenv\nfrom datetime import datetime\n\n\nresult = pd.DataFrame({\n 'bioguide_id': [],\n 'first_name': [],\n 'last_name': [],\n 'idx': [],\n 'start_date': [],\n 'end_date': [],\n 'ticker': [],\n 'return(mean)': [],\n 'return(std)': []\n })\n\nfrom octopus.db import PostgresqlManager\nload_dotenv(\"/Users/syyun/Dropbox (MIT)/efd/.envlv\", override=True)\npm = PostgresqlManager(dotenv_path=\"/Users/syyun/Dropbox (MIT)/efd/.envlv\")\ndf = pm.execute_sql(fetchall=True, sql=\n f\"\"\"\n with union4ab as (\n select * from _sandbox_suyeol.senate_annual_4a saa \n union\n select * from _sandbox_suyeol.senate_annual_4b sab \n )\n select distinct bioguide_id, sa.first_name, sa.last_name, u.ticker from union4ab u\n inner join _sandbox_suyeol.senate_annual sa on sa.report_type_url = u.report_url\n inner join _sandbox_suyeol.senator_bioguide sb on sb.first_name = sa.first_name and sb.last_name = sa.last_name\n -- inner join _sandbox_suyeol.senator s on s.url = sa.url\n order by first_name, last_name\n \"\"\"\n )\n\n# row = ('Angus S', 'King, Jr.', 'ARKK')\n# row = ('Angus S', 'King, Jr.', 'QQQ')\n\n# read fed rates\nfed_rates = pd.read_csv(\"external-data/FEDFUNDS.csv\")\n\nfrom tqdm import tqdm\n\nfor row in tqdm(df): # name and ticker pairs\n print(row)\n senator = row[0] + row[1] + ' ' + row[2] \n ticker = row[3]\n backslash_char = '\\''\n trnscs = pm.execute_sql(fetchall=True, sql=\n f\"\"\"\n with union4ab as (\n select * from _sandbox_suyeol.senate_annual_4a saa \n union\n select * from _sandbox_suyeol.senate_annual_4b sab \n )\n select distinct bioguide_id, p.ticker, trans_type, trans_date, amount_min, vwap, amount_max from union4ab u\n inner join _sandbox_suyeol.senate_annual sa on sa.report_type_url = u.report_url\n inner join _sandbox_suyeol.senator_bioguide sb on sb.first_name = sa.first_name and sb.last_name = sa.last_name\n inner join _sandbox_suyeol.price p on (p.ticker=u.ticker and p.date=u.trans_date)\n where bioguide_id = {backslash_char}{row[0]}{backslash_char} and u.ticker = {backslash_char}{row[3]}{backslash_char} and vwap is not null\n order by trans_date asc\n \"\"\"\n )\n \n def find_subchains(transaction_chain):\n import re\n pattern = r'p+[^ps]*(?:s(?!p)[^ps]*)*s+'\n matches = re.finditer(pattern, transaction_chain)\n subchains = []\n for match in matches:\n start, end = match.start(), match.end()\n subchain = transaction_chain[start:end]\n longest_match = max(re.findall(r'p+[^ps]*s+', subchain), key=len)\n subchains.append((start, end, longest_match))\n return subchains\n \n transaction_chain = ''.join([t[2][0] for t in trnscs]).lower()\n subchains = find_subchains(transaction_chain)\n \n if len(subchains) == 0:\n print(\"No subchains found for\", senator, ticker)\n continue\n else:\n print(\"Subchains found for\", senator, ticker, len(subchains))\n \n for idx, subchain in 
enumerate(subchains):\n start, end, pattern = subchain\n trnscs_subchain = trnscs[start:end]\n\n start_date = trnscs[start][3]\n end_date = trnscs[end-1][3]\n\n return_rds = []\n\n def compute_return_rd(trnscs_subchain):\n\n cash_random = 0\n purchase_units_random = []\n cashout_random = 0\n buy_random = 0 # this is the sum of the money spent on buying stocks and securities\n matched_buy_random = 0\n\n\n trnsc_types = [trnsc[2] for trnsc in trnscs_subchain]\n for idx, trnsc in enumerate(trnscs): # all transactions of a name and ticker pair\n ps = trnsc[2] # purchase or sale\n date = datetime.strptime(trnsc[3], \"%Y-%m-%d\")\n amount_min = trnsc[4]\n vwap = trnsc[5]\n amount_max = trnsc[6]\n # random select one value between amount_min and amount_max\n amount_random = random.uniform(amount_min, amount_max)\n # print(amount_min, amount_max, amount_random)\n \n future_transactions = trnsc_types[idx+1:]\n\n if \"Purchase\" in ps and set([\"Purchase\"]) == set(future_transactions): # if there's no more sales in the future, just end the loop\n break\n\n def _cash_out(purchase_units, cash, ps, amount, vwap, buy, cash_out, matched_buy, date):\n if \"Purchase\" in ps:\n units = amount / vwap # this is the minimum amount of units purchased\n date_purchase = date.strftime('%Y-%m-01') # get the monthly rates\n purchase_units.append((units, vwap, date_purchase)) # fed_rate is kind of opportunity cost\n cash -= amount\n buy += amount\n elif \"Sale\" in ps:\n date_sale = date.strftime('%Y-%m-01')\n units = amount / vwap # minimum units estimated as being sold.\n for idx, purchase in enumerate(purchase_units):\n stock, purchase_price, date_purchase = purchase\n if units == 0:\n break \n if units >= stock:\n units -= stock\n purchase_units[idx] = (0, purchase_price, date_purchase)\n matched_buy += stock * purchase_price\n\n purchase_date_index = fed_rates.index[fed_rates['date'] == date_purchase]\n sale_date_index = fed_rates.index[fed_rates['date'] == date_sale]\n fed_rates_avg = np.mean(fed_rates.iloc[purchase_date_index[0]:sale_date_index[0]+1]['rate'])\n opp_cost = fed_rates_avg * stock * purchase_price / 100\n cash_out += stock * vwap - opp_cost\n cash += stock * vwap\n elif units < stock:\n units -= units\n purchase_units[idx] = (stock - units, purchase_price, date_purchase)\n matched_buy += units * purchase_price\n\n purchase_date_index = fed_rates.index[fed_rates['date'] == date_purchase]\n sale_date_index = fed_rates.index[fed_rates['date'] == date_sale]\n fed_rates_avg = np.mean(fed_rates.iloc[purchase_date_index[0]:sale_date_index[0]+1]['rate'])\n opp_cost = fed_rates_avg * units * purchase_price / 100\n\n cash_out += units * vwap - opp_cost\n cash += units * vwap \n return purchase_units, cash, buy, cash_out, matched_buy\n \n purchase_units_random, cash_random, buy_random, cashout_random, matched_buy_random = _cash_out(purchase_units_random, cash_random, ps, amount_random, vwap, buy_random, cashout_random, matched_buy_random, date)\n\n return_rd = (cashout_random - abs(matched_buy_random)) * 100 / abs(matched_buy_random) if matched_buy_random != 0 else 0\n return return_rd\n\n warm_up = 1000\n for i in range(warm_up):\n return_rd = compute_return_rd(trnscs_subchain)\n return_rds.append(return_rd)\n\n return_rd_mean_prev = np.mean(return_rds)\n return_rd_std_prev = np.std(return_rds)\n\n # thres = 1e-3\n # i = 0\n # while abs(return_rd_mean_prev-np.mean(return_rds)) >= thres and abs(return_rd_std_prev-np.std(return_rds)) >= thres and i < 10000:\n # return_rd = compute_return_rd()\n # 
print(return_rd)\n # return_rds.append(return_rd)\n # return_rd_mean_prev = np.mean(return_rds)\n # return_rd_std_prev = np.std(return_rds)\n # i += 1\n\n print(row, np.mean(return_rds), np.std(return_rds))\n\n result.loc[len(result.index)] = [\n row[0],\n row[1],\n row[2],\n idx,\n start_date,\n end_date,\n row[3],\n np.mean(return_rds),\n np.std(return_rds)\n ]\n pass\n\n # Plot density using a kernel density estimation (KDE)\n import numpy as np \n import matplotlib.pyplot as plt\n\n plt.hist(return_rds, bins=100, density=True, alpha=0.7, color='b')\n plt.title('Density Plot of Excess Return')\n plt.xlabel('Excess Return (%)')\n plt.ylabel('Density')\n # plt.show()\n plt.savefig(f'./anlys/cashout/rd-results-fed-pppsss-include-etf/{senator}-{ticker}-{idx}-{start_date}-{end_date}.png')\n plt.close()\n\nimport pickle\nwith open(f'./anlys/cashout/rd-results-fed-pppsss-include-etf.pkl', 'wb') as f:\n pickle.dump(result, f)\n","repo_name":"syyunn/efd","sub_path":"anlys/cashout/fifo-rd-fed-pppsss-include-etf.py","file_name":"fifo-rd-fed-pppsss-include-etf.py","file_ext":"py","file_size_in_byte":9781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14219674842","text":"import pytest\nfrom core.tag import Tag\n\n\nclass TestTag:\n def test_failed_creation():\n with pytest.raises(TypeError) as e:\n Tag()\n\n def test_creation():\n tag = Tag(\"payment_method\", [\"cash\", \"card\"])\n assert tag.name == \"payment_method\"\n assert tag.values == [\"cash\", \"card\"]\n\n def test_shortform():\n tag = Tag(\n \"payment_method\",\n [\"cash\", \"card\"],\n shortform=\"pm\",\n shortform_values={\"cash\": \"b\", \"card\": \"k\"},\n )\n assert tag.shortform == \"pm\"\n assert tag.shortform_values == {\"cash\": \"b\", \"card\": \"k\"}","repo_name":"ThePhisch/bills","sub_path":"test/core/test_tag.py","file_name":"test_tag.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3300927288","text":"# %% [markdown]\n# ##\nimport os\nimport time\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib.patches import Circle\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom scipy.integrate import tplquad\nfrom scipy.stats import gaussian_kde\n\nimport pymaid\nfrom graspy.utils import pass_to_ranks\nfrom hyppo.ksample import KSample\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import readcsv, savecsv, savefig\nfrom src.pymaid import start_instance\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n adjplot,\n barplot_text,\n get_mid_map,\n gridmap,\n matrixplot,\n remove_axis,\n remove_spines,\n set_axes_equal,\n stacked_barplot,\n)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, fmt=\"pdf\", **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name, foldername=FNAME, **kws)\n\n\n# params\n\nlevel = 7\nclass_key = f\"lvl{level}_labels\"\n\nmetric = \"bic\"\nbic_ratio = 1\nd = 8 # embedding dimension\nmethod = \"color_iso\"\n\nbasename = f\"-method={method}-d={d}-bic_ratio={bic_ratio}\"\ntitle = f\"Method={method}, d={d}, BIC ratio={bic_ratio}\"\n\nexp = \"137.2-BDP-omni-clust\"\n\n\n# load data\npair_meta = readcsv(\"meta\" 
+ basename, foldername=exp, index_col=0)\npair_meta[\"lvl0_labels\"] = pair_meta[\"lvl0_labels\"].astype(str)\npair_adj = readcsv(\"adj\" + basename, foldername=exp, index_col=0)\npair_adj = pair_adj.values\nmg = MetaGraph(pair_adj, pair_meta)\nmeta = mg.meta\n\nlevel_names = [f\"lvl{i}_labels\" for i in range(level + 1)]\n\n\ndef sort_mg(mg, level_names):\n meta = mg.meta\n sort_class = level_names + [\"merge_class\"]\n class_order = [\"sf\"]\n total_sort_by = []\n for sc in sort_class:\n for co in class_order:\n class_value = meta.groupby(sc)[co].mean()\n meta[f\"{sc}_{co}_order\"] = meta[sc].map(class_value)\n total_sort_by.append(f\"{sc}_{co}_order\")\n total_sort_by.append(sc)\n mg = mg.sort_values(total_sort_by, ascending=False)\n return mg\n\n\n# def calc_ego_connectivity(adj, meta, label, axis=0):\n# this_inds = meta[meta[class_key] == label][\"inds\"].values\n# uni_cat = meta[key].unique()\n# connect_mat = []\n# for other_label in uni_cat:\n# other_inds = meta[meta[key] == other_label][\"inds\"].values\n# if axis == 0:\n# sum_vec = adj[np.ix_(other_inds, this_inds)].sum(axis=axis)\n# elif axis == 1:\n# sum_vec = adj[np.ix_(this_inds, other_inds)].sum(axis=axis)\n# connect_mat.append(sum_vec)\n# return np.array(connect_mat)\n\n\nmg = sort_mg(mg, level_names)\nmeta = mg.meta\nmeta[\"inds\"] = range(len(meta))\nadj = mg.adj\n\n\nskeleton_color_dict = dict(\n zip(meta.index, np.vectorize(CLASS_COLOR_DICT.get)(meta[\"merge_class\"]))\n)\n\n\n# load connectors\nconnector_path = \"maggot_models/data/processed/2020-05-08/connectors.csv\"\nconnectors = pd.read_csv(connector_path)\n\n\n# %% [markdown]\n# ##\n\n# plot params\nscale = 5\nn_col = 10\nn_row = 3\nmargin = 0.01\ngap = 0.02\n\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n \"axes.edgecolor\": \"grey\",\n \"ytick.color\": \"dimgrey\",\n \"xtick.color\": \"dimgrey\",\n \"axes.labelcolor\": \"dimgrey\",\n \"text.color\": \"dimgrey\",\n}\nfor k, val in rc_dict.items():\n mpl.rcParams[k] = val\ncontext = sns.plotting_context(context=\"talk\", font_scale=1, rc=rc_dict)\nsns.set_context(context)\n\n# compare dendrite inputs\n\ncompartment = \"dendrite\"\ndirection = \"postsynaptic\"\n\n\ndef filter_connectors(connectors, ids, direction, compartment):\n label_connectors = connectors[connectors[f\"{direction}_to\"].isin(ids)]\n label_connectors = label_connectors[\n label_connectors[f\"{direction}_type\"] == compartment\n ]\n label_connectors = label_connectors[\n ~label_connectors[\"connector_id\"].duplicated(keep=\"first\")\n ]\n return label_connectors\n\n\ndef run_dcorr(data1, data2):\n ksamp = KSample(\"Dcorr\")\n stat, pval = ksamp.test(data1, data2, auto=True, workers=-1)\n return stat, pval\n\n\ndef spatial_dcorr(data1, data2, method=\"full\", max_samples=1000, n_subsamples=10):\n if (len(data1) == 0) or (len(data2) == 0):\n return np.nan, np.nan\n\n if method == \"full\":\n stat, p_val = run_dcorr(data1, data2)\n elif method == \"subsample\":\n stats = np.empty(n_subsamples)\n p_vals = np.empty(n_subsamples)\n for i in range(n_subsamples):\n subsampled_data = []\n for data in [data1, data2]:\n n_subsamples = min(len(data), max_samples)\n inds = np.random.choice(n_subsamples, size=n_subsamples, replace=False)\n subsampled_data.append(data[inds])\n stat, p_val = run_dcorr(*subsampled_data)\n stats[i] = stat\n p_vals[i] = p_val\n stat = np.median(stats)\n p_val = np.median(p_vals)\n elif method == \"max-d\":\n 
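        # "max-d" mode: run a separate univariate Dcorr test on each coordinate column
        # (here x, y, z) and report the statistic and p-value of the dimension whose
        # test statistic is largest, instead of one multivariate test on all coordinates.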
max_dim_stat = -np.inf\n best_p_val = np.nan\n for dim in range(data1.shape[1]):\n dim_stat, dim_p_val = run_dcorr(data1[:, dim], data2[:, dim])\n if dim_stat > max_dim_stat:\n max_dim_stat = dim_stat\n best_p_val = dim_p_val\n stat = max_dim_stat\n p_val = best_p_val\n else:\n raise ValueError()\n\n return stat, p_val\n\n\n# %% [markdown]\n# ##\n\n\ncurrtime = time.time()\n\nn_reps = 5\nlabels = [\"uPN\", \"mPN\", \"KC\"]\nclass_keys = 3 * [\"class1\"]\n\nrows = []\n\nfor _ in range(n_reps):\n class_ids = []\n class_names = []\n for label, class_key in zip(labels, class_keys):\n # split the class in half\n all_ids = meta[meta[class_key] == label].index.values\n label1_ids = np.random.choice(all_ids, size=len(all_ids) // 2, replace=False)\n label2_ids = np.setdiff1d(all_ids, label1_ids)\n class_ids.append(label1_ids)\n class_ids.append(label2_ids)\n class_names.append(label + \"_1\")\n class_names.append(label + \"_2\")\n\n for i, (label1_ids, label1) in enumerate(zip(class_ids, class_names)):\n for j, (label2_ids, label2) in enumerate(zip(class_ids, class_names)):\n if i < j:\n label1_connectors = filter_connectors(\n connectors, label1_ids, direction, compartment\n )\n label2_connectors = filter_connectors(\n connectors, label2_ids, direction, compartment\n )\n data1 = label1_connectors[[\"x\", \"y\", \"z\"]].values\n data2 = label2_connectors[[\"x\", \"y\", \"z\"]].values\n print(len(data1))\n print(len(data2))\n stat, p_val = spatial_dcorr(data1, data2, method=\"full\")\n same = label1[:-2] == label2[:-2]\n row = {\n \"stat\": stat,\n \"p_val\": p_val,\n \"label\": f\"{label1} vs {label2}\",\n \"same\": same,\n }\n rows.append(row)\n\nprint(f\"{time.time() - currtime} elapsed\")\n\n\nres_df = pd.DataFrame(rows)\nres_df[\"-log10_p_val\"] = -np.log10(res_df[\"p_val\"])\nfig, ax = plt.subplots(1, 1, figsize=(10, 5))\nsns.stripplot(data=res_df, x=\"label\", y=\"stat\", hue=\"same\")\nplt.xticks(rotation=90)\nax.get_legend().remove()\nax.legend(bbox_to_anchor=(1, 1), loc=\"upper left\", title=\"Same class\")\nax.set_title(\"Axon outputs\")\nstashfig(\"spatial-dcorr-dendrite-inputs-max-d\")\n\n# print(stats)\n# print(p_vals)\n\n# plot_p_vals = -np.log10(p_vals)\n# adjplot(\n# plot_p_vals,\n# meta=cluster_meta,\n# center=0,\n# vmax=np.nanmax(plot_p_vals[~np.isinf(plot_p_vals)]),\n# cbar_kws=dict(shrink=0.7),\n# )\n# %% [markdown]\n# ##\nfirst = 3\nclass_labels = meta[class_key].unique()[::-1][10:15]\np_vals = np.zeros((len(class_labels), len(class_labels)))\nstats = np.zeros_like(p_vals)\ncluster_meta = pd.DataFrame(index=class_labels)\n\nfor i, label1 in enumerate(class_labels):\n label1_meta = meta[meta[class_key] == label1]\n label1_ids = label1_meta.index.values\n label1_connectors = filter_connectors(\n connectors, label1_ids, direction, compartment\n )\n cluster_meta.loc[label1, \"n_samples\"] = len(label1_connectors)\n for j, label2 in enumerate(class_labels):\n if i < j:\n label2_meta = meta[meta[class_key] == label2]\n label2_ids = label2_meta.index.values\n label2_connectors = filter_connectors(\n connectors, label2_ids, direction, compartment\n )\n data1 = label1_connectors[[\"x\", \"y\", \"z\"]].values\n data2 = label2_connectors[[\"x\", \"y\", \"z\"]].values\n stat, p_val = spatial_dcorr(data1, data2, method=\"full\")\n stats[i, j] = stat\n p_vals[i, j] = p_val\n\n\nprint(stats)\nprint(p_vals)\n\nplot_p_vals = -np.log10(p_vals)\nadjplot(\n plot_p_vals,\n meta=cluster_meta,\n center=0,\n vmax=np.nanmax(plot_p_vals[~np.isinf(plot_p_vals)]),\n 
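    # vmax above caps the color scale at the largest finite -log10(p); entries with
    # p == 0 map to +inf and would otherwise saturate the colormap scaling.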
cbar_kws=dict(shrink=0.7),\n)\n","repo_name":"neurodata/maggot_models","sub_path":"notebooks/160.0-BDP-morpho-dcorr.py","file_name":"160.0-BDP-morpho-dcorr.py","file_ext":"py","file_size_in_byte":9186,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"70203386089","text":"import turtle\n\ndef drawFace():\n '''\n Draws a silly face.\n pre: origin, facing right, pendown\n post: origin, facing right, pendown\n :return:\n '''\n drawHead()\n# drawEyes()\n# drawNose()\n drawHair()\n\n\n\ndef drawHead():\n '''\n Draws a circle of fixed radius.\n\n pre: origin, right, down\n post: origin, right, down\n\n :return:\n '''\n\n radius = 140\n\n turtle.right(90)\n turtle.penup()\n turtle.forward(radius)\n turtle.pendown()\n turtle.left(90)\n turtle.circle(radius)\n turtle.penup()\n turtle.left(90)\n turtle.forward(radius)\n turtle.right(90)\n\n\ndef drawTriangle(length):\n for _ in range(3):\n turtle.forward(length)\n turtle.right(360/3)\n\ndef drawPolygon(length, sides):\n # the usual pre/post conditions\n for _ in range(sides):\n turtle.forward(length)\n turtle.right(360/sides)\n\n\ndef drawShape(name, size):\n if name == \"Triangle\":\n drawPolygon(size, 3)\n elif name == \"Square\":\n drawPolygon(size, 4)\n else:\n print(\"I don't know how to draw a \" + name)\n\ndef drawSingleHair(hairlength):\n '''\n Draw a single hair.\n pre: origin, pendown, relative zero heading\n post: origin, pendown, relative zero heading\n param: hairlength -- length of hair to draw\n :return:\n '''\n radius = 140\n turtle.penup()\n turtle.forward(radius)\n turtle.pendown()\n turtle.forward(hairlength)\n turtle.penup()\n turtle.backward(hairlength + radius)\n\ndef hairLengthForAngle(angle):\n radius = 140\n# if (angle )\n\ndef drawHair():\n start_angle = 30\n stop_angle = 150\n short_start_angle = 70\n short_stop_angle = 110\n step = 5\n turtle.setheading(start_angle)\n while turtle.heading() < stop_angle:\n if (turtle.heading() > short_start_angle) and (turtle.heading() < short_stop_angle):\n drawSingleHair(10)\n else:\n drawSingleHair(30)\n turtle.left(step)\n\n\n\ndef main():\n '''\n Draws a silly face.\n pre: none\n post: origin, right, down.\n :return:\n '''\n drawFace()\n turtle.mainloop()\n\nif __name__ == '__main__':\n main()","repo_name":"joetomjob/PythonProjects","sub_path":"src/Sem1/lecture3s2.py","file_name":"lecture3s2.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14334250114","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe.utils import getdate, flt, add_to_date\n\ndef execute(filters=None):\n\tvalidate_filers(filters)\n\tcolumns, data = get_columns(filters), get_data(filters)\n\treturn columns, data\n\ndef validate_filers(filters):\n\tif not filters.from_date:\n\t\tfrappe.throw(\"From Date is mandatory\")\n\t\n\tif not filters.to_date:\n\t\tfrappe.throw(\"To Date is mandatory\")\n\t\t\n\tif not filters.detailed_report and filters.item_code:\n\t\tfrappe.throw(\"Item Code filter is valid only in detailed report\")\n\ndef get_data(filters):\n\tdata = []\n\tinvoices = get_invoices(filters)\n\ttotal_row = get_total_template_row(filters)\n\tfor invoice in invoices:\n\t\tif filters.detailed_report == 1:\n\t\t\tdata, total_row = process_invoice_items(invoice, data, total_row, filters)\n\t\telse:\n\t\t\tdata, total_row = process_invoice(invoice, data, total_row)\n\t\n\tif len(data) > 
0:\n\t\tdata.append(get_total_row(filters, data, total_row));\n\treturn data\n\ndef get_total_template_row(filters):\n\treturn { 'qty': 0, 'unit_price': 0, 'total': 0, 'vat_amount': 0, 'amount': 0, 'rebate': 0, 'net_value': 0, 'valuation_rate': 0, 'total_valuation': 0, 'profit': 0 } if filters.detailed_report == 1 else \\\n\t\t\t\t\t{ 'total': 0, 'vat_amount': 0, 'amount': 0, 'rebate': 0, 'net_value': 0 }\n\t\t\t\t\t\ndef get_total_row(filters, data, total_row):\n\treturn ['', '', '', '', '', '', total_row['qty'], total_row['unit_price'], total_row['total'], total_row['vat_amount'], total_row['amount'], total_row['rebate'] / total_row['total'] * 100, total_row['rebate'], total_row['net_value'], total_row['valuation_rate'], total_row['total_valuation'], total_row['profit'], total_row['profit'] * 100 / total_row['total_valuation'] ] if filters.detailed_report == 1 else \\\n\t\t['', '', '', total_row['total'], total_row['vat_amount'], total_row['amount'], total_row['rebate'], total_row['net_value']]\n\ndef process_invoice(invoice, data, total_row):\n\tdata.append([\n\t\tinvoice.posting_date,\n\t\tinvoice.customer,\n\t\tinvoice.name,\n\t\tinvoice.consoleerp_customer_total,\n\t\tinvoice.total_taxes_and_charges,\n\t\tinvoice.consoleerp_customer_grand_total,\n\t\tinvoice.consoleerp_customer_discount_total,\n\t\tinvoice.grand_total\n\t])\n\ttotal_row['total'] += invoice.consoleerp_customer_total\n\ttotal_row['vat_amount'] += invoice.total_taxes_and_charges\n\ttotal_row['amount'] += invoice.consoleerp_customer_grand_total\n\ttotal_row['rebate'] += invoice.consoleerp_customer_discount_total\n\ttotal_row['net_value'] += invoice.grand_total\n\treturn data, total_row\n\t\ndef process_invoice_items(invoice, data, total_row, filters):\n\tmonth = invoice.posting_date.strftime(\"%b\")\n\tfor item in get_invoice_items(invoice, filters):\n\t\tvat_amount = get_item_tax(invoice, item)\n\t\trebate_value = flt(item.consoleerp_original_amt - item.amount) or 0 # rebate value\n\t\tvaluation_rate = flt(get_valuation_rate(invoice, item)) * item.conversion_factor\n\t\tprofit = item.amount - (valuation_rate * item.qty)\n\t\tdata.append([\n\t\t\tinvoice.name,\n\t\t\tmonth,\n\t\t\tinvoice.posting_date,\n\t\t\tinvoice.po_no,\n\t\t\tinvoice.customer,\n\t\t\titem.item_code + \": \" + item.item_name,\n\t\t\titem.qty, # qty, not taking stock_qty\n\t\t\titem.consoleerp_customer_rate, # unit rate\n\t\t\titem.consoleerp_original_amt, # customer total\n\t\t\tvat_amount,\n\t\t\titem.consoleerp_original_amt + vat_amount,\n\t\t\titem.consoleerp_customer_disc_percent, # rebate %\n\t\t\trebate_value,\n\t\t\titem.amount,\n\t\t\tvaluation_rate,\n\t\t\tvaluation_rate * item.qty,\n\t\t\tprofit,\n\t\t\tprofit / item.amount * 100 if item.amount > 0 else 0\n\t\t])\n\t\ttotal_row['qty'] += item.qty\n\t\ttotal_row['unit_price'] += item.consoleerp_customer_rate\n\t\ttotal_row['total'] += item.consoleerp_original_amt\n\t\ttotal_row['vat_amount'] += vat_amount\n\t\ttotal_row['amount'] += item.consoleerp_original_amt + vat_amount\n\t\ttotal_row['rebate'] += rebate_value\n\t\ttotal_row['net_value'] += item.amount\n\t\ttotal_row['valuation_rate'] += valuation_rate\n\t\ttotal_row['total_valuation'] += valuation_rate * item.qty\n\t\ttotal_row['profit'] += profit\n\t\t\n\treturn data, total_row\n\ndef get_columns(filters):\n\tif filters.detailed_report == 1:\n\t\treturn [\n\t\t\t\"Invoice No:Link/Sales Invoice:120\",\n\t\t\t\"Month::80\",\n\t\t\t\"Posting Date:Date:90\",\n\t\t\t\"PO 
No.::60\",\n\t\t\t\"Customer:Link/Customer:120\",\n\t\t\t\"Description::180\",\n\t\t\t\"Qty:Float:60\",\n\t\t\t\"U/Price:Currency/currency:80\",\n\t\t\t\"Total:Currency/currency:80\",\n\t\t\t\"Vat Amount:Currency/currency:80\",\n\t\t\t\"Amount:Currency/currency:80\",\n\t\t\t\"Rebate %:Float:80\",\n\t\t\t\"R/ Value:Currency/currency:80\",\n\t\t\t\"Net Value:Currency/currency:80\",\n\t\t\t\"P/Kg:Currency/currency:80\",\n\t\t\t\"P/Value:Currency/currency:80\",\n\t\t\t\"Profit:Currency/currency:80\",\n\t\t\t\"Profit %:Float:80\"\n\t\t]\n\telse:\n\t\treturn [\n\t\t\t\"Date:Date:90\",\n\t\t\t\"Customer:Link/Customer:120\",\n\t\t\t\"Invoice No:Link/Sales Invoice:120\",\n\t\t\t\"Total:Currency/currency:80\",\n\t\t\t\"Vat Amount:Currency/currency:80\",\n\t\t\t\"Amount:Currency/currency:80\",\n\t\t\t\"Rebate:Currency/currency:80\",\n\t\t\t\"Net Value:Currency/currency:80\"\n\t\t]\n\t\ndef get_valuation_rate(invoice, item):\n\tvaluation_rate = frappe.db.sql(\"\"\"\n\t\tSELECT valuation_rate FROM `tabStock Ledger Entry`\n\t\twhere item_code = %(item_code)s and warehouse = %(warehouse)s and valuation_rate > 0\n\t\t and timestamp(posting_date, posting_time) < timestamp(%(posting_date)s, %(posting_time)s)\n\t\torder by posting_date desc, posting_time desc, name desc limit 1\n\t\"\"\", {\"item_code\": item.item_code, \"warehouse\": item.warehouse, \"posting_date\": invoice.posting_date, \"posting_time\": invoice.posting_time}, debug=False)\n\treturn valuation_rate[0][0] if valuation_rate else 0\n\ndef get_item_tax(invoice, item):\n\t# consoleerp_customer_total - without tax\n\tif not invoice.total_taxes_and_charges:\n\t\treturn 0\n\treturn item.consoleerp_original_amt / invoice.consoleerp_customer_total * invoice.total_taxes_and_charges\n\ndef get_invoices(filters):\n\t# real weird\n\t_filters = {\"posting_date\": (\"between\", [filters.from_date, filters.to_date]), \"docstatus\": 1}\n\t# _filters = {\"posting_date\": (\"between\", [filters.from_date, filters.to_date]), \"docstatus\": 1}\n\tif filters.sales_invoice:\n\t\t_filters[\"name\"] = filters.sales_invoice\n\tif filters.customer:\n\t\t_filters[\"customer\"] = filters.customer;\n\tprint(_filters)\n\treturn frappe.get_all(\"Sales Invoice\", fields=[\"*\"], filters=_filters, order_by=\"posting_date, name\")\n\ndef get_invoice_items(invoice, filters):\n\t_filters = {\"parent\": invoice.name}\n\tif filters.item_code:\n\t\t_filters[\"item_code\"] = filters.item_code\n\treturn frappe.get_all(\"Sales Invoice Item\", fields=[\"*\"], filters=_filters)\n","repo_name":"consoleerp/siyar_erpnext","sub_path":"siyar_erpnext/accounts_siyar/report/detailed_sales_register___siyar/detailed_sales_register___siyar.py","file_name":"detailed_sales_register___siyar.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16489486080","text":"def foo():\n a, b = 0, 1\n while True:\n result = a\n a, b = b, a + b\n yield result\n\n\ngen_1 = foo()\nfor i in range(20):\n print(next(gen_1), end=\"; \")\n","repo_name":"vbelousPy/py_data_science","sub_path":"lesson_03/main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8806555559","text":"import subprocess, os, fnmatch\n\n# cubemap toktx --t2 --uastc 4 --srgb --zcmp 20 --cubemap DayInTheClouds4k.ktx2 px.jpg nx.jpg py.jpg ny.jpg pz.jpg nz.jpg\n# target_dir = \"../assets/meshes\"\ntarget_dir = 
\"F:\\\\materials\\\\metals\\\\worn-metal4-bl\"\ntarget_ext = []\nuastc_quality = 4\ncompression_level = 20\n\ndef main():\n for root, dir, files in os.walk(target_dir):\n for file in files:\n if file.endswith((\".png\", \".jpg\")):\n file_path = os.path.abspath(os.path.normpath(os.path.join(root, file)))\n outfile_path = os.path.join(root, \"basis_universal\")\n if not os.path.exists(outfile_path):\n os.makedirs(outfile_path)\n outfile_path = os.path.join(outfile_path, os.path.splitext(file)[0] + '.ktx2')\n print(\"Working on: \", os.path.basename(file_path))\n\n packer_args = [\"toktx\", \"--t2\", f\"--uastc\", f\"{uastc_quality}\"]\n additional_args = [\"--srgb\"]\n if \"normal\" in file_path:\n additional_args = [\"--linear\", \"--normal_map\"]\n \n packer_args.extend(additional_args)\n packer_args.extend([\"--genmipmap\", \"--zcmp\", f\"{compression_level}\", f\"{outfile_path}\", f\"{file_path}\"])\n\n subprocess.Popen(packer_args).wait()\n\nif __name__ == \"__main__\":\n main()","repo_name":"AdamFull/VulkanEngine","sub_path":"tools/toktx.py","file_name":"toktx.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28546068812","text":"import pymongo\nimport os\nimport sys\nimport pprint\nfrom flask import Flask, redirect, Markup, url_for, session, request, jsonify, flash\nfrom flask_oauthlib.client import OAuth\nfrom flask import render_template\nfrom bson.objectid import ObjectId\n\n\napp = Flask(__name__)\n\napp.debug = False #Change this to False for production\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' #Remove once done debugging\n\napp.secret_key = os.environ['SECRET_KEY'] #used to sign session cookies\noauth = OAuth(app)\noauth.init_app(app) #initialize the app to be able to make requests for user information\n\nconnection_string = os.environ[\"MONGO_CONNECTION_STRING\"]\ndb_name1 = os.environ[\"MONGO_DBNAME1\"]\ndb_name2 = os.environ[\"MONGO_DBNAME2\"]\nclient = pymongo.MongoClient(connection_string)\ndb1 = client[db_name1]\ndb2 = client[db_name2]\ncollection1 = db1['Items']\ncollection2 = db2['Prices']\n\n\n \n#Set up GitHub as OAuth provider\ngithub = oauth.remote_app(\n 'github',\n consumer_key=os.environ['GITHUB_CLIENT_ID'], #your web app's \"username\" for github's OAuth\n consumer_secret=os.environ['GITHUB_CLIENT_SECRET'],#your web app's \"password\" for github's OAuth\n request_token_params={'scope': 'user:email'}, #request read-only access to the user's email. 
For a list of possible scopes, see developer.github.com/apps/building-oauth-apps/scopes-for-oauth-apps\n base_url='https://api.github.com/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url='https://github.com/login/oauth/access_token', \n authorize_url='https://github.com/login/oauth/authorize' #URL for github's OAuth login\n)\n\n\n#context processors run before templates are rendered and add variable(s) to the template's context\n#context processors must return a dictionary \n#this context processor adds the variable logged_in to the conext for all templates\n@app.context_processor\ndef inject_logged_in():\n return {\"logged_in\":('github_token' in session)}\n\n\n#redirect to GitHub's OAuth page and confirm callback URL\n@app.route('/login')\ndef login(): \n return github.authorize(callback=url_for('authorized', _external=True, _scheme='http')) #callback URL must match the pre-configured callback URL\n\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return render_template('homeMessage.html', message='Logged out successfully!')\n\n@app.route('/login/authorized')\ndef authorized():\n resp = github.authorized_response()\n if resp is None:\n session.clear()\n flash('Access denied: reason=' + request.args['error'] + ' error=' + request.args['error_description'] + ' full=' + pprint.pformat(request.args)) \n else:\n try:\n session['github_token'] = (resp['access_token'], '') #save the token to prove that the user logged in\n session['user_data']=github.get('user').data\n #pprint.pprint(vars(github['/email']))\n #pprint.pprint(vars(github['api/2/accounts/profile/']))\n flash('You were successfully logged in as ' + session['user_data']['login'] + '.')\n except Exception as inst:\n session.clear()\n print(inst)\n flash('Unable to login, please try again.')\n return redirect('/')\n #Change the render\n \n@app.route('/googleb4c3aeedcc2dd103.html')\ndef render_google_verification():\n return render_template('googleb4c3aeedcc2dd103.html')\n\n#the tokengetter is automatically called to check who is logged in.\n@github.tokengetter\ndef get_github_oauth_token():\n return session['github_token']\n \n \n@app.route('/')\ndef home():\n return render_template('home.html', bckgrnd = \"hmepge\")\n \n@app.route('/order')\ndef render_order():\n if 'github_token' in session:\n menu = getMenu(\"Food\")\n menu2 = getMenu(\"Drink\")\n menu3 = getMenu(\"Dessert\")\n return render_template('order.html', menu=menu, menu2=menu2, menu3=menu3, bckgrnd = \"ordr\")\n else:\n return render_template('pleaseLog.html')\n \n \ndef getMenu(menu):\n m=\"\"\n for doc in collection2.find( {menu:{\"$gt\":\"\"}}):\n m += Markup('
' + str(doc[menu]) + \"
\" + str(doc[\"Price\"]) + \"
')\n return m\n\n \n \n@app.route('/ordered', methods=['GET','POST'])\ndef render_ordered():\n food = []\n drink = []\n dessert = []\n filters = {\"ID\": session['user_data']['id']}\n order = collection1.find_one(filters)\n if order == None:\n if 'Food' in request.form:\n food=request.form.getlist('Food')\n if 'Drink' in request.form:\n drink=request.form.getlist('Drink')\n if 'Dessert' in request.form:\n dessert=request.form.getlist('Dessert')\n doc = {\"Food/s\":food, \"Drink/s\":drink, \"Dessert/s\":dessert, \"ID\": session['user_data']['id']}\n collection1.insert_one(doc)\n else:\n if 'Food' in request.form:\n food=request.form.getlist('Food')\n if 'Drink' in request.form:\n drink=request.form.getlist('Drink')\n if 'Dessert' in request.form:\n dessert=request.form.getlist('Dessert')\n newvalues = {'$push': {'Food/s': {'$each': food}, 'Drink/s': {'$each': drink}, 'Dessert/s': {'$each': dessert}}}\n collection1.update_one(filters, newvalues)\n return render_template('ordered.html')\n \n@app.route('/cart')\ndef render_cart():\n if 'github_token' in session:\n order=getOrder()\n total = getTotal()\n return render_template('cart.html', order=order, total=total, bckgrnd = \"ordr\")\n else:\n return render_template('pleaseLog.html')\n \ndef getOrder():\n items=\"\"\n filters = {\"ID\": session['user_data']['id']}\n menu = collection1.find_one(filters)\n if 'Food/s' in menu:\n for food in menu[\"Food/s\"]:\n f = collection2.find_one({\"_id\": ObjectId(food)})\n items += Markup('
' + \"Food/s: \" + f['Food'] + \"
\" + \" \" + \"
\")\n if 'Drink/s' in menu: \n for drink in menu[\"Drink/s\"]:\n d = collection2.find_one({\"_id\": ObjectId(drink)})\n items += Markup('
' + \"Drink/s: \" + d['Drink'] + \"
\" + \"
\" + \"
\")\n if 'Dessert/s' in menu:\n for dessert in menu[\"Dessert/s\"]:\n ds = collection2.find_one({\"_id\": ObjectId(dessert)})\n items += Markup('
' + \"Dessert/s: \" + ds['Dessert'] +\"
\" + \"
\")\n if items == \"\":\n items=\"You must first add things to your cart in order to view it.\"\n return items\n \ndef getTotal():\n total=0\n filters = {\"ID\": session['user_data']['id']}\n menu = collection1.find_one(filters)\n for food in menu[\"Food/s\"]:\n pf = collection2.find_one({\"_id\": ObjectId(food)})\n total += pf[\"Price\"]\n for drink in menu[\"Drink/s\"]:\n pd = collection2.find_one({\"_id\": ObjectId(drink)})\n total += pd[\"Price\"]\n for dessert in menu[\"Dessert/s\"]:\n pde = collection2.find_one({\"_id\": ObjectId(dessert)})\n total += pde[\"Price\"]\n return total\n \n@app.route(\"/delete\", methods=['POST'])\ndef renderDelete():\n filters = {\"ID\": session['user_data']['id']}\n menu = collection1.find_one(filters)\n item_type = list(request.form.keys())[0][:-2]\n item_filter = {item_type: list(request.form.values())[0]}\n dis = collection2.find_one(item_filter)\n var = list(request.form.keys())[0]\n deletevalues = {'$pull': {var: str(dis['_id'])}}\n collection1.update_one(filters, deletevalues)\n return redirect(url_for(\"render_cart\"))\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"AJsBusiness/FinalV.2","sub_path":"Final.py","file_name":"Final.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5450578918","text":"from django.urls import path, include\nfrom qym_api import views\nfrom rest_framework_simplejwt import views as jwt_views\n\napp_name = \"qym_api\"\nurlpatterns = [\n path('register/', views.UserRegister.as_view(), name ='user_register'),\n path('login/', jwt_views.TokenObtainPairView.as_view(), name ='user_login'), \n path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name ='token_refresh'),\n path('user-sending-query/', views.UserSendingQuery.as_view(), name ='user_sending_query'),\n path('view-query//', views.ViewQuery.as_view(), name ='view_query'),\n path('mentor-respond-to-query//', views.MentorRespondToQuery.as_view(), name ='mentor_respond_to_query'),\n]\n","repo_name":"shidharthadas/question-your-mentor","sub_path":"qym_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72070827047","text":"# 给你一个大小为 n x n 的整数矩阵 grid 。 \n# \n# 生成一个大小为 (n - 2) x (n - 2) 的整数矩阵 maxLocal ,并满足: \n# \n# \n# maxLocal[i][j] 等于 grid 中以 i + 1 行和 j + 1 列为中心的 3 x 3 矩阵中的 最大值 。 \n# \n# \n# 换句话说,我们希望找出 grid 中每个 3 x 3 矩阵中的最大值。 \n# \n# 返回生成的矩阵。 \n# \n# \n# \n# 示例 1: \n# \n# \n# \n# \n# 输入:grid = [[9,9,8,1],[5,6,2,6],[8,2,6,4],[6,2,2,2]]\n# 输出:[[9,9],[8,6]]\n# 解释:原矩阵和生成的矩阵如上图所示。\n# 注意,生成的矩阵中,每个值都对应 grid 中一个相接的 3 x 3 矩阵的最大值。 \n# \n# 示例 2: \n# \n# \n# \n# \n# 输入:grid = [[1,1,1,1,1],[1,1,1,1,1],[1,1,2,1,1],[1,1,1,1,1],[1,1,1,1,1]]\n# 输出:[[2,2,2],[2,2,2],[2,2,2]]\n# 解释:注意,2 包含在 grid 中每个 3 x 3 的矩阵中。\n# \n# \n# \n# \n# 提示: \n# \n# \n# n == grid.length == grid[i].length \n# 3 <= n <= 100 \n# 1 <= grid[i][j] <= 100 \n# \n# \n# 👍 37 👎 0\n\nfrom typing import List\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n # def largestLocal(self, grid: List[List[int]]) -> List[List[int]]:\n # \"\"\"\n # 方法1:遍历\n # \"\"\"\n # n = len(grid)\n # res = []\n # for i in range(n-2):\n # r = []\n # for j in range(n-2):\n # temp = max(grid[i][j], grid[i][j+1], grid[i][j+2], grid[i+1][j], grid[i+1][j+1], grid[i+1][j+2],\n # grid[i+2][j], grid[i+2][j+1], grid[i+2][j+2])\n # r.append(temp)\n # 
res.append(r)\n # return res\n\n def largestLocal(self, grid: List[List[int]]) -> List[List[int]]:\n \"\"\"\n 方法1.2:枚举\n \"\"\"\n n = len(grid)\n res = [[0 for _ in range(n-2)] for _ in range(n-2)]\n for i in range(n-2):\n for j in range(n-2):\n m = max(grid[a][b] for a in range(i, i+3) for b in range(j, j+3))\n res[i][j] = m\n return res\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n grid = [[9, 9, 8, 1], [5, 6, 2, 6], [8, 2, 6, 4], [6, 2, 2, 2]]\n result = Solution().largestLocal(grid)\n print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[2373]矩阵中的局部最大值.py","file_name":"[2373]矩阵中的局部最大值.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27873081983","text":"from gensim.models import FastText\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom data.data_constructor import get_company_data,get_items_cat,get_location_data\nimport numpy as np\nfrom sklearn.utils import shuffle\n\nclass FeatureBuilder:\n\n def __init__(self, ordering = ['company','location','goods']):\n self.feature_encoder = None\n self.sizes = []\n self.train = None\n self.validation = None\n self.ordering = ordering\n self.word_mapping = {}\n self.company_feature_encoder = None\n self.location_feature_encoder = None\n self.goods_feature_encoder = None\n\n def load(self):\n self.load_data()\n\n def load_model(self):\n try:\n #self.feature_encoder = FastText.load('./models/fasttext.model')\n self.company_feature_encoder = FastText.load('./models/company_fasttext.model')\n self.location_feature_encoder = FastText.load('./models/location_fasttext.model')\n self.goods_feature_encoder = FastText.load('./models/goods_fasttext.model')\n except:\n print('Existing model does not exist. 
Training from scratch')\n self.classType_fasttext_train('company')\n self.classType_fasttext_train('location')\n self.classType_fasttext_train('goods')\n #self.train_fasttext_encoder()\n #self.validate_encoder()\n\n def load_data(self):\n data = []\n datasets = [get_company_data(),get_location_data(),get_items_cat()]\n for idx, dataset in enumerate(datasets):\n print('Is any entry Null?:',dataset.isnull().values.any())\n for idx2, row in dataset.iterrows():\n if row['name'] not in self.word_mapping:\n self.word_mapping[row['name']] = []\n self.word_mapping[row['name']].append(self.ordering[idx])\n self.sizes.append(dataset.shape[0])\n data += list(dataset['name'].values)\n #data = shuffle(data,random_state=0)\n self.train, self.validation = train_test_split(data,random_state=0,test_size=0.2)\n print('Train Test Constructed')\n\n\n def classType_fasttext_train(self,classType):\n\n train_sentences = []\n\n for word in self.train:\n sentence = []\n mappings = self.word_mapping[word]\n for mapping in mappings:\n if mapping == classType:\n sentence.append(word)\n if len(sentence) > 0:\n train_sentences.append(sentence)\n\n feature_encoder = FastText(size=50, window=2, min_count=1,min_n=2,max_n=6)\n feature_encoder.build_vocab(sentences=train_sentences)\n feature_encoder.train(sentences=train_sentences, total_examples=feature_encoder.corpus_count, epochs=1000)\n feature_encoder.save('./models/'+classType+'_fasttext.model')\n if classType == 'company':\n self.company_feature_encoder = feature_encoder\n elif classType == 'location':\n self.location_feature_encoder = feature_encoder\n elif classType == 'goods':\n self.goods_feature_encoder = feature_encoder\n else:\n raise Exception('Allowed arguments are company, location and goods')\n #self.feature_encoder = FastText(size=25, window=1, min_count=1, sentences=train_sentences, iter=50)\n\n\n def train_fasttext_encoder(self):\n train_sentences = []\n\n for word in self.train:\n mappings = self.word_mapping[word]\n for mapping in mappings:\n sentence = [word]\n train_sentences.append(sentence)\n\n self.feature_encoder = FastText(size=50, window=2, min_count=1,min_n=2,max_n=6)\n self.feature_encoder.build_vocab(sentences=train_sentences)\n self.feature_encoder.train(sentences=train_sentences, total_examples=self.feature_encoder.corpus_count, epochs=1000)\n self.feature_encoder.save('./models/fasttext.model')\n\n #self.feature_encoder = FastText(size=25, window=1, min_count=1, sentences=train_sentences, iter=50)\n\n def validate_encoder(self):\n test_words = self.validation\n\n ## Finding the closest cluster center (Company, Location or Good)\n tp = 0\n\n\n for word in test_words:\n distances = []\n encoding = self.feature_encoder[word]\n for order in self.ordering:\n category_encoding = self.feature_encoder[order]\n distances.append(np.linalg.norm(encoding-category_encoding))\n idx = distances.index(min(distances))\n\n gt_categories = self.word_mapping[word]\n for gt_category in gt_categories:\n if self.ordering[idx] == gt_category:\n tp+= 1\n break\n\n print('Closest cluster center validation approach accuracy:',str(tp/len(test_words)))\n\n ## Doing the K-nearest analysis\n tp = 0\n\n order_idx = {}\n for idx,order in enumerate(self.ordering):\n order_idx[order] = idx\n\n for word in test_words:\n distances = []\n encoding = self.feature_encoder[word]\n nearest_neighbours = self.feature_encoder.most_similar(word,topn=15)\n votes = [0,0,0]\n\n for neighbour in nearest_neighbours:\n mappings = self.word_mapping[neighbour[0]]\n for mapping in 
mappings:\n votes[order_idx[mapping]]+=1\n\n assigned_idx = votes.index(max(votes))\n\n gt_categories = self.word_mapping[word]\n for gt_category in gt_categories:\n if self.ordering[assigned_idx] == gt_category:\n tp += 1\n break\n\n print('Nearest 15-Neighbour accuracy:', str(tp / len(test_words)))\n\n\n def one_vs_rest_generator(self,positive_index=None):\n\n assert positive_index is not None, \"Requires index for the positive class(see ordering)\"\n if self.ordering[positive_index] == 'company':\n feature_encoder = self.company_feature_encoder\n elif self.ordering[positive_index] == 'location':\n feature_encoder = self.location_feature_encoder\n elif self.ordering[positive_index] == 'goods':\n feature_encoder = self.location_feature_encoder\n else:\n raise Exception('Marked positive class not in the set {0,1,2}')\n\n X_train = []\n y_train = []\n X_test = []\n y_test = []\n\n for word in self.train:\n try:\n X_train.append(feature_encoder[word])\n if self.ordering[positive_index] in self.word_mapping[word]:\n y_train.append(1)\n else:\n y_train.append(0)\n except KeyError:\n print('all ngrams for word %s absent from model. Skipping for %s'% (word,self.ordering[positive_index]))\n\n\n for word in self.validation:\n try:\n X_test.append(feature_encoder[word])\n if self.ordering[positive_index] in self.word_mapping[word]:\n y_test.append(1)\n else:\n y_test.append(0)\n except KeyError:\n print('all ngrams for word %s absent from model. Skipping for %s' % (word, self.ordering[positive_index]))\n\n return np.asarray(X_train,dtype=np.float64),np.asarray(y_train,dtype=np.float64),\\\n np.asarray(X_test,dtype=np.float64),np.asarray(y_test,dtype=np.float64)\n\n\n def get_encoding(self,word):\n return self.feature_encoder[word]","repo_name":"paganpasta/DocumentUnderstanding","sub_path":"FeatureBuilder.py","file_name":"FeatureBuilder.py","file_ext":"py","file_size_in_byte":7619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9672875484","text":"import uuid\n\nfrom flask import Flask, request, Response\nimport json\n\napp = Flask(__name__)\n\nemails = []\n\n\n@app.route('/v2/notifications/email', methods=['POST'])\ndef send_email():\n data = json.loads(request.data)\n\n emails.append(data)\n\n template_id = 'https://api.notifications.service.gov.uk/templates/' + \\\n data[\"template_id\"]\n\n response = {'id': str(uuid.uuid4()),\n 'reference': data['reference'],\n 'template': {\n 'version': 1,\n 'id': data['template_id'],\n 'uri': template_id},\n 'content': {\n 'body': json.dumps(data),\n 'subject': 'An example subject'}\n }\n\n return Response(response=json.dumps(response),\n status=201, mimetype='application/json')\n\n\n@app.route('/inbox/emails', methods=['GET'])\ndef get_emails():\n return json.dumps(emails), 200\n\n\n@app.route('/inbox/emails/', methods=['GET'])\ndef get_emails_for(email_address):\n messages = [m for m in emails if m['email_address'] == email_address]\n return json.dumps(messages), 200\n\n\n@app.route('/inbox/emails', methods=['DELETE'])\ndef clear_emails():\n emails.clear()\n return json.dumps({'success': True}), 200\n","repo_name":"ONSdigital/sdc-mock-gov-notify","sub_path":"sdc_mock_gov_notify/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27965889676","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda 
: int(inputing())\n\n#https://www.acmicpc.net/problem/3518\n# n_dict= {}\n# n_list = []\n# for i in sys.stdin.readlines():\n# a = i.rstrip().split()\n# for i in range(len(a)):\n# if i not in n_dict:\n# n_dict[i]=[a[i]]\n# else:\n# n_dict[i]+=[a[i]]\n# n_list.append(a)\n# a_dict = {}\n# for key in n_dict.keys():\n# list_list = n_dict[key]\n# max_max = 0\n# for i in list_list:\n# max_max=max(max_max,len(i))\n# a_dict[key]=max_max\n\n# # print(a_dict)\n# for i in n_list:\n# list_list = i\n# for a in range(len(list_list)):\n# list_list[a]=list_list[a].ljust(a_dict[a],\" \")\n# list_list[-1]=list_list[-1].rstrip()\n# print(*list_list)\n\n#https://www.acmicpc.net/problem/11507\n# n_list = inputing()\n# n_dict = {}\n# check = \"yes\"\n# for i in range(len(n_list)//3):\n# str_str = n_list[3*i:3*i+3]\n# symbol = str_str[0]\n# number = int(str_str[1:])\n# if symbol not in n_dict:\n# n_dict[symbol]=[number]\n# else:\n# if number not in n_dict[symbol]:\n# n_dict[symbol]+=[number]\n# else:\n# check = \"no\"\n# break\n# if check ==\"no\":\n# print(\"GRESKA\")\n# else:\n# for i in \"P,K,H,T\".split(\",\"):\n# if i in n_dict:\n# print(13-len(n_dict[i]),end=\" \")\n# else:\n# print(13,end=\" \")\n# print()\n\nn_dict = {}\nnumber = int((4*(10**9))**(1/2))+1\nfor i in range(2,number):\n if i not in n_dict:\n for a in range(i,number,i):\n n_dict[a]=0\n n_dict[i]=1\nlist_list = [a for a,b in n_dict.items() if b == 1]\nprint(list_list)\n \n\n\n\n\n\n\n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2022/11월/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5379353095","text":"# import the necessary packages\n# import RPi.GPIO as GPIO\n# from gpiozero\nimport winsound\nfrom playsound import playsound\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nfrom imutils.video import VideoStream\nfrom threading import Thread\nfrom subprocess import call\nimport numpy as np\nimport imutils\nimport time\nimport cv2\nimport pyttsx3\nimport os\nimport pickle\nfrom datetime import date\nimport pymysql.cursors\nfrom datetime import datetime\nimport Edit\n\n\nif __name__ == '__main__':\n import Edit\n\ncheck = False\nengine = pyttsx3.init() # object creation\nface_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')\neye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye.xml')\nsmile_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_smile.xml')\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read(\"./recognizers/face-trainner.yml\")\n\nlabels = {\"person_name\": 1}\nwith open(\"pickles/face-labels.pickle\", 'rb') as f:\n\tog_labels = pickle.load(f)\n\tlabels = {v:k for k,v in og_labels.items()}\n\n\ndef alarm():\n\n\t# print('call')\n\t# a = 0\n\t# s = 'espeak \"' + msg + '\"'\n\t# os.system(s)\n\tif check:\n\t\tvoices = engine.getProperty('voices') # getting details of current voice\n\t\t# engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male\n\t\tengine.setProperty('voice', voices[0].id) # changing index, changes voices. 
1 for female\n\t\tengine.setProperty('rate', 400)\n\t\tengine.say(\"No Mask\")\n\t\t# engine.say('My current speaking rate is Tu')\n\t\tengine.runAndWait()\n\t\tengine.stop()\n\t\t# filename = 'tingtingmav.wav'\n\t\t# winsound.PlaySound(filename, winsound.SND_FILENAME)\n\t\t# playsound('tingtingmav.wav')\n\t\t# engine = pyttsx3.init()\n\t\t# # engine.say(\"\")\n\t\t# # engine.runAndWait()\n\t\t# voices = engine.getProperty('voices')\n\t\t# engine.setProperty('voice', voices[1].id)\n\t\t# engine.setProperty('rate', 400)\n\t\t# engine.say(\"No Mask\")\n\t\t# engine.runAndWait()\n\t\t# engine.stop()\n\ndef detect_and_predict_mask(frame, faceNet, maskNet):\n\t# grab the dimensions of the frame and then construct a blob\n\t# from it\n\t# lấy kích thước của khung và sau đó tạo một đốm màu từ nó\n\t(h, w) = frame.shape[:2]\n\tblob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),\n\t\t(104.0, 177.0, 123.0))\n\n\t# pass the blob through the network and obtain the face detections\n\tfaceNet.setInput(blob)\n\tdetections = faceNet.forward()\n\tprint(detections.shape)\n\n\t# initialize our list of faces, their corresponding locations,\n\t# and the list of predictions from our face mask network\n\tfaces = []\n\tlocs = []\n\tpreds = []\n\n\t# loop over the detections\n\tfor i in range(0, detections.shape[2]):\n\t\t# extract the confidence (i.e., probability) associated with\n\t\t# the detection\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t# filter out weak detections by ensuring the confidence is\n\t\t# greater than the minimum confidence\n\t\tif confidence > 0.5:\n\t\t\t# compute the (x, y)-coordinates of the bounding box for\n\t\t\t# the object\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# ensure the bounding boxes fall within the dimensions of\n\t\t\t# the frame\n\t\t\t(startX, startY) = (max(0, startX), max(0, startY))\n\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\n\t\t\t# extract the face ROI, convert it from BGR to RGB channel\n\t\t\t# ordering, resize it to 224x224, and preprocess it\n\t\t\tface = frame[startY:endY, startX:endX]\n\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\t\t\tface = cv2.resize(face, (224, 224))\n\t\t\tface = img_to_array(face)\n\t\t\tface = preprocess_input(face)\n\n\t\t\t# add the face and bounding boxes to their respective\n\t\t\t# lists\n\t\t\tfaces.append(face)\n\t\t\tlocs.append((startX, startY, endX, endY))\n\n\t# only make a predictions if at least one face was detected\n\tif len(faces) > 0:\n\t\t# for faster inference we'll make batch predictions on *all*\n\t\t# faces at the same time rather than one-by-one predictions\n\t\t# in the above `for` loop\n\t\tfaces = np.array(faces, dtype=\"float32\")\n\t\tpreds = maskNet.predict(faces, batch_size=32)\n\n\t# return a 2-tuple of the face locations and their corresponding\n\t# locations\n\treturn (locs, preds)\n\n# load our serialized face detector model from disk\nprototxtPath = r\"face_detector\\deploy.prototxt\"\nweightsPath = r\"face_detector\\res10_300x300_ssd_iter_140000.caffemodel\"\nfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n# load the face mask detector model from disk\nmaskNet = load_model(\"mask_detector.model\")\n\n# initialize the video stream\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\n# cap = cv2.VideoCapture(0)\n# loop over the frames from the video stream\nwhile True:\n\tlabel = \"\"\n\tcheck = False\n\t# grab the frame from the threaded video stream and 
resize it\n\t# to have a maximum width of 400 pixels\n\tframe = vs.read()\n\tframe = imutils.resize(frame, width=400)\n\t# ret, frame = vs.read()\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tfaces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)\n\t# detect faces in the frame and determine if they are wearing a\n\t# face mask or not\n\t(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\n\n\t# loop over the detected face locations and their corresponding\n\t# locations\n\tfor (box, pred) in zip(locs, preds):\n\t\t# unpack the bounding box and predictions\n\t\t(startX, startY, endX, endY) = box\n\t\t(mask, withoutMask) = pred\n\t\t# determine the class label and color we'll use to draw\n\t\t# the bounding box and text\n\t\tif mask > withoutMask:\n\t\t\tlabel = \"mask\"\n\t\t\tcolor = (0, 255, 0)\n\t\t\tpass\n\t\telse:\n\t\t\tcolor = (0, 0, 255)\n\t\t\ttemp = False\n\t\t\tfor (x, y, w, h) in faces:\n\t\t\t\troi_gray = gray[y:y + h, x:x + w] # (ycord_start, ycord_end)\n\t\t\t\troi_color = frame[y:y + h, x:x + w]\n\t\t\t\tid_, conf = recognizer.predict(roi_gray)\n\t\t\t\tif conf >= 4 and conf <= 85:\n\t\t\t\t\tcheck = True\n\t\t\t\t\ttemp = True\n\t\t\t\t\t# recognize? deep learned model predict keras tensorflow pytorch scikit learn\n\t\t\t\t\tid_, conf = recognizer.predict(roi_gray)\n\t\t\t\t\tlabel = labels[id_]\n\t\t\t\t\tEdit.insert(label)\n\t\t\t\t\t# winsound.Beep(1000, 100)\n\t\t\t\t\tt = Thread(target=alarm)\n\t\t\t\t\t# t.deamon = True\n\t\t\t\t\tt.start()\n\t\t\t\t\t# engine = pyttsx3.init()\n\t\t\t\t\t# voices = engine.getProperty('voices')\n\t\t\t\t\t# engine.setProperty('voice', voices[1].id)\n\t\t\t\t\t# engine.setProperty('rate', 400)\n\t\t\t\t\t# engine.say(\"No mask\")\n\t\t\t\t\t# engine.runAndWait()\n\t\t\t\t\t# winsound.Beep(1000, 100)\n\t\t\t\t\t# filename = 'tingtingmav.wav'\n\t\t\t\t\t# winsound.PlaySound(filename, winsound.SND_FILENAME)\n\t\t\t\t\t# playsound('tingtingmav.wav')\n\t\t\t\t\t# pass\n\t\t\t\t# else :\n\t\t\t\t# \tcheck = True\n\t\t\t\t# \tlabel = \"No name\"\n\t\t\t\t# \tt = Thread(target=alarm)\n\t\t\t\t# \tt.deamon = True\n\t\t\t\t# \tt.start()\n\t\t\t\t# \tcheck = False\n\t\t\t\t\t# t = Thread(target=alarm, args=('No mask',))\n\t\t\t\t\t# t.deamon = True\n\t\t\t\t\t# t.start()\n\t\t\t\t\t# t.deamon = True\n\t\t\t\t\t# t.start()\n\t\t\t\t\t# engine = pyttsx3.init()\n\t\t\t\t\t# voices = engine.getProperty('voices')\n\t\t\t\t\t# engine.setProperty('voice', voices[1].id)\n\t\t\t\t\t# engine.setProperty('rate', 400)\n\t\t\t\t\t# engine.say(\"No mask\")\n\t\t\t\t\t# engine.runAndWait()\n\t\t\t\t\t# winsound.Beep(1000, 100)\n\t\t\t\t\t# filename = 'tingtingmav.wav'\n\t\t\t\t\t# winsound.PlaySound(filename, winsound.SND_FILENAME)\n\t\t\t\t\t# playsound('tingtingmav.wav')\n\t\t\t\t\t# pass\n\t\t\tif temp is False :\n\t\t\t\tlabel = \"No Name\"\n\t\t\t\tcheck = True\n\t\t\t\t# filename = 'tingtingmav.wav'\n\t\t\t\t# winsound.PlaySound(filename, winsound.SND_FILENAME)\n\t\t\t\t# playsound('tingtingmav.wav')\n\t\t\t\t# winsound.Beep(1000, 100)\n\t\t\t\tt = Thread(target=alarm)\n\t\t\t\t# t.deamon = True\n\t\t\t\tt.start()\n\t\t# include the probability in the label\n\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\t\t# display the label and bounding box rectangle on the output\n\t\t# frame\n\t\tcv2.putText(frame, label, (startX, startY - 10),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\ttemp = True\n\tcheck = False\n\t# show 
the output frame\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()","repo_name":"tinpham99/Face_Mask_Detection","sub_path":"Face-Mask-Detection-master/Face-Mask-Detection-master/detect_mask_video.py","file_name":"detect_mask_video.py","file_ext":"py","file_size_in_byte":8169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42391950729","text":"# Best First Start\r\ntree = {\r\n 'A': [('B', 12), ('C', 4)],\r\n 'B': [('D', 7), ('E', 3)],\r\n 'C': [('F', 8), ('G', 2)],\r\n 'D': [],\r\n 'E': [('H', 0)],\r\n 'G': [('H', 0)],\r\n 'F': [('H', 0)],\r\n 'H': []\r\n}\r\n\r\nstart = input(\"Enter the start node: \")\r\ngoal = input(\"Enter the goal node: \")\r\n\r\n\r\ndef bestfs(start, goal, tree, open=[], close=[]):\r\n if start == goal:\r\n print(\"Start is the goal node\")\r\n return goal\r\n if start not in close:\r\n print(start, end=\"->\")\r\n close.append(start)\r\n neighour = tree[start]\r\n for i in neighour:\r\n if i[0][0] not in open:\r\n open.append(i)\r\n open.sort(key=lambda x: x[1])\r\n if open[0][0] == goal:\r\n print(open[0][0], end='')\r\n else:\r\n node = open[0]\r\n open.remove(node)\r\n bestfs(node[0], goal, tree, open, close)\r\n\r\n\r\nbestfs(start, goal, tree, open=[], close=[])\r\n","repo_name":"ChandrakantChodankar/Best_FIrst_Search","sub_path":"BestFirstSearch.py","file_name":"BestFirstSearch.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38930332254","text":"import pygame\r\nimport random\r\n\r\nclass Ghost(pygame.sprite.Sprite):\r\n def __init__(self, x, y, img_file, direction, color, gate):\r\n '''\r\n Initializes a ghost object with attributes: image, rect, rect.x, rect.y, speed, direction, wall_right, wall_below, wall_above, wall_left, collide_wall_list.\r\n '''\r\n #initializes the sprite class\r\n pygame.sprite.Sprite.__init__(self)\r\n #loads the image for the ghost\r\n self.image = pygame.image.load(\"assets/\" + img_file).convert_alpha()\r\n #makes the ghost 15x15\r\n self.image = pygame.transform.scale(self.image, (15,15)).convert_alpha()\r\n #the rectangle for the ghost image\r\n self.rect = self.image.get_rect()\r\n #x value of rectangle\r\n self.rect.x = x\r\n #y value of rectangle\r\n self.rect.y = y\r\n #speed of the ghosts\r\n self.speed = 1\r\n #the initial direction of the ghost\r\n self.direction = direction\r\n #The variable that is changed if there is a wall right, below, above, or left of the ghost\r\n self.wall_right = False\r\n self.wall_below = False\r\n self.wall_above = False\r\n self.wall_left = False\r\n #the list that is appended to depending on how many walls are near the ghost\r\n self.collide_wall_list = []\r\n #the color state of the ghost\r\n self.color=color\r\n #1 or 2 depending of it the ghosts are in the gate or not respectively\r\n self.gate=gate\r\n\r\n def wallCollide(self, walls):\r\n '''\r\n Determines if the ghost collides with a wall.\r\n '''\r\n #loops through the list of wall sprites\r\n for wall in walls:\r\n #If the one of the ghosts sides is in a small radius of the walls side.\r\n #For example, if the ghost's right side is in a two pixel radius of the walls left side, then there is a wall to the right\r\n #of the ghost \r\n if self.rect.midright[0] in range(wall.rect.midleft[0]-2, 
wall.rect.midleft[0]+2) and self.rect.midright[1] in range(wall.rect.midleft[1]-2, wall.rect.midleft[1]+2):\r\n self.wall_right = True\r\n self.collide_wall_list.append(self.wall_right)\r\n \r\n if self.rect.midtop[1] in range(wall.rect.midbottom[1]-2, wall.rect.midbottom[1]+2) and self.rect.midtop[0] in range(wall.rect.midbottom[0]-2, wall.rect.midbottom[0]+2):\r\n self.wall_above = True\r\n self.collide_wall_list.append(self.wall_above)\r\n \r\n if self.rect.midleft[0] in range(wall.rect.midright[0]-2, wall.rect.midright[0]+2) and self.rect.midleft[1] in range(wall.rect.midright[1]-2,wall.rect.midright[1]+2):\r\n self.wall_left = True\r\n self.collide_wall_list.append(self.wall_left)\r\n \r\n if self.rect.midbottom[1] in range(wall.rect.midtop[1]-2, wall.rect.midtop[1]+2) and self.rect.midbottom[0] in range(wall.rect.midtop[0]-2,wall.rect.midtop[0]+2):\r\n self.wall_below = True\r\n self.collide_wall_list.append(self.wall_below)\r\n\r\n def nodeCollide(self, nodes):\r\n '''\r\n Determines if ghost collides with a node.\r\n '''\r\n #If the ghost's center is equal to the nodes center, then the ghost is at a node\r\n for node in nodes:\r\n if self.rect.center == node.rect.center:\r\n return True\r\n return False\r\n \r\n\r\n def move(self):\r\n '''\r\n Ghost moves based on set direction and speed.\r\n '''\r\n if self.direction == 0:\r\n self.rect.x += self.speed\r\n elif self.direction == 1:\r\n self.rect.y -= self.speed\r\n elif self.direction == 2:\r\n self.rect.x -= self.speed\r\n elif self.direction == 3:\r\n self.rect.y += self.speed\r\n\r\n \r\n def outsideMap(self):\r\n '''\r\n If ghost leaves the map, it will return from the opposide side.\r\n '''\r\n #420 is the right edge of the map and it changes his position to the other side\r\n if self.rect.midleft[0] > 420:\r\n self.rect.x = 1\r\n\r\n #14 is the left edge of the map and it changes his position to the other side\r\n elif self.rect.midright[0] < 15:\r\n self.rect.x = 405\r\n\r\n def oppositeDirection(self):\r\n '''\r\n Ghost reverses direction.\r\n '''\r\n if self.direction==0:\r\n self.direction=2\r\n elif self.direction==1:\r\n self.direction=3\r\n elif self.direction==2:\r\n self.direction=0\r\n elif self.direction==3:\r\n self.direction=1\r\n\r\n def inGateMove(self, walls):\r\n '''\r\n Ghost bounces in the box.\r\n '''\r\n #changes the wall variables if there is a wall above or below\r\n self.wallCollide(walls)\r\n\r\n #if there is a wall above, change the direction to down\r\n if self.wall_above:\r\n self.oppositeDirection()\r\n #moves the ghost up 2 so that it is no longer in the wall above range\r\n self.rect.y += 2\r\n\r\n #if there is a wall belowm change the direction to up\r\n elif self.wall_below:\r\n self.oppositeDirection()\r\n #moves the ghost down 2 so that is no longer in the wall below range\r\n self.rect.y -= 2\r\n\r\n #resets the wall values to be false\r\n self.wall_above = False\r\n self.wall_below = False\r\n self.collide_wall_list = []\r\n\r\n\r\n \r\n\r\n def update(self, nodes, walls):\r\n '''\r\n Determines which way the ghost goes.\r\n '''\r\n #If the ghost collides with a node\r\n if self.nodeCollide(nodes) == True:\r\n #changes the wall variables if there is a wall above or below\r\n self.wallCollide(walls)\r\n\r\n #if the length of the list is 0, that means that there are no walls around the ghost so it\r\n #can go any direction but backwards\r\n if len(self.collide_wall_list) == 0:\r\n if self.direction == 0:\r\n self.direction = random.choice([0,1,3])\r\n elif self.direction == 1:\r\n 
self.direction = random.choice([0,1,2])\r\n elif self.direction == 2:\r\n self.direction = random.choice([1,2,3])\r\n elif self.direction == 3:\r\n self.direction = random.choice([0,2,3])\r\n \r\n #If the length of the list is 1, that means that there is one wall in some direction of the ghost.\r\n #The ghost can then move any direction but in the direction of the wall or backwards.\r\n elif len(self.collide_wall_list) == 1:\r\n if self.wall_right == True:\r\n if self.direction == 0:\r\n self.direction = random.choice([1,3])\r\n elif self.direction == 1:\r\n self.direction = random.choice([1,2])\r\n elif self.direction == 2:\r\n self.direction = random.choice([1,2,3])\r\n elif self.direction == 3:\r\n self.direction = random.choice([2,3])\r\n elif self.wall_above == True:\r\n if self.direction == 0:\r\n self.direction = random.choice([0,3])\r\n elif self.direction == 1:\r\n self.direction = random.choice([0,2])\r\n elif self.direction == 2:\r\n self.direction = random.choice([2,3])\r\n elif self.direction == 3:\r\n self.direction = random.choice([0,2,3])\r\n elif self.wall_left == True:\r\n if self.direction == 0:\r\n self.direction = random.choice([0,1,3])\r\n elif self.direction == 1:\r\n self.direction = random.choice([0,1])\r\n elif self.direction == 2:\r\n self.direction = random.choice([1,3])\r\n elif self.direction == 3:\r\n self.direction = random.choice([0,3])\r\n elif self.wall_below == True:\r\n if self.direction == 0:\r\n self.direction = random.choice([0,1])\r\n elif self.direction == 1:\r\n self.direction = random.choice([0,1,2])\r\n elif self.direction == 2:\r\n self.direction = random.choice([1,2])\r\n elif self.direction == 3:\r\n self.direction = random.choice([0,2])\r\n\r\n #If the length of the list is 2, then there are 2 walls around the ghost and the ghost is in a corner.\r\n #If the ghost is in a corner, it can only move one other direction that is not either of the 2 walls, or backwards\r\n elif len(self.collide_wall_list) == 2:\r\n #Makes a variable to determine where where the walls are.\r\n self.bot_right = self.wall_below and self.wall_right\r\n self.top_right = self.wall_above and self.wall_right\r\n self.top_left = self.wall_above and self.wall_left\r\n self.bot_left = self.wall_below and self.wall_left\r\n if self.bot_right:\r\n if self.direction == 3:\r\n self.direction = 2\r\n elif self.direction == 0:\r\n self.direction = 1\r\n elif self.top_right:\r\n if self.direction == 1:\r\n self.direction = 2\r\n elif self.direction == 0:\r\n self.direction = 3\r\n elif self.top_left:\r\n if self.direction == 1:\r\n self.direction = 0\r\n elif self.direction == 2:\r\n self.direction = 3\r\n elif self.bot_left:\r\n if self.direction == 3:\r\n self.direction = 0\r\n elif self.direction == 2:\r\n self.direction = 1\r\n \r\n #Sets all of the wall variables to False to make sure that there is a new test every time.\r\n self.wall_below = False\r\n self.wall_right = False\r\n self.wall_left = False\r\n self.wall_above = False\r\n self.collide_wall_list = []\r\n\r\n \r\n\r\n","repo_name":"theresagun/cs110FinalProject","sub_path":"ghosts.py","file_name":"ghosts.py","file_ext":"py","file_size_in_byte":10486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18648609195","text":"#31. Write a program to find the sum of following series:\n#1 + 2 + 6 + 24 + 120 . . . . . 
n terms\nn=int(input(\"enter the number\"))\ni=1\np=1\ns=0\nwhile i<=n:\n p=i*p\n s=p+s\n i=i+1\n print(s) ","repo_name":"shanti96/while-loop","sub_path":"31 while sum series.py","file_name":"31 while sum series.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"585703048","text":"import pickle\nfrom unittest.mock import patch\n\nimport pytest\nimport torch\n\nfrom innofw.constants import CheckpointFieldKeys\n\n\ndef t_execute_w_credentials(func):\n def executor_w_credentials(*args, **kwargs):\n return func(*args, **kwargs)\n\n return executor_w_credentials\n\n\npatch(\n \"innofw.utils.executors.execute_w_creds.execute_w_credentials\",\n t_execute_w_credentials,\n).start()\n\nfrom innofw.zoo.show_model_metadata import show_model_metadata\n\ntags = {\n \"Accuracy\": 0.85,\n \"Precision\": 0.75,\n \"Recall\": 0.80,\n \"F1 Score\": 0.77,\n \"ROC AUC\": 0.90,\n \"Confusion Matrix\": [[500, 50], [100, 350]],\n \"Mean Absolute Error\": 0.10,\n \"Mean Squared Error\": 0.02,\n \"R Squared\": 0.60,\n \"Explained Variance\": 0.70,\n}\n\n\ndef t_get_object_tags(*args):\n return tags\n\n\n@patch(\"minio.api.Minio.get_object_tags\", side_effect=t_get_object_tags)\n@pytest.mark.parametrize(\n [\"ckpt_path\"],\n [\n [\n \"https://api.blackhole.ai.innopolis.university/pretrained/testing/lin_reg_house_prices.pickle\",\n ]\n ],\n)\ndef test_show_model_metadata(mock_get_object_tags, ckpt_path, tmp_path):\n content = {\n CheckpointFieldKeys.model: torch.nn.Module().state_dict(),\n CheckpointFieldKeys.metadata: tags,\n }\n\n # Test case 1: ckpt_path is url\n metadata = show_model_metadata(ckpt_path)\n\n mock_get_object_tags.assert_called_once()\n\n # Test case 2: ckpt_path is file path with .pkl/.pickle/.cmb extension\n ckpt_path = tmp_path / \"model.pkl\"\n with open(ckpt_path, \"wb+\") as f:\n pickle.dump(content, f)\n metadata = show_model_metadata(ckpt_path)\n assert metadata == tags\n\n # Test case 3: ckpt_path is file path with .ckpt/.pt extension\n ckpt_path = tmp_path / \"model.pt\"\n with open(ckpt_path, \"wb+\") as f:\n torch.save(content, f)\n metadata = show_model_metadata(ckpt_path)\n assert metadata == tags\n","repo_name":"InnopolisUni/innofw","sub_path":"tests/unit/zoo/test_show_model_metadata.py","file_name":"test_show_model_metadata.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"71888552809","text":"# 백준 1459 걷기\n# https://www.acmicpc.net/problem/1459\n\nx,y,w,s = map(int,input().split())\ntime1 = (x+y)*w\ntime2 = 0\n\nif (x+y)%2:\n time2 = (max(x,y)-1)*s+w\nelse:\n time2 = max(x,y)*s\n\ntime3 = min(x,y)*s + (max(x,y)-min(x,y))*w\n\nprint(min(time1,time2,time3))","repo_name":"do0134/solostudy","sub_path":"algorithm/7월/0701/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9205700552","text":"import pandas as pd\nimport numpy as np\nimport pickle\n\n\ndef preprocess_data_for_labels(ticker):\n hm_days = 7\n df = pd.read_csv(\n \"./python_programming_for_finance/sp500_joined_closes.csv\", index_col=0)\n tickers = df.columns.values\n df.fillna(0, inplace=True)\n # print(df[ticker])\n # print(df.iloc[:3])\n # print(df[ticker].shift(-3))\n for d in np.arange(1, hm_days+1):\n df[\"{}_{}d\".format(ticker, d)] = (\n df[ticker].shift(-d) - df[ticker]) / 
df[ticker]\n df.fillna(0, inplace=True)\n # print(df)\n return tickers, df\n\n\npreprocess_data_for_labels(\"XOM\")\n","repo_name":"luxiaotong/machine_learning_practice","sub_path":"python_programming_for_finance/python_for_finance_9.py","file_name":"python_for_finance_9.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72244382249","text":"from Tkinter import *\nimport ttk\nimport tkMessageBox\nimport os\nimport tkFont\nfrom PIL import Image, ImageTk\nimport facebook\n\nclass mainwindow():\n \"\"\"This is the welcome window where the user can choose to upload, download or sign out\"\"\"\n def __init__(self, tkinstance, token, switch_module):\n self.root = tkinstance\n self.token = token\n self.switch = switch_module\n self.name = self.get_Name() \n self.draw_Window()\n\n def get_Name(self):\n \"\"\"Get's user's name for optimum friendliness\"\"\"\n graph = facebook.GraphAPI(self.token)\n profile = graph.get_object('me')\n return profile[\"name\"]\n\n def draw_Window(self):\n \"\"\"Draws the window\"\"\"\n self.appwindow = ttk.Frame(self.root, padding = \"5 5 5 5\", width = 400, height = 500)\n self.root.title(\"Facebook Pictures\")\n self.appwindow.grid(column = 0, row = 0, sticky=(N,W,E,S))\n Label(self.appwindow, text=\"Welcome \" + str(self.name), font = (\"Heveltica\", 24)).grid(column=1, row=1, columnspan=2)\n\n self.uparwpic = Image.open('arrow-right.png')\n photo_up = ImageTk.PhotoImage(self.uparwpic)\n self.uploadarrow = ttk.Label(self.appwindow, image=photo_up)\n self.uploadarrow.image = photo_up\n self.uploadarrow.grid(column=1, row=2)\n self.uploadbut = ttk.Button(self.appwindow, text=\"Upload\", command=self.upload)\n self.uploadbut.grid(column = 1, row=3, sticky=(E,W))\n \n \n self.downarw = Image.open('arrow-Down.png')\n photo_down = ImageTk.PhotoImage(self.downarw)\n self.downloadarrow = ttk.Label(self.appwindow, image=photo_down)\n self.downloadarrow.image = photo_down\n self.downloadarrow.grid(column=2, row=2)\n self.downloadbut = ttk.Button(self.appwindow, text=\"Download\", command=self.download)\n self.downloadbut.grid(column = 2, row=3, sticky=(E,W))\n\n self.quit = ttk.Button(self.appwindow, text = 'Quit', command=self.quit_)\n self.quit.grid(column= 1, row=4, columnspan = 2)\n \n self.root.bind('', self.quit_)\n \n\n def upload(self):\n \"\"\"Runs the upload window\"\"\"\n self.appwindow.grid_forget() #Notice the use of grid_forget() rather than destroy()\n next_ = self.switch()\n next_.run_upload(self.root, self.name, self.token, self.switch)\n \n \n\n def download(self):\n \"\"\"Downloads pictures from facebook\"\"\"\n self.appwindow.grid_forget() #We might need this window again!\n next_ = self.switch()\n next_.run_download(self.root, self.name, self.token, self.switch)\n\n def quit_(self):\n quit()\n \n","repo_name":"lolpack/Facebook-app","sub_path":"Welcome_Window.py","file_name":"Welcome_Window.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24398349697","text":"import vtk\nimport numpy as np\nimport os\nimport sys\nfrom pyCellAnalyst import CellMech\nfrom vtk.util import numpy_support\nimport pickle\n\n\ndef writePoly(name, tri):\n writer = vtk.vtkXMLPolyDataWriter()\n writer.SetFileName(name)\n writer.SetInputData(tri.GetOutput())\n writer.Write()\n\n\ndef writeSTL(name, tri):\n writer = vtk.vtkSTLWriter()\n writer.SetFileName(name)\n 
writer.SetInputData(tri.GetOutput())\n writer.Write()\n\ntry:\n os.mkdir(\"MaterialCase\")\nexcept:\n pass\ntry:\n os.mkdir(\"SpatialCase\")\nexcept:\n pass\ntry:\n os.mkdir(\"DistanceErrors\")\nexcept:\n pass\n\nif len(sys.argv) == 1:\n seed = np.random.randint(0, sys.maxint)\n print(\"New seed generated for RNG: {:d}\".format(seed))\nelif len(sys.argv) == 2:\n seed = int(sys.argv[-1])\n print(\"RNG seed specified by user: {:d}\".format(seed))\nelse:\n raise SystemExit(\n (\"Too many arguments: expected 0 (generate new random seed) \",\n \"or 1 (user-specified seed). Exitting...\"))\nnp.random.seed(seed)\ncases = (\"MaterialCase\", \"SpatialCase\")\nN = 1000\nvoxel_dims = np.zeros((N, 3), np.float64)\nfor i in xrange(N):\n dims = np.zeros((2, 3), np.float64)\n for j, case in enumerate(cases):\n a = 5.0\n b = 2.0\n c = 2.0\n lam1 = np.random.uniform(0.9, 1.1)\n lam2 = np.random.uniform(0.9, 1.1)\n lam3 = np.random.uniform(0.9, 1.1)\n N1 = np.random.uniform(0.8, 1.2)\n N2 = np.random.uniform(0.8, 1.2)\n\n ellipFunc = vtk.vtkParametricSuperEllipsoid()\n ellipFunc.SetXRadius(lam1 * a)\n ellipFunc.SetYRadius(lam2 * b)\n ellipFunc.SetZRadius(lam3 * c)\n ellipFunc.SetN1(N1)\n ellipFunc.SetN2(N2)\n\n ellip = vtk.vtkParametricFunctionSource()\n ellip.SetParametricFunction(ellipFunc)\n ellip.SetUResolution(50)\n ellip.SetVResolution(30)\n ellip.SetWResolution(30)\n ellip.SetScalarModeToNone()\n ellip.Update()\n\n d = np.pi / 15 * lam1 * a\n resample = vtk.vtkPolyDataPointSampler()\n resample.SetInputData(ellip.GetOutput())\n resample.SetDistance(d)\n resample.Update()\n\n delaunay = vtk.vtkDelaunay3D()\n delaunay.SetInputData(resample.GetOutput())\n delaunay.Update()\n\n geo = vtk.vtkGeometryFilter()\n geo.SetInputData(delaunay.GetOutput())\n geo.Update()\n\n decim = vtk.vtkDecimatePro()\n decim.SetInputData(geo.GetOutput())\n decim.SetTargetReduction(.2)\n decim.Update()\n\n writeSTL(\"{:s}/cell{:04d}.stl\".format(case, i + 1), decim)\n dims[j, 0] = lam1 * a / 50\n dims[j, 1] = lam2 * b / 50\n dims[j, 2] = lam3 * c / 50\n\n voxel_dims[i, :] = np.max(dims, axis=0)\n\nmech = CellMech(\n ref_dir=\"MaterialCase\",\n def_dir=\"SpatialCase\",\n rigidInitial=False,\n deformable=True,\n saveFEA=False,\n deformableSettings={'Iterations': 200,\n 'Maximum RMS': 0.01,\n 'Displacement Smoothing': 1.5,\n 'Precision': 0.02},\n display=False)\n\nresults = {\"seed\": seed,\n \"rms\": np.zeros(N, np.float32),\n \"voxel_dims\": voxel_dims}\nfor i in xrange(N):\n probe = vtk.vtkProbeFilter()\n probe.SetInputData(mech.rsurfs[i])\n probe.SetSourceData(mech.cell_fields[i])\n probe.Update()\n\n poly = probe.GetOutput()\n poly.GetPointData().SetActiveVectors(\"Displacement\")\n\n warp = vtk.vtkWarpVector()\n warp.SetInputData(poly)\n warp.Update()\n\n dist = vtk.vtkDistancePolyDataFilter()\n dist.SetInputData(0, mech.dsurfs[i])\n dist.SetInputData(1, warp.GetPolyDataOutput())\n dist.Update()\n residual = numpy_support.vtk_to_numpy(\n dist.GetOutput().GetPointData().GetArray(\"Distance\"))\n rms = np.linalg.norm(residual) / np.sqrt(residual.size)\n results[\"rms\"][i] = rms\n writePoly(\"DistanceErrors/dist_error{:04d}.vtp\".format(i + 1), dist)\n\nfid = open(\"results.pkl\", \"wb\")\npickle.dump(results, fid, 2)\nfid.close()\n","repo_name":"siboles/pyCellAnalyst","sub_path":"src/testing/Nonhomogeneous/nonhomogeneous.py","file_name":"nonhomogeneous.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} 
+{"seq_id":"9231543281","text":"from PyQt5.QtWidgets import QDialog, QPushButton, QApplication, QLineEdit\nfrom PyQt5 import uic\nfrom PyQt5.QtGui import QIntValidator\nfrom PyQt5.QtCore import pyqtSignal\nfrom numpy import arange, ndarray, shape, fft, abs, amax, exp\nimport pyqtgraph as pg\nimport sys\nfrom bin_functions import loadBin\n\n\nclass FilterDialog(QDialog):\n filterButtonClicked = pyqtSignal(ndarray)\n\n def __init__(self, data, parent=None):\n super().__init__(parent=parent)\n\n uic.loadUi(\"uis/filter_dialog.ui\", self)\n\n self.data = data\n\n self.filter_button = self.findChild(QPushButton, \"filter_button\")\n self.preview_button = self.findChild(QPushButton, \"preview_button\")\n self.f0_edit = self.findChild(QLineEdit, \"f0_edit\")\n self.f1_edit = self.findChild(QLineEdit, \"f1_edit\")\n self.pw1 = self.findChild(pg.PlotWidget, \"pw1\")\n self.pw2 = self.findChild(pg.PlotWidget, \"pw2\")\n\n self.preview_button.clicked.connect(self.update_plots)\n self.filter_button.clicked.connect(self.start_filtration)\n\n amplitude_label = \"amplitude\"\n\n self.pw1.setTitle(\"Ascan - time domain\")\n self.pw1.setLabel('bottom', \"samples\")\n self.pw1.setLabel('left', amplitude_label)\n self.pw2.setTitle(\"Ascan - frequency domain\")\n self.pw2.setLabel('bottom', \"frequency [Hz]\")\n self.pw2.setLabel('left', amplitude_label)\n\n validator = FreqInputValidator()\n self.f0_edit.setValidator(validator)\n self.f1_edit.setValidator(validator)\n\n fs = 200_000_000\n N = shape(self.data)[0]\n df = fs/N\n self.f = arange(0, fs/2+df, df)\n\n f0 = int(100 * 1e3)\n f1 = int(8.42 * 1e6)\n self.f0_edit.setText(str(f0))\n self.f1_edit.setText(str(f1))\n\n self.pw1_curve1 = pg.PlotCurveItem()\n self.pw1_curve1.setPen(0,150,255)\n self.pw1_curve2 = pg.PlotCurveItem()\n self.pw1_curve2.setPen(255,165,0)\n self.pw2_curve1 = pg.PlotCurveItem()\n self.pw2_curve1.setPen(0,150,255)\n self.pw2_curve2 = pg.PlotCurveItem()\n self.pw2_curve2.setPen(255,0,0)\n\n self.pw1.addItem(self.pw1_curve1)\n self.pw1.addItem(self.pw1_curve2)\n self.pw2.addItem(self.pw2_curve1)\n self.pw2.addItem(self.pw2_curve2)\n\n legend1 = pg.LegendItem((80,60), offset=(100,20))\n legend1.setParentItem(self.pw1.getPlotItem())\n legend1.addItem(self.pw1_curve1, 'original')\n legend1.addItem(self.pw1_curve2, 'filtered')\n\n legend2 = pg.LegendItem((80,60), offset=(100,20))\n legend2.setParentItem(self.pw2.getPlotItem())\n legend2.addItem(self.pw2_curve1, 'fft')\n legend2.addItem(self.pw2_curve2, 'filter window')\n\n self.pw1_curve1.setData(self.data)\n\n A_fft = fft.rfft(self.data)\n A_fft = abs(A_fft)\n A_fft = A_fft[0:(N//2+1)]\n self.pw2_curve1.setData(self.f, A_fft/amax(A_fft))\n\n self.update_plots()\n self.show()\n\n def update_plots(self):\n f0 = int(self.f0_edit.text())\n f1 = int(self.f1_edit.text())\n self.filter_win = (1 - exp(-(self.f/f0)**2)) * exp(-(self.f/f1)**2 - (self.f/f1)**4)\n self.pw2_curve2.setData(self.f, self.filter_win)\n\n Ascan_fft = fft.rfft(self.data)\n Ascan_filt = Ascan_fft * self.filter_win\n Ascan_t = fft.irfft(Ascan_filt)*2\n self.pw1_curve2.setData(Ascan_t)\n \n def start_filtration(self):\n self.filterButtonClicked.emit(self.filter_win)\n \n\nclass FreqInputValidator(QIntValidator):\n def __init__(self):\n super().__init__(bottom=1)\n \n def fixup(self, input):\n input = \"1\"\n return super().fixup(input)\n\n\nif __name__ == '__main__':\n # Initialize the App\n app = QApplication(sys.argv)\n\n fid = \"data\\\\500avg.bin\"\n\n Bscan, n_Ascans, n_pts = loadBin(fid)\n Ascan = Bscan[1400,:]\n\n 
dialog = FilterDialog(Ascan)\n app.exec_()\n","repo_name":"tmaz00/ScansGuiApp","sub_path":"src/filter_dialog.py","file_name":"filter_dialog.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5378379256","text":"class FriendRequest:\n def numFriendRequests(self, ages: 'List[int]') -> int:\n memo = [0 for _ in range(121)] # 1 <= ages[i] <= 120\n for a in ages:\n memo[a] += 1\n result = 0\n for i in range(15, 121): # age of A\n if memo[i] == 0: continue\n for j in range(i//2+8, i+1): # age of B\n if i == j: # subtract a request to self\n result += memo[i] * (memo[j] - 1)\n else:\n result += memo[i] * memo[j]\n return result\n\n def numFriendRequests2(self, ages: 'List[int]') -> int:\n n = len(ages)\n if n < 2: return 0\n memo = [0 for _ in range(121)]\n for a in ages:\n memo[a] += 1\n result = 0\n for i in range(15, 121):\n if memo[i] > 0:\n yougest = i//2+7 + 1\n result += memo[i] * (sum(memo[yougest:i + 1]) - 1)\n return result","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/sorting_searching/friend_request.py","file_name":"friend_request.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1358892481","text":"# N = int(input())\n\n# list = input().split()\n# x,y = 1,1\n# direct = ['L','R','U','D']\n# for way in list:\n# if direct[0] == way :\n# y = y - 1\n# elif direct[1] == way :\n# y = y + 1\n# elif direct[2] == way :\n# x = x - 1\n# elif direct[3] == way : \n# x = x + 1\n \n\n\n# print(x, y)\n\nn = int(input())\nx,y = 1,1\nplans = input().split()\n\ndx = [0,0,-1,1]\ndy = [-1,1,0,0]\nmove_types = ['L','R','U','D']\n\nfor plan in plans:\n for i in range(len(move_types)):\n if plan == move_types[i]:\n nx = x + dx[i]\n ny = y + dy[i]\n \n if(nx<1 or ny<1 or nx> n or ny>n):\n continue\n x,y= nx,ny\n\nprint(x,y)\n\n\n","repo_name":"alexrider94/algorithm_study","sub_path":"dongbinbook/4-1.py","file_name":"4-1.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17970764213","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n shapeCreator\n A QGIS plugin\n ***************************************************************************/\n\"\"\"\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import QColor\nfrom qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsPointXY, QgsWkbTypes\nfrom qgis.gui import QgsMapToolEmitPoint, QgsRubberBand\n\n# Initialize Qt resources from file resources.py\n\nrubber_band = None\nactive = False\n\n\nclass ShapeCreator:\n def select(self):\n def end_selection():\n canvas.setMapTool(self.previous_map_tool)\n self.myMapTool.canvasClicked.disconnect()\n global active\n active = False\n global rubber_band\n canvas.scene().removeItem(rubber_band)\n rubber_band = None\n self.actions[1].setChecked(False)\n\n def draw_band(current_pos, clicked_button):\n canvas.xyCoordinates.connect(draw_band)\n if rubber_band and rubber_band.numberOfVertices():\n rubber_band.removeLastPoint()\n rubber_band.addPoint(current_pos)\n\n def mouse_click(current_pos, clicked_button):\n global rubber_band\n if clicked_button == Qt.LeftButton and rubber_band is None:\n create_rubber_band()\n rubber_band.addPoint(QgsPointXY(current_pos))\n\n if clicked_button == Qt.LeftButton and rubber_band:\n 
rubber_band.addPoint(QgsPointXY(current_pos))\n\n if clicked_button == Qt.RightButton and rubber_band:\n poly = QgsFeature()\n geometry = rubber_band.asGeometry()\n poly.setGeometry(geometry)\n data_provider.addFeatures([poly])\n layer.updateExtents()\n canvas.refresh()\n QgsProject.instance().addMapLayers([layer])\n end_selection()\n if clicked_button == Qt.RightButton and rubber_band is None and active:\n end_selection()\n\n def create_rubber_band():\n global rubber_band\n rubber_band = QgsRubberBand(canvas, QgsWkbTypes.PolygonGeometry)\n color = QColor(78, 97, 114)\n color.setAlpha(190)\n rubber_band.setColor(color)\n\n global active\n canvas = self.iface.mapCanvas()\n\n if active:\n end_selection()\n else:\n active = True\n self.actions[1].setChecked(True)\n global layer\n rubber_band = None\n crs = \"\"\n if self.dockwidget:\n base_file = self.dockwidget.baseMapFile.text()\n crs = \"?crs=\" + QgsVectorLayer(base_file).sourceCrs().toWkt()\n elif self.iface.activeLayer() and self.iface.activeLayer().name() != \"OSM\":\n crs = \"?crs=\" + self.iface.activeLayer().crs().toWkt()\n else:\n srs = QgsProject.instance().defaultCrsForNewLayers()\n crs = \"?crs=\" + srs.toWkt()\n\n layer = QgsVectorLayer(\"Polygon\" + crs, \"selection layer\", \"memory\")\n layer.setOpacity(0.5)\n data_provider = layer.dataProvider()\n self.previous_map_tool = canvas.mapTool()\n self.myMapTool = QgsMapToolEmitPoint(canvas)\n self.myMapTool.canvasClicked.connect(mouse_click)\n canvas.setMapTool(self.myMapTool)\n\n","repo_name":"Planheat/Planheat-Tool","sub_path":"PlanheatMappingModule/shapeCreator/shapeCreator.py","file_name":"shapeCreator.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"32837778205","text":"'''\n# 데이터 분포 확인\n# 'dogType': 여기서 경계(1), 화남(2), 슬픔(3), 행복(4)\ncnt = 0\nfor i in range(len(data)):\n if data[i]['dogType'] == 4:\n cnt += 1\n\nprint(cnt)\n\n# 396~399의 길이를 가진 오디오!\ncnt = 0\nfor i in range(len(data)):\n if data[i]['melSpec'].shape[1] >= 396:\n if data[i]['melSpec'].shape[1] < 400:\n cnt += 1\n\nprint(cnt)\n# 224!\n# 정작 400(10sec)인것은 4개 밖에 안됨! 
198로 잘라야되는 이유!\n########################################################################\n# 경계는 280, 화남은 952, 슬픔은 430, 행복은 338\n# 198랑 같거나 큰 오디오의 수\n# 경계는 280, 화남은 946, 슬픔은 430, 행복은 337\n# 396보다 같거나 큰 오디오의 수\n# 경계는 211, 화남은 622, 슬픔은 317, 행복은 251\n# 594랑 같거나 큰 오디오의 수\n# 경계는 6, 화남은 11, 슬픔은 5, 행복은 1 --> 따라서 그냥 [198,396,594]까지만 자르기\n'''\n\nimport os, pickle\nimport numpy as np\n\n# 데이터 읽기\ndir = os.getcwd()\nf = open(dir + '/dataset' + '/16k_50ms_25ms_128.pkl','rb')\ndata = pickle.load(f)\n\n### melSpec 뽑기 & testData 뽑기 & 경계(1)만 2배로 data balancing\n\nnp.random.shuffle(data)\n\ndef make_dataset(cnt,specX,chop_len,balancing,label,trainLabel, trainSpec, testLabel, testSpec):\n if cnt > 25:\n make_trainset(cnt,specX,chop_len,balancing,label,trainLabel,trainSpec)\n else:\n make_testset(cnt,specX,chop_len,label,testLabel,testSpec)\n\ndef make_trainset(cnt,specX,chop_len,balancing,label,trainLabel,trainSpec):\n for X in specX:\n if X.shape[1] == chop_len:\n for i in range(balancing):\n tempX = np.reshape(X,[1,X.shape[0]*chop_len])\n trainLabel.append(np.ones([1,X.shape[0]*chop_len])*label)\n trainSpec.append(tempX)\n\n# test dataset을 만들때, 이 데이터가 이 오디오인지 바로 소리 들을 수 있게 다시 짜야할듯!\ndef make_testset(cnt,specX,chop_len,label,testLabel,testSpec):\n X = specX[0]\n if X.shape[1] == chop_len:\n tempX = np.reshape(X,[1,X.shape[0]*chop_len])\n testLabel.append(np.ones([1,X.shape[0]*chop_len])*label)\n testSpec.append(tempX)\n\ndef dataset():\n cnt1 = 0\n cnt2 = 0\n cnt3 = 0\n cnt4 = 0\n trainLabel = []\n trainSpec = []\n testLabel = []\n testSpec = []\n\n chop_len = 198\n for specFile in data:\n label = specFile['dogType']\n specX = np.split(specFile['melSpec'],[chop_len,chop_len*2,chop_len*3],axis=1)\n \n if label == 1:\n balancing = 2\n cnt1 += 1\n make_dataset(cnt1,specX,chop_len,balancing,label,trainLabel, trainSpec, testLabel, testSpec)\n elif label == 2:\n balancing = 1\n cnt2 += 1\n make_dataset(cnt2,specX,chop_len,balancing,label,trainLabel, trainSpec, testLabel, testSpec)\n elif label == 3:\n balancing = 1\n cnt3 += 1\n make_dataset(cnt3,specX,chop_len,balancing,label,trainLabel, trainSpec, testLabel, testSpec)\n else:\n balancing = 1\n cnt4 += 1\n make_dataset(cnt4,specX,chop_len,balancing,label,trainLabel, trainSpec, testLabel, testSpec)\n\n trainSpec = np.asarray(trainSpec)\n trainLabel = np.asarray(trainLabel)\n testSpec = np.asarray(testSpec)\n testLabel = np.asarray(testLabel)\n trainData = np.concatenate((trainSpec,trainLabel), axis=1)\n testData = np.concatenate((testSpec,testLabel), axis=1)\n\n return trainData, testData\n","repo_name":"yena53/BE","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71021195689","text":"from pysiriproxy.plugins.plugin import BasePlugin\nfrom pysiriproxy.plugins.objectClasses import ClearContext, \\\n StartSpeechRequest, CancelRequest, CancelSpeech, CommandFailed\n\nfrom pyamp.logging import Colors\n\n\nclass Plugin(BasePlugin):\n '''Handles resetting the context.'''\n\n # Define the name and log color for this plugin\n name = \"Reset-Plugin\"\n logColor = Colors.Foreground.Green\n\n ##### Define all of the filters for this plugin. 
#####\n\n @CancelRequest\n @CancelSpeech\n @ClearContext\n @CommandFailed\n def resetFilter(self, obj, direction):\n '''Reset the context when a request is completed, or the context is\n cleared.\n\n * obj -- The received object\n * direction -- The direction of the received data\n\n '''\n self.log.debug(\"Resetting object manager: %s\" % obj.get(\"class\", None),\n level=0)\n self.resetContext()\n return obj\n","repo_name":"bponsler/pysiriproxy","sub_path":"pysiriproxy/config/plugins/resetPlugin.py","file_name":"resetPlugin.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"603201541","text":"from datetime import datetime\nfrom urllib.parse import urlparse\n\n# from Knowledge_Base import log, to_json\n# from Knowledge_Base.enums.logs_enums import LogLevel\n\n\nclass HttpResponse(object):\n\n def __init__(self, request_id=None, content=None, headers=None, status_code=None, cookies=None, original=None,\n is_redirect=None, response_url=None, from_server_id=None, to_ip=None, decision=None, time_stamp=None):\n self.original = original\n self.request_id = request_id\n self.content = content\n self.headers = headers\n self.status_code = status_code\n self.cookies = cookies\n self.is_redirect = is_redirect\n self.response_url = response_url\n self.from_server_id = from_server_id\n self.to_ip = to_ip\n self.decision = decision\n self.time_stamp = time_stamp\n\n\nclass HttpRequest(object):\n\n def __init__(self, method=None, content=None, headers=None, path=None,\n host_name=None, to_server_id=None, from_ip=None, decision=None, time_stamp=None):\n self.method = method\n self.content = content\n self.headers = headers\n self.path = path\n self.host_name = host_name\n self.from_ip = from_ip\n self.to_server_id = to_server_id\n self.decision = decision\n self.time_stamp = time_stamp\n self.text = None\n self.form = None\n self.args = None\n\n\nclass Parser(object):\n\n def parse(self, data_to_parse):\n \"\"\"\n This method will parse the data.\n :data: the request / response\n :method: the request / response method (e.g GET)\n :data_type: Enum of DataType to identify.\n :return: ORM HttpRequest/HttpResponse Object\n \"\"\"\n raise NotImplementedError()\n\n\nclass BaseHTTPRequestParser(Parser):\n\n def parse(self, data_to_parse):\n parsed_data = HttpRequest()\n parsed_data.method = \"{}\".format(data_to_parse.command).upper()\n content_length = int(data_to_parse.headers.get('Content-Length', 0))\n parsed_data.content = data_to_parse.rfile.read(content_length)\n parsed_data.headers = data_to_parse.headers\n parsed_data.query = '{uri.query}'.format(uri=urlparse(data_to_parse.path))\n parsed_data.path = '{uri.path}'.format(uri=urlparse(data_to_parse.path))\n parsed_data.host_name = '{uri.netloc}'.format(uri=urlparse(\n \"https://{}\".format(data_to_parse.headers.get('HOST').replace(\"http://\", \"\").replace(\"https://\", \"\"))))\n parsed_data.from_ip = data_to_parse.client_address[0]\n parsed_data.time_stamp = data_to_parse.log_date_time_string()\n return parsed_data\n\n\nclass HTTPResponseParser(Parser):\n\n def __init__(self, request):\n \"\"\"\n :param request: The original request\n \"\"\"\n self.__request = request\n\n def parse(self, data_to_parse, is_user_protection=False):\n parsed_data = HttpResponse()\n parsed_data.original = data_to_parse\n parsed_data.text = data_to_parse.text\n parsed_data.content = data_to_parse.text\n parsed_data.headers = data_to_parse.headers\n parsed_data.status_code 
= data_to_parse.status_code\n parsed_data.cookies = data_to_parse.cookies\n parsed_data.is_redirect = data_to_parse.is_redirect\n parsed_data.response_url = data_to_parse.url\n parsed_data.time_stamp = datetime.now()\n if not is_user_protection:\n parsed_data.from_server_id = self.__request.to_server_id\n parsed_data.to_ip = self.__request.from_ip\n parsed_data.from_dns_name = self.__request.host_name\n # log(\"The Parsed data is: {}\".format(to_json(parsed_data)), LogLevel.DEBUG, self.parse)\n # log(\"Finish parsing the request.\", LogLevel.INFO, self.parse)\n return parsed_data\n\n\nclass FlaskHTTPRequestParser(Parser):\n\n def parse(self, data_to_parse):\n parsed_data = HttpRequest()\n # log(\"Parse the url {}\".format(data_to_parse.url), LogLevel.DEBUG, self.parse)\n url = urlparse(data_to_parse.url)\n # log(\"The Parsed url is: {}\".format(url), LogLevel.DEBUG, self.parse)\n parsed_data.method = \"{}\".format(data_to_parse.method).upper()\n parsed_data.content = data_to_parse.get_data()\n parsed_data.headers = data_to_parse.headers\n parsed_data.query = '{uri.query}'.format(uri=url)\n parsed_data.path = '{uri.path}'.format(uri=url)\n parsed_data.host_name = '{uri.netloc}'.format(uri=url)\n parsed_data.from_ip = data_to_parse.remote_addr\n parsed_data.time_stamp = datetime.now()\n parsed_data.args = data_to_parse.args\n parsed_data.form = data_to_parse.form\n # log(\"The Parsed data is: {}\".format(to_json(parsed_data)), LogLevel.DEBUG, self.parse)\n # log(\"Finish parsing the request.\", LogLevel.INFO, self.parse)\n return parsed_data\n","repo_name":"FA-PengFei/NGWAF","sub_path":"ngwaf-app/waf/WafApp/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"53"} +{"seq_id":"23047006677","text":"#!/usr/bin/python3\n\"\"\"\nCreated on Tue Oct 5 16:35:49 2021\n\n@author: Apala\n\"\"\"\nimport sys\nimport os\nimport numpy as np\n\nos.chdir('tests/')\nsys.path.append(\"..\")\n\nfrom engine.set.star import Star\n\ndef main():\n lb = np.matrix('1; 1')\n ub = np.matrix('2; 2')\n \n S = Star(lb = lb, ub = ub)\n print('S: ', S.__repr__())\n \n V = S.V\n C = S.C\n d = S.d\n\n S2 = Star(V, C, d)\n print('\\nS2: ', S2.__repr__())\n \n W = np.matrix('1 -1; 1 1')\n b = np.matrix('0.5; 0.5')\n \n Sa = S2.affineMap(W, b)\n print('\\nSa: ', Sa.__repr__()) \n\n \nif __name__ == '__main__':\n main()\n","repo_name":"V2A2/StarV_temp","sub_path":"tests/set/star/test_star_affineMap.py","file_name":"test_star_affineMap.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"21631612069","text":"import pygame\n\npygame.init()\n\n#创建窗口\nscreen = pygame.display.set_mode((480,700))\n#创建图像\nbg = pygame.image.load(\"./images/background.png\")\nscreen.blit(bg,(0,0))\n#pygame.display.update()\n\n#创建飞机图像\nbg = pygame.image.load(\"./images/hero.gif\")\nscreen.blit(bg,(180,500))\n\nclock = pygame.time.Clock()\n\npygame.display.update()\n\ni = 0\nwhile True:\n\tclock.tick(60)\n\ti+=1\n\tprint(i)\n\npygame.quit()\n","repo_name":"nmww/p1804_ceshi","sub_path":"p1804_02高级/plan/7-创建时钟对象.py","file_name":"7-创建时钟对象.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"ja","doc_type":"code","stars":23,"dataset":"github-code","pt":"53"} +{"seq_id":"10269495965","text":"from matplotlib import pyplot as plt\nimport visa\nimport numpy as np\nimport time\nimport 
os\nrm=visa.ResourceManager()\ncarpeta='C:/Users/Admin/Desktop/L6 Caprile Rosenberg/python/mediciones/4-3/'\n#carpeta=path+day+'/'\nresource_name=rm.list_resources()[0]#'USB0::0x0699::0x0363'*?\n \n\nosci=rm.open_resource(resource_name)\n \n#print(osci.query('*IDN?'))\ndef setup():\n osci.write('DAT:ENC RPB')\n osci.write('DAT:WID 1')\n xze,xin,yze1,ymu1,yoff1=osci.query_ascii_values('WFMPRE:XZE?;XIN?;CH1:YZE?;YMU?;YOFF?',separator=';')\n yze2,ymu2,yoff2=osci.query_ascii_values('WFMPRE:CH2:YZE?;YMU?;YOFF?',separator=';')\n return xze,xin,yze1,ymu1,yoff1,yze2,ymu2,yoff2\n \ndef medir_trigger():\n osci.write('ACQuire:STATE RUN')\n osci.write('ACQuire:STOPAfter SEQuence')\n r=osci.query('ACQuire:STATE?')\n while r=='1\\n':\n r=osci.query('ACQuire:STATE?')\n time.sleep(0.02)\n osci.write('DAT:SOU CH1' )\n data1=osci.query_binary_values('CURV?', datatype='B',container=np.array)\n osci.write('DAT:SOU CH2') \n data2=osci.query_binary_values('CURV?', datatype='B',container=np.array)\n return data1,data2\n\ndef medir(ch):\n xze,xin,yze1,ymu1,yoff1=osci.query_ascii_values('WFMPRE:XZE?;XIN?;CH1:YZE?;YMU?;YOFF?',separator=';')\n yze2,ymu2,yoff2=osci.query_ascii_values('WFMPRE:CH2:YZE?;YMU?;YOFF?',separator=';')\n osci.write('DAT:ENC RPB')\n osci.write('DAT:WID 1')\n osci.write('DAT:SOU CH{}'.format(ch) )\n data=osci.query_binary_values('CURV?', datatype='B',container=np.array)\n if ch==1:\n data=(data-yoff1)*ymu1+yze1\n if ch==2:\n data=(data-yoff2)*ymu2+yze2\n \n tiempo = xze + np.arange(len(data)) * xin\n\n return tiempo,data\n\n \n#%%\nN=500\nmediciones=np.zeros([3,N,2500])\nparametros=np.zeros([8,N])\nxze,xin,yze1,ymu1,yoff1,yze2,ymu2,yoff2=setup()\n\nprint('A MEDIR!!')\nfor i in range(N):\n data1,data2=medir_trigger()\n mediciones[1,i,:]=data1\n mediciones[2,i,:]=data2\n if i%30==0 and i!=0:\n print('Se realizaron',i,'mediciones')\nfor i in range(N):\n data1=mediciones[1,i,:]\n data2=mediciones[2,i,:]\n data2=(data2-yoff2)*ymu2+yze2 \n tiempo = xze + np.arange(len(data1)) * xin\n data1=(data1-yoff1)*ymu1+yze1 \n mediciones[:,i,:]=tiempo,data1,data2\n\nif N<10:\n for i in range(N):\n plt.figure(num=i, figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k')\n plt.plot(mediciones[0,i,:], mediciones[1,i,:]*1000,'b-')\n plt.plot(mediciones[0,i,:], mediciones[2,i,:],'r-')\n plt.xlabel('Tiempo [s]')\n plt.ylabel('Voltaje [V]')\n\nData1=np.zeros([1+N,2500])\nData1[0,:]=mediciones[0,0,:]\nfor i in range(N):\n Data1[i+1,:]=mediciones[1,i,:]\nData2=np.zeros([1+N,2500])\nData2[0,:]=mediciones[0,0,:]\nfor i in range(N):\n Data2[i+1,:]=mediciones[2,i,:]\n\n\n\nnp.savetxt(carpeta+'datoscanal1_punta100-'+str(time.localtime()[0])+'-'+str(time.localtime()[1])+'-'+str(time.localtime()[2])+'-'+str(time.localtime()[3])+'-'+str(time.localtime()[4])+'.txt',Data1, fmt='%.18g', delimiter='\\t', newline=os.linesep)\nnp.savetxt(carpeta+'datoscanal2_punta100-'+str(time.localtime()[0])+'-'+str(time.localtime()[1])+'-'+str(time.localtime()[2])+'-'+str(time.localtime()[3])+'-'+str(time.localtime()[4])+'.txt',Data2, fmt='%.18g', delimiter='\\t', newline=os.linesep)\n\n\n#usar para medir ambos canales 1 sola vez\n\n#t,ch1=medir(1)\n#t,ch2=medir(2)\n#plt.plot(t,ch1*1000)\n#plt.plot(t,ch2)\n#tiempo,data1=medir(1)\n#tiempo,data2=medir(2)\n","repo_name":"fcaprile/labo6","sub_path":"scripts/bobina rogowski/leer osci.py","file_name":"leer osci.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41084113081","text":"import 
requests\n\ndef validador_cep(cep):\n if len(cep) != 8 or not cep.isdigit():\n return False\n\n api_cep = f'https://viacep.com.br/ws/{cep}/json/'\n dados = requests.get(api_cep).json()\n \n if 'erro' in dados:\n return False\n else:\n endereco = {\n 'rua_avenida' : dados['logradouro'],\n 'bairro' : dados['bairro'],\n 'localidade' : dados['localidade'],\n 'uf' : dados['uf']\n }\n return endereco","repo_name":"Duarts-D/E-COMMERCE","sub_path":"apps/utilidade/validators/cep.py","file_name":"cep.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71796546727","text":"import argparse\nimport json \nimport datetime\n\nimport requests\n\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import Session, sessionmaker\n\nfrom spearmint_libs.pi_utils import PiUtils\nfrom spearmint_libs.utils import Utils, format_time\nfrom spearmint_libs.sql.db_connect import Connect\n\nimport evelink\n\n\n\nclass Command():\n def __init__(self):\n self.config = self.read_config()\n self.utils = Utils(self.config)\n self.pi_utils = PiUtils(self.config, self.utils)\n self.eve = evelink.eve.EVE()\n self.corp_api = evelink.api.API(api_key=(self.config['corp_api']['key'], self.config['corp_api']['code']))\n self.corp = evelink.corp.Corp(self.corp_api)\n self.db = Connect(self.config['database']['data'])\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--pi', help='update the PI cache', action='store_true')\n parser.add_argument('--losses', help='update the items and ships that have been destroyed', action='store', type=int)\n parser.add_argument('--coalition', help='coalition or alliance for --losses', action='store', type=str)\n parser.add_argument('--create-db', help='create the databases', action='store_true')\n parser.add_argument('--start', help='page to start at for updating losses', action='store', type=int)\n self.args = parser.parse_args()\n\n if self.args.pi:\n self.update_pi()\n\n if self.args.losses:\n self.update_losses()\n\n if self.args.create_db:\n self.create_databases()\n\n def read_config(self, path='config.json'):\n with open(path) as cfg:\n config = json.loads(cfg.read())\n assert(config)\n\n return config\n\n def display_completion(self, percentage):\n print(int(percentage))\n\n def update_losses(self):\n print('Updating losses...')\n\n losses = self.db\n items_lost_ = {}\n alliance_ids = []\n alliances_requested = []\n\n for coalition in self.config['coalitions']:\n alliance_ids.extend(self.config['coalitions'][coalition])\n\n if self.args.start:\n start_page = self.args.start\n\n else:\n start_page = 0\n\n for count in range(start_page, self.args.losses):\n print('Page %s of %s' % (count, self.args.losses))\n\n for alliance_id in alliance_ids:\n\n if alliance_id in alliances_requested:\n continue\n\n alliances_requested.append(alliance_id)\n print('Alliance %s' % (alliance_id))\n kb_url = 'https://zkillboard.com/api/kills/allianceID/%s/page/%s/' % (alliance_id, count)\n \n data = json.loads(requests.get(kb_url).text)\n\n for row in data:\n \n # I'm putting row_count up here because its possible to already have the kill in the db. 
\n\n #self.display_completion(row_count)\n # 'killTime': '2014-09-19 21:27:00'\n time_format = '%Y-%m-%d %H:%M:%S'\n kill_time = datetime.datetime.strptime(row['killTime'], time_format)\n kill_id = row['killID']\n \n query = losses.session.query(losses.base.classes.kills).filter_by(killID=kill_id).first()\n\n if query:\n #print('killID already exists, skipping')\n continue\n \n \n kill = losses.base.classes.kills(killID=kill_id, \n shipTypeID=row['victim']['shipTypeID'], \n killTime=kill_time,\n characterID=row['victim']['characterID'],\n corporationID=row['victim']['corporationID'],\n corporationName=row['victim']['corporationName'],\n allianceID=row['victim']['allianceID'])\n\n\n for line in row['items']:\n #print('storing item: %s' % (self.utils.lookup_typename(line['typeID'])))\n item = losses.base.classes.items_lost(typeID=line['typeID'])\n kill.items_lost_collection.append(item)\n\n for line in row['attackers']:\n attacker = losses.base.classes.attacker(weaponTypeID=line['weaponTypeID'], \n allianceID=line['allianceID'],\n corporationName=line['corporationName'],\n shipTypeID=line['shipTypeID'],\n characterName=line['characterName'],\n characterID=line['characterID'],\n allianceName=line['allianceName'])\n\n kill.attacker_collection.append(attacker)\n\n\n #print('storing ship: %s' % (self.utils.lookup_typename(row['victim']['shipTypeID'])))\n \n losses.session.add(kill) \n losses.session.commit()\n\n alliances_requested = [] \n\n def update_pi(self):\n print('updating PI statistics...')\n\n for system_name in self.config['statistics']['pi_systems']:\n system = self.utils.lookup_system(system_name).__dict__\n\n for tier in self.config['statistics']['pi_tiers']:\n self.pi_utils.store_prices(tier, system['solarSystemID'])\n \n print('Done')\n\n def create_databases(self):\n # You must import the metadata file, then connect it to the engine, otherwise\n # it will create a db with no tables. \n from spearmint_libs.sql import initialize_sql\n from spearmint_libs.sql.losses import ItemsLost, Kills\n from spearmint_libs.sql.users import Users, Character\n from spearmint_libs.sql.pi import Pi\n\n initialize_sql(self.db.engine)\n\n\n\nif __name__ == '__main__':\n cli = Command()\n","repo_name":"Cightline/Spearmint","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":6254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5395961090","text":"from Questions.q0027RemoveElement.BruteForce import removeElement\n\nnums =[0,1,2,2,3,0,4,2]\nval = 2\nans = [0,1,3,0,4]\noutput = removeElement(nums, val)\nfor i in range(output):\n if ans[i] in nums:\n nums.remove(ans[i])\n elif ans[i] not in nums:\n print(\"wrong answer\")\n break\nprint(\"right answer\")","repo_name":"Lazy-yin/LeetCode-practice","sub_path":"Tests/test_q27.py","file_name":"test_q27.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19577463552","text":"from django.urls import path\nfrom . 
import views\nurlpatterns = [\n path('',views.home, name='home'),\n path('about//',views.about,name='about'),\n path('login/',views.login,name='login'),\n path('signup/',views.signup,name='signup'),\n path('ticket/',views.ticket,name='ticket'),\n path('booking/',views.booking,name='booking'),\n path('user-logout/',views.user_logout,name='user-logout'),\n path('myadmin/',views.admin,name='admin'),\n\n path('cities/',views.cities,name='cities'),\n path('cities//',views.deletecity,name='deletecity'),\n path('updatecity//',views.updatecity,name='updatecity'),\n\n path('movies/',views.movies,name='movies'),\n path('movies//',views.deletemovie,name='deletemovie'),\n path('updatemovie//',views.updatemovie,name='updatemovie'),\n\n path('shows/',views.shows,name='shows'),\n path('shows/',views.deleteshow,name='deleteshow'),\n path('updateshow/',views.updateshow,name='updateshow'),\n\n # path('bookings/',views.bookings,name='bookings'),\n\n path('halls/',views.halls,name='halls'),\n path('halls/',views.deletehall,name='deletehall'),\n path('updatehall/',views.updatehall,name='updatehall'),\n \n path('Tcket_booking/',views.tcket_booking,name='Tcket_booking')\n\n]\n","repo_name":"aman-SINGH7999/BOOKING_ticket","sub_path":"booking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31682000932","text":"from django.conf.urls import patterns, url\n\n\nurlpatterns = patterns('',\n url(r'^(?P(\\d+))/$',\n 'lists.views.display_list',\n name='display_list'\n ),\n url(r'^(?P(\\d+))/new_item/$',\n 'lists.views.add_item',\n name='add_item'\n ),\n url(r'^(?P(\\d+))/$', 'lists.views.view_list',\n name='view_list'\n ),\n url(r'^new/$', 'lists.views.new_list', name='new_list'),\n)\n","repo_name":"HassenPy/tdd-goat","sub_path":"lists/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18159822748","text":"from django.db.models import F\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import gettext as _\nfrom main.helpers.permissions import permission_required_json, direct_object\nfrom main.models import DeTable, Stage\nfrom main.models.de_table import UnfinishedTableException\nfrom main.utils.api_responses import api_failure, api_success\n\n\ndef de_table(request, table_id):\n table = get_object_or_404(DeTable, pk=table_id)\n if request.method == \"POST\":\n rtype = request.POST['type']\n if rtype == 'add_result':\n return add_result(request, table)\n elif rtype == 'table_complete':\n return mark_table_complete(request, table)\n else:\n return api_failure('unrecognised request')\n else:\n return get_bouts(table)\n\n\ndef get_bouts(table):\n entries = table.detableentry_set.order_by('table_pos').annotate(seed=F('entry__deseed__seed')).\\\n values('id', 'entry_id', 'score', 'victory', 'seed', 'entry__competitor__name', 'entry__club__name')\n bouts = []\n x = 0\n while x < len(entries):\n bouts.append({'e0': entries[x], 'e1': entries[x + 1]})\n x += 2\n return JsonResponse({'bouts': bouts})\n\n\ndef add_result(request, table):\n \"\"\"add a result to a de table\n\n :param request: expecting POST parameters:\\n\n :int entryA: detableentry id of the first entry the results pertain to\n :int entryB: detableentry id of the second entry the results pertain to\n :int scoreA: first entries score\n :int 
scoreB: second entries score\n :bool victoryA: boolean indicating if first entry won. If false 2nd is assumed to have won\n :param DeTable table: table these results are from\n :return:\n \"\"\"\n if table.de.stage.state != Stage.STARTED:\n return api_failure('incorrect state', 'stage not currently running')\n if table.children.exists():\n return api_failure('child_exists', _('Cannot add results after next round of tables made'))\n if table.complete:\n return api_failure('table_complete', _('This table has already been marked as complete'))\n e1_id = request.POST['entryA']\n e2_id = request.POST['entryB']\n e1 = table.detableentry_set.get(pk=e1_id)\n e2 = table.detableentry_set.get(pk=e2_id)\n e1_victory = bool(int(request.POST['victoryA']))\n e1_score = int(request.POST['scoreA'])\n e2_score = int(request.POST['scoreB'])\n return do_add_result(request, e1, e2, e1_score, e2_score, e1_victory)\n\n\n@permission_required_json('main.change_de_table_entry', fn=direct_object)\ndef do_add_result(request, e1, e2, e1_score, e2_score, e1_victory):\n if e1.against() != e2:\n return api_failure('bad entry pair', \"these entries aren't fighting each other this round\")\n if (e1.entry is None and e1_victory) or (e2.entry is None and not e1_victory):\n return api_failure('bye_victory', _('Byes cannot win a match'))\n if (e1_victory and e2_score > e1_score) or (not e1_victory and e2_score < e1_score):\n return api_failure('score victory mismatch')\n e1.victory = e1_victory\n e1.score = e1_score\n e2.victory = not e1_victory\n e2.score = e2_score\n e1.save()\n e2.save()\n return api_success()\n\n\n@permission_required_json('main.manage_competition', fn=direct_object)\ndef mark_table_complete(request, table):\n if table.complete:\n return api_failure('already_complete', _(\"This table is already marked as complete\"))\n if table.detableentry_set.count() == 2:\n table.complete = True\n table.save()\n return api_success()\n try:\n table.make_children()\n return api_success()\n except UnfinishedTableException:\n return api_failure('incomplete_bouts', _('One or more bouts incomplete'))\n\n\n","repo_name":"saty9/allez","sub_path":"main/views/de_table.py","file_name":"de_table.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32807722832","text":"import copy\n# 수족관 세팅\nboard = [[] for _ in range(4)]\nans = 0\n\n# 방향 세팅\ndx = [-1, -1, 0, 1, 1, 1, 0, -1]\ndy = [0, -1, -1, -1, 0, 1, 1, 1]\n\n# 입력값 받기\n\nfor i in range(4) :\n data = list(map(int, input().split()))\n fish = []\n for j in range(4) :\n # 물고기 번호, 방향\n fish.append([data[2*j], data[2*j+1]-1])\n\n board[i] = fish\n\ndef dfs(s_x, s_y, score, board) :\n global ans\n score += board[s_x][s_y][0]\n ans = max(ans, score)\n board[s_x][s_y][0] = 0\n\n # 물고기의 움직임 구현\n for i in range(1, 17) :\n f_x, f_y = -1, -1\n for x in range(4) :\n for y in range(4) :\n if board[x][y][0] == i :\n f_x, f_y = x, y\n break\n\n if f_x == -1 and f_y == -1 :\n continue\n f_d = board[f_x][f_y][1]\n\n for j in range(8) :\n nd = (f_d + j) % 8\n nx = f_x + dx[nd]\n ny = f_y + dy[nd]\n\n if not (0 <= nx < 4 and 0 <= ny < 4) or (nx == s_x and ny == s_y) :\n continue\n board[f_x][f_y][1] = nd\n board[f_x][f_y], board[nx][ny] = board[nx][ny], board[f_x][f_y]\n break\n\n # 청소년 상어의 식사\n s_d = board[s_x][s_y][1]\n for i in range(1, 5):\n nx = s_x + dx[s_d] * i\n ny = s_y + dy[s_d] * i\n\n if (0 <= nx < 4 and 0 <= ny < 4) and board[nx][ny][0] > 0 :\n dfs(nx, ny, score, 
copy.deepcopy(board))\n\n\ndfs(0, 0, 0, board)\nprint(ans)\n\n","repo_name":"uss96/CodingTest","sub_path":"삼전코테/백준 삼전 코테 문제/청소년 상어/청소년 상어.py","file_name":"청소년 상어.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7543969198","text":"from sklearn.cluster import OPTICS, cluster_optics_dbscan\nimport numpy as np\nimport open3d as o3d\n\nx = np.asarray(object_cloud.points)\n\nmodel = OPTICS(min_samples=20, xi=.05, min_cluster_size=.05)\n\nmodel.fit(x)\n\n\npcd = o3d.geometry.PointCloud()\n\npcd.points = o3d.utility.Vector3dVector(x)\n\nlabel = model.labels_\n\npcd_select = pcd.select_by_index(np.transpose(np.where(label==1)))\no3d.visualization.draw([pcd_select])\n\n","repo_name":"thanthamky/Bin-Picking_PointCloud","sub_path":"code/utils/test_optics_clustering.py","file_name":"test_optics_clustering.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4838133058","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 31 14:23:41 2023\n\n@author: iyer\n\"\"\"\n\nimport multiprocessing as mp\nimport acquire, os, sys\nimport datetime\nimport time\nimport subprocess\nimport shutil\nimport hdf2video\nimport serial\nimport h5py\n \ndef start_filming(cam_num, out_path, framerate, duration):\n acquire.start_capture(cam_num, out_path, framerate, duration)\n\ndef make_folders(folder_name):\n out_path = [f'/home/scholz_la/Desktop/Data/videos/Camera0/{folder_name}/',\n f'/home/scholz_la/Desktop/Data/videos/Camera1/{folder_name}/']\n for i in out_path:\n os.makedirs(i, exist_ok = True)\n print(f'Folder created {i}')\n return out_path\n\ndef post_process_h5(folder_name, h5_file, framerate):\n hdf2video.vidwrite(h5_file, framerate)\n shutil.rmtree(folder_name)\n\ndef initiate_acquisition(out_path, time_):\n process_list = []\n out_path = make_folders(folder_name)\n for i in range(n_cams):\n p = mp.Process(target= start_filming, args = [i, out_path[i] + time_, framerate, duration])\n p.start()\n process_list.append(p)\n \n for process in process_list:\n process.join()\n return out_path\n\ndef start_stimulus():\n stimulus_process = subprocess.Popen(['python', '../grating_2.py'])\n \ndef start_trigger():\n trigger_process = subprocess.Popen(['python', 'trigger.py'])\n \nif __name__ == '__main__':\n mp.set_start_method('spawn')\n folder_name = sys.argv[1]\n time_ = datetime.datetime.now().strftime('%Y%m%d_%H%M') #Like 20230201_0845\n print(f'Starting at {time.time()}')\n framerate = 50; # Required FrameRate\n duration = 120; # Required Duration of Filming\n n_cams = 2;\n\n start_trigger();\n\n start_stimulus()\n out_path = initiate_acquisition(folder_name, time_)\n \n # for i in range(n_cams):\n # cam_name = camera_array[i][1]\n # file_path = f'{out_path[i]}{time_}_{cam_name}.h5'\n # post_process_h5(file_path, framerate)\n \n for i in range(2):\n h5_file = [out_path[i] + j for j in os.listdir(out_path[i]) if 'h5' in j][0]\n post_process_h5(out_path[i], h5_file, framerate)\n","repo_name":"monikascholz/leechbehavior","sub_path":"acquisition/start_expt.py","file_name":"start_expt.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35909069622","text":"\"\"\"\nDjango settings for FancyLists project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the 
full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'social_django',\n 'lists',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'social_core.backends.google.GoogleOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nSOCIAL_AUTH_LOGIN_REDIRECT_URL = '/lists/'\n\nTEMPLATES = [\n {\n \"APP_DIRS\": True,\n \"BACKEND\": 'django.template.backends.django.DjangoTemplates',\n \"DIRS\": [os.path.join(BASE_DIR, 'templates')],\n \"OPTIONS\": {\n \"context_processors\": [\n \"social_django.context_processors.backends\",\n \"social_django.context_processors.login_redirect\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n \"debug\": True,\n },\n },\n]\n\nSTATIC_FOUNDATION = os.path.join(os.path.join(BASE_DIR, 'static'), 'foundation')\nSTATIC_JQUERYUI = os.path.join(os.path.join(BASE_DIR, 'static'), 'jquery-ui')\nSTATIC_LISTS = os.path.join(os.path.join(os.path.join(BASE_DIR, 'lists'), 'static'), 'lists')\nSTATICFILES_DIRS = (\n ('css', os.path.join(STATIC_FOUNDATION, 'css')),\n ('css', os.path.join(STATIC_FOUNDATION, 'foundation-icons')),\n ('js', os.path.join(STATIC_FOUNDATION, 'js')),\n ('js', os.path.join(STATIC_JQUERYUI, 'js')),\n ('css', os.path.join(STATIC_LISTS, 'css')),\n ('js', os.path.join(STATIC_LISTS, 'js')),\n)\n\nROOT_URLCONF = 'urls'\n\nWSGI_APPLICATION = 'lists.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/dev/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/dev/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Australia/Canberra'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/dev/howto/static-files/\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\n","repo_name":"MomomeYeah/FancyLists","sub_path":"settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25341913039","text":"from tkinter import *\nfrom functools import partial\n\n\ndef Sum(label, x, y):\n n1 = int(x.get())\n n2 = int(y.get())\n add = n1 + 
n2\n label.config(text='Sum is %d' % add)\n\n\nwin = Tk()\n\n# text = Text()\n# text.insert(INSERT, 'Hello')\n# text.pack()\n\nlabel1 = Label(win, text='First number')\nlabel1.grid(row=1, column=0)\n\nlabel2 = Label(win, text='Second Number')\nlabel2.grid(row=2, column=0)\n\nlabel = Label(win)\nlabel.grid(row=6, column=2)\n\nx1 = StringVar()\nentry1 = Entry(win, textvariable=x1)\nentry1.grid(row=1, column=2)\n\nx2 = StringVar()\nentry2 = Entry(win, textvariable=x2)\nentry2.grid(row=2, column=2)\n\nSum = partial(Sum, label, x1, x2)\nbutton = Button(win, text='Calculate', command=Sum)\nbutton.grid(row=3, column=3)\n\nwin.geometry(\"300x300\")\nwin.mainloop()\n","repo_name":"JayantGoel001/Python-Extras","sub_path":"Tkinter/ContentWidget.py","file_name":"ContentWidget.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"19776808689","text":"import threading\r\nimport pygame\r\nimport client_stub\r\nfrom desenho_x import Xdrawing\r\nfrom navios import Navio\r\n\r\n\r\n# ---------------------\r\n# The grid now is built based on the number of squares in x and y.\r\n# This allows us to associate the size of the space to a matrix or to a dictionary\r\n# that will keep data about each position in the environment.\r\n# Moreover, we now can control the movement of the objects.\r\n# We now separate the control of the environment\r\n\r\nclass GameUI(object):\r\n def __init__(self, stub: client_stub.StubClient, grid_size: int = 50):\r\n dim: tuple = stub.dimension_size()\r\n self.x_max = dim[0]\r\n self.y_max = dim[1]\r\n self.grid = stub.get_grid()\r\n self.stub = stub\r\n self.ships = pygame.sprite.LayeredDirty()\r\n self.ships_list = []\r\n self.drawn_squares = []\r\n self.hit_location = []\r\n self.shot_location = []\r\n self.lock = threading.Lock()\r\n self.is_player_turn = False\r\n self.my_number = 0\r\n self.pl = dict()\r\n\r\n self.x_group = pygame.sprite.LayeredDirty()\r\n self.width, self.height = self.x_max * grid_size, self.y_max * grid_size\r\n self.screen = pygame.display.set_mode((self.width, self.height))\r\n pygame.display.set_caption(\"Batalha Naval\")\r\n self.clock = pygame.time.Clock()\r\n self.grid_size = grid_size\r\n self.background = pygame.Surface(self.screen.get_size())\r\n self.background = self.background.convert()\r\n self.background.fill((30, 144, 255))\r\n self.screen.blit(self.background, (0, 0))\r\n self.draw_grid((0, 0, 0))\r\n pygame.display.update()\r\n\r\n def draw_grid(self, colour: tuple):\r\n \"\"\"\r\n Desenha a grelha\r\n :param colour: Cor rgb\r\n :return: None\r\n \"\"\"\r\n for x in range(0, self.x_max):\r\n pygame.draw.line(self.screen, colour, (x * self.grid_size, 0), (x * self.grid_size, self.height))\r\n for y in range(0, self.y_max):\r\n pygame.draw.line(self.screen, colour, (0, y * self.grid_size), (self.width, y * self.grid_size))\r\n\r\n def find_ships(self):\r\n \"\"\"\r\n Função responsável por encontrar os navios, ao encontrar estes é posto na lista listagem_navios um tuplo com\r\n a primeira posição ocupada pelo navio, o seu comprimento e a sua orientação\r\n :return: uma lista de tuplos\r\n \"\"\"\r\n aux = []\r\n val_x = 1\r\n val_y = 1\r\n listagem_navios = []\r\n auxv2 = []\r\n\r\n for x in range(len(self.grid)):\r\n for y in range(len(self.grid[x])):\r\n if self.grid[x][y] == 1:\r\n aux.append((x, y))\r\n\r\n for i in range(len(aux)):\r\n if (aux[i][0] + 1, aux[i][1]) in aux and (aux[i][0] + 1, aux[i][1]) not in auxv2:\r\n for k in range(1, 
7):\r\n if (aux[i][0] + k, aux[i][1]) in aux:\r\n val_y += 1\r\n auxv2.append((aux[i][0] + k, aux[i][1]))\r\n else:\r\n listagem_navios.append((aux[i], val_y, \"vertical\"))\r\n break\r\n val_y = 1\r\n elif (aux[i][0], aux[i][1] + 1) in aux and (aux[i][0], aux[i][1] + 1) not in auxv2:\r\n for k in range(1, 7):\r\n if (aux[i][0], aux[i][1] + k) in aux:\r\n val_x += 1\r\n auxv2.append((aux[i][0], aux[i][1] + k))\r\n else:\r\n listagem_navios.append((aux[i], val_x, \"horizontal\"))\r\n break\r\n val_x = 1\r\n elif (aux[i][0], aux[i][1]) not in auxv2:\r\n listagem_navios.append((aux[i], 1, \"vertical\"))\r\n return listagem_navios\r\n\r\n def create_ships(self, size: int):\r\n \"\"\"\r\n Função responsável por criar os objetos Navio, ao criar-los este são postos dentro da lista self.ship_list\r\n :param size: Tamanho de cada célula\r\n :return: None\r\n \"\"\"\r\n listagem_navios = self.find_ships()\r\n for i in listagem_navios:\r\n pos_x = int(i[0][1])\r\n pos_y = int(i[0][0])\r\n length = int(i[1])\r\n direcao = str(i[2])\r\n navioadd = Navio(pos_x, pos_y, length, direcao, size)\r\n self.ships_list.append(navioadd)\r\n self.ships.add(navioadd)\r\n\r\n def ship_size(self, y, x):\r\n \"\"\"\r\n Função responsável por calcular o tamanho do navio\r\n :param y: Valor y\r\n :param x: Valor x\r\n :return: tamanho do navio\r\n \"\"\"\r\n size = 0\r\n x -= 1\r\n y -= 1\r\n j = y\r\n i = x\r\n if self.grid[x][y] == 1:\r\n size += 1\r\n while j + 1 < self.y_max and self.grid[x][j + 1] == 1:\r\n j += 1\r\n size += 1\r\n while y - 1 >= 0 and self.grid[x][y - 1] == 1:\r\n y -= 1\r\n size += 1\r\n while i + 1 < self.x_max and self.grid[i + 1][y] == 1:\r\n i += 1\r\n size += 1\r\n while x - 1 >= 0 and self.grid[x - 1][y] == 1:\r\n x -= 1\r\n size += 1\r\n return size\r\n\r\n def hit_or_miss(self, x, y):\r\n \"\"\"\r\n Função responsável por procurar se uma posição foi acertada ou não\r\n :param x:\r\n :param y:\r\n :return: uma string\r\n \"\"\"\r\n if self.grid[y-1][x-1] == 1:\r\n return \"hit\"\r\n return \"miss\"\r\n\r\n def draw_x_miss(self, pos):\r\n \"\"\"\r\n Função que desenha a sprite X de falhar\r\n :param pos: posição\r\n :return: None\r\n \"\"\"\r\n x = pos[0]\r\n y = pos[1]\r\n self.x_group.add(Xdrawing(x, y, 2, self.grid_size))\r\n rects = self.x_group.draw(self.screen)\r\n pygame.display.update(rects)\r\n\r\n def draw_x_sunken(self, pos, player):\r\n \"\"\"\r\n Função que desenha a sprite X de acertar\r\n :param pos: posição\r\n :return: None\r\n \"\"\"\r\n x = pos[0]\r\n y = pos[1]\r\n self.x_group.add(Xdrawing(x, y, player, self.grid_size))\r\n rects = self.x_group.draw(self.screen)\r\n pygame.display.update(rects)\r\n\r\n def check_hit(self):\r\n \"\"\"\r\n Função que verifica se acertou num navio\r\n :return: None\r\n \"\"\"\r\n self.hit_location = self.stub.get_hit()\r\n for i in self.hit_location:\r\n if i[0] not in self.drawn_squares:\r\n self.draw_x_sunken((i[0][0], i[0][1]), i[1])\r\n self.drawn_squares.append(i[0])\r\n\r\n def set_players(self):\r\n \"\"\"\r\n Recebe os jogadores e o numero de jogadores do servidor\r\n :return: None\r\n \"\"\"\r\n self.pl = self.stub.get_players()\r\n nr_players = self.stub.get_nr_players()\r\n print(\"Nr. 
de jogadores:\", nr_players)\r\n print(\"Jogadores no jogo:\", self.pl)\r\n\r\n def add_player(self, name: str) -> int:\r\n return self.stub.add_player(name)\r\n\r\n def run(self):\r\n \"\"\"\r\n Função que corre o game_client\r\n :return: None\r\n \"\"\"\r\n self.create_ships(self.grid_size)\r\n rects = self.ships.draw(self.screen)\r\n pygame.display.update(rects)\r\n nome = input(\"Insira o seu nome: \")\r\n self.my_number = self.add_player(nome)\r\n self.stub.start_game()\r\n self.set_players()\r\n end = False\r\n while not end:\r\n self.shot_location = self.stub.get_shot()\r\n if len(self.shot_location) == 0:\r\n self.is_player_turn = self.my_number == 0\r\n else:\r\n if self.shot_location[-1][1] == self.my_number:\r\n self.is_player_turn = False\r\n else:\r\n self.is_player_turn = True\r\n data = self.stub.get_end()\r\n if data[0] == 9:\r\n print(\"Pontuação obtida:\", data[1][self.my_number])\r\n end = True\r\n self.check_hit()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n end = True\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\r\n end = True\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_m:\r\n end = True\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if self.is_player_turn:\r\n with self.lock:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n grid_x = mouse_x // self.grid_size + 1\r\n grid_y = mouse_y // self.grid_size + 1\r\n if self.hit_or_miss(grid_x, grid_y) == \"hit\":\r\n if not any(coord[0][0] == grid_x - 1 and coord[0][1] == grid_y - 1 for coord in\r\n self.hit_location):\r\n print(\"Acertou num barco com o tamanho:\", self.ship_size(grid_x, grid_y))\r\n self.stub.send_hit(((grid_x - 1, grid_y - 1), self.my_number))\r\n self.stub.send_shot(((grid_x - 1, grid_y - 1), self.my_number))\r\n self.draw_x_sunken((grid_x - 1, grid_y - 1), self.my_number)\r\n elif self.hit_or_miss(grid_x, grid_y) == \"miss\":\r\n self.draw_x_miss((grid_x - 1, grid_y - 1))\r\n self.stub.send_shot(((grid_x - 1, grid_y - 1), self.my_number))\r\n print(\"Não acertou em nenhum barco.\")\r\n self.is_player_turn = False\r\n pygame.time.wait(100)\r\n","repo_name":"Jarjarbinks-exe/BatalhaNavalSD","sub_path":"game_client.py","file_name":"game_client.py","file_ext":"py","file_size_in_byte":9863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24936494956","text":"num_list = [int(num) for num in input().split(\", \")]\nbeggar_count = int(input())\ncounter = 0\ncurrent_order = 1\nsum_list = []\n\nwhile current_order <= beggar_count:\n order = current_order\n sum_nums = 0\n\n for idx in range(0, len(num_list)):\n counter += 1\n if counter % order == 0:\n sum_nums += num_list[idx]\n order += beggar_count\n sum_list.append(sum_nums)\n current_order += 1\n counter = 0\n\nprint(sum_list)\n","repo_name":"Polishko/SoftUni","sub_path":"Python Fundamentals/Exercises/Exercise_3/qu_5_qu_4.py","file_name":"qu_5_qu_4.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6009455938","text":"import sys\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent\nreadme = (this_directory / \"README.md\").read_text()\n\ntry:\n from skbuild import setup\nexcept ImportError:\n print(\n \"Please update pip, you need pip 10 or greater,\\n\"\n \" or you need to install the PEP 518 requirements in pyproject.toml yourself\",\n file=sys.stderr,\n )\n raise\n\nfrom setuptools import 
find_packages\n\nsetup(\n name=\"flexbox\",\n version=\"0.0.2\",\n description=\"FlexBox layout engine for Python\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Thijs Vogels\",\n url=\"https://github.com/tvogels/flexbox\",\n license=\"MIT\",\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n cmake_install_dir=\"src/flexbox\",\n include_package_data=True,\n install_requires=[\"domtree>=0.0.4\", \"typing_extensions>=4\", \"numpy\"],\n extras_require={\"test\": [\"pytest\"]},\n python_requires=\">=3.7\",\n)\n","repo_name":"tvogels/flexbox","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"11967789799","text":"#Write an if-else statement that assigns 0 to the variable b if the variable a is less\n#than 10. Otherwise, it should assign 99 to the variable b .\na=int (input(\"enter the value of 'a', choose betwween 0 to 100 \"))\ncompare_v=10\nif (a< compare_v):\n b=0\n print (\"the value of b is \" , b)\nelse:\n b=99\n print (\"the value of b is \", b) ","repo_name":"entonu1224/python_fun","sub_path":"fun/code-re.py","file_name":"code-re.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35146034878","text":"#input: sciengine URLs\n#output: json files\n\nfrom functions import *\n\nDEBUG = True\nBASE_URL = \"https://www.sciengine.com/sands/issue/\"\nSTART_VOLUME = 1\nEND_VOLUME = 2\narticleLinks = []\narticleCount = 0\n\ndef getLinksFromSoup(soup):\n articleLinks = []\n # finding links to HTML articles\n for a in soup.find_all(\"a\", target=\"_blank\"):\n #print(a)\n if (a[\"href\"].startswith(\"https://doi.org\")):\n articleLink = a[\"href\"]\n articleLinks.append(articleLink)\n print(articleLink)\n return articleLinks\n\nhumanReadable = []\npageLinks = []\npage = 1\n# loop through links\nfor volumeNum in range(START_VOLUME, END_VOLUME + 1):\n print(\"volume: \" + str(volumeNum) + \"/\" + str(END_VOLUME))\n page += 1\n for issueNum in range(0, 1):\n print(\"issue: \" + str(issueNum) + \"/\" + str(0))\n pageLink = BASE_URL + str(volumeNum) + \"/\" + str(issueNum)\n if (DEBUG): print(\"pageLink: \" + pageLink)\n pageSoup = URLtoSoup(pageLink)\n articleLinks = getLinksFromSoup(pageSoup)\n articleCount += len(articleLinks)\n count = 1\n for articleLink in articleLinks:\n if (DEBUG): print(\"article: \" + str(count) + \"/\" + str(len(articleLinks)))\n count += 1\n articleSoup = URLtoSoup(articleLink)\n textList = articleSoup.find_all(\"p\")\n #print(textList)\n\n for aTag in textList:\n soup = aTag\n rawText = soup.get_text().strip()\n humanReadable.append(soup.get_text().strip())\n\n\nif (DEBUG): print(\"1st element: \" + humanReadable[0])\nif (DEBUG): print(\"last element: \" + humanReadable[-1])\n\nprint(\"articleCount:\" + str(articleCount))\n\nlist2json(humanReadable, \"sciEngineEN.json\")","repo_name":"theTY2002/web-crawling","sub_path":"sciEngineENDownloader.py","file_name":"sciEngineENDownloader.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7946223728","text":"#!/usr/bin/env python\n#!/-*-coding:utf-8-*-\nimport RPi.GPIO as GPIO\nimport pygame\nimport time\nimport 
datetime\n\nGPIO.setmode(GPIO.BOARD)\n\nbin_high=[13,15]\nGPIO.setup(bin_high,GPIO.OUT,initial=GPIO.HIGH)\n#GPIO.12\nbin_low=[11,16,18,22,29,31,32,33,35,36,37,38,40]\nGPIO.setup(bin_low,GPIO.OUT,initial=GPIO.LOW)\n\nGPIO.setup(7,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\n\n#pygame.mixer.init()\n#pygame.mixer.music.load('getup.mp3')\n\nD_zero=[31,32,33,35,36,37]\nD_one=[32,33]\nD_two=[31,32,35,36,38]\nD_three=[31,32,33,35,38]\nD_four=[32,33,37,38]\nD_five=[31,33,35,37,38]\nD_six=[31,33,35,36,37,38]\nD_seven=[31,32,33]\nD_eight=[31,32,33,35,36,37,38]\nD_nine=[31,32,33,35,37,38]\n\nD_list=[D_zero,D_one,D_two,D_three,D_four,D_five,D_six,D_seven,D_eight,D_nine]\n\ndef display(wei,num):\n GPIO.output(wei,GPIO.HIGH)\n num_n=D_list[num]\n for i in num_n:\n GPIO.output(i,GPIO.LOW)\n time.sleep(0.0005)\n GPIO.output(i,GPIO.HIGH)\n time.sleep(0.0005)\n GPIO.output(wei,GPIO.LOW)\n\ndef xianshi(shi,feng):\n shi_o=shi//10\n shi_t=shi%10\n feng_o=feng//10\n feng_t=feng%10\n display(16,shi_o)\n display(18,shi_t)\n GPIO.output(18,GPIO.HIGH)\n GPIO.output(40,GPIO.LOW)\n time.sleep(0.001)\n GPIO.output(18,GPIO.LOW)\n GPIO.output(40,GPIO.HIGH)\n display(22,feng_o)\n display(29,feng_t)\n\n#def fan(num):\n# GPIO.output(11,GPIO.HIGH)\n# GPIO.output(12,GPIO.LOW)\n# time.sleep(num)\n# GPIO.output(11,GPIO.LOW)\n# GPIO.output(13,GPIO.LOW)\n\ndef light(num):\n\tGPIO.output(13,GPIO.LOW)\n\ttime.sleep(num)\n\tGPIO.output(13,GPIO.HIGH)\n\tGPIO.output(15,GPIO.LOW)\n\ttime.sleep(num)\n\tGPIO.output(13,GPIO.LOW)\n\ttime.sleep(num)\n\tGPIO.output(13,GPIO.HIGH)\n\tGPIO.output(15,GPIO.HIGH)\n\nhour=input('Input alarm clock hour:')\nminute=input('Input alarm clock minute:')\nprint('The program is running...')\nwhile True:\n\tnow_hour= datetime.datetime.now().strftime('%H')\n\tnow_minute= datetime.datetime.now().strftime('%M')\n\tnowh=int(now_hour)\n\tnowm=int(now_minute)\n\txianshi(nowh,nowm)\n\tif hour == now_hour and minute == now_minute:\n\t\tprint('Now time is '+now_hour+':'+now_minute+'!')\n\t\t#pygame.mixer.music.play()\n\t\tsign=0\n\t\twhile True:\n if GPIO.input(7)==1:\n print('The alarm clock closes!')\n sign=1\n break\n else:\n GPIO.output(11,GPIO.HIGH)#11-蜂鸣器\n light(0.3)\n GPIO.output(11,GPIO.LOW)\n\t\tif sign==1:\n break\n\ntime.sleep(1)\nwhile True:\n now_hour= datetime.datetime.now().strftime('%H')\n now_minute= datetime.datetime.now().strftime('%M')\n nowh=int(now_hour)\n nowm=int(now_minute)\n xianshi(nowh,nowm)\n if GPIO.input(7)==1:\n print('End of the program!')\n GPIO.cleanup()\n exit(0)\n","repo_name":"Sch-ray/raspberry-pi","sub_path":"闹钟&蜂鸣器&音乐&LED.py","file_name":"闹钟&蜂鸣器&音乐&LED.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33350371154","text":"import time\nfrom Strategies.BollingerBands import BollingerBands\nfrom Server.trade import trade\n\n\nclass Server:\n STOCK_QTY = 10\n\n def __init__(self, strategy, stock, balance, after_trade):\n self.strategy = strategy\n self.bollinger_bands = BollingerBands()\n self.trade = trade(stock, balance=balance)\n self.after_trade = after_trade\n\n def run(self, time_to_repeat, delay_in_seconds):\n self.__one_time_action()\n # self.bollinger_bands.plot_data()\n while time_to_repeat > 0:\n time.sleep(delay_in_seconds)\n time_to_repeat -= 1\n self.__repeat_action()\n if time_to_repeat % self.after_trade == 0:\n self.trade.insert_total_assets_for_hit_ration(self.bollinger_bands.price_at_end())\n # self.bollinger_bands.plot_data()\n print('--------------------')\n 
print(self.trade.get_backtesting_result(self.bollinger_bands.price_at_end()))\n print('--------------------')\n print(self.trade.get_hit_ratio())\n print('--------------------')\n print(self.trade.get_history())\n\n def __one_time_action(self):\n self.bollinger_bands.train_strategy(self.strategy.get_train_data())\n\n def __repeat_action(self):\n has_sig, sig_type, price_per_stock = self.bollinger_bands.add_new_data(self.strategy.get_new_data())\n if has_sig:\n if sig_type == \"buy\":\n self.trade.buy(price_per_stock, self.STOCK_QTY)\n else:\n self.trade.sell(price_per_stock, self.STOCK_QTY)\n stock, balance = self.trade.get_curr_statistics()\n print(\"stock : \", stock, \" balance : \", balance)\n # self.bollinger_bands.plot_data()\n\n","repo_name":"rohitjain00/Trading-Bot","sub_path":"Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"3214736889","text":"#!/usr /bin/env python\n# -*- coding: utf-8 -*-\n# Created by qiu on 16-6-26\n#\n\nimport os\nimport hashlib\nimport datetime\nfrom PIL import Image\nfrom handler import BaseHandler\n\nclass AdminHandler(BaseHandler):\n\n def get(self, post_type):\n self.render(\"admin.html\")\n def post(self, post_type):\n\n self.set_header(\"Content-Type\", \"application/json\")\n\n if post_type == \"book\":\n name = self.get_argument(\"name\", None)\n author = self.get_argument(\"author\", None)\n tag = self.get_argument(\"tag\", None)\n mode = self.get_argument(\"mode\", None)\n\n file = self.request.files[\"img\"]\n if not file:\n self.write_error(404)\n return\n\n file = file[0]\n upload_path = os.path.join(self.settings[\"static_path\"], \"image\")\n\n if not os.path.exists(upload_path):\n os.mkdir(upload_path)\n\n file_name = file[\"filename\"]\n m = hashlib.md5()\n m.update(file_name.encode(\"utf-8\"))\n img = m.hexdigest() + \".\" + file_name.split(\".\")[-1]\n file_img = os.path.join(upload_path, img)\n\n with open(file_img, 'wb') as up:\n up.write(file['body'])\n content_type = file[\"content_type\"]\n\n im = Image.open(file_img)\n im_reszie = im.resize((170, 150), Image.ANTIALIAS)\n im_reszie.save(file_img)\n\n self.add_book(img, author, name, tag, mode)\n self.write({\"name\": name, \"author\": author, \"mode\": mode, \"tag\": tag,\n \"img\": img, \"content_type\": content_type})\n\n elif post_type == \"tag\":\n index = self.get_argument(\"index\", None)\n name = self.get_argument(\"name\", None)\n self.add_tag(index, name)\n self.write({ \"index\": index, \"name\": name})\n\n elif post_type == \"coll\":\n colletion = self.get_argument(\"collection\", None)\n self.write({\"collection\": colletion, \"status\": self.drop_collection()})\n self.flush()\n\n def add_book(self, img, author, name, tag, mode):\n self.db.book.insert({\n \"img\": \"image/\" + img,\n \"author\": author,\n \"name\": name,\n \"tag\": tag,\n \"mode\": mode,\n \"createdAt\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n })\n\n def add_tag(self, index, name):\n self.db.tags.insert({\n \"index\": index,\n \"name\": name,\n })\n\n def drop_collection(self, col):\n reslut = \"\"\n if col == \"tags\":\n self.db.tags.drop()\n reslut = \"drop tags\"\n elif col == \"book\":\n self.db.book.drop()\n reslut = \"drop book\"\n elif col == \"online\":\n self.db.online.drop()\n reslut = \"drop online\"\n return reslut\n\n def create_collection(self, coll=\"online\", size=1024, max=6, flag = True):\n self.db.create_collection(coll, size 
= size, max = max, capped = flag)\n\n","repo_name":"qiu0130/Qkindle","sub_path":"qkindle/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33126713747","text":"def binarySearch(arr, item):\n low = 0\n high = len(arr)-1\n\n while low <= high:\n mid = int((low + high)/2)\n guess = arr[mid]\n\n if guess == item:\n return mid\n\n if guess > item:\n high = mid -1\n\n if guess < item:\n low = mid +1\n\n return None\n\n\ndef findSmallest(arr):\n smallest = arr[0]\n smallestIndex = 0\n for i in range(0,len(arr)):\n if arr[i] < smallest:\n smallest = arr[i]\n smallestIndex = i\n return smallestIndex\n\ndef selectionSort(arr):\n newArr = []\n for i in range(0,len(arr)):\n smallest = findSmallest(arr)\n newArr.append(arr.pop(smallest))\n return newArr\n\n\n\n\na = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17]\nb = [9,32,53,4,1,5,3,55,30953,4,290,44]\n\nprint(selectionSort(b))\n#print (binarySearch(a, 5))","repo_name":"liederchris1/code-samples","sub_path":"Data Struct and Alg/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17268859883","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nx=np.arange(-15,3*np.pi,0.01)\ny=np.sin(x+math.pi)\n\nplt.title('具有初始相位的sin(t)的图像')\nplt.xlabel(' t')\nplt.ylabel('x(t)=Asin(ωt+φ)')\n\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\nplt.plot(x,y)\nplt.show()","repo_name":"243459529/XinHaoXiTong","sub_path":"sin.py","file_name":"sin.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74313078249","text":"#!/usr/bin/python3\nimport sys\n\n\ndef main():\n args = sys.argv\n n = len(args)\n sum = 0\n if (n == 1):\n print(\"0\")\n else:\n for i in range(1, n):\n sum = sum + int(sys.argv[i])\n print(sum)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IyasuH/alx-higher_level_programming","sub_path":"0x02-python-import_modules/3-infinite_add.py","file_name":"3-infinite_add.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74753857767","text":"import open3d as o3d\nimport numpy as np\nimport sys\nimport os\n\ndef main():\n pcd_file = sys.argv[1] + sys.argv[2]\n center_point = [float(sys.argv[3]), float(sys.argv[4]), float(sys.argv[5])]\n area_radius = float(sys.argv[6]) \n filename = sys.argv[7]\n print(\"=\"*20)\n print(\"Point cloud file: \" + str(pcd_file))\n print(\"Center_point: \" + str(center_point))\n print(\"Area radius: \" + str(area_radius))\n print(\"New filename: \" + str(filename))\n special = None\n if len(sys.argv) > 8:\n special = sys.argv[8]\n print(\"Special cropping name: \" + str(special))\n crop_pcd(pcd_file, center_point, area_radius, filename, special)\n print(\"=\"*20)\n\n\ndef crop_pcd(pcd_file, center_point, area_radius, filename, special):\n print(\"Reading point cloud file...\")\n #pcd = o3d.io.read_point_cloud(os.getcwd() + \"/src/urbanroadsweeper/urbanroadsweeper/\" + pcd)\n pcd = o3d.io.read_point_cloud(pcd_file)\n print(str(pcd_file) + \" consists of \" + str(len(pcd.points)) + \" points.\")\n print(\"Cropping point cloud...\")\n \n all_points = np.asarray(pcd.points)\n all_points = 
all_points - np.array(center_point) \n all_points = all_points[ np.linalg.norm(all_points[:,0:2], axis=1) < area_radius]\n\n new_pcd = o3d.geometry.PointCloud()\n new_pcd.points = o3d.utility.Vector3dVector(all_points)\n\n if special == \"bridge\":\n cell_bounding_box = o3d.geometry.AxisAlignedBoundingBox(\n [-108.32318115, -51.00197983, -100],\n [76.61431885, 54.74802399, 100]\n )\n new_pcd = new_pcd.crop(cell_bounding_box) \n all_points = np.asarray(new_pcd.points)\n all_points= all_points[~(np.logical_and(all_points[:,0] < -45, all_points[:,1] < -1))] \n new_pcd.points = o3d.utility.Vector3dVector(all_points)\n\n print(\"Cropped Point cloud \" + str(filename) + \" consists of \" + str(len(new_pcd.points)) + \" points.\")\n o3d.io.write_point_cloud(filename, new_pcd)\n print(\"Done cropping\")\n\nif __name__ == '__main__':\n main()","repo_name":"danneengelson/urbanroadsweeper","sub_path":"src/urbanroadsweeper/crop_pcd.py","file_name":"crop_pcd.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"16533895293","text":"import collections\nimport nltk\nimport re\n\nclass BIOLabels(object):\n B = \"B\"\n I = \"I\"\n O = \"O\"\n\n\nclass TokenLabel(object):\n def __init__(self, bio, entity):\n self.bio = bio\n self.entity = entity\n\n# ====================== Parse processing ===================================\n\n\ndef get_parse(parser, sentence):\n result = parser.raw_parse(sentence)\n parse = next(result)\n return parse\n\ndef indexify(index, word):\n return str(index) + \"_\" + word\n\ndef unindexify(indexed_word):\n str_index, word = indexed_word.split(\"_\")\n return int(str_index), word\n\ndef enumerate_parse(node, token_list, index=0):\n if len(node) == 1 and type(node[0]) == str:\n (maybe_word, ) = node\n i_word = indexify(index, maybe_word)\n _ = node.pop(0)\n node.insert(0, i_word)\n token_list.append(i_word)\n return index + 1\n else:\n for child in node:\n index = enumerate_parse(child, token_list, index)\n return index\n\n\ndef get_span(node):\n label = node.label()\n nodes = [node]\n tokens = []\n while nodes:\n curr_node = list(nodes.pop(0))\n if len(curr_node) == 1 and type(curr_node[0]) == str:\n (maybe_word, ) = curr_node\n tokens.append(maybe_word)\n else:\n child_list = [i for i in curr_node]\n nodes = child_list + nodes\n\n just_tokens = [x.split(\"_\")[1] for x in tokens]\n start_token = int(tokens[0].split(\"_\")[0])\n end_token = int(tokens[-1].split(\"_\")[0])\n return (start_token, end_token), label, just_tokens\n\ndef get_all_spans(parse, span_list):\n idxs, label, tokens = get_span(parse)\n span_list[idxs] = (label, tokens)\n for child in parse:\n if type(child) != str:\n get_all_spans(child, span_list)\n\n\n# ====================== Coref processing ===================================\n\nlabel_splitter = re.compile(\"([0-9]+|\\(|\\))\")\n\ndef get_entities_from_label(label):\n curr_entities = []\n to_end = []\n contents = re.findall(label_splitter, label)\n while contents:\n elem = contents.pop(0)\n if elem == \"(\":\n # Should start a new entity span\n num = contents.pop(0)\n assert num.isdigit()\n curr_entities.append(num)\n\n if contents:\n # Check for single-token mentions\n maybe_close_brace = contents.pop(0)\n if maybe_close_brace == \")\":\n to_end.append(num)\n else:\n contents.insert(0, maybe_close_brace)\n\n elif elem != \")\":\n # Check for span ending\n close_brace = contents.pop(0)\n assert close_brace == \")\"\n to_end.append(elem)\n return 
curr_entities, to_end\n\ndef change_label(bio_label, token_label):\n token_label.bio = bio_label\n return token_label\n\nclass Sentence(object):\n def __init__(self, lines, idx):\n sequences = list(zip(*[line.strip().split() for line in lines]))\n coref_tags = [line.strip().split(\"\\t\")[:1] for line in lines]\n\n (document_ids, _, _, tokens, pos, cparse) = sequences[0:6]\n coref_tags = sequences[-1]\n self.tokens = tokens\n\n assert len(set(document_ids)) == 1\n self.document_id = document_ids[0]\n self.idx = idx\n self.sentence_id = self.document_id + \"_\" + str(self.idx).zfill(4)\n\n #self.dep_tree = self.make_dep_tree()\n self.con_tree = self.make_con_tree(cparse, pos, tokens)\n #self.con_spans = self.get_con_spans()\n self.coref_spans = self.get_coref_spans(tokens, coref_tags)\n\n token_list = []\n enumerate_parse(self.con_tree, token_list)\n self.parse_spans = {}\n get_all_spans(self.con_tree, self.parse_spans)\n self.match_spans()\n\n def match_spans(self):\n for span, entities in self.coref_spans.items():\n (first, last) = span\n if span in self.parse_spans:\n tree_label, tokens = self.parse_spans[span]\n print(\"\\t\".join([\"Y\", str(first), str(last), \"|\".join(entities), tree_label, \" \".join(tokens)]))\n else:\n print(\"\\t\".join([\"N\", str(first), str(last), \"|\".join(entities), \"-\", \" \".join(self.tokens[first:last + 1])]))\n\n def make_con_tree(self, cparse, pos, tokens):\n new_cparse = \"\"\n for i, fragment in enumerate(\" \".join(cparse).split(\"*\")):\n new_cparse += fragment\n if i == len(tokens):\n break\n new_cparse += \"\".join([\"(\", pos[i], \" \", tokens[i], \")\"])\n\n return nltk.tree.ParentedTree.fromstring(new_cparse)\n\n def get_coref_spans(self, tokens, labels):\n span_map = collections.defaultdict(list)\n open_spans = collections.defaultdict(list)\n for i, (token, label) in enumerate(zip(tokens, labels)):\n span_starts, span_ends = get_entities_from_label(label)\n for label in span_starts:\n open_spans[label].append(i)\n for label in span_ends:\n open_idx = open_spans[label].pop(-1)\n label_span = (open_idx, i)\n span_map[label_span].append(label)\n return span_map\n\n\n\n\n\nclass Document(object):\n def __init__(self, input_sentence_lines):\n self.sentences = []\n for i, sentence_lines in enumerate(input_sentence_lines):\n self.sentences.append(Sentence(sentence_lines, i))\n\n\n def create_e2e_input(self):\n pass\n\n\nclass Dataset(object):\n \"\"\" Get documents from preprocessed (BIO + deps) file.\"\"\"\n def __init__(self, filename):\n self.documents = self.get_documents_from_file(filename)\n\n def get_documents_from_file(self, filename):\n\n curr_sentence = []\n curr_doc = []\n curr_doc_id = None\n doc_lines = []\n\n with open(filename, 'r') as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n if not line.strip():\n curr_doc.append(curr_sentence)\n curr_sentence = []\n else:\n curr_sentence.append(line)\n fields = line.strip().split()\n doc_id = fields[0]\n if curr_doc_id is not None:\n if doc_id != curr_doc_id:\n doc_lines.append(curr_doc)\n curr_doc = []\n curr_doc_id = doc_id\n\n documents = []\n for single_doc_lines in doc_lines:\n documents.append(Document(single_doc_lines))\n","repo_name":"nnkennard/coref_analysis","sub_path":"coref_lib.py","file_name":"coref_lib.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22448702301","text":"class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n 
self.left = left\n self.right = right\n\nclass Solution:\n def BalancedTree(self, head:TreeNode):\n\n def dfshelper(head):\n if head is None:\n return [True, 0]\n left = dfshelper(head.left)\n right = dfshelper(head.right)\n balanced = left[0] and right[0] and abs(left[1] - right[1]) <= 1\n\n return [balanced, 1 + max(left[1], right[1])]\n\n return dfshelper(head)[0]\n\n \nBin_tree = TreeNode(3)\nBin_tree.left = TreeNode(9)\nBin_tree.right = TreeNode(20)\nBin_tree.right.left = TreeNode(15)\nBin_tree.right.right = TreeNode(7)\n\nB_tree = Solution()\nB_tree.BalancedTree(Bin_tree)","repo_name":"Dondecoder/LeetCode","sub_path":"Trees/Q20 Balanced Binary Tree/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31045385536","text":"ls = [\n 1, 2, 3,\n [1, 2, 3],\n \"Apple\", \"556\",\n [\"Arpit\", 23, \"Male\"],\n 4.98, 8.90,\n [9.00, \"Banana\"]\n]\n\n# List counting funcn\n\n\ndef count_ls(l):\n var = 0\n for i in l:\n if type(i) == list:\n var += 1\n return var\n\n\nprint(count_ls(ls))\n","repo_name":"arpitgupta630/Python","sub_path":"Youtube/C05E06.py","file_name":"C05E06.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72990207529","text":"# importing required libraries\nfrom datetime import date\nfrom sre_constants import BRANCH\nimport mysql.connector\n \n\n# Coneccion a la base de datos MySQL\ndataBase = mysql.connector.connect(\n host =\"localhost\",\n user =\"root\",\n passwd =\"Ramirito-51\",\n database = \"BaseDatosPy\"\n)\n\ndef crearTableTeacher():\n # Creo la tabla en la base de datos\n teacherRecord = \"\"\"CREATE TABLE TEACHER (\n NAME VARCHAR(20) NOT NULL,\n LASTNAME VARCHAR(50),\n CEDULA INT NOT NULL,\n USERNAME VARCHAR(15),\n EDAD INT\n )\"\"\"\n # manda a executar en la base de datos el comando de my SQL que le envio\n cursorObject.execute(teacherRecord)\n\ndef insertData(name,lastName,cedula,userName,edad):\n sql = \"INSERT INTO TEACHER (NAME, LASTNAME, CEDULA, USERNAME, EDAD)\\\n VALUES (%s, %s, %s, %s, %s)\"\n val = [(name,lastName,cedula,userName,edad)]\n cursorObject.executemany(sql, val)\n dataBase.commit()\n# disconnecting from server \n\ndef selectData():\n query = \"\"\"SELECT NAME,USERNAME FROM TEACHER\"\"\"\n cursorObject.execute(query)\n myresult = cursorObject.fetchall()\n for x in myresult:\n print(x)\n\ndef updateData():\n dataUpdate = input(\"Ingrese el nombre de usuario del profesor que desea actualizar\\n\")\n dateToUpdate = int(input(\"Eliga el datos que desea modificar:\\n\" +\n \"1.Nombre\\n2.Apellido\\n3.Cedula\\n4.UserName\\n5.Edad\\n\"))\n data = \"\"\n if dateToUpdate == 1:\n data = \"NAME\"\n elif dateToUpdate == 2:\n data = \"LASTNAME\"\n elif dateToUpdate == 3:\n data = \"CEDULA\"\n elif dateToUpdate == 4:\n data = \"CEDULA\"\n elif dateToUpdate == 5:\n data = \"EDAD\"\n else:\n print(\"Eleccion fuera del rango\")\n dataOfAtribute= input(\"Ingrese el dato por el cual va a reemplazar: \")\n query = \"UPDATE TEACHER SET \"+ data +\" = '\"+str(dataOfAtribute) + \"' WHERE USERNAME = '\" + str(dataUpdate) +\"'\"\n print(query)\n cursorObject.execute(query)\n dataBase.commit()\n\n# Preparando un cursoObject\ncursorObject = dataBase.cursor()\nopcion = int(input(\"Ingrese la opcion que desee realizar:\\n\" +\n \"1.Agregar un profesor a la base de datos.\\n\" +\n \"2.Actualizar registros de la base de datos\\n\" + \n 
\"3.Consultar informacion de la base de datos\\n\"\n \"4.Crear tabla de profesores\\n\"))\nif opcion == 1:\n name= input(\"Nombre del profesor: \")\n lastName= input(\"Apellido del profesor: \")\n cedula= input(\"Cedula del profesor: \")\n userName= input(\"Nombre de usuario del profesor: \")\n edad= input(\"Edad del profesor: \")\n insertData(name,lastName,cedula,userName,edad)\nelif opcion == 2:\n updateData()\nelif opcion == 3:\n selectData()\nelif opcion == 4:\n crearTableTeacher()\nelse:\n print(\"Eligio una opcion fuera del rango\")\n\n# Me desconecto de la base de datos\ndataBase.close()","repo_name":"Knowledge-Based-Systems/ejemplobasedatos-NixonVuele","sub_path":"BaseDatosPy.py","file_name":"BaseDatosPy.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5626031472","text":"__author__ = 'zhuangli'\nfrom PyML import *\nfrom PyML.classifiers.svm import SVR\nimport sys\ndef train(trainfile,tempRes,predictFile):\n s=SVR()\n data = SparseDataSet(trainfile,numericLabels = True)\n s.train(data)\n s.save(tempRes)\n new_svm = SVR()\n new_svm.load(tempRes, data)\n test_data=SparseDataSet(\"data/test.data\",numericLabels = True)\n results = new_svm.test(test_data)\n for i in results:\n for predict in i.Y:\n with open(predictFile, \"a\") as myfile:\n myfile.write(str(predict)+' ')\nif __name__ == \"__main__\":\n if sys.argv[1]=='comment':\n train(\"data/comment.data\",\"data/tempResComment\",\"data/predictComment\")\n elif sys.argv[1]=='forward':\n train(\"data/forward.data\",\"data/tempResForward\",\"data/predictForward\")\n elif sys.argv[1]=='like':\n train(\"data/like.data\",\"data/tempResLike\",\"data/predictLike\")\n","repo_name":"zhuang-li/Weibo","sub_path":"TrainPredict.py","file_name":"TrainPredict.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73227326889","text":"import cv2\nimport numpy as np\nimport time\nimport os\nimport advancedcv.hand_tracking as htm\n\n\nbrush_thickness = 15\neraser_thickness = 100\n\nfolder_path = 'headers'\nmy_list = os.listdir(folder_path)\nmy_list.pop(0)\nmy_list.sort()\n\noverlay_list = []\n\nfor img_path in my_list:\n image = cv2.imread(f'{folder_path}/{img_path}')\n overlay_list.append(image)\n\nheader = overlay_list[0]\ndraw_color = (255, 0, 255)\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 960)\ncap.set(4, 540)\n\nimg_canvas = np.zeros((540, 960, 3), np.uint8)\n\ndetector = htm.HandDetector(detection_confidence=0.85)\nxp, yp = 0, 0\n\np_time = 0\n\nwhile True:\n # Import the image\n success, img = cap.read()\n img = cv2.flip(img, 1)\n\n # Find hand landmarks\n img = detector.find_hands(img, draw=False)\n lm_list = detector.get_position(img, draw=False)\n\n if len(lm_list) != 0:\n\n # tip of index and middle fingers\n x1, y1 = lm_list[8][1:]\n x2, y2 = lm_list[12][1:]\n\n # Check which fingers are up\n fingers = detector.fingers_up()\n\n # If selection mode - two finger are up\n if fingers[1] and fingers[2]:\n xp, yp = 0, 0\n\n if y1 < 175:\n if 200 < x1 < 250:\n header = overlay_list[0]\n draw_color = (255, 0, 255)\n elif 450 < x1 < 500:\n header = overlay_list[1]\n draw_color = (255, 0, 0)\n elif 600 < x1 < 650:\n header = overlay_list[2]\n draw_color = (0, 255, 0)\n elif 800 < x1 < 850:\n header = overlay_list[3]\n draw_color = (0, 0, 0)\n\n cv2.rectangle(img, (x1, y1 - 25), (x2, y2 + 25), draw_color, cv2.FILLED)\n\n # If drawing mode - index 
finger is up\n if fingers[1] and not fingers[2]:\n cv2.circle(img, (x1, y1), 15, draw_color, cv2.FILLED)\n\n if xp == 0 and yp == 0:\n xp, yp = x1, y1\n\n if draw_color == (0, 0, 0):\n cv2.line(img, (xp, yp), (x1, y1), draw_color, eraser_thickness)\n cv2.line(img_canvas, (xp, yp), (x1, y1), draw_color, eraser_thickness)\n else:\n cv2.line(img, (xp, yp), (x1, y1), draw_color, brush_thickness)\n cv2.line(img_canvas, (xp, yp), (x1, y1), draw_color, brush_thickness)\n \n xp, yp = x1, y1\n\n c_time = time.time()\n fps = 1/(c_time - p_time)\n p_time = c_time\n \n img_gray = cv2.cvtColor(img_canvas, cv2.COLOR_BGR2GRAY)\n _, img_inverse = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY_INV)\n img_inverse = cv2.cvtColor(img_inverse, cv2.COLOR_GRAY2BGR)\n img = cv2.bitwise_and(img, img_inverse)\n img = cv2.bitwise_or(img, img_canvas)\n\n # Setting the header image\n img[0:110, 0:912] = header\n img = cv2.addWeighted(img, 0.8, img_canvas, 0.2, 0)\n cv2.putText(img, f'FPS: {str(int(fps))}', (40, 50), cv2.FONT_HERSHEY_PLAIN, 5, (255, 0, 0), 3)\n cv2.imshow('Image', img)\n cv2.waitKey(1)\n\n\n","repo_name":"dnovai/advancedCVProject","sub_path":"ai_virtual_painter.py","file_name":"ai_virtual_painter.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20009550178","text":"import request\nimport json\n\ndef speak(str):\n from win32com.client import Dispatch\n speak = Dispatch(\"SAPI.SpVoice\")\n speak.Speak(str)\n\nif __name__ == '__main__':\n speak(\"Today's Hadlines are!\")\n url = \"put news api int this string from website\" \n\n news = request.get(url).text\n news_dict = json.loads(news)\n print(news_dict[\"articles\"]) #this is news api articles\n arts = news_dict['articles']\n for articles in arts:\n speak(articles['title'])\n print(articles['title'])\n speak(\"Moving on to the next...Listen Carefully!\")\n\n speak(\"Thank You For Listening..!\") ","repo_name":"khand420/Learn-Python","sub_path":"jarvis(speak_news).py","file_name":"jarvis(speak_news).py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20147947952","text":"import os\nimport psutil\nimport warnings\n\n# from pyspark.conf import SparkConf\n# from pyspark.context import SparkContext\nfrom pyspark.sql import SparkSession\nimport pandas as pd\n\nfrom via_pandas import ViaPandas\nimport dataset\n\n\nclass ViaPySpark(ViaPandas):\n \"\"\"\n Instead of using the raw Spark, we switch to the faster PyArrow-enabled Spark.\n https://spark.apache.org/docs/latest/api/python/user_guide/sql/arrow_pandas.html\n https://spark.apache.org/docs/latest/sql-data-sources-parquet.html\n \"\"\"\n\n def __init__(self) -> None:\n total_ram_gb: int = int(psutil.virtual_memory().total // 1e9)\n\n # Pandas API on Spark automatically uses this Spark session with the configurations set.\n # Available properties:\n # https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/best_practices.html#leverage-pyspark-apis\n # https://spark.apache.org/docs/latest/configuration.html\n # conf = SparkConf()\n # conf.setMaster('local').setAppName('ADSB')\n # conf.set('spark.driver.cores', str(os.cpu_count() - 4))\n # conf.set('spark.driver.maxResultSize', '10g')\n # conf.set('spark.executor.memory', f'{total_ram_gb}g')\n # conf.set('spark.sql.execution.arrow.enabled', 'true')\n # conf.setExecutorEnv('PYARROW_IGNORE_TIMEZONE', '1')\n # ctx = 
SparkContext(conf=conf)\n # ctx.setLogLevel('ERROR')\n builder = SparkSession.builder.appName('ADSB')\n builder = builder.config('spark.driver.cores', str(os.cpu_count() - 4))\n builder = builder.config('spark.driver.maxResultSize', '10g')\n builder = builder.config('spark.executor.memory', f'{total_ram_gb}g')\n builder = builder.config(\n 'spark.sql.execution.arrow.pyspark.enabled', 'true')\n builder.getOrCreate()\n\n # from pyspark.sql import SparkSession\n # spark = SparkSession.builder.appName('ADSB').getOrCreate()\n import pyspark.pandas as psp\n super().__init__(psp)\n\n warnings.filterwarnings('ignore')\n\n def load(self, df_or_paths):\n # PySpark has to convert raw `pd.DataFrames` with `from_pandas`\n if isinstance(df_or_paths, pd.DataFrame):\n self.df = psp.from_pandas(df_or_paths)\n else:\n super().load(df_or_paths)\n\n def _yield_tuples(self, df):\n # PySpark would export the category IDs, but not the labels, unless we cast back\n for column_name in df.columns:\n if df[column_name].dtype == 'category':\n df[column_name] = df[column_name].astype('string')\n return df.itertuples(index=False, name=None)\n\n def _replace_with_years(self, df, column_name: str):\n # PySpark doesn't recognize the `[s]`, but works with `[ns]`\n df['year'] = df[column_name].astype('datetime64[ns]').dt.year\n df.drop([column_name])\n return df\n\n\nif __name__ == '__main__':\n dataset.test_engine(ViaPySpark())\n","repo_name":"unum-cloud/udsb","sub_path":"table/via_spark.py","file_name":"via_spark.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"70246454890","text":"# training with the help of list\r\n\r\nfrom chatterbot import ChatBot\r\nfrom chatterbot.trainers import ListTrainer\r\n\r\n#creation of new chat bot\r\nbot =ChatBot(\"Pawan\")\r\n\r\ntrainer=ListTrainer(bot)\r\n\r\ntrainer.train([\r\n \"Hi\",\r\n \"Hello\",\r\n \"Who are you ?\",\r\n \"I am sharma \",\r\n \"Are you abnormal ?\",\r\n \"Yes I'm..\"\r\n])\r\n\r\nwhile True:\r\n query=input(\"You : \")\r\n response=bot.get_response(query)\r\n print(response)","repo_name":"goswamiprashant/test","sub_path":"test/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11026599155","text":"import numpy as np\nfrom astra.database.astradb import Task, DataProduct\nfrom astra.base import TaskInstance, Parameter, DictParameter\nfrom run_MWMPayne import fit_BOSS\n\n\nclass AstraInterface(TaskInstance):\n NN_path = Parameter(bundled=True)\n data_format = Parameter(bundled=True)\n log_dir = Parameter(bundled=True)\n\n wave_range = Parameter()\n spectral_R = Parameter()\n N_chebyshev = Parameter(default=15)\n N_presearch_iter = Parameter(default=1)\n N_presearch = Parameter(default=4000)\n\n\n def pre_execute(self):\n # Iterate over the tasks (either one task or a bundle)\n # and check that the object types are as we expect.\n for task, input_data_products, parameters in self.iterable():\n\n # Here, the \"task\" variable is a database record\n assert isinstance(task, Task)\n\n # The input_data_products is a list of DataProduct objects for this task\n assert isinstance(input_data_products, list)\n for data_product in input_data_products:\n assert isinstance(data_product, DataProduct)\n\n # The \"parameters\" is a dictionary of parameters for this task.\n assert isinstance(parameters, dict)\n\n self.opt_list = ['NN_path', 
'wave_range', 'spectral_R', 'N_chebyshev', 'N_presearch_iter', 'N_presearch', 'data_format', 'log_dir']\n self.db_field_list = ['teff', 'u_teff', 'logg', 'u_logg', 'vsini', 'u_vsini', 'v_micro', 'u_v_micro', 'm_h', 'u_m_h', 'v_rad', 'u_v_rad']\n return None\n\n\n def get_opt_dict(self):\n opt = {}\n for pn in self.opt_list:\n opt[pn] = getattr(self, pn)\n return opt\n\n\n def execute(self):\n opt = self.get_opt_dict()\n\n NN = Network()\n NN.read_in(self.NN_path)\n\n logger = FitLoggerDB(self.log_dir)\n logger.init_DB()\n logger.new_run(str(opt))\n\n loader = SpectrumLoader(self.data_format)\n\n results = []\n for task, input_data_products, parameters in self.iterable():\n for dp in input_data_products:\n sp = loader.get_single(dp.path)\n sd = sp.load()\n\n SNR, db_values, db_cheb = fit_BOSS(sd, NN, parameters, logger)\n result = {}\n result['snr'] = SNR\n for i,name in enumerate(self.db_field_list):\n result[name] = db_values[i]\n result['theta'] = db_cheb\n\n # here 'result' is a dict of results for this task, where each key will correspond to a field name in the `PayneCheOutputs` table\n with database.atomic():\n task.create_or_update_outputs('thepayne_che', [result])\n\n results.append(res)\n\n return results\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"istraumit/Payne-Che","sub_path":"AstraInterface.py","file_name":"AstraInterface.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39157432923","text":"k = 10\nt = 1e-5\nlr = 0.01\nnum_epochs = 50\nbatch_size = 512\ncontext_window = 5\nembedding_size = 64\ninput_folder = \"Input\"\noutput_folder = \"Output\"\ntoken_file = \"tokens.pkl\"\nfile_name = \"complaints.csv\"\ncol_name = \"Consumer complaint narrative\"\n","repo_name":"Abhimanyu9539/NLP_skipgram_model","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71476686568","text":"from django.urls import path\nfrom src.application.api.v1.views import (\n CreateContractView,\n GetDailyIncomeView,\n GetAllContractsView,\n ChangeLastContractPaymentStatus,\n CalculateContractPriceView\n)\n\nurlpatterns = [\n path('create/', CreateContractView.as_view(), name='create_contract'),\n path(\n 'get_price////',\n CalculateContractPriceView.as_view(),\n name='get_price'\n ),\n path(\n 'check_payment/',\n ChangeLastContractPaymentStatus.as_view(),\n name='check_payment'\n ),\n path('/', GetDailyIncomeView.as_view(), name='get_incomes'),\n path('', GetAllContractsView.as_view(), name='all_contracts')\n]\n","repo_name":"EasyDev-co/CloudMiningWebsite","sub_path":"backend/src/application/api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26142717965","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coursewarehistoryextended', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterIndexTogether(\n name='studentmodulehistoryextended',\n index_together=set([('student_module',)]),\n ),\n 
]\n","repo_name":"analyseuc3m/ANALYSE-v1","sub_path":"lms/djangoapps/coursewarehistoryextended/migrations/0002_force_studentmodule_index.py","file_name":"0002_force_studentmodule_index.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"2727255197","text":"# coding=utf-8\n\"\"\"\n Filename: my_solution\n Author: Tanyee\n Date: 2020/4/9\n Description: https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array/\n Status: Passed\n Performance: 68ms and 14.8MB\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n @staticmethod\n def removeDuplicates(nums: List[int]) -> int:\n \"\"\"\n :param nums: 排好序的数组\n :return: 不含重复元素的新数组的长度\n \"\"\"\n \"\"\"\n 因���之前先做了第27题,所以这个第26题就不费功夫了。\n 这题的关键点是:把数(按照顺序)往前挪。\n \"\"\"\n count = 1\n length = len(nums)\n if length <= 1:\n return length\n for i in range(1, length):\n if nums[i] != nums[i-1]:\n nums[count] = nums[i]\n count += 1\n # print(nums)\n return count\n\n\nprint(Solution.removeDuplicates([-1, 0, 1, 1, 2, 3, 3, 3, 4, 4, 5]))\n","repo_name":"TanyeeZhang/leet-note-code","sub_path":"problems/26_remove_duplicates/my_solution.py","file_name":"my_solution.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4939914632","text":"import shutil\nimport os\nimport time\nfrom matplotlib import pyplot as plt\nfrom queue import Queue\nimport threading\nimport cv2\n\n\n# Function to Convert the Image to Gray Scale\n\ndef task(video_path):\n video = cv2.VideoCapture(f'./input/{video_path}')\n while True: \n ret, frame = video.read()\n if ret: \n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n cv2.imshow('temp',gray)\n if cv2.waitKey(1) & 0xFF == ord('s'):\n break\n else : \n break\n\n video.release()\n\n \n# Function to send task to threads\ndef do_stuff(q):\n while not q.empty():\n value = q.get()\n task(value)\n q.task_done()\n\n\n# Create 50 Video Files\nos.mkdir(f'input')\nfor i in range(10):\n shutil.copy(src='check.mp4',dst=f'./input/temp{i}.mp4')\n \n\n\n# Convert Images to Gray Scale and note the time taken\ndata = {}\n\nfor t in [j*5 for j in range(1,6)]:\n jobs = Queue()\n for elem in os.listdir('input'):\n jobs.put(elem)\n start = time.time()\n for i in range(t):\n worker = threading.Thread(target=do_stuff, args=(jobs,))\n worker.start()\n \n jobs.join()\n end = time.time()\n print(t, end - start)\n data[t] = end - start\n\n\n# Plot the required Data\nprint(data)\n\nplt.plot(list(data.keys()),list(data.values()))\nplt.xlabel(\"Number of threads\")\nplt.ylabel(\"Time Taken\")\nplt.title(\"Time taken to convert 100 Images from color to gray Scale\")\nplt.show()","repo_name":"dragonman164/Mini-Projects","sub_path":"Convert Video to GrayScale (Multithreading Solution)/mainscript.py","file_name":"mainscript.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20375804439","text":"#coding=utf-8\nimport pymongo\nconn=pymongo.MongoClient(host='127.0.0.1',port=27017)\ndb=conn.test\ndatacheck=db.xiyouji.find({},{'_id':0})\nwhile 1:\n\tfor i in datacheck:\n\t\tprint(i)\n\n\tchose=input('1:search;2:add;3:dele;4update;5:type other you will quit:')\n\tif chose =='1':\n\t\tsearch1=input('search by:1:name;2:phone;3:exp')\n\t\tif search1=='1':\n\t\t\tsearch=input('name?')\n\t\t\tsearchname=db.xiyouji.find({'name':search},{'_id':0})\n\t\t\tfor i in 
searchname:\n\t\t\t\tprint(i)\n\t\telif search1=='2':\n\t\t\tsearch=input('phone?')\n\t\t\tsearchphone=db.xiyouji.find({'phone':search},{'_id':0})\n\t\t\tfor i in searchphone:\n\t\t\t\tprint(i)\n\t\telif search1=='3':\n\t\t\tsearch=input('exp?')\n\t\t\tsearchexp=db.xiyouji.find({'exp':search},{'_id':0})\n\t\t\tfor i in searchexp:\n\t\t\t\tprint(i)\n\telif chose=='2':\n\t\tadd=input('name?')\n\t\tlist1=[]\n\t\tread=db.xiyouji.find()\n\t\tfor i in read:\n\t\t\tlist1.append(i)\n\t\tif add in list1:\n\t\t\tprint('name exist!')\n\t\telse:\n\t\t\tadd1=int(input('tel?'))\n\t\t\tadd2=int(input('exp?'))\n\t\t\tdb.xiyouji.insert({'name':add,'tel':add1,'exp':add2})\n\telif chose=='3':\n\t\tdele=input('name?')\n\t\tdatacheck=db.xiyouji.find({'name':dele})\n\t\tif datacheck.count()!=0:\n\t\t\tdb.xiyouji.remove({'name':dele})\t\n\t\t\tprint('dele sucess')\n\t\telse:\n\t\t\tprint('not exist name.')\n\t\n\telif chose=='4':\n\t\tupdate_name=input('update name?')\n\t\tupdate_tel=int(input('update tel?'))\n\t\tupdate_exp=int(input('update exp?'))\t\n\t\tdb.xiyouji.update({'name':update_name},{'$set':{'tel':update_tel,'exp':update_exp}})\n\telse:\n\t\tprint('bye')\n\t\tbreak\n","repo_name":"aallenchen2018/login-sys","sub_path":"李泽燃mongo作业new.py","file_name":"李泽燃mongo作业new.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19174835072","text":"import os\r\n\r\nimport numpy as np\r\n\r\nimport tensorflow as tf\r\n\r\nimport cv2\r\n\r\n\r\n# 读取图片的函数,接收六个参数\r\n\r\n# 输入参数分别是图片名,图片路径,标签路径,图片格式,标签格式,需要调整的尺寸大小\r\n\r\ndef ImageReader(file_name, picture_path, label_path, picture_format=\".tif\", label_format=\".tif\", size=256):\r\n picture_name = picture_path + file_name + picture_format # 得到图片名称和路径\r\n\r\n label_name = label_path + file_name + label_format # 得到标签名称和路径\r\n\r\n picture = cv2.imread(picture_name, 1) # 读取图片\r\n\r\n label = cv2.imread(label_name, 1) # 读取标签\r\n\r\n\r\n height = label.shape[0] # 得到图片的高\r\n\r\n width = label.shape[1] # 得到图片的宽\r\n\r\n picture_resize_t = cv2.resize(picture, (size, size)) # 调整图片的尺寸,改变成网络输入的大小\r\n\r\n##YUV颜色空间\r\n img_YUV = cv2.cvtColor(picture_resize_t, cv2.COLOR_BGR2HSV)\r\n y_image_H = (img_YUV[:, :, 0:1])\r\n y_image_S = (img_YUV[:, :, 1:2])\r\n y_image_V = (img_YUV[:, :, 2:3])\r\n\r\n\r\n picture_resize = y_image_V / 127.5 - 1. # 归一化图片\r\n\r\n label_resize_t = cv2.resize(label, (size, size)) # 调整标签的尺寸,改变成网络输入的大小\r\n label_resize_t = label_resize_t[:, :, 0:1] #单通道灰度图\r\n\r\n label_resize = label_resize_t / 127.5 - 1. # 归一化标签\r\n\r\n return picture_resize, label_resize, height, width, y_image_H, y_image_S # 返回网络输入的图片,标签,还有原图片和标签的长宽\r\n","repo_name":"huangshanshan33/Remote_Sensing_Image_Fusion","sub_path":"image_reader.py","file_name":"image_reader.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"29146589853","text":"\"\"\" maxsus masala. 3x3 matritsa berilgan for yordamida ustunlarini satrga\n satrlarini ustunga almashtiring.\n masalan:\n \"\"\"\n\nb = [[[],[],[]],[[],[],[]],[[],[],[]]]\na = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nfor i in range(3):\n for j in range(3):\n b[i][j] = a[j][i]\nprint(b)\n\n\n\"\"\" 8-masala ikki o'lchovli mattrisanining juft elementlarini chiqaring\"\"\"\nd = []\nc = [[1,2],[3,4]]\nfor i in c:\n for j in i:\n if j % 2 == 0:\n d.append(j)\n\nprint(f\"d = {d}\")\n\n\"\"\" 9-masala. 
ikki o'lchovli matritsaning toq elementlarini ikkiga ko'paytirib qo'ying\njuft elementlarini ikkiga bo'lib qo'ying\n\"\"\"\n\ne = [[1,2],[3,4]]\nprint(e)\nfor i in range(2):\n for j in range(2):\n if e[i][j] % 2 == 0:\n e[i][j] = int(a[i][j] / 2)\n else:\n e[i][j] = e[i][j] * 2\n\nprint(e)\n\n","repo_name":"ShohruhAbduqayumov/Python_tutorial","sub_path":"05.07.2021/maxsus masala.py","file_name":"maxsus masala.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31938309404","text":"from typing import Dict, List\nfrom base.core.Event.Event import Event\nfrom base.core.Event.Events import Events\nfrom base.core.Object.GameObject import GameObject\n\n# CollisionWatcher beobachtet die Bewegung zweier Objekte und prüft, ob eine Kollision stattfindet. \n# Sobald die Kollision stattfindet wird ein von der watch Methode zurückgegebenes Event ausgegeben.\nclass CollisionWatcher():\n watchList: Dict[GameObject, List[GameObject]] = {}\n\n # Startet die Beobachtung der Objekte in beide Richtung \n # und gibt ein Eventname zurück, der bei einer Kollision ausgegeben wird.\n def watch(obj: GameObject, watch: GameObject) -> str:\n if watch not in CollisionWatcher.watchList:\n CollisionWatcher.watchList[watch] = []\n \n if obj not in CollisionWatcher.watchList:\n CollisionWatcher.watchList[obj] = []\n \n CollisionWatcher.watchList[watch].append(obj)\n CollisionWatcher.watchList[obj].append(watch)\n\n Events.subscribe(f\"{watch.id}.moved\", CollisionWatcher.receiveEvent)\n Events.subscribe(f\"{obj.id}.moved\", CollisionWatcher.receiveEvent)\n return f\"collisionWatcher.collision.{obj.id}.{watch.id}\"\n\n def receiveEvent(event: Event):\n if event.isOfType(\"moved\"):\n movedObj, before = event.value\n watchingObjects = CollisionWatcher.watchList[movedObj]\n\n for obj in watchingObjects:\n if obj.active and obj.collidesWith(movedObj.rect):\n Events.dispatch(f\"collisionWatcher.collision.{obj.id}.{movedObj.id}\")\n Events.dispatch(f\"collisionWatcher.collision.{movedObj.id}.{obj.id}\")\n \n # Entfernt die Beobachtung in eine Richtung\n def removeWatcher(obj: GameObject, watch: GameObject):\n if watch in CollisionWatcher.watchList:\n CollisionWatcher.watchList[watch].remove(obj)\n if len(CollisionWatcher.watchList[watch]) == 0:\n Events.unsubscribe(f\"{watch.id}.moved\", CollisionWatcher.receiveEvent)\n\n # Entfernt die Beobachtung zweier Objekte in beide Richtungen \n def unwatch(obj: GameObject, watch: GameObject):\n CollisionWatcher.removeWatcher(obj, watch)\n CollisionWatcher.removeWatcher(watch, obj)","repo_name":"finnstamer/pygameInformatik","sub_path":"base/core/Dependencies/CollisionWatcher.py","file_name":"CollisionWatcher.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31978086546","text":"import os\nimport json\nimport logging\nimport botocore\nimport boto3\n\n\nlogLevel = os.getenv(\"LOG_LEVEL\", \"DEBUG\").upper()\nlogger = logging.getLogger()\nlogger.setLevel(logLevel)\n\n\ndef lambda_handler(event, context):\n demo_logger()\n list_buckets()\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }\n\n\ndef demo_logger():\n logger.debug(f\"D: In {__name__}\")\n logger.info(f\"I: In {__name__}\")\n logger.warning(f\"W: In {__name__}\")\n logger.error(f\"E: In {__name__}\")\n logger.critical(f\"C: In {__name__}\")\n\n\ndef list_buckets():\n s3_client = 
boto3.client('s3')\n try:\n response = s3_client.list_buckets()\n except botocore.exceptions.ClientError as exception:\n logger.exception(f\"Unexpected Error: {exception}\")\n for bucket in response['Buckets']:\n logger.info(f\"Processing bucket {bucket['Name']}\")\n\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logLevel)\n demo_logger()\n list_buckets()\n","repo_name":"simonhanmer/aws-python-logging-example","sub_path":"source/lambda/logging_lambda/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7177884299","text":"class Solution:\n def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n duplicateFilter = []\n for i in nums:\n i = abs(i)\n \n if nums[i-1] > 0:\n nums[i-1] *= -1\n '''else:\n duplicateFilter.append(i+1)'''\n \n # print(duplicateFilter)\n # print(nums)\n for j in range(len(nums)):\n if(nums[j] > 0):\n duplicateFilter.append(j+1)\n # return duplicateFilter\n # for i in range(len())\n '''for j in range(len(duplicateFilter)):\n # print(j)\n duplicateFilter[j] = len(nums)-duplicateFilter[j];'''\n # print(duplicateFilter.revese())\n # duplicateFilter.reverse()\n \n return duplicateFilter\n \n ","repo_name":"mmkvdev/leetcode","sub_path":"Problems/Find All Numbers Disappeared in an Array/workArea/findNum.py","file_name":"findNum.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"4376276303","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# importando os arquivos\nvendasDf = pd.read_csv(r'Contoso - Vendas - 2017.csv', sep=';')\nprodutosDf = pd.read_csv(r'Contoso - Cadastro Produtos.csv', sep=';')\nlojasDf = pd.read_csv(r'Contoso - Lojas.csv', sep=';')\nclientesDf = pd.read_csv(r'Contoso - Clientes.csv', sep=';')\n\nprodutosDf = produtosDf[['ID Produto', 'Nome do Produto']]\n\nvendasDf = vendasDf.merge(produtosDf, on='ID Produto')\n\nprodutosVendidos=vendasDf.groupby('Nome do Produto').sum()\n\nprodutoQueMenosVendeu=produtosVendidos.sort_values('Quantidade Vendida')[:1]\n\nprodutoQueMenosVendeu=produtoQueMenosVendeu['Quantidade Vendida'].idxmin()\n\nprint(produtoQueMenosVendeu)\n\n","repo_name":"jharbes/hashtagPython","sub_path":"021-analiseDadosComPandas-integracaoExcel/07-analiseVisualizacaoDadosPandas-parte2/produtoQueMenosVendeu.py","file_name":"produtoQueMenosVendeu.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35013522790","text":"import turtle\n# Hiển thị độ to màn hình\nturtle.setup(1530,800,0,0)\npen = turtle.Turtle()\npen.speed(0)\npen.pensize(5)\n\ndef rectangle(length = 150,width = 100,color = \"skyblue\"):\n # Bút vẽ \n pen.fillcolor(color)\n pen.begin_fill()\n for _ in range(2):\n pen.fd(length)\n pen.rt(90)\n pen.fd(width)\n pen.rt(90)\n pen.end_fill()\n\ndef roof():\n pen.color(\"black\")\n pen.fillcolor(\"#FF33FF\")\n pen.begin_fill()\n pen.fd(100)\n pen.rt(135)\n pen.fd(180)\n pen.rt(45)\n pen.fd(650)\n pen.rt(45)\n pen.fd(180)\n pen.rt(135)\n pen.fd(800)\n pen.end_fill()\n \ndef house_body():\n pen.penup()\n pen.goto(0,-450)\n pen.lt(90)\n pen.pendown()\n color1 = [\"#6699FF\",\"#9999FF\",\"#CC99FF\"]\n for i in range(7):\n for j in color1:\n pen.penup()\n pen.color(j)\n pen.fd(150)\n pen.pendown()\n rectangle(color = j)\n pen.penup()\n pen.bk(450)\n pen.lt(90)\n 
pen.fd(100)\n pen.rt(90)\n pen.pendown()\n\ndef door():\n pen.fillcolor(\"#33CCCC\")\n pen.begin_fill()\n for _ in range(2):\n pen.fd(150)\n pen.rt(90)\n pen.fd(100)\n pen.rt(90)\n pen.end_fill()\n pen.rt(90)\n pen.fd(50)\n pen.lt(90)\n pen.fd(150)\n \ndef window():\n pen.color(\"black\")\n pen.fillcolor(\"white\")\n pen.begin_fill() \n for _ in range (2):\n for _ in range (2):\n pen.fd(150)\n pen.rt(90)\n pen.fd(50)\n pen.rt(90)\n pen.rt(90)\n pen.fd(50)\n pen.lt(90)\n pen.end_fill()\n pen.fd(75)\n pen.lt(90)\n pen.fd(100)\n pen.lt(90)\n pen.fd(75)\n\ndef main():\n house_body()\n pen.up()\n pen.home()\n pen.goto(-600,150)\n pen.lt(180)\n pen.down()\n roof()\n pen.up()\n pen.home()\n pen.goto(-200,-150)\n pen.rt(90)\n pen.down()\n door()\n pen.penup()\n pen.home()\n pen.rt(90)\n pen.pendown()\n window()\n pen.up()\n pen.lt(90)\n pen.fd(400)\n pen.lt(90)\n pen.down()\n window()\n \n turtle.exitonclick()\n\nif __name__ == '__main__':\n main()","repo_name":"DuyNguyen555/Codegyms","sub_path":"python_basic/07.function_module_1/draw_house.py","file_name":"draw_house.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21198309662","text":"# Library importation===========================================================\r\nimport xarray as xr\r\nimport numpy as np\r\n\r\n# Function definition===========================================================\r\n# The following function takes as arguments the file path and name of a NetCDF\r\n# file and the resolution in meters( NOTE: The function only works in grids with\r\n# EPSG : 4326). The function calculates the area of each pixel by first \r\n# converting the latitude from degrees to radians and finding radius of the \r\n# earth at all latitude value. 
The area of the pixel is calculated as the product\r\n# of the width of the pixel (dx) and the height of the pixel (dy).\r\n\r\ndef calculate_pixel_area():\r\n polar_radius = 6356.7523\r\n eq_radius = 6378.1370\r\n pi = np.pi\r\n input_file = str(input('Input file path and name: '))\r\n resolution_meters = int(input('Input resolution in meters: '))\r\n\r\n def coord_transf(input_DataArray):\r\n Lat_array_rad = (input_DataArray.data * pi) / 180.0\r\n output_DataArray = input_DataArray.assign_coords(lat=Lat_array_rad)\r\n return output_DataArray\r\n\r\n i_file = xr.open_dataset(input_file)\r\n n_lat = len(i_file[\"lat\"].values)\r\n n_lon = len(i_file[\"lon\"].values)\r\n i_latitude = i_file[\"lat\"]\r\n i_longitude = i_file[\"lon\"]\r\n\r\n div_val = resolution_meters / 111000\r\n polar_radius_length = 2 * pi * polar_radius\r\n dy = polar_radius_length / (360 / div_val)\r\n\r\n new_lat = coord_transf(i_latitude)\r\n new_lat = new_lat.lat\r\n cos_list = np.zeros([n_lat,1])\r\n radius_list = np.zeros([n_lat,1])\r\n eq_radius_length = np.zeros([n_lat,1])\r\n dx_list = np.zeros([n_lat,1])\r\n\r\n for i in range(0,n_lat):\r\n cos_list[i] = np.cos(new_lat[i])\r\n radius_list[i] = eq_radius * cos_list[i]\r\n eq_radius_length[i]= 2 * pi * radius_list[i]\r\n dx_list[i] = eq_radius_length[i] / (360 / div_val)\r\n\r\n area = np.zeros([n_lat,n_lon])\r\n for i in range(0, n_lat):\r\n for j in range(0, n_lon):\r\n area[i,j] = dx_list[i] * dy\r\n\r\n output_nc = xr.Dataset({\"areapixel\": ([\"lat\",\"lon\"], area),\r\n \"lat\": i_latitude,\r\n \"lon\": i_longitude})\r\n\r\n output_file = str(input(\"Input output file path and name: \"))\r\n output_nc.to_netcdf(output_file)\r\n\r\ncalculate_pixel_area()\r\n","repo_name":"IgorErhardt/Cod_Bank","sub_path":"Cal_pixel_area.py","file_name":"Cal_pixel_area.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70672377129","text":"\"\"\"\nDefine the basic class of Genemsa,\nincluding GenemsaBase, BlockInfo, IndexInfo\n\"\"\"\n\nimport re\nimport copy\nimport logging\nimport dataclasses\nfrom typing import TypeVar, Union, Any\nfrom collections.abc import Iterable, KeysView, Iterator\n\nfrom Bio.Seq import Seq\n\nfrom ..utils import cigar\n\n\n@dataclasses.dataclass\nclass BlockInfo:\n \"\"\"\n A class to save block information\n\n Attributes:\n length: The length of each block\n name: (Optional) The name of the block. e.g. 
intron1, exon2\n type: (Optional) The tag of the block defined in Category:SO:SOFA.\n \"\"\"\n\n length: int\n name: str = \"\"\n type: str = \"\"\n\n\n@dataclasses.dataclass\nclass IndexInfo:\n \"\"\"\n A class to save index information\n\n Attributes:\n pos: The position index\n name: (Optional) The belonged block name for the position\n type: (Optional) The belonged block tag for the position\n \"\"\"\n\n pos: int\n name: str = \"\"\n type: str = \"\"\n\n\nGenemsaType = TypeVar(\"GenemsaType\", bound=\"GenemsaBase\")\nBlockInput = Union[int, str, BlockInfo]\n\n\nclass GenemsaBase:\n \"\"\"\n The MSA has additional block-level infomations,\n which is useful for integrating dna and rna data.\n\n The structre of the msa looks like:\n ```\n Block-level:\n name intron1 exon1 intron2\n region <-------><-----><------------>\n\n Index-level:\n (column-level) 123456789012345678901234567890\n\n Sequence-level:\n allele_name1 ATTTTTCTTTTTTGTTTTTTATTTTTTCTT\n allele_name2 ATTTTTCTTTTTTGTTTTTTATTTTTTCTT\n ```\n\n The sequence has basic bases, \"A\", \"T\", \"C\", \"G\" and \"-\" for gap,\n \"E\" stands for error.\n (Mostly because some sequence has exon part only, so I fill the intron with E.)\n \"\"\"\n\n def __init__(\n self,\n gene_name: str = \"Unamed\",\n alleles: dict[str, str] = {},\n blocks: Iterable[BlockInfo] = [],\n index: Iterable[IndexInfo] = [],\n reference: str = \"\",\n ):\n \"\"\"\n Attributes:\n gene_name: The name of the gene\n\n alleles: MSA data.\n\n Allele name as the key and the sequence string as the value.\n\n\n blocks: list of block information\n index: list of index(position) information\n reference: allele of the msa (Optional)\n \"\"\"\n self.gene_name = gene_name\n self.alleles: dict[str, str] = {}\n self.blocks = copy.deepcopy(list(blocks or [])) # intron exon length\n self.index = copy.deepcopy(list(index or []))\n self.logger = logging.getLogger(__name__)\n self.reference = reference\n\n if alleles:\n self.extend(alleles)\n\n def copy(self: GenemsaType, copy_allele: bool = True) -> GenemsaType:\n \"\"\"\n Clone a new MSA.\n\n Args:\n copy_allele: Copy the sequences as well\n \"\"\"\n # Child's Type\n Genemsa = type(self)\n new_msa = Genemsa(\n self.gene_name,\n blocks=self.blocks,\n index=self.index,\n reference=self.reference,\n alleles=self.alleles if copy_allele else {},\n )\n return new_msa\n\n def __repr__(self) -> str:\n \"\"\"Show name, number of alleles and block infos in this MSA\"\"\"\n block_info = \" \".join([f\"{b.name}({b.length})\" for b in self.list_blocks()])\n return f\"<{self.gene_name} alleles={len(self)} block={block_info}>\"\n\n # size\n def get_length(self) -> int:\n \"\"\"Get the length of MSA\"\"\"\n # 0 sequences is allow\n if not self:\n # another way to calculate length\n return sum(i.length for i in self.list_blocks())\n else:\n # when extend, reference allele may not exists here\n # return len(self.get_reference()[1])\n return len(next(iter(self.items()))[1])\n\n def size(self) -> tuple[int, int]:\n \"\"\"Get the size (num_of_allele, length_of_sequence)\"\"\"\n return (len(self), self.get_length())\n\n def list_alleles(self) -> KeysView[str]:\n \"\"\"\n List all the allele's sequence name in MSA\n\n Example:\n >>> a_gen.list_alleles()[:3]\n ['A*01:01:01:01', 'A*01:01:01:02N', 'A*01:01:01:03']\n \"\"\"\n return self.alleles.keys()\n\n def get_sequence_names(self) -> KeysView[str]:\n \"\"\"Same as list_alleles\"\"\"\n return self.list_alleles()\n\n # dict-like function\n def __iter__(self) -> Iterator[str]:\n \"\"\"Iter allele like 
iter(dict)\"\"\"\n return iter(self.alleles)\n\n def items(self) -> Iterable[tuple[str, str]]:\n \"\"\"list allele name along with allele sequence like dict.items()\"\"\"\n return self.alleles.items()\n\n def get(self, allele: str) -> str:\n \"\"\"Get the sequence by allele name\"\"\"\n return self.alleles[allele]\n\n def contains(self, allele: str) -> bool:\n \"\"\"Implement `in` operator\"\"\"\n return allele in self.alleles\n\n def __len__(self) -> int:\n \"\"\"\n Get the number of alleles in the MSA\n\n Example:\n >>> len(a_gen)\n 4100\n \"\"\"\n return len(self.alleles)\n\n def truth(self) -> bool:\n \"\"\"\n If msa has 0 alleles return False\n else return Implement if(self) function\n \"\"\"\n return bool(self.alleles)\n\n # reference-related function\n def set_reference(self: GenemsaType, allele: str) -> GenemsaType:\n \"\"\"Set the reference in msa (inplace)\"\"\"\n if allele not in self:\n self.logger.warning(f\"Cannot find {allele} in MSA\")\n self.reference = allele\n return self\n\n def get_reference(self) -> tuple[str, str]:\n \"\"\"\n Get the reference in MSA, if not existed, output the first one\n\n Returns:\n (allele_name, allele_seq)\n \"\"\"\n if self.reference in self:\n return (self.reference, self.get(self.reference))\n if not self:\n raise ValueError(\"MSA is empty\")\n self.logger.warning(\n f\"Reference {self.reference} not existed in MSA.\"\n \" Using the first allele instead.\"\n )\n allele, seq = next(iter(self.items()))\n self.reference = allele # set it\n return allele, seq\n\n def get_allele_or_error(self, allele: str = \"\") -> tuple[str, str]:\n \"\"\"\n Get the sequence by allele name\n\n Args:\n allele (str): Allele name. If not provided, reference allele are used\n\n Returns:\n allele_name (str) and its sequence (str):\n \"\"\"\n if not allele:\n allele = self.get_reference()[0]\n if allele not in self:\n raise ValueError(f\"{allele} not found\")\n return allele, self.get(allele)\n\n # allele function\n def sort_name(self: GenemsaType) -> GenemsaType:\n \"\"\"Sort the allele by alelle name (inplace)\"\"\"\n self.alleles = dict(sorted(self.items(), key=lambda i: i[0]))\n return self\n\n def append(self: GenemsaType, name: str, seq: str) -> GenemsaType:\n \"\"\"\n Add a sequence into MSA (inplace)\n\n Make sure the sequence length is same as in MSA\n \"\"\"\n if len(seq) == 0: # OK to add 0 length string\n self.alleles[name] = seq\n return self\n if len(seq) != self.get_length():\n raise ValueError(\"Length not match to alignments\")\n if name in self:\n raise ValueError(f\"{name} already exist\")\n\n self.alleles[name] = seq\n return self\n\n def extend(\n self: GenemsaType, msa: Union[GenemsaType, dict[str, str]]\n ) -> GenemsaType:\n \"\"\"Add MSA's alleles into this MSA (inplace)\"\"\"\n if isinstance(msa, GenemsaBase):\n if [b.length for b in self.list_blocks()] != [\n b.length for b in msa.list_blocks()\n ]:\n raise ValueError(\"Block length is different\")\n for name, seq in msa.items():\n self.append(name, seq)\n return self\n\n def remove_allele(\n self: GenemsaType, query: Union[str, Iterable[str]], inplace: bool = True\n ) -> GenemsaType:\n \"\"\"\n Remove allele sequences by regex(when query is string)\n or by exactly deleting (when query is a list)\n \"\"\"\n if inplace:\n new_msa = self\n else:\n new_msa = self.copy()\n if isinstance(query, str):\n for allele in list(self):\n if re.match(query, allele):\n del new_msa.alleles[allele]\n elif isinstance(query, Iterable):\n for allele in query:\n del new_msa.alleles[allele]\n else:\n raise 
NotImplementedError\n return new_msa\n\n def select_allele(\n self: GenemsaType, query: Union[str, Iterable[str]]\n ) -> GenemsaType:\n \"\"\"\n Select allele name by regex(when query is string)\n or by exactly selecting (when query is a list)\n\n Examples:\n >>> # select allele name start with A*01:01\n >>> msa.select_allele(r\"A\\\\*01:01.*\")\n >>> # select allele by list of string\n >>> msa.select_allele([\"A*01:01\", \"A*02:03\"])\n \"\"\"\n new_msa = self.copy(copy_allele=False)\n if isinstance(query, str):\n new_msa.extend(\n {allele: seq for allele, seq in self.items() if re.match(query, allele)}\n )\n elif isinstance(query, Iterable):\n new_msa.extend({allele: self.get(allele) for allele in query})\n else:\n raise NotImplementedError\n return new_msa\n\n # sequence functions\n def sort(self: GenemsaType) -> GenemsaType:\n \"\"\"Sort the allele by their sequences (inplace)\"\"\"\n self.alleles = dict(sorted(self.items(), key=lambda i: i[1]))\n return self\n\n def reverse_complement(self: GenemsaType) -> GenemsaType:\n \"\"\"Reverse the sequences\"\"\"\n new_msa = self.copy(copy_allele=False)\n new_msa.blocks = copy.deepcopy(list(reversed(self.blocks)))\n new_msa.index = copy.deepcopy(list(reversed(self.index)))\n new_msa.extend(\n {allele: str(Seq(seq).reverse_complement()) for allele, seq in self.items()}\n )\n return new_msa\n\n def get_cigar(\n self, target_allele: str, ref_allele: str = \"\"\n ) -> list[tuple[str, int]]:\n \"\"\"\n Get the cigar string of target_allele from ref_allele\n\n If ref_allele not set,\n it will automatically find the reference by get_reference\n\n Return:\n cigar(list[op_str, num]): The list of operator and number of bases\n Exmaple:\n `cigar = [(M, 1), (X, 1), (D, 2), (M, 2)]`\n \"\"\"\n if target_allele not in self:\n raise KeyError(f\"{target_allele} not found\")\n ref_allele, _ = self.get_allele_or_error(ref_allele)\n return cigar.calculate_cigar(self.get(ref_allele), self.get(target_allele))\n\n # column operation part\n def calculate_frequency(self) -> list[list[int]]:\n \"\"\"\n Calculate ATCG and gap frequency of each bp in MSA\n\n Returns:\n frequency (list[list[int]]):\n Each items contains the number of ATCG and gap.\n \"\"\"\n freqs = []\n for i in zip(*self.alleles.values()):\n freqs.append(\n [i.count(\"A\"), i.count(\"C\"), i.count(\"G\"), i.count(\"T\"), i.count(\"-\")]\n )\n return freqs\n\n def get_consensus(self, include_gap: bool = False) -> str:\n \"\"\"\n Generate the consensus sequence by choosing maximum frequency base\n\n Args:\n include_gap (bool):\n Allow consensus contains gap if gap is the maximum item.\n\n If include_gap=False and all the base on that position is gap\n (not shrinked before),\n it will warning and fill with A.\n\n `E` will be ignored.\n\n Example:\n ```\n a0 CCATT-GGT--GTCGGGTTTCCAG\n a1 CCACTGGGT--ATCGGGTTTCCAG\n c2 CAATTGGGT--GTCGGGT---AAG\n consensus CCATTGGGT--GTCGGGTTTCCAG\n consensus(no-gap) CCATTGGGTAAGTCGGGTTTCCAG\n ```\n \"\"\"\n freqs = self.calculate_frequency()\n if not include_gap:\n if any(sum(f[:4]) == 0 for f in freqs):\n self.logger.warning(\n \"MSA contains gap, try .shrink() before .get_consensus()\"\n )\n max_ind = [max(range(4), key=lambda i: f[i]) for f in freqs]\n else:\n max_ind = [max(range(5), key=lambda i: f[i]) for f in freqs]\n return \"\".join(map(lambda i: \"ACGT-\"[i], max_ind))\n\n def get_variantion_base(self) -> list[int]:\n \"\"\"\n Get the base positions where variation occurs\n\n Example:\n ```\n msa:\n s0: AAT\n s1: AAC\n s2: CAC\n >>> msa.get_variantion_base()\n [0, 
2]\n ```\n\n Returns:\n positions:\n Each integer represent the position of variation\n \"\"\"\n freqs = self.calculate_frequency()\n num = len(self)\n base = []\n for i, freq in enumerate(freqs):\n if num not in freq:\n base.append(i)\n return base\n\n # block functions\n def list_blocks(self) -> list[BlockInfo]:\n \"\"\"Return list of blocks\"\"\"\n return self.blocks\n\n def get_block_length(self) -> int:\n \"\"\"Return list of blocks\"\"\"\n return len(self.list_blocks())\n\n def list_index(self) -> list[IndexInfo]:\n \"\"\"Return list of index\"\"\"\n return self.index\n\n def get_block(self, block: BlockInput) -> BlockInfo:\n \"\"\"Get block by str or id\"\"\"\n return self.blocks[self.get_block_index(block)]\n\n def set_blocks(\n self: GenemsaType, blocks: Iterable[Union[int, BlockInfo]]\n ) -> GenemsaType:\n \"\"\"Set blocks (inplace)\"\"\"\n new_blocks = []\n for i in blocks:\n if isinstance(i, int):\n new_blocks.append(BlockInfo(length=i))\n else:\n new_blocks.append(copy.deepcopy(i))\n\n if len(self) and self.get_length() != sum(blk.length for blk in new_blocks):\n raise ValueError(\"Total block length not match to alignments\")\n\n self.blocks = new_blocks\n return self\n\n def get_block_index(self, block: BlockInput) -> int:\n \"\"\"Find the index of the block\"\"\"\n if isinstance(block, str):\n for i, b in enumerate(self.list_blocks()):\n if b.name == block:\n return i\n elif isinstance(block, BlockInfo):\n for i, b in enumerate(self.list_blocks()):\n if b.name == block.name:\n return i\n elif isinstance(block, int):\n id = block\n if id < 0:\n id = self.get_block_length() + id\n if 0 <= id < self.get_block_length():\n return id\n else:\n raise NotImplementedError(f\"Type of {block} not work now\")\n raise IndexError(f\"{block} not found or out of index\")\n\n def get_block_interval(self, block: BlockInput) -> tuple[int, int]:\n \"\"\"Calculate the start(included) and end index (excluded) of the block\"\"\"\n index = self.get_block_index(block)\n start = sum(self.blocks[i].length for i in range(index))\n return start, start + self.blocks[index].length\n\n def select_block(self: GenemsaType, index: Iterable[BlockInput]) -> GenemsaType:\n \"\"\"\n Extract blocks by index\n\n Args:\n index (list of int): Leave empty if you want all the blocks.\n\n Index start from 0. e.g.\n\n * 0 for 5-UTR\n * 1 for exon1\n * 2 for intron1\n * 3 for exon2\n * 4 for 3-UTR(for two exons gene)\n * -1 for last block\n\n or you can use list of string,\n it will select by block name\n\n Example:\n >>> a_gen.select_block([-1])\n \n\n >>> a_gen.select_block([2, 3])\n \n\n >>> a_gen.select_block([\"5UTR\", \"exon3\"])\n \n \"\"\"\n new_msa = self.copy(copy_allele=False)\n\n # choose block index by index\n new_block = []\n new_index = []\n all_pos = []\n index_ids = [self.get_block_index(i) for i in index]\n for i in index_ids:\n new_block.append(self.blocks[i])\n start, end = self.get_block_interval(i)\n all_pos.append((start, end))\n new_index.extend(self.index[start:end])\n new_msa.blocks = new_block\n new_msa.index = new_index\n\n # extract the sequences inside block region\n for allele, gen_seq in self.items():\n new_seq = \"\".join([gen_seq[start:end] for start, end in all_pos])\n new_msa.append(allele, new_seq)\n return new_msa.copy()\n\n def select_exon(\n self: GenemsaType, exon_index: Iterable[BlockInput] = []\n ) -> GenemsaType:\n \"\"\"\n Extract the exon by index.\n\n Args:\n exon_index (list[str|int]): Index start from 1. 
i.e.\n\n * 1 for exon1\n * 2 for exon2\n\n Leave empty if you want all the exons\n\n If the exon_index contains list of string,\n it will select by name\n\n Example:\n >>> a_gen.select_exon([2]))\n \n\n >>> a_gen.select_exon([2, 3]))\n \n\n >>> a_gen.select_exon([\"exon2\", \"exon3\"]))\n \n \"\"\"\n exons = [b for b in self.list_blocks() if b.type == \"exon\"]\n\n # If not specific the index, extract all exons\n exon_list: list[BlockInput] = []\n if not exon_index:\n exon_list = exons # type: ignore\n else:\n for i in exon_index:\n if isinstance(i, int):\n # exon -> blocks position\n if i < 1 or i > len(exons):\n raise IndexError(f\"{i} is out of exon index\")\n i = exons[i - 1]\n exon_list.append(i)\n\n # check\n for i in exon_list:\n block = self.get_block(i)\n if block.type != \"exon\":\n raise IndexError(f\"{block} is not exon: input={i}\")\n return self.select_block(exon_list)\n\n def split_block(self: GenemsaType) -> list[GenemsaType]:\n \"\"\"Split the msa by blocks\"\"\"\n return [self.select_block([i]) for i in range(len(self.list_blocks()))]\n\n # block + column function\n def shrink(self: GenemsaType) -> GenemsaType:\n \"\"\"Remove empty base if all bases in that column is gap\"\"\"\n # index to delete\n freqs = self.calculate_frequency()\n masks = [f[4] != sum(f) for f in freqs]\n new_msa = self.copy(copy_allele=False)\n\n # recalcuate blocks\n for i in range(len(self.blocks)):\n start, end = self.get_block_interval(i)\n new_msa.blocks[i].length = sum(masks[start:end])\n assert sum(masks) == new_msa.get_length()\n new_msa.index = [new_msa.index[i] for i in range(len(masks)) if masks[i]]\n\n # remove base in allele\n for allele, seq in self.items():\n new_msa.append(allele, \"\".join(seq[i] for i in range(len(seq)) if masks[i]))\n\n return new_msa\n\n def reset_index(self: GenemsaType) -> GenemsaType:\n \"\"\"\n Reset index:\n The old position information will be discard.\n\n Each position information will be counted from 0 and\n the label and name will copy from its block information\n\n Example:\n ``` python\n >>> print(a.format_alignment_diff())\n 101 103 105 107 109 111 113 115 117 119\n | | | | | | | | | |\n A*01:01:01:01 G G T C C A C C G A\n A*01:01:01:02N - - - - - - - - - -\n\n >>> print(a.reset_index().format_alignment_diff())\n 1\n |\n A*01:01:01:01 GGTCCACCGA\n A*01:01:01:02N ----------\n ```\n \"\"\"\n new_msa = self.copy()\n start = 0\n new_msa.index = []\n for block in self.list_blocks():\n for _ in range(block.length):\n new_msa.index.append(\n IndexInfo(\n pos=start,\n type=block.type,\n name=block.name,\n )\n )\n start += 1\n assert start == self.get_length()\n return new_msa\n\n def __add__(self: GenemsaType, msa: GenemsaType) -> GenemsaType:\n \"\"\"\n Concat 2 MSA\n\n Example:\n >>> print(a_gen.select_exon([2]) + a_gen.select_exon([3]))\n \n \"\"\"\n names0 = set(self.get_sequence_names())\n names1 = set(msa.get_sequence_names())\n if names0 != names1:\n raise ValueError(\n \"Can not concat because some allele is miss: \"\n + str(names0.symmetric_difference(names1))\n )\n new_msa = self.copy()\n new_msa.blocks.extend(copy.deepcopy(msa.blocks))\n new_msa.index.extend(copy.deepcopy(msa.index))\n for name, seq in msa.items():\n new_msa.alleles[name] += seq\n return new_msa\n\n def __getitem__(self: GenemsaType, index: Any = None) -> GenemsaType:\n \"\"\"\n Extract the region of the sequences by index (start from 0),\n but block information will not preserved\n\n Example:\n >>> msa = Genemsa(\"A\", \"gen\")\n >>> msa.read_alignment_file(\"A_gen.txt\")\n >>> # 
Inspect 50-100bp in the MSA\n >>> extract_msa = msa[50:100]\n >>> print(extract_msa)\n\n >>> # Inspect 2nd 3rd 5th bp in the MSA\n >>> extract_msa = msa[[1,2,4]]\n >>> print(extract_msa)\n \"\"\"\n if not index:\n return self.copy()\n if not self:\n raise ValueError(\"MSA is empty\")\n\n # Extract specific region in alignment\n if isinstance(index, int):\n index = [index]\n if isinstance(index, slice):\n alleles = {allele: seq[index] for allele, seq in self.items()}\n index = self.index[index]\n elif isinstance(index, (tuple, list)):\n alleles = {\n allele: \"\".join([seq[i] for i in index]) for allele, seq in self.items()\n }\n index = [self.index[i] for i in index]\n # Fail\n else:\n raise TypeError(\"Bad usage\")\n\n new_msa = self.copy(copy_allele=False)\n new_msa.set_blocks([len(next(iter(alleles.values())))])\n new_msa.index = copy.deepcopy(index)\n new_msa.extend(alleles)\n return new_msa\n\n # type-related functions\n def select_complete(self: GenemsaType) -> GenemsaType:\n \"\"\"Select non exon-only sequences (No `E` in the sequence)\"\"\"\n new_msa = self.copy(copy_allele=False)\n new_msa.extend({allele: seq for allele, seq in self.items() if \"E\" not in seq})\n return new_msa\n\n def select_incomplete(self: GenemsaType) -> GenemsaType:\n \"\"\"Select exon-only sequences (`E` exists in the sequence)\"\"\"\n new_msa = self.copy(copy_allele=False)\n new_msa.extend({allele: seq for allele, seq in self.items() if \"E\" in seq})\n return new_msa\n\n def fill_incomplete(self: GenemsaType, ref_allele: str) -> GenemsaType:\n \"\"\"Fill the `E` in exon-only sequences with ref_allele sequence (inplace)\"\"\"\n if ref_allele not in self:\n raise KeyError(f\"{ref_allele} not found\")\n\n ref_seq = self.get(ref_allele)\n for allele, seq in self.items():\n if \"E\" in seq:\n # replace it\n self.alleles[allele] = \"\".join(\n [seq[i] if seq[i] != \"E\" else ref_seq[i] for i in range(len(seq))]\n )\n return self\n\n def merge_exon(self: GenemsaType, msa_nuc: GenemsaType) -> GenemsaType:\n \"\"\"\n Merge nuc MSA into gen MSA\n\n It's allow that nuc MSA has new allele name than gen MSA,\n Genemsa will add the sequence in MSA, and the intron will fill by `E`\n\n If the exon part of gen MSA is differnet (e.g. 
less gapped) from nuc MSA,\n Genemsa will try to merge if it can\n\n Note that the index will be reset\n\n Example:\n ```\n # case1\n msa_gen:\n 1: \"AA|TT|CC\",\n 2: \"AA|TC|CC\",\n msa_nuc:\n 3: \"TC\",\n After merge:\n 1: \"AA|TT|CC\",\n 2: \"AA|TC|CC\",\n 3: \"EE|TC|EE\"\n ```\n\n ```\n # case2\n msa_gen:\n 1: \"AA|TT|CC\",\n 2: \"AA|TC|CC\",\n msa_nuc:\n 1: \"TT-\",\n 2: \"T-C\",\n 4: \"TTC\",\n After merge:\n 1: \"AA|TT-|CC\",\n 2: \"AA|T-C|CC\",\n 4: \"EE|TTC|EE\"\n ```\n \"\"\"\n # A mapping from gen name to nuc index\n nuc_name_index = {\n b.name: i for i, b in enumerate(msa_nuc.list_blocks()) if b.type == \"exon\"\n }\n\n # check it's one-to-one mapping\n exon_set = set(b.name for b in self.list_blocks() if b.type == \"exon\")\n if set(nuc_name_index.keys()) != exon_set:\n raise ValueError(\n f\"Cannot match blocks: \" f\"gen={exon_set} nuc={nuc_name_index.keys()}\"\n )\n\n # create new msa and make sure the order of alleles\n new_msa = self.copy(copy_allele=False)\n new_msa.set_blocks([])\n new_msa.index = []\n new_msa.extend(\n {\n name: \"\"\n for name in self.get_sequence_names() | msa_nuc.get_sequence_names()\n }\n )\n\n # allele names\n gen_names = set(self.get_sequence_names())\n nuc_names = set(msa_nuc.get_sequence_names())\n exclude_name: set[str] = set()\n\n # block-wise\n msas_gen = self.split_block()\n msas_nuc = msa_nuc.split_block()\n for i_gen, blk_gen in enumerate(self.blocks):\n # intron -> fill with E\n if blk_gen.name not in nuc_name_index:\n for name in nuc_names - gen_names:\n msas_gen[i_gen].append(name, \"E\" * blk_gen.length)\n new_msa += msas_gen[i_gen].remove_allele(\n list(exclude_name), inplace=True\n )\n # exon -> check before merge\n else:\n i_nuc = nuc_name_index[blk_gen.name]\n # content change or length change\n if msas_nuc[i_nuc].get_length() != msas_gen[i_gen].get_length() or any(\n msas_nuc[i_nuc].get(name) != msas_gen[i_gen].get(name)\n for name in (nuc_names & gen_names)\n ):\n # check before merge\n if len(gen_names - nuc_names):\n raise ValueError(\n f\"Some alleles doesn't exist in nuc MSA: \"\n f\"{gen_names - nuc_names}\"\n )\n\n diff_name = filter(\n lambda name: msas_nuc[i_nuc].get(name).replace(\"-\", \"\")\n != msas_gen[i_gen].get(name).replace(\"-\", \"\"),\n gen_names,\n )\n diff_names = list(diff_name)\n if diff_names:\n self.logger.warning(\n f\"Some exon sequences in gen MSA \"\n f\"is not same as in nuc MSA \"\n f\"{blk_gen.name}: {diff_names}\"\n )\n new_msa.remove_allele(diff_names)\n exclude_name.update(diff_names)\n new_msa += msas_nuc[i_nuc].remove_allele(list(exclude_name))\n return new_msa.reset_index()\n\n def assume_label(self: GenemsaType, seq_type: str = \"gen\") -> GenemsaType:\n \"\"\"\n It will automatically generate the block's label\n according on `seq_type`. 
(inplace)\n\n seq_type:\n * gen: 5UTR-exon1-intron1-exon2-...-exon9-3UTR\n * nuc: exon1-exon2-...-exon9\n * other: block1-block2-block3-...\n\n block_length:\n If manually assign the block_length, the old block will be cleared.\n \"\"\"\n if not self.get_block_length():\n raise ValueError(\"This msa doesn't have any blocks\")\n\n if seq_type == \"gen\":\n assert self.get_block_length() % 2 == 1 and self.get_block_length() >= 3\n for i, blk in enumerate(self.blocks):\n if i == 0:\n blk.type = \"five_prime_UTR\"\n blk.name = \"5UTR\"\n elif i == self.get_block_length() - 1:\n blk.type = \"three_prime_UTR\"\n blk.name = \"3UTR\"\n elif i % 2:\n blk.type = \"exon\"\n blk.name = f\"exon{i // 2 + 1}\"\n else:\n blk.type = \"intron\"\n blk.name = f\"intron{i // 2}\"\n\n elif seq_type == \"nuc\":\n for i, blk in enumerate(self.blocks):\n blk.type = \"exon\"\n blk.name = f\"exon{i+1}\"\n else:\n for i, blk in enumerate(self.blocks):\n blk.type = \"gene_fragment\"\n blk.name = f\"block{i+1}\"\n\n # inplace to reset the index\n self.index = self.reset_index().index\n return self\n","repo_name":"linnil1/pyHLAMSA","sub_path":"pyhlamsa/gene/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":30001,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9237751905","text":"import carla \n\nclass Town(object):\n \"\"\"\n Description:\n Class Town is used to specify the characteristics of each final location \n\n \"\"\" \n def __init__(self, world):\n \"\"\"\n Method __init__ is the Constructor of Class Town that initializes the coordinates of each location\n\n Args:\n world (carla.World) : World object of CARLA API\n \"\"\"\n self.destinations = {\"Gas Station\" : [ -33, 176, 0], \\\n \"Central Roundabout\" : [ 0, 20, 0], \\\n \"Circular Plaza\" : [ 97, 57, 0], \\\n \"Tunnel\" : [ 247, -40, 0], \\\n \"Railstation\" : [-149, -26, 0], \\\n \"Skyscraper\" : [ -43, -3, 0], \\\n \"Hotel\" : [ 80, -91, 8], \\\n \"Square\" : [ 116, -76, 8], \\\n \"Highway\" : [ 56, 193, 0], \\\n \"Mall\" : [ -89, -70, -1], \\\n \"Office\" : [ 151, -167, 2], \\\n \"Neighborhood\" : [ 56, 130, 0], \\\n \"Cafeteria\" : [ -74, 99, 0], \\\n \"Restaurant\" : [ -73, -170, 0]\n }\n\n for key in self.destinations.keys():\n gs = carla.Location(self.destinations[key][0], self.destinations[key][1], self.destinations[key][2])\n world.debug.draw_string(gs, key, draw_shadow=False, color=carla.Color(r=0, g=0, b=0), life_time=1000, persistent_lines=True)\n\n\n def get_destinations(self):\n \"\"\"\n Description:\n Method get_destinations is used to return the dictionary with the available destinations\n\n Returns:\n dictionary: A dictionary with keys the names of destinations and values the coordinates of the destinations\n \"\"\" \n\n return self.destinations","repo_name":"stefanosPap/autonomous-vehicle","sub_path":"town.py","file_name":"town.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31001420110","text":"import subprocess\nimport re\nimport sys\n\n\nclass netcommands():\n\n def getnetsh(self):\n result = subprocess.check_output([\"netsh\",\"wlan\",\"show\",\"interfaces\"],shell=True).decode(sys.stdout.encoding)\n ssid,strength = self.findstrength(result)\n return ssid,strength\n\n def findstrength(self,string):\n match1 = re.search(\"Signal.+: (\\d{1,3})%\",string)\n strength = match1.group(1)\n\n match2 = re.search(\"SSID.+: (.+)\",string)\n ssid = 
match2.group(1)\n\n return ssid,strength\n\nif __name__ == \"__main__\":\n nc = netcommands()\n ssid,strength = nc.getnetsh()","repo_name":"greenboxer/Wifi-Strength","sub_path":"netcommands.py","file_name":"netcommands.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38203666527","text":"import numpy as np\nimport maxflow\nimport copy\nfrom profilehooks import profile\nimport time\n\n\n\nclass Segmentor:\n\n def __init__(self, img):\n self.img = copy.copy(img)\n self.mask_color = (255, 1, 255)\n\n @profile\n def max_flow_gray(self):\n start = time.time()\n height, width = self.img.shape\n graph = maxflow.Graph[int](height, width)\n nodes = graph.add_grid_nodes(self.img.shape)\n graph.add_grid_edges(nodes, 0), graph.add_grid_tedges(nodes, self.img, 255 - self.img)\n graph.maxflow()\n mask = graph.get_grid_segments(nodes)\n end = time.time()\n return end - start, self.__plot(mask)\n\n def __plot(self, mask):\n height, width = self.img.shape\n out = np.zeros((height, width), dtype=np.uint8)\n for i in range(height):\n for j in range(width):\n if mask[i, j]:\n out[i, j] = self.img[i, j]\n else:\n out[i, j] = 0\n\n return out\n\n\n # def __plot(self, mask):\n # height, width = self.img.shape\n # out = np.zeros((height, width, 3), dtype=np.uint8) # Inicializar com 3 canais\n # for i in range(height):\n # for j in range(width):\n # if mask[i, j]:\n # out[i, j, 0], out[i, j, 1], out[i, j, 2] = self.mask_color\n # else:\n # out[i, j] = self.img[i, j]\n #\n # return out\n\n\n\n\n\n\n","repo_name":"alanoMartins/image_segmentator","sub_path":"segmentor.py","file_name":"segmentor.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28979371325","text":"from sklearn import svm\nimport numpy as np\nimport pylab as pl\ndef abs():\n x = [[2,0], [1,1], [2,3]]\n y = [0, 0, 1]\n clf = svm.SVC(kernel=\"linear\")\n # 构建模型 矩阵数据\n clf.fit(x, y)\n print(\"clf : \", clf)\n # 支持向量(在同超平面平行的平面上)的点\n print(\"vectors : \", clf.support_vectors_)\n # 支持向量的角标\n print(\"support : \", clf.support_)\n # 统计每个域中的支持向量\n # print(\"n_support : \", clf.n_support_)\n\ndef acd():\n # 随机函数随机抓取数据\n np.random.seed(0)\n # 产生20行,2列的数据。\n x = np.r_[np.random.rand(20,2) - [2,2], np.random.rand(20,2) + [2,2]]\n # 产生标签数据\n y = [0]*20 + [1]*20\n # print(x)\n # print(y)\n # 构建模型\n clf = svm.SVC(kernel=\"linear\")\n clf.fit(x, y)\n print(clf)\n # switching to the generic n-dimensional parameterization of the hyperplan to the 2D-specific equation\n # of a line y=a.x +b: the generic w_0x + w_1y +w_3=0 can be rewritten y = -(w_0/w_1) x + (w_3/w_1)\n w = clf.coef_[0]\n # 转成点斜式求斜率\n a = -w[0]/w[1]\n # 在15 到5之间产生数据\n xx = np.linspace(-5, 5)\n yy = a * xx - (clf.intercept_[0])/w[1]\n # 找出边际线 斜率相同截距不同\n b = clf.support_vectors_[0]\n # print(\"b : \", b)\n yy_down = a * xx + (b[1] - a*b[0])\n b = clf.support_vectors_[-1]\n yy_up = a * xx + (b[1] - a*b[0])\n # print(\"yy : \", yy)\n # print(\"yy_down : \", yy_down)\n # print(\"yy_up : \", yy_up)\n pl.plot(xx, yy)\n pl.plot(xx, yy_down)\n pl.plot(xx, yy_up)\n\n pl.scatter(clf.support_vectors_[:,0], clf.support_vectors_[:,1],\n s= 80, facecolors='none')\n pl.scatter(x[:,0],x[:,1], c=y, cmap=pl.cm.Paired)\n\n pl.axis('tight')\n pl.show()\n\nif __name__ == '__main__':\n 
acd()","repo_name":"ScofieldShen/MLRep","sub_path":"DL/SVM/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10767092229","text":"\r\n#starting word, goal word\r\nstart = 'tsudmi'\r\ngoal = 'submit'\r\n\r\n#ask user for a name\r\nname = input(\"Enter you name: \")\r\n\r\n#greeting the user by name\r\nprint(\" Hello %s , Welcome!\" % (name)) \r\n\r\n#print instructions\r\nprint(\"------------Instruction-----------\")\r\nprint(\"This game will represents the user with a starting word, then ask user to make a single change at each step untill it matches a goal word.\")\r\nprint(\"At each step, the user has three options: a)insert a character, b)remove a character, or c)replace a character\")\r\n\r\nstart1 = [i for i in start] #create a list,it contains each character of starting word\r\ndic = {} #create a dictionary \r\nfor i in range(len(start1)):\r\n dic[i] = start1[i] #add key,value into dictionary dic\r\nprint(\"Note: this is the starting word %s, and the index of each character is the integer \" % str(start), dic)\r\nprint(\"When the goal word is reached, you win the game!\")\r\nprint(\"--------Let's begin!----------\")\r\nprint(\"--------the starting and goal words--------------\")\r\nprint(\"Starting word= \", start)\r\nprint(\"Goal word= \", goal)\r\n\r\n#creat a list, it contains 3 user options\r\noptions=['a','b','c']\r\ndef option_u():\r\n ''' ask user input option and handle the exceptions'''\r\n option = input(\"Please enter your choice as a, b, or c: \") #ask user input\r\n if option not in options: #check if the input is valid\r\n return option_u() #recursion call\r\n return option #return the user choice\r\n\r\ndef positions(start):\r\n '''ask user input the position of the character and handle exceptions'''\r\n try:\r\n position = input(\"Enter the index: \") #ask user input\r\n d = [i for i in range(0,len(start)+1)] #index range \r\n if int(position) not in d: #the input is valid if the input is in d\r\n return positions(start) #recursion call\r\n except ValueError:\r\n return positions(start) \r\n \r\n return int(position) #the position of the character , or index\r\n\r\n\r\ncount = 0 #create variable count\r\nwhile ((start==goal)==False):\r\n #user options\r\n print(\"Options: a)insert a character, b)remove a character, or c)replace a character\")\r\n\r\n option = option_u() # user's option\r\n position = positions(start) #the position of the character occur\r\n\r\n if option=='a': #insert a character\r\n char = input(\"Enter the character: \")\r\n start1.insert(position,char) #insert a character at the index of the position\r\n elif option=='b': #remove a character\r\n start1.pop(position) #remove the index\r\n \r\n elif option=='c': #replace a character\r\n char = input(\"Enter the character: \")\r\n start1[position]=char #replace the index with new character\r\n \r\n else:\r\n continue\r\n count+=1 #add 1 to the count\r\n start=''.join(start1) #set new word start1 as str and use start1 to replace start\r\n print(\"starting word= \", start) #print the result\r\n\r\n#Congratulate the user by name and output the number of steps it took to achieve the goal. \r\nprint(\"Congratulations, %s ! 
You only took %d steps to achieve the goal!\" % (name,count))\r\n\r\n\r\n\r\n#--------------------Example------------------\r\n# start = tsudmi\r\n# goal = submit\r\n# step 0: tsudmi #starting word\r\n# step 1: sudmi #remove index 0\r\n# step 2: submi #replace index2 with 'b'\r\n# step 3: submit #insert 't' at index 5\r\n\r\n \r\n\r\n\r\n \r\n","repo_name":"ysimokat/Learning-Python","sub_path":"words-game.py","file_name":"words-game.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6461981193","text":"import os\nfrom pathlib import Path\n\nimport hydra\nfrom google.cloud import storage\nfrom hydra_configs.pytorch_lightning.callbacks import ModelCheckpointConf\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom tqdm import tqdm\n\nfrom deepspeech_pytorch.configs.train_config import GCSCheckpointConfig\n\n\nclass CheckpointHandler(ModelCheckpoint):\n\n def __init__(self, cfg: ModelCheckpointConf):\n super().__init__(\n dirpath=cfg.dirpath,\n filename=cfg.filename,\n monitor=cfg.monitor,\n verbose=cfg.verbose,\n save_last=cfg.save_last,\n save_top_k=cfg.save_top_k,\n save_weights_only=cfg.save_weights_only,\n mode=cfg.mode,\n period=cfg.period,\n prefix=cfg.prefix\n )\n\n def find_latest_checkpoint(self):\n raise NotImplementedError\n\n\nclass FileCheckpointHandler(CheckpointHandler):\n\n def find_latest_checkpoint(self):\n \"\"\"\n Finds the latest checkpoint in a folder based on the timestamp of the file.\n If there are no checkpoints, returns None.\n :return: The latest checkpoint path, or None if no checkpoints are found.\n \"\"\"\n paths = list(Path(self.dirpath).rglob(self.prefix + '*'))\n if paths:\n paths.sort(key=os.path.getctime)\n latest_checkpoint_path = paths[-1]\n return latest_checkpoint_path\n else:\n return None\n\n\nclass GCSCheckpointHandler(CheckpointHandler):\n def __init__(self, cfg: GCSCheckpointConfig):\n self.client = storage.Client()\n self.gcs_bucket = cfg.gcs_bucket\n self.gcs_save_folder = cfg.gcs_save_folder\n self.bucket = self.client.bucket(bucket_name=self.gcs_bucket)\n super().__init__(cfg=cfg)\n\n def find_latest_checkpoint(self):\n \"\"\"\n Finds the latest checkpoint in a folder based on the timestamp of the file.\n Downloads the GCS checkpoint to a local file, and returns the local file path.\n If there are no checkpoints, returns None.\n :return: The latest checkpoint path, or None if no checkpoints are found.\n \"\"\"\n prefix = self.gcs_save_folder + self.prefix\n paths = list(self.client.list_blobs(self.gcs_bucket, prefix=prefix))\n if paths:\n paths.sort(key=lambda x: x.time_created)\n latest_blob = paths[-1]\n latest_blob.download_to_filename(self.local_save_file)\n return self.local_save_file\n else:\n return None\n\n def _save_model(self, filepath: str, trainer, pl_module):\n\n # in debugging, track when we save checkpoints\n trainer.dev_debugger.track_checkpointing_history(filepath)\n\n # make paths\n if trainer.is_global_zero:\n tqdm.write(f\"Saving model to {filepath}\")\n trainer.save_checkpoint(filepath)\n self._save_file_to_gcs(filepath, self.save_weights_only)\n\n def _save_file_to_gcs(self, model_path):\n tqdm.write(f\"Saving model to gs://{self.gcs_bucket}/{self.gcs_save_folder}/{self.filename}{self.FILE_EXTENSION}\")\n blob = self.bucket.blob(f\"{self.gcs_save_folder}/{self.filename}{self.FILE_EXTENSION}\")\n 
blob.upload_from_filename(model_path)\n","repo_name":"khg0343/Research-Project-I","sub_path":"deepspeech.pytorch-master/deepspeech_pytorch/checkpoint.py","file_name":"checkpoint.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"43080488514","text":"import turtle # for graphics\r\nimport os\r\nimport sys # the other 4 imports are for end screen/restart\r\nimport subprocess\r\nimport easygui # for start screen\r\n\r\n# start screen for the game\r\nyn1 = easygui.ynbox('Instructions: Get through the maze to the red square\\nControls: W - up S - down D - right '\r\n 'a - left\\n\\nStart game?', 'START?', ['Yes', 'No'])\r\nif yn1 != True:\r\n exit()\r\n\r\nwin = turtle.Screen() # setup for the window\r\nwin.setup(800, 700)\r\nwin.title(\"MAZE\")\r\nwin.bgcolor(\"black\")\r\n\r\n\r\n# Creating pen class\r\n\r\n\r\nclass Pen(turtle.Turtle):\r\n def __init__(self):\r\n turtle.Turtle.__init__(self)\r\n self.penup()\r\n self.shape(\"square\")\r\n self.color(\"white\")\r\n self.speed(0)\r\n\r\n\r\nclass Endblock(turtle.Turtle):\r\n def __init__(self):\r\n turtle.Turtle.__init__(self)\r\n self.penup()\r\n self.shape(\"square\")\r\n self.color(\"red\")\r\n self.speed(0)\r\n\r\n\r\npen = Pen()\r\n\r\n\r\n# creating character and movements/ block interaction\r\nclass Char(turtle.Turtle):\r\n def __init__(self):\r\n turtle.Turtle.__init__(self)\r\n self.penup()\r\n self.shape(\"turtle\")\r\n self.color(\"red\")\r\n self.speed(0)\r\n\r\n def up_w(self): # for up movement\r\n if (self.xcor(), self.ycor() + 24) not in walls:\r\n self.goto(self.xcor(), self.ycor() + 24)\r\n if (self.xcor(), self.ycor() + 24) in endblocks:\r\n self.goto(1000,1000)\r\n self.hideturtle()\r\n endscreen()\r\n\r\n def left_a(self): # for left movement\r\n if (self.xcor() - 24, self.ycor()) not in walls:\r\n self.goto(self.xcor() - 24, self.ycor())\r\n if (self.xcor() - 24 , self.ycor()) in endblocks:\r\n self.goto(1000, 1000)\r\n self.hideturtle()\r\n endscreen()\r\n\r\n def right_d(self): # for right movement\r\n if (self.xcor() + 24, self.ycor()) not in walls:\r\n self.goto(self.xcor() + 24, self.ycor())\r\n if (self.xcor() + 24 , self.ycor()) in endblocks:\r\n self.goto(1000, 1000)\r\n self.hideturtle()\r\n endscreen()\r\n\r\n def down_s(self): # for down movement\r\n if (self.xcor(), self.ycor() - 24) not in walls:\r\n self.goto(self.xcor(), self.ycor() - 24)\r\n if (self.xcor(), self.ycor() - 24) in endblocks:\r\n self.goto(1000,1000)\r\n self.hideturtle()\r\n endscreen()\r\n\r\n\r\nchar = Char()\r\n\r\n# List for levels\r\nlevels = [\"\"]\r\n\r\n# Format for first level\r\nlevel_1 = [\r\n\"NNNNNNNNNNNNNNNNNNNNNNNNNNN\",\r\n\"NC NNNNNNNNNNNNNNNNNNNNNNNN\",\r\n\"N NNNNNNNNNNN\",\r\n\"N NNNNNNNNNNN NNNNNNN NN\",\r\n\"NNNNNNN NN\",\r\n\"NNNNNNN NNNNNNNNNNNNNNNNNN\",\r\n\"NNNNNNN NNNNN N\",\r\n\"NNNNNNNN NNNNNNNNNNN\",\r\n\"NNNNNNN NNNNNNNNNNNNNNNNNN\",\r\n\"NNNN N\",\r\n\"N NNNNNNNNNNNNNNN NNNN\",\r\n\"NNNNNNNNNNNNNNNNNNNNN NNNN\",\r\n\"NNNNNNNN NNNN\",\r\n\"NNNNNNNN NNNNNNNNNNNNNNNN\",\r\n\"NNNNNNNN NNNNNNNNNNNNNNNNN\",\r\n\"N NNNNNNNNNNNNNNNNN\",\r\n\"NNNNNNNN EN\",\r\n\"NNNNNNNN NNNNNNNNNNNNNNNNNN\",\r\n\" NNN \"\r\n]\r\n\r\n# adds level_1 to list\r\nlevels.append(level_1)\r\n\r\n# creates wall list for collision mechanic\r\nwalls = []\r\n\r\n# creates list for the red block that signals end of maze\r\nendblocks = []\r\n\r\nendblock = Endblock()\r\n\r\n# for the actual generation of the maze\r\n\r\n\r\ndef 
mazelvl(maze):\r\n for y in range(len(maze)):\r\n for x in range(len(maze[y])):\r\n drawer = maze[y][x]\r\n screen_x = -300 + (x * 24)\r\n screen_y = 270 - (y * 24)\r\n\r\n if drawer == \"E\":\r\n endblock.goto(screen_x, screen_y)\r\n endblocks.append((screen_x, screen_y))\r\n\r\n if drawer == \"N\":\r\n pen.goto(screen_x, screen_y)\r\n pen.stamp()\r\n\r\n # adds the coordinates of walls to the walls list\r\n walls.append((screen_x, screen_y))\r\n\r\n if drawer == \"C\":\r\n char.goto(screen_x, screen_y)\r\n\r\n# calls mazelvl function\r\nmazelvl(levels[1])\r\n\r\n# keybinds for movement\r\nturtle.listen()\r\nturtle.onkey(char.up_w, \"w\")\r\nturtle.onkey(char.left_a, \"a\")\r\nturtle.onkey(char.right_d, \"d\")\r\nturtle.onkey(char.down_s, \"s\")\r\n\r\n# for the end screen input\r\ndef endscreen():\r\n yn = easygui.ynbox('You won, would you like to play again?', 'Continue?', ['Yes', 'No'])\r\n if yn == True:\r\n subprocess.call([sys.executable, os.path.realpath(__file__)] + sys.argv[1:])\r\n win.exitonclick()\r\n\r\n\r\n# to keep the window running\r\nwhile True:\r\n win.update()\r\n","repo_name":"GEOSAR15/GEO","sub_path":"AP.py","file_name":"AP.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42046812967","text":"# 6.1 Fibonacci Numbers\r\n# Figure 6-3: Recursive implementation of Fibonacci sequence\r\n\r\ndef fib_rec(n):\r\n \"\"\"\r\n Assumes n int >= 0\r\n Returns Fibonacci of n\r\n \"\"\"\r\n if n == 0 or n == 1:\r\n return 1\r\n else:\r\n return fib_rec(n-1) + fib_rec(n-2)\r\n \r\ndef fib_iter(n):\r\n \"\"\"\r\n Assumes n int >= 0\r\n Returns Fibonacci of n\r\n \"\"\"\r\n a, b = 0, 1\r\n for i in range(n+1):\r\n a, b = b, a + b\r\n return a\r\n \r\nif __name__ == \"__main__\":\r\n n = 10\r\n \r\n print(\"Recursive Fibonacci:\")\r\n for i in range(n+1):\r\n print(f\" fib({i}) = {fib_rec(i)}\")\r\n \r\n print()\r\n \r\n print(\"Iterative Fibonacci:\")\r\n for i in range(n+1):\r\n print(f\" fib({i}) = {fib_iter(i)}\")\r\n","repo_name":"samkramer/intro-computation-programming-python-3ed","sub_path":"ch06/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69969979047","text":"import re\nimport nltk\nimport numpy as np\n\nfrom dataloader import *\nfrom edit import *\nfrom eval import evaluation\n\n\nediterror,testdata =load_testdata() #加载测试数据\nvocab=load_vocab() #加载词典\nedit_dict=load_edit() #编辑距离概率\n\n# #加载ans.txt作为训练集\n# unigram_dict_ans=load_ngram(1) #一元文法词典+概率\n# bigram_dict_ans=load_ngram(2) #二元文法词典+概率\n# trigram_dict_ans=load_ngram(3) #三元文法词典+概率\n#\n# #加载外部语料作为训练\n# unigram_dict=load_unigram() #一元文法词典+概率\n# bigram_dict=load_bigram() #二元文法词典+概率\n\nclass spell_correction():\n\n def __init__(self,LM,channel,editdistance,corpus,defauttpro,lamda):\n self.LM=LM\n self.channel=channel\n self.editdistance=editdistance\n self.corpus=corpus\n self.defaultpro=defauttpro\n self.lamda=lamda\n\n # 定义LM词典\n if corpus==True:\n self.ngram_dict=load_ngram(int(LM))\n elif LM=='1':\n self.ngram_dict=unigram_reuters()\n elif LM=='2':\n self.ngram_dict=bigram_reuters()\n elif LM=='3':\n self.ngram_dict=trigram_reuters()\n\n #查找句子里面的词汇是否在词典里\n def find_in_vocab(self,words):\n return set(word for word in words if word in vocab)\n\n #判断词汇的错误类型\n def edit_type(self, candidate, word):\n wrong_type = edittype(candidate,word) # 和上面的不同\n if wrong_type == None: # 
w|w的情况\n return np.log(0.95)\n elif wrong_type[0] in edit_dict.keys():\n return edit_dict[wrong_type[0]]\n else:\n return self.defaultpro\n\n #返回LM-prob\n def ngram_prob(self,s):\n if s in self.ngram_dict.keys():\n return self.ngram_dict[s]\n else:\n return self.defaultpro\n\n #计算候选单词在句子里面的LM-prob\n def sentence_prob(self,candidate,j,sentence):\n if self.LM =='1':\n return self.ngram_prob(candidate)\n elif self.LM =='2':\n if j == 0:\n return self.ngram_prob(candidate + ' ' + sentence[j + 1].lower())\n else:\n return self.ngram_prob(candidate + ' ' + sentence[j + 1].lower()) + self.ngram_prob(sentence[j - 1].lower() + ' ' + candidate)\n elif self.LM=='3':\n if j == 0 and j + 2 < len(sentence):\n return self.ngram_prob(candidate + ' ' + sentence[j + 1].lower() + ' ' + sentence[j + 2].lower())\n elif j == len(sentence) - 1 and j - 2 > -1:\n return self.ngram_prob(sentence[j - 2] + ' ' + sentence[j - 1].lower() + ' ' + candidate)\n elif j == 1 and j + 2 < len(sentence):\n return self.ngram_prob(candidate + ' ' + sentence[j + 1].lower() + ' ' + sentence[j + 2].lower()) + \\\n self.ngram_prob(sentence[j - 1].lower() + ' ' + candidate + ' ' + sentence[j + 1].lower())\n elif j == len(sentence) - 2 and j - 2 > -1:\n return self.ngram_prob(sentence[j - 2] + ' ' + sentence[j - 1].lower() + ' ' + candidate) + \\\n self.ngram_prob(sentence[j - 1] + ' ' + candidate + ' ' + sentence[j + 1])\n else:\n if len(sentence) == 3:\n return self.ngram_prob(sentence[j - 1] + ' ' + candidate + ' ' + sentence[j + 1])\n else:\n return self.ngram_prob(sentence[j - 2] + ' ' + sentence[j - 1].lower() + ' ' + candidate) + \\\n self.ngram_prob(sentence[j - 1] + ' ' + candidate + ' ' + sentence[j + 1]) + \\\n self.ngram_prob(candidate + ' ' + sentence[j + 1] + ' ' + sentence[j + 2])\n else:\n print(' wrong LM type and corpus type!')\n return 0\n\n # 非词纠正\n def non_word_correct(self,sentence):\n '''\n 非词拼写错误纠正\n :param sentence: 还有错误的句子\n :return: wrong num 检测处理的错误数\n '''\n wrong = 0\n for j in range(len(sentence)):\n word = sentence[j]\n if bool(re.search(r\"[\\d.,/'-]\", word)) or word.lower() in vocab:\n continue\n word_lower = word.lower()\n candidates = self.find_in_vocab(edit1(word_lower))\n if self.editdistance==2 and len(candidates)==0: candidates = self.find_in_vocab(edit2(word_lower))\n p_flag = -2e5\n right = word\n for candidate in candidates:\n if self.channel==False:\n p = self.sentence_prob(candidate, j, sentence)\n else :\n p = self.lamda*self.sentence_prob(candidate, j, sentence) + self.edit_type(candidate, word_lower) #channel model for edits\n\n if p > p_flag:\n p_flag = p\n right = candidate\n # 还原大小写\n if not word.islower():\n flag = 0\n for each in word:\n flag += int(each.isupper())\n if flag == 1:\n right = right[0].upper() + right[1:]\n else:\n right = right.upper()\n sentence[j] = right # to do supper letters\n wrong += 1\n return wrong\n\n #词纠正\n def real_word_correct(self,sentence):\n '''\n 词拼写错误纠正\n :param sentence:\n :return:\n '''\n for j in range(len(sentence)):\n word = sentence[j]\n if bool(re.search(r\"[\\d.,/'-]\", word)):\n continue\n # edit distance = 1\n word_lower = word.lower()\n candidates = self.find_in_vocab(edit1(word_lower))\n if self.editdistance==2 and len(candidates) == 0: candidates = spell_correction.find_in_vocab(edit2(word_lower))\n # 加入原本的单词\n candidates.add(word_lower)\n p_flag = -2e5\n right = word_lower\n for candidate in candidates:\n if self.channel == False:\n p = self.sentence_prob(candidate, j, sentence)\n elif self.channel == True:\n p = 
self.lamda*self.sentence_prob(candidate, j, sentence) +self.edit_type(candidate, word_lower) # channel model for edits1\n else:\n print(\"wrong LM and Channel model choices ! \")\n return\n if p > p_flag:\n p_flag = p\n right = candidate\n # print(candidates,right)\n if right == word_lower:\n continue\n # 还原大小写\n if word != word_lower:\n flag = 0\n for each in word:\n flag += int(each.isupper())\n if flag == 1:\n right = right[0].upper() + right[1:]\n else:\n right = right.upper()\n sentence[j] = right\n if right.lower() != word_lower:\n return None\n\n\n def word_correct(self):\n '''\n 语法拼写纠正\n :return:\n '''\n sentences=[]\n for i in range(1000):\n sentence = testdata[i]\n wrong = self.non_word_correct(sentence) #先进行non-word拼写纠正\n if wrong < editerror[i]: #进行real-word拼写纠正\n self.real_word_correct(sentence)\n sentences.append(sentence)\n file = open('./result.txt', 'w')\n for index in range(1000):\n file.write(str(index + 1) + '\\t' + ' '.join(sentences[index]) + '\\n')\n file.close()\n\n\n\n","repo_name":"kokolerk/spell-correction-fudan","sub_path":"sid-homework-1/program/spell_correct2.py","file_name":"spell_correct2.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6439196845","text":"# supreme_dlw.py - part of the Loonymod project\n# import this file and use the supreme_dlw construct.\n\nfrom common import *\n\n# constructs and adapters\n\nclass RleBitmap(Construct):\n\tdef isEncoded(self, method, row):\n\t\treturn bool((ord(method[row / 8]) >> (row % 8)) & 1)\n\t\t\n\tdef _parse(self, stream, context):\n\t\trows = []\n\t\tfor row in range(24):\n\t\t\tif self.isEncoded(context[\"method\"], row):\n\t\t\t\tcols = []\n\t\t\t\twhile len(cols) < 32:\n\t\t\t\t\tcols += map(ord, ord(stream.read(1)) * stream.read(1))\n\t\t\t\trows.append(cols)\n\t\t\telse:\n\t\t\t\trows.append(map(ord, stream.read(32)))\n\t\treturn rows\n\t\t\n\tdef _build(self, obj, stream, context):\n\t\tfor row in range(24):\n\t\t\tcols = obj[row]\n\t\t\tif self.isEncoded(context[\"method\"], row):\n\t\t\t\tvalue, run = cols[0], 1\n\t\t\t\tfor pixel in cols[1:]:\n\t\t\t\t\tif pixel == value:\n\t\t\t\t\t\trun += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tstream.write(chr(run) + value.build())\n\t\t\t\t\t\tvalue, run = pixel, 1\n\t\t\t\tstream.write(chr(run) + value.build())\n\t\t\telse:\n\t\t\t\tstream.write(''.join(map(chr, cols)))\n\t\t\n\tdef _sizeof(self, context):\n\t\traise SizeofError\n\nclass RleLevel(Construct):\n\tdef _parse(self, stream, context):\n\t\twidth, height = context[\"width\"], context[\"height\"]\n\t\trawTiles = []\n\t\twhile len(rawTiles) < width * height:\n\t\t\trun = SLInt8(\"run\").parse(stream.read(1))\n\t\t\tif run < 0:\n\t\t\t\ttile = levelTile.parse(stream.read(levelTile.sizeof()))\n\t\t\t\tfor i in range(-run):\n\t\t\t\t\trawTiles.append(tile)\n\t\t\telse:\n\t\t\t\tfor i in range(run):\n\t\t\t\t\trawTiles.append(levelTile.parse(stream.read(levelTile.sizeof())))\n\t\trows = []\n\t\tfor row in range(height):\n\t\t\tcols = []\n\t\t\tfor col in range(width):\n\t\t\t\tcols.append(rawTiles[row * width + col])\n\t\t\trows.append(cols)\n\t\treturn rows\n\t\t\n\tdef _build(self, obj, stream, context):\n\t\ttiles = []\n\t\tfor row in obj:\n\t\t\tfor col in row:\n\t\t\t\ttiles.append(col)\n\t\tvalue, run = tiles[0], run\n\t\tfor tile in tiles[1:]:\n\t\t\tif tile == value and run < 127:\n\t\t\t\trun += 1\n\t\t\telse:\n\t\t\t\tstream.write(chr(run) + levelTile.build(value))\n\t\t\t\tvalue, run = tile, 
1\n\t\tstream.write(chr(run) + levelTile.build(value))\n\t\t\n\tdef _sizeof(self, context):\n\t\traise SizeofError\n\nclass ItemContainer(Construct):\n\tdef _parse(self, stream, context):\n\t\tresult = []\n\t\titemId = 0\n\t\tfor i in range(context[\"itemCount\"]):\n\t\t\tif itemId != 255:\n\t\t\t\titemId = ord(stream.read(1))\n\t\t\t\tdata = item.parse(stream.read(item.sizeof()))\n\t\t\t\tdata.itemId = itemId\n\t\t\t\tresult.append(data)\n\t\t\telse:\n\t\t\t\tdata = item.parse(stream.read(item.sizeof()))\n\t\t\t\tdata.itemId = 255\n\t\t\t\tresult.append(data)\n\t\treturn result\n\t\t\n\tdef _build(self, obj, stream, context):\n\t\tpart = 0\n\t\tfor data in obj:\n\t\t\tif data.itemId == 255:\n\t\t\t\tif part == 0:\n\t\t\t\t\tstream.write(chr(255))\n\t\t\t\t\tpart = 1\n\t\t\t\tstream.write(item.build(data))\n\t\t\telse:\n\t\t\t\tstream.write(chr(data.itemId))\n\t\t\t\tstream.write(item.build(data))\n\t\t\n\tdef _sizeof(self, context):\n\t\traise SizeofError\n\nclass ItemDropAdapter(Adapter):\n\tdef _encode(self, obj, ctx):\n\t\treturn chr(256 * (obj - int(obj))) + chr(obj)\n\tdef _decode(self, obj, ctx):\n\t\treturn ord(obj[1]) + ord(obj[0]) / 256.0\n\n# structures\n\nmonster = Struct(\"monster\",\n\tULInt8(\"x\"),\n\tULInt8(\"y\"),\n\tULInt8(\"type\"),\n\tULInt8(\"item\"),\n)\n\ntrigger = Struct(\"trigger\",\n\tULInt8(\"parameter\"),\n\tULInt8(\"type\"),\n\tULInt8(\"x\"),\n\tULInt8(\"y\"),\n\tULInt32(\"index1\"),\n\tULInt32(\"index2\"),\n)\n\neffect = Struct(\"effect\",\n\tEmbed(trigger),\n\tPackedString(\"text\"),\n)\n\nspecial = Struct(\"special\",\n\tULInt8(\"x\"),\n\tULInt8(\"y\"),\n\tULInt8(\"uses\"),\n\tBitStruct(\"length\",\n\t\tBitField(\"effects\", 5),\n\t\tBitField(\"triggers\", 3)\n\t),\n\tMetaRepeater(lambda ctx: ctx[\"length\"][\"triggers\"], trigger),\n\tMetaRepeater(lambda ctx: ctx[\"length\"][\"effects\"], effect),\n)\n\nlevelTile = Struct(\"levelTile\",\n\tULInt16(\"floor\"),\n\tULInt16(\"wall\"),\n\tULInt8(\"item\"),\n\tSLInt8(\"light\"),\n)\n\nlevel = Struct(\"level\",\n\tULInt8(\"width\"),\n\tULInt8(\"height\"),\n\tPackedString(\"name\"),\n\tPackedString(\"song\"),\n\tULInt8(\"monsterCount\"),\n\tMetaRepeater(lambda ctx: ctx[\"monsterCount\"], monster),\n\tULInt8(\"specialCount\"),\n\tMetaRepeater(lambda ctx: ctx[\"specialCount\"], special),\n\tBitStruct(\"flags\",\n\t\tFlag(\"underwater\"),\n\t\tFlag(\"starry\"),\n\t\tFlag(\"lantern\"),\n\t\tFlag(\"torch\"),\n\t\tFlag(\"secret\"),\n\t\tFlag(\"hub\"),\n\t\tFlag(\"raining\"),\n\t\tFlag(\"snowing\"),\n\t\tFlag(\"reserved5\"),\n\t\tFlag(\"reserved4\"),\n\t\tFlag(\"reserved3\"),\n\t\tFlag(\"reserved2\"),\n\t\tFlag(\"reserved1\"),\n\t\tFlag(\"reserved0\"),\n\t\tFlag(\"stealth\"),\n\t\tFlag(\"underlava\"),\n\t),\n\tULInt16(\"brains\"),\n\tULInt16(\"candles\"),\n\tItemDropAdapter(Bytes(\"itemDrop\", 2)),\n\tRleLevel(\"tiles\"),\n)\n\ntileImage = Struct(\"tileImage\",\n\tBytes(\"method\", 3),\n\tRleBitmap(\"bitmap\"),\n)\n\ntileData = Struct(\"tileData\",\n\tBitStruct(\"flags\",\n\t\tFlag(\"animate\"),\n\t\tFlag(\"canpushon\"),\n\t\tFlag(\"pushable\"),\n\t\tFlag(\"lava\"),\n\t\tFlag(\"water\"),\n\t\tFlag(\"muddy\"),\n\t\tFlag(\"icy\"),\n\t\tFlag(\"impassible\"),\n\t\tFlag(\"bouncy\"),\n\t\tFlag(\"enemyProof\"),\n\t\tFlag(\"ghostProof\"),\n\t\tFlag(\"bunnyPath\"),\n\t\tFlag(\"minecartPath\"),\n\t\tFlag(\"transparentRoof\"),\n\t\tFlag(\"animateHit\"),\n\t\tFlag(\"animateStep\"),\n\t),\n\tULInt16(\"nextTile\"),\n)\n\nitem = 
Struct(\"item\",\n\tPackedString(\"name\"),\n\tSLInt8(\"offsetX\"),\n\tSLInt8(\"offsetY\"),\n\tULInt16(\"sprite\"),\n\tULInt8(\"fromColor\"),\n\tULInt8(\"toColor\"),\n\tSLInt8(\"light\"),\n\tULInt8(\"rarity\"),\n\tBitStruct(\"flags\",\n\t\tPadding(1),\n\t\tFlag(\"useTileGraphic\"),\n\t\tFlag(\"loonyColor\"),\n\t\tFlag(\"pickup\"),\n\t\tFlag(\"bulletproof\"),\n\t\tFlag(\"impassible\"),\n\t\tFlag(\"glowing\"),\n\t\tFlag(\"shadow\"),\n\t\tPadding(8)\n\t),\n\tBitStruct(\"themes\",\n\t\tFlag(\"crate\"),\n\t\tFlag(\"rock\"),\n\t\tFlag(\"tree\"),\n\t\tFlag(\"door\"),\n\t\tFlag(\"bulletproof\"),\n\t\tFlag(\"obstacle\"),\n\t\tFlag(\"decoration\"),\n\t\tFlag(\"pickup\"),\n\t\tFlag(\"chair\"),\n\t\tFlag(\"entrance\"),\n\t\tFlag(\"food\"),\n\t\tFlag(\"collectible\"),\n\t\tFlag(\"key\"),\n\t\tFlag(\"powerup\"),\n\t\tFlag(\"weapon\"),\n\t\tFlag(\"sign\"),\n\t\tPadding(7),\n\t\tFlag(\"custom\"),\n\t\tPadding(8)\n\t),\n\tBitStruct(\"trigger\",\n\t\tFlag(\"always\"),\n\t\tFlag(\"minecart\"),\n\t\tFlag(\"machete\"),\n\t\tFlag(\"friendbump\"),\n\t\tFlag(\"enemybump\"),\n\t\tFlag(\"playerbump\"),\n\t\tFlag(\"shoot\"),\n\t\tFlag(\"pickup\"),\n\t\tPadding(8)\n\t),\n\tULInt8(\"effect\"),\n\tSLInt16(\"effectData\"),\n\tPackedString(\"message\", 64),\n\tULInt16(\"sound\")\n)\n\nsound = Struct(\"sound\",\n\tULInt16(\"soundId\"),\n\tPackedString(\"name\"),\n\tBitStruct(\"theme\",\n\t\tPadding(2),\n\t\tFlag(\"custom\"),\n\t\tFlag(\"vocal\"),\n\t\tFlag(\"effect\"),\n\t\tFlag(\"monster\"),\n\t\tFlag(\"player\"),\n\t\tFlag(\"intface\")\n\t),\n\tSLInt32(\"dataSize\"),\n\tMetaField(\"data\", lambda ctx: ctx[\"dataSize\"])\n)\n\nsupreme_dlw = Struct(\"world\",\n\tString(\"gameid\", 8),\n\tPackedString(\"author\"),\n\tPackedString(\"name\"),\n\tULInt8(\"levelCount\"),\n\tULInt32(\"totalPoints\"),\n\tULInt16(\"tileCount\"),\n\tMetaRepeater(lambda ctx: ctx[\"tileCount\"], tileImage),\n\tMetaRepeater(lambda ctx: ctx[\"tileCount\"], tileData),\n\tMetaRepeater(lambda ctx: ctx[\"levelCount\"], level),\n\tULInt16(\"itemCount\"),\n\tItemContainer(\"items\"),\n\tSLInt16(\"soundCount\"),\n\tMetaRepeater(lambda ctx: ctx[\"soundCount\"], sound)\n)\n","repo_name":"SpaceManiac/Lunatic","sub_path":"tools/formats/supreme_dlw.py","file_name":"supreme_dlw.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"74267462566","text":"from django.urls import path\nimport api.views as api_views\n\nurlpatterns = [\n path('charts', api_views.ChartsListView.as_view()),\n path('update_submission_ids//', api_views.UpdateSubmissionIds.as_view()),\n path('defect_origin_area_count',\n api_views.DefectOriginAreaCount.as_view()),\n path('fault_source_count', api_views.FaultSourceCount.as_view()),\n path('defect_origin_area_ncr_cost',\n api_views.DefectOriginAreaNCRCost.as_view()),\n path('supplier_ncr_cost', api_views.SupplierNCRCost.as_view()),\n path('current_area_issues',\n api_views.CurrentAreaIssues.as_view()),\n path('average_time_wasted', api_views.AverageTimeWasted.as_view()),\n path('average_time_to_done', api_views.AverageTimetoDone.as_view()),\n path('defect_categories_per_supplier',\n api_views.DefectCategoriesPerSupplier.as_view()),\n path('total_conq_cost', api_views.TotalCONQCost.as_view()),\n path('total_part_cost', api_views.TotalPartCost.as_view()),\n path('fault_source_ncr_cost', api_views.FaultSourceNCRCost.as_view()),\n path('submission_status_ncr_cost', api_views.SubmissionStatusNCRCost.as_view()),\n path('liability_count_per_month', 
api_views.LiabilityCountPerMonth.as_view()),\n path('liability_average_closing_time_per_month', api_views.LiabilityAverageClosingTimePerMonth.as_view()),\n path('conq_per_classification', api_views.CONQPerClassification.as_view()),\n path('conq_per_liability', api_views.CONQPerLiability.as_view()),\n path('conq_per_month', api_views.CONQPerMonth.as_view())\n\n]\n","repo_name":"josewails/faultregister","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34557185915","text":"def get_single_integer(integers):\n \"\"\"\n Given a list of `integers`, every element appears twice except for one.\n Return that single one.\n \"\"\"\n if not integers:\n return None\n p = integers[0]\n for i in range(1, len(integers)):\n p = p ^ integers[i]\n return p\n\n\ndef reverse_integer(number):\n t = 0\n if number > 0:\n positive = True\n else:\n positive = False\n number = -number\n while number != 0:\n t = 10 * t + number % 10\n number /= 10\n return t if positive else -t\n\n\ndef max_subarray(a):\n \"\"\"\n Maximum subarray problem\n\n In case of all negative numbers, zero-length subarrays does not count.\n http://en.wikipedia.org/wiki/Maximum_subarray_problem\n \"\"\"\n if not a:\n raise Exception('The input array must contain at least one number.')\n max_sum = a[0]\n\n # Initialize a list `m` of same size of a (which may be redundant for this\n # problem alone.\n # `m[i]` represents the max sum of all subarrays ending `i`\n m = [0] * len(a)\n m[0] = max_sum\n for i in range(1, len(a)):\n m[i] = max(a[i], m[i - 1] + a[i])\n\n # Find the largest in `m`\n for e in m:\n if e > max_sum:\n max_sum = e\n return max_sum\n\n\ndef max_subarray_alt(a):\n \"\"\"\n Simpler implementation without extra space (auxiliary array `m`)\n \"\"\"\n if not a:\n raise Exception('The input array must contain at least one number.')\n max_sum = a[0]\n max_current = max_sum\n for i in range(1, len(a)):\n max_current = max(a[i], max_current + a[i])\n max_sum = max(max_sum, max_current)\n return max_sum\n\n\ndef get_max_profit_ii(prices):\n \"\"\"\n Multiple transactions allowed\n\n :param prices: array of prices where `prices[i]` represents the price on\n ith day\n \"\"\"\n if not prices:\n return 0\n max_profit = 0\n for i in range(1, len(prices)):\n if prices[i] > prices[i - 1]:\n max_profit += prices[i] - prices[i - 1]\n return max_profit\n\n\ndef get_max_profit_ii_alt(prices):\n \"\"\"\n Utilized idea from `max_subarray`\n \"\"\"\n if not prices:\n return 0\n max_profit = 0\n\n # `m[i]` represents the max profit ending `i`\n m = [0] * len(prices)\n m[0] = 0\n for i in range(1, len(prices)):\n if prices[i] > prices[i - 1]:\n m[i] = m[i - 1] + prices[i] - prices[i - 1]\n else:\n m[i] = 0\n m += [0]\n # Get the sum of highest points\n for i, e in enumerate(m):\n if e == 0:\n max_profit += m[i - 1]\n return max_profit\n\n\ndef get_max_profit_iii(prices):\n \"\"\"\n Two transactions allowed\n \"\"\"\n if not prices:\n return 0\n n = len(prices)\n m1 = [0] * n\n m2 = [0] * n\n max_profit1 = 0\n min_price1 = prices[0]\n max_profit2 = 0\n max_price2 = prices[-1]\n for i in range(n):\n max_profit1 = max(max_profit1, prices[i] - min_price1)\n m1[i] = max_profit1\n min_price1 = min(min_price1, prices[i])\n for i in range(n):\n max_profit2 = max(max_profit2, max_price2 - prices[n - 1 - i])\n m2[n - 1 - i] = max_profit2\n max_price2 = max(max_price2, prices[n - 1 - i])\n max_profit = 0\n for i in 
range(n):\n max_profit = max(m1[i] + m2[i], max_profit)\n return max_profit\n\n\ndef get_max_profit_alt(prices):\n \"\"\"\n :param prices: array of prices where `prices[i]` represents the price on\n ith day\n Only one transaction allowed\n \"\"\"\n if not prices:\n return 0\n min_index = 0\n max_profit = 0\n buy_index = 0\n sell_index = 0\n for i in range(len(prices)):\n if prices[i] < prices[min_index]:\n min_index = i\n profit = prices[i] - prices[min_index]\n if profit > max_profit:\n max_profit = profit\n buy_index = min_index\n sell_index = i\n return prices[sell_index] - prices[buy_index]\n\n\ndef get_max_profit_alt2(prices):\n if not prices:\n return 0\n # small[i] indicates smallest price ending `i` (`i` included)\n # large[i] indicates largest prices after `i`\n n = len(prices)\n small = [0] * n\n large = [0] * n\n small[0] = prices[0]\n large[n - 1] = prices[n - 1]\n for i in range(1, n):\n small[i] = min(small[i - 1], prices[i])\n large[n - 1 - i] = max(large[n - 1], prices[n - 1 - i])\n max_profit = 0\n for i in range(n):\n d = large[i] - small[i]\n if d > max_profit:\n max_profit = d\n return max_profit\n\n\ndef get_max_profit(prices):\n if not prices:\n return 0\n max_profit = 0\n min_price = prices[0]\n for i, p in enumerate(prices, 0):\n max_profit = max(max_profit, (p - min_price))\n min_price = min(min_price, p)\n return max_profit\n\n\ndef search_insert(a, target):\n \"\"\"\n Given a sorted array and a target value, return the index if the\n target is found. If not, return the index where it would be if it\n were inserted in order.\n \"\"\"\n if not a:\n return 0\n n = len(a)\n left = 0\n right = n - 1\n while left <= right:\n mid = (left + right) / 2\n if a[mid] == target:\n return mid\n elif a[mid] < target:\n left = mid + 1\n else:\n right = mid - 1\n return left if left >= 0 else 0\n\n\ndef remove_element(a, element):\n \"\"\"\n Given an array and a value, remove all instances of that value in place\n and return the new length.\n\n The order of elements can be changed. 
It doesn't matter what you leave\n beyond the new length.\n\n Implemented the C way.\n \"\"\"\n n = len(a)\n m = 0 # new length\n for i in range(n):\n a[m] = a[i]\n if a[i] == element:\n pass\n else:\n m += 1\n return m\n\n\ndef backtrack_binary(n, A):\n if n < 1:\n print(A)\n else:\n A[n - 1] = 0\n backtrack_binary(n - 1, A)\n A[n - 1] = 1\n backtrack_binary(n - 1, A)\n\n\ndef get_gcd(a, b):\n while b != 0:\n (a, b) = (b, a % b)\n return a\n\n\ndef get_lcm(a, b):\n return a * b / get_gcd(a, b)\n\n\ndef largest_rectangle(height):\n if not height:\n return 0\n if len(height) == 1:\n return height[0]\n stack = [] # The bottom element in the stack is the lowest\n max_area = 0\n n = len(height)\n for i in range(n + 1):\n while stack and (i == n or height[stack[-1]] > height[i]):\n h = height[stack.pop()]\n if stack:\n w = i - stack[-1] - 1\n else:\n w = i\n max_area = max(max_area, h * w)\n stack.append(i)\n return max_area\n\n\nif __name__ == '__main__':\n assert reverse_integer(-173) == -371\n assert reverse_integer(976) == 679\n assert reverse_integer(500) == 5\n assert reverse_integer(0) == 0\n a = [2, 5, 5, 8, 9, 6, 2, 3, 9, 3, 6]\n assert get_single_integer(a) == 8\n b = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n c = [-3, -4, -5, -1, -9]\n assert max_subarray(b) == 6\n assert max_subarray_alt(b) == 6\n assert max_subarray(c) == -1\n assert max_subarray_alt(c) == -1\n d = [4, 2, 5, 7, 4, 3, 3, 6, 9]\n e = [3, 2, 1]\n f = [6, 1, 3, 2, 4, 7]\n g = [2, 4, 1]\n assert get_max_profit(d) == 7\n assert get_max_profit(e) == 0\n assert get_max_profit(f) == 6\n assert get_max_profit(g) == 2\n assert(get_max_profit_ii_alt(d) == 11)\n assert(get_max_profit_ii(d) == 11)\n assert(get_max_profit_ii_alt(f) == 7)\n assert(get_max_profit_ii(f) == 7)\n assert(get_max_profit_ii_alt(g) == 2)\n assert(get_max_profit_ii(g) == 2)\n s = [1, 3, 5, 6]\n assert(search_insert(s, 5) == 2)\n assert(search_insert(s, 2) == 1)\n assert(search_insert(s, 7) == 4)\n assert(search_insert(s, 0) == 0)\n assert remove_element([3, 1, 2, 4, 5, 1, 7], 1) == 5\n A = [0] * 5\n #backtrack_binary(5, A)\n h = [6, 1, 3, 2, 4, 7]\n assert get_max_profit_iii(h) == 7\n assert get_max_profit_iii(d) == 11\n assert get_gcd(48, 18) == 6\n assert get_gcd(18, 48) == 6\n assert get_lcm(48, 18) == 144\n h1 = [4, 2]\n r1 = largest_rectangle(h1)\n print(r1)\n","repo_name":"shichao-an/practice","sub_path":"misc/misc_number.py","file_name":"misc_number.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33734769415","text":"# a\n\nx = {i: (i - 1) * i for i in range(1, 31)}\n\nprint(x)\n\n# b\nfor a, b in x.items():\n print(f\"{a}: {b}\")\n\n# c\nsumm = 0\nfor y in x.values():\n summ += y\nprint(\"summary =\", summ)\n\n# d\n\na = int(input(\"Write a key you want to remove: \"))\n\nif a in x:\n del x[a]\n print(\"After the removal of the item with key\", a, \":\")\n print(x)\nelse:\n print(\"Wrong input!\")\n","repo_name":"rauf112/SE226-LAB4","sub_path":"SE226–LAB#4-4.py","file_name":"SE226–LAB#4-4.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15652035323","text":"import json\nimport sqlite3\n\n# create connection to database\nconn = sqlite3.connect('Company.db')\n\n# create cursor object to execute SQL commands\ncur = conn.cursor()\n\n# open JSON file and read contents\nwith open('employees_part_2.json') as jsonfile:\n data = json.load(jsonfile)\n for 
row in data:\n # insert row data into employees table\n cur.execute('''INSERT INTO Employees \n (EMPLOYEE_ID, FIRST_NAME, LAST_NAME, EMAIL, PHONE_NUMBER, HIRE_DATE, JOB_ID, SALARY, MANAGER_ID, DEPARTMENT_ID)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',\n (row['EMPLOYEE_ID'], row['FIRST_NAME'], row['LAST_NAME'], row['EMAIL'], row['PHONE_NUMBER'], row['HIRE_DATE'], row['JOB_ID'], row['SALARY'], row['MANAGER_ID'], row['DEPARTMENT_ID']))\n\n# commit changes and close connection\nconn.commit()\nconn.close()","repo_name":"mohammedbaa/Sql-in-python-","sub_path":"Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18814792020","text":"import csv\n\n\ndef carregar_acessos():\n X = []\n Y = []\n arquivo = open('acesso_pagina.csv', 'rt', encoding='utf-8')\n leitor = csv.reader(arquivo)\n next(leitor)\n for acessou_home, acessou_como_funciona, acessou_contato, comprou in leitor:\n X.append([int(acessou_home), int(acessou_como_funciona), int(acessou_contato)])\n Y.append([int(comprou)])\n return X, Y\n\n\ndef carregar_buscas():\n X = []\n Y = []\n arquivo = open('buscas.csv', 'rt', encoding='utf-8')\n leitor = csv.reader(arquivo)\n next(leitor)\n for home, busca, logado, comprou in leitor:\n X.append([int(home), busca, int(logado)])\n Y.append([int(comprou)])\n return X, Y\n","repo_name":"higornucci/classificacao-aulas","sub_path":"acessos/dados.py","file_name":"dados.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"12013420337","text":"'''\ndivide pictiures into train and val tow parts\n'''\nimport os\nfrom random import sample\nfrom collections import defaultdict\nfrom PIL import Image\nimport numpy as np\n\ndef creat_paths(root_path, classes):\n image_paths = defaultdict(list)\n for class_ in classes:\n image_dir = os.path.join(root_path, class_)\n for filepath in os.listdir(image_dir):\n if filepath.endswith('.jpg'):\n image_paths[class_].append(os.path.join(image_dir, filepath))\n return image_paths\n\n\ndef prepare_data(image_paths, data_path, classes, ratio = 0.2):\n train_sizes = 0\n val_sizes = 0\n for class_ in classes:\n \n # 创建每类的文件夹\n train_path = os.path.join(data_path,'train', class_)\n if os.path.exists(train_path):\n if len(os.listdir(train_path)) > 0:\n train_file_num = int(os.listdir(train_path)[-1][0:5])\n else:\n train_file_num = 0\n else: \n os.makedirs(train_path)\n \n val_path = os.path.join(data_path,'val', class_)\n if os.path.exists(val_path):\n if len(os.listdir(val_path)) > 0:\n val_file_num = int(os.listdir(val_path)[-1][0:5])\n else:\n val_file_num = 0\n else:\n os.makedirs(val_path)\n \n # 如果存在文件夹,其中文件数\n \n #train_file_num = int(os.listdir(train_path)[-1][0:5])\n #val_file_num = int(os.listdir(val_path)[-1][0:5])\n \n # train 数目\n train_size = int(len(image_paths[class_]) * (1 - ratio))\n val_size = len(image_paths[class_]) - train_size\n train_sizes += train_size\n val_sizes += val_size\n np.random.shuffle(image_paths[class_])\n train_files = image_paths[class_][:train_size]\n val_files = image_paths[class_][train_size:]\n\n # 生成训练数据\n for name, path in enumerate(train_files):\n pic_name = train_path + '/{:05d}.jpg'.format(train_file_num + name + 1)\n images = Image.open(path)\n images.save(pic_name)\n # 生成val数据\n for name, path in enumerate(val_files):\n pic_name = val_path + '/{:05d}.jpg'.format(val_file_num + name + 1)\n images = 
Image.open(path)\n images.save(pic_name)\n print(class_+' is done')\n return train_sizes, val_sizes \n\nclasses = ['forward', 'left', 'right'\n ,'stop', 'turn_left', 'turn_right', 'walk']\n\n\ndire_paths = creat_paths('c:/py/car_go/data/', classes)\n\n\nprepare_data(dire_paths, 'c:/py/car_go', classes)","repo_name":"ginger21/project-j","sub_path":"useful/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71888505129","text":"# 백준 물병\n# https://www.acmicpc.net/problem/1052\n\nn,k = map(int,input().split())\ntemp = 2\nanswer = list()\nwhile temp < n :\n temp *= 2\n if temp > n :\n n -= temp // 2\n answer.append(temp//2)\n temp = 2\n\n\nif len(answer)+n < k :\n print(0)\nelse :\n answer.append(n)\n bottle = 0\n while len(answer) > k :\n now = answer.pop()\n bottle += answer[-1] - now\n answer[-1] *= 2\n print(bottle)\n\n\n","repo_name":"do0134/solostudy","sub_path":"algorithm/3월/0315/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29154471492","text":"## SPDX-License-Identifier: MIT\r\n## Copyright 2023, 2022 Emily Bui\r\n\r\nfrom urllib.parse import urlparse\r\nfrom urllib.parse import parse_qs\r\n#import requests\r\nfrom pprint import pprint\r\nfrom github import Github\r\nimport time\r\nimport datetime\r\nfrom datetime import date, timedelta\r\nimport csv\r\ntoken = ''\r\npageNo = 1\r\npageNoEmpty = False\r\ng = Github('')\r\n\r\n# Just point base_url to your proxy server\r\nclient = Github(base_url=\"http://localhost:3000\", login_or_token='')\r\n\r\n# a simple call to test if it is working\r\n#repo = client.get_repo(\"hsborges/github-proxy-server\")\r\n\r\n#specifying date ranges of the gathered repos\r\nstart = date(2008,1,1)\r\nend=date(2022,6,16)\r\ndelta = timedelta(days=365)\r\n\r\n#specify number of stars increments. Queries are broken up into these incremented bounds as to not overload the server\r\n\r\nloopstart = 100\r\nloopend = loopstart/10\r\nwritten = 0\r\napilimit = 1\r\ntotal = 1\r\npageNo = -1\r\n\r\n#open a basic csv to input the repo info in. This CSV only contains basic info, as the attributes we were looking for were\r\n#not yet fleshed out. 
This script is moreso used to get the general info into a CSV, which can be later used to extract all\r\n#necessary attributes\r\nfile = open(\"yourOutputFile.csv\", \"a\", newline='')\r\nwriter = csv.writer(file)\r\nfile_header = ['Name', 'Language', 'Stars', 'Has Snap']\r\nwriter.writerow(file_header)\r\n\r\n#while the lower bound of the star increment boundary is above or equal to 0\r\nwhile loopstart >= 0:\r\n #slowing down the program as to not go over the API limit\r\n time.sleep(7)\r\n print(loopstart)\r\n #deincrement the upper and lower bounds\r\n loopstart = int(loopstart/10)\r\n loopend = int(loopend/10)\r\n print(\"We are at \" + str(loopstart) + \"-\" + str(loopend))\r\n\r\n #The queries were split up by these star increments and by language, as not doing so would make the query too large\r\n #and time consuming at the time to manage.\r\n queryWords = 'language:javascript topic:jest sort:stars-desc stars:{}..{}'.format(loopend,loopstart)\r\n #queryWords = 'language:TypeScript topic:jest sort:stars-desc created:{}..{}'.format(loopstart,loopend)\r\n print(queryWords)\r\n\r\n #Query the repositories and increment by pages\r\n repositories = g.search_repositories(query=queryWords)\r\n repositories.get_page(0)\r\n print(str(repositories.totalCount))\r\n pageNo += 1\r\n for repos in repositories:\r\n #print repo information and current API limit\r\n print(str(written) + \" pageNo:\" + str(pageNo) + \" total:\" + str(total) + \"startdate:\" + str(loopstart) + \"apilimit: \" + str(apilimit))\r\n if written <= 200:\r\n print(repos.html_url)\r\n\r\n #for each repo, locate if the repository has snap files. This into will later be used to fetch the exact\r\n #number of snap files\r\n findSnap = \"repo:{} extension:snap\".format(repos.full_name)\r\n time.sleep(7)\r\n searchingSnap = g.search_code(query=findSnap)\r\n hasSnap = False\r\n for files in searchingSnap:\r\n if files == None:\r\n hasSnap = False\r\n\r\n else:\r\n hasSnap = True\r\n break\r\n\r\n #record necessary information into spreadsheet\r\n if hasSnap == True:\r\n rowInfo = [repos.full_name, \"JS\", repos.stargazers_count,\"true\"]\r\n writer.writerow(rowInfo)\r\n elif hasSnap == False:\r\n rowInfo = [repos.full_name, \"JS\", repos.stargazers_count,\"false\"]\r\n writer.writerow(rowInfo)\r\n\r\n #increment counts\r\n written+=1\r\n total += 1\r\n apilimit += 1\r\n\r\n #In order to bypass the API limit, the program needs to take a break when the limit has been reached and wait\r\n #until it has the permission to continue\r\n if apilimit > 3600:\r\n print(\"Break\")\r\n apilimit = 0\r\n time.sleep(3900)\r\n\r\n #To not go over the API limit, the program will sleep for a minute every 200 repositories recorded, and then resets\r\n else:\r\n time.sleep(60)\r\n written = 0\r\n\r\n if loopend == 1:\r\n break\r\n\r\nfile.close()\r\n","repo_name":"hscrocha/SnapshotTestingDataset","sub_path":"gathering_scripts/python/GetReposJestToCSV.py","file_name":"GetReposJestToCSV.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"14025933078","text":"\"\"\"empty message\n\nRevision ID: 215b30712e1a\nRevises: b99bbbc6fb05\nCreate Date: 2022-09-22 13:01:44.183628\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '215b30712e1a'\ndown_revision = 'b99bbbc6fb05'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('comments', sa.Column('title', sa.Text(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('comments', 'title')\n # ### end Alembic commands ###\n","repo_name":"BrenLau/9-man-Social","sub_path":"migrations/versions/20220922_130144_.py","file_name":"20220922_130144_.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74923133289","text":"import random\nimport time\n\n\ndef insertSort(n):\n tab = []\n for i in range(n):\n tab.append(random.randint(1, 20))\n print(\"Nieposortowana tablica:\\n\", tab, \"\\n\")\n i = 1\n while i < len(tab):\n start = time.process_time()\n key = tab[i]\n j = i - 1\n while j >= 0 and key < tab[j]:\n tab[j + 1] = tab[j]\n j -= 1\n tab[j + 1] = key\n i += 1\n print(\"Posortowana tablica:\\n\", tab, \"\\n\\nCzas sortowania:\", time.process_time() - start)\n return \"\"\n\n\nprint(insertSort(50))\n","repo_name":"tTargiel/UNI-Python-Programming","sub_path":"Lista 07/zadanie_02.py","file_name":"zadanie_02.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70185193768","text":"from matplotlib import pyplot as plt\r\n\r\n\r\ndef showBarGraphs(subj_names=[], subj_totals=[]):\r\n plt.bar(subj_names, subj_totals, label=\"Total hours\")\r\n\r\n plt.title(\"Time Subject Tracker\")\r\n plt.xlabel(\"Subjects\")\r\n plt.ylabel(\"Time (in hours)\")\r\n\r\n plt.legend()\r\n\r\n plt.show()\r\n\r\n\r\nread = open(\"Saves.txt\", \"r\")\r\nlines = list(read.readlines())\r\nread.close()\r\n\r\nsubj = []\r\ntime_totals = []\r\n\r\n\r\nfor line in lines:\r\n partitions = str(line).partition(\": \")\r\n print(partitions)\r\n if float(partitions[2]) > 0:\r\n subj.append(partitions[0])\r\n time_totals.append(float(partitions[2]) / 60)\r\n\r\n\r\nshowBarGraphs(subj, time_totals)\r\n","repo_name":"Fingolfin7/Time-Tracker","sub_path":"Time-tracker/Time-tracker/Bar Graph.py","file_name":"Bar Graph.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19817750450","text":"from ... import registry\nfrom . 
import common\n\n# pylint: disable=C0103,C0415,W0613,C0301\n\n\n@registry.reg(\"rocm.conv2d_bias_add_identity.config\")\ndef conv2d_config(func_attrs):\n import ck_lib\n\n op_kind = ck_lib.library.Conv2dKind.GroupConv2dBiasRelu\n extra_kind = ck_lib.library.TensorOperation.AddAdd\n func_attrs[\"op_instance\"] = common.extract_config(op_kind, extra_kind)\n\n\n@registry.reg(\"rocm.conv2d_bias_add_identity.gen_profiler\")\ndef gen_profiler(func_attrs, workdir, shape_template):\n return common.gen_profiler(\n func_attrs=func_attrs,\n workdir=workdir,\n shape_template=shape_template,\n conv2d_flag=\"bias_add_identity\",\n extra_code=common.HEADER_CODE.render(),\n )\n\n\n@registry.reg(\"rocm.conv2d_bias_add_identity.gen_function\")\ndef gen_function(\n func_attrs,\n exec_cond_remplate,\n shape_eval_template,\n shape_save_template,\n):\n return common.gen_function(\n func_attrs,\n exec_cond_remplate,\n shape_eval_template,\n shape_save_template,\n \"bias_add_identity\",\n common.HEADER_CODE.render(),\n )\n\n\n@registry.reg(\"rocm.conv2d_bias_add_identity.func_decl\")\ndef conv2d_gen_function_decl(func_attrs):\n func_name = func_attrs[\"name\"]\n return common.gen_function_decl(\n func_name=func_name, conv2d_flag=\"bias_add_identity\"\n )\n\n\n@registry.reg(\"rocm.conv2d_bias_add_identity.func_call\")\ndef conv2d_gen_function_call(func_attrs, indent=\" \"):\n return common.gen_function_call(func_attrs, indent, conv2d_flag=\"bias_add_identity\")\n\n\n@registry.reg(\"rocm.conv2d_bias_add_identity.filter\")\ndef conv2d_function_filter(cfg, func_attrs, x_shape):\n \"\"\"Generates function filter.\n\n Parameters\n ----------\n cfg: str\n The filename generated for profiler.\n func_attrs : Dict\n Stores the operation attributes.\n x_shape:\n Input shapes.\n\n Returns\n -------\n bool\n If input cfg should be filtered.\n \"\"\"\n return True\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/backend/rocm/conv2d/conv2d_bias_add.py","file_name":"conv2d_bias_add.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"42990018434","text":"import argparse\nimport re\nimport locale\nimport subprocess\nfrom subprocess import PIPE\nimport sys\nimport os\nimport time\n\n__version__ = '1.0.1'\n\nEVENT_LINE_RE = re.compile(r\"(\\S+): (\\S+) (\\S+) (\\S+)$\")\nSTORE_LINE_RE = re.compile(r\"(\\S+) (\\S+) (\\S+) (\\S+) (\\S+)$\")\n\nclass Colors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndef dlog(msg):\n print(str(msg))\n\ndef ilog(msg):\n print(Colors.OKBLUE + str(msg) + Colors.ENDC)\n\ndef elog(msg):\n print(Colors.FAIL + str(msg) + Colors.ENDC)\n\nclass AdbEventRecorder(object):\n def __init__(self, adb):\n self.adb_command = adb\n self.adb_shell_command = adb + ['shell']\n\n def push(self, src, dst):\n if subprocess.call(self.adb_command + ['push', src, dst]) != 0:\n raise OSError('push failed')\n\n def goToActivity(self, activity):\n ilog('Go to the activity:' + activity)\n if subprocess.call(self.adb_shell_command + ['am', 'start', '-a', activity]) != 0:\n raise OSError('push failed')\n\n def checkPermission(self):\n ilog('Checking permission')\n if subprocess.call(self.adb_command + ['root']) != 0:\n raise OSError('Insufficient permissions')\n\n def listAllEvent(self):\n ilog('List all events')\n adb = subprocess.Popen(self.adb_shell_command + 
['getevent', '-i'], stdin=PIPE, stdout=PIPE,\n stderr=PIPE, shell=True)\n while adb.poll() is None:\n try:\n line = adb.stdout.readline().decode('utf-8', 'replace').strip()\n if len(line) != 0:\n dlog(line)\n except KeyboardInterrupt:\n break\n\n def displayAllEvents(self):\n adb = subprocess.Popen(self.adb_shell_command + ['getevent', '-r', '-q'], stdin=PIPE, stdout=PIPE,\n stderr=PIPE)\n\n while adb.poll() is None:\n try:\n millis = int(round(time.time() * 1000))\n line = adb.stdout.readline().decode('utf-8', 'replace').strip()\n if len(line) != 0:\n dlog(\"{} {}\".format(millis, line))\n except KeyboardInterrupt:\n break\n if len(line) == 0:\n break\n\n def record(self, fpath, eventNum=None):\n ilog('Start recording')\n record_command = self.adb_shell_command + ['getevent']\n adb = subprocess.Popen(record_command,\n stdin=PIPE, stdout=PIPE,\n stderr=PIPE)\n\n outputFile = open(fpath, 'w')\n while adb.poll() is None:\n try:\n millis = int(round(time.time() * 1000))\n line = adb.stdout.readline().decode('utf-8', 'replace').strip()\n match = EVENT_LINE_RE.match(line.strip())\n if match is not None:\n dev, etype, ecode, data = match.groups()\n ## Filter event\n if eventNum is not None and '/dev/input/event%s' % (eventNum) != dev:\n continue\n ## Write to the file\n etype, ecode, data = int(etype, 16), int(ecode, 16), int(data, 16)\n rline = \"%s %s %s %s %s\\n\" % (millis, dev, etype, ecode, data)\n dlog(rline)\n outputFile.write(rline)\n except KeyboardInterrupt:\n break\n if len(line) == 0:\n break\n outputFile.close()\n ilog('End recording')\n\n def play(self, fpath, repeat=False):\n ilog('Start playing')\n while True:\n lastTs = None\n with open(fpath) as fp:\n for line in fp:\n match = STORE_LINE_RE.match(line.strip())\n ts, dev, etype, ecode, data = match.groups()\n ts = float(ts)\n if lastTs and (ts - lastTs) > 0:\n delta_second = (ts - lastTs) / 1000\n time.sleep(delta_second)\n\n lastTs = ts\n cmds = self.adb_shell_command + ['sendevent', dev, etype, ecode, data]\n dlog(cmds)\n if subprocess.call(cmds) != 0:\n raise OSError('sendevent failed')\n\n if repeat == False:\n break\n ilog('End playing')\n\ndef main(*args):\n parser = argparse.ArgumentParser(\n description='Record events from an Android device')\n parser.add_argument('-e', '--adb', metavar='COMMAND', default='adb', type=str,\n help='Use the given adb binary and arguments.')\n parser.add_argument('--device', action='store_true',\n help='Directs command to the only connected USB device; ' +\n 'returns an error if more than one USB device is ' +\n 'present. 
' +\n 'Corresponds to the \"-d\" option of adb.')\n parser.add_argument('--repeat', action='store_true',\n help='Repeat to play the events.')\n parser.add_argument('--show', action='store_true',\n help='Show all of the events from the device')\n parser.add_argument('-n', '--event', type=str,\n help='The event number, n, to record /dev/input/event[n]')\n parser.add_argument('-r', '--record', type=str,\n help='Store the record data to the file')\n parser.add_argument('-p', '--play', type=str,\n help='Play the record data')\n parser.add_argument('--activity', type=str,\n help='Go the activity when play the record events')\n\n args = parser.parse_args()\n adb = args.adb.split(' ')\n if args.device:\n adb += ['-d']\n\n adb_recorder = AdbEventRecorder(adb)\n adb_recorder.listAllEvent()\n if args.record:\n adb_recorder.checkPermission()\n adb_recorder.record(args.record, args.event)\n elif args.play and os.path.exists(args.play):\n if args.activity:\n adb_recorder.goToActivity(args.activity)\n adb_recorder.play(args.play, args.repeat)\n elif args.show:\n adb_recorder.checkPermission()\n adb_recorder.displayAllEvents()\n else:\n elog('Add -r [Path] to record')\n elog('Add -p [Path] to play')\n\nif __name__ == '__main__':\n main(*sys.argv)\n","repo_name":"tzutalin/adb-event-record","sub_path":"adbrecord.py","file_name":"adbrecord.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"53"} +{"seq_id":"32594457004","text":"import time\nimport sys\nimport csv\nfrom gmssl.sm4 import CryptSM4, SM4_ENCRYPT, SM4_DECRYPT\nfrom gmssl import sm2, sm3, func\n\n# require info from the client\n# info length: 4 + 64 + 4 + 6\n\ndef chain(info, file_path):\n\n\n# with open('database/visitors.csv') as file:\n# prev_visitor = list(csv.reader(file))[-1]\n\n# info = {\n# 'time_stamp': eval(prev_visitor[0]), \n# 'signature': eval(prev_visitor[1]),\n# 'ip_addr': eval(prev_visitor[2]),\n# 'id': eval(prev_visitor[3])\n# }\n\n with open(file_path, 'rb') as file:\n file_enc = file.read()\n print(1, len(file_enc))\n assert len(file_enc) < 2 ** 30\n\n data = func.destructure(file_enc)[0]\n file_new = b''\n\n stream = func.cat_bytes(info)\n last_block = file_enc[len(file_enc) - 32:] + stream\n\n # set the first bit of flags(visited) to 1\n file_new = ((1 << 7) + file_enc[0] % (1 << 7)).to_bytes(1, func.endianness) + \\\n file_enc[1:] + stream + sm3.hash(last_block)\n\n with open(file_path, 'wb') as file:\n file.write(file_new)\n print(2, len(file_new))\n","repo_name":"loranloranvl/cryptocontest","sub_path":"chain.py","file_name":"chain.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74264887846","text":"from django.db import transaction, DatabaseError\nfrom django.db.models import Sum, F\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom drf_yasg.utils import swagger_auto_schema\nfrom requests import Response\nfrom rest_framework import status\n\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\n\nfrom nolsatu_courses.api.authentications import UserAuthAPIView\nfrom nolsatu_courses.api.response import ErrorResponse\nfrom nolsatu_courses.apps.products.models import Product, Order, Cart\n\nfrom .serializers import AddCartSerializer, CartIDSerializer, CartSerializer, CartCountSerializer, FullCartSerializer\n\n\nclass AddToCartView(UserAuthAPIView):\n 
@swagger_auto_schema(tags=['Carts'], operation_description=\"Add to My Carts\",\n responses={status.HTTP_200_OK: AddCartSerializer()},\n request_body=AddCartSerializer)\n def post(self, request):\n data = request.data\n\n serializer = AddCartSerializer(data=data)\n\n if serializer.is_valid():\n status_check = [Order.STATUS.created, Order.STATUS.pending, Order.STATUS.success]\n\n pick_product = get_object_or_404(Product, id=data.get('product_id'))\n user_order_item = pick_product.orderitem_set.filter(order__user=self.request.user).first()\n\n try:\n if pick_product.course.has_enrolled(user=self.request.user):\n return ErrorResponse(error_message=_('Gagal Menambahkan, Anda Telah terdaftar '\n 'di dalam kursus!'))\n elif user_order_item and user_order_item.order.status in status_check:\n return ErrorResponse(error_message=_('Gagal Menambahkan, Anda Telah Melakukan '\n 'Pembelian Pada Kursus ini!'))\n else:\n Cart.objects.get(product=pick_product, user=self.request.user)\n return ErrorResponse(error_message=_('Gagal Menambahkan, Kursus Sudah Ada '\n 'di Keranjang!'))\n except Cart.DoesNotExist:\n Cart(product=pick_product, user=self.request.user).save()\n return Response({'message': _('Berhasil Menambahkan Kursus ke Keranjang')})\n else:\n return ErrorResponse(error_message=_('Gagal Menambahkan Kursus ke Keranjang'))\n\n\nclass DeleteItemCartView(UserAuthAPIView):\n @swagger_auto_schema(tags=['Carts'], operation_description=\"Delete Item in Carts\",\n responses={status.HTTP_200_OK: CartIDSerializer()},\n request_body=CartIDSerializer)\n def post(self, request):\n data = request.data\n serializer = CartIDSerializer(data=data)\n\n if serializer.is_valid():\n validated_data = serializer.validated_data\n\n carts = Cart.objects.filter(id__in=validated_data['cart_ids'], user=self.request.user)\n\n if not carts:\n return Response({'message': _('Kursus tidak ada dalam keranjang')})\n\n try:\n with transaction.atomic():\n carts.delete()\n except DatabaseError:\n return Response({'message': _('Gagal menghapus kursus dalam keranjang')})\n\n return Response({'message': _('Berhasil menghapus kursus pada keranjang')})\n\n\nclass CartListView(UserAuthAPIView):\n @swagger_auto_schema(tags=['Carts'], operation_description=\"My Carts\",\n responses={status.HTTP_200_OK: FullCartSerializer()}, )\n def get(self, request):\n carts = Cart.objects.filter(user=self.request.user)\n\n total = carts.annotate(final_price=F('product__price') - F('product__discount')\n ).aggregate(total_price=Sum('final_price'))\n\n carts = [\n {\n \"id\": c.id,\n \"product\": {\n 'id': c.product.id,\n 'price': c.product.price,\n 'code': c.product.code,\n 'discount_type': c.product.discount_type,\n 'discount_value': c.product.discount_value,\n 'discount': c.product.discount,\n 'course': {'id': c.product.course.id, 'title': c.product.course.title},\n }\n } for c in carts\n ]\n\n data = {\n \"carts\": carts,\n \"total\": total['total_price'] or 0\n }\n\n serializer = FullCartSerializer(data=data)\n\n if serializer.is_valid():\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n\n\nclass CheckoutView(UserAuthAPIView):\n @swagger_auto_schema(tags=['Carts'], operation_description=\"Checkout\",\n responses={status.HTTP_200_OK: CartSerializer(many=True)})\n def get(self, request):\n carts = Cart.objects.filter(user=self.request.user, is_select=True)\n\n total = carts.annotate(final_price=F('product__price') - F('product__discount')\n ).aggregate(total_price=Sum('final_price'))\n\n data = [{\"id\": c.id,\n \"product\": 
{'id': c.product.id,\n 'price': c.product.price,\n 'code': c.product.code,\n 'discount_type': c.product.discount_type,\n 'discount_value': c.product.discount_value,\n 'discount': c.product.discount,\n 'course': {'id': c.product.course.id, 'title': c.product.course.title},\n }\n } for c in carts]\n\n serializer = CartSerializer(data=data, many=True)\n\n if serializer.is_valid(raise_exception=True):\n resp = {\n \"carts\": serializer.data,\n \"total\": total['total_price']\n }\n return Response(resp)\n else:\n return Response(serializer.errors)\n\n\nclass CartCountView(UserAuthAPIView):\n @swagger_auto_schema(\n tags=['Carts'],\n operation_description=\"Cart Count\",\n responses={\n status.HTTP_200_OK: CartCountSerializer()\n }\n )\n def get(self, request):\n data = {\n \"count\": Cart.objects.filter(user=self.request.user).count(),\n }\n\n serializer = CartCountSerializer(data=data)\n if serializer.is_valid(raise_exception=True):\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n","repo_name":"nolsatuid/courses","sub_path":"nolsatu_courses/api/carts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38991042909","text":"from fer import FER\nimport cv2\nfrom sys import argv\nfrom os import listdir\n\npath = (\n \"../Datasets/Facial Emotion Recognition/AffectNet/Annotated/images/\"\n)\n\n\ndef get_emotion(img_path: str) -> str:\n try:\n img = cv2.imread(img_path)\n detector = FER()\n emotions = detector.detect_emotions(img)\n # print(emotions)\n weights = emotions[0][\"emotions\"]\n weights = [(weights[emotion], emotion) for emotion in weights]\n weights.sort()\n return weights[-1][1]\n except:\n return \"No face\"\n\n\ndef main():\n labels = listdir(path)\n\n total_correct = 0\n total_present = 0\n\n for label in labels:\n if label == \"Contempt\":\n continue\n else:\n label_correct = 0\n label_present = 0\n print(\"Testing:\", label, flush=True)\n for image in listdir(path + label):\n got = get_emotion(path + label + \"/\" + image)\n expected = label.lower()\n if expected == \"anger\":\n expected = \"angry\"\n if got == expected:\n label_correct += 1\n total_correct += 1\n label_present += 1\n total_present += 1\n accuracy = label_correct / label_present\n print(\"Accuracy for\", label, \":\", accuracy, flush=True)\n print(\"Label Present:\", label_present, flush=True)\n print(\"Label Correct:\", label_correct, flush=True)\n print(\"\", flush=True)\n print(\n \"Total Accuracy:\",\n total_correct / total_present,\n flush=True,\n )\n print(\"Total Present:\", total_present, flush=True)\n print(\"Total Correct:\", total_correct, flush=True)\n\n\nmain()\n","repo_name":"ChitturiSaiSuman/Emotion-Aware-Music-Recommendation-System","sub_path":"MTCNN/Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"30179116705","text":"import random\nimport operator\n\nHEADER = 'What is the result of the expression?'\nOPERATIONS = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n}\n\n\ndef prepare_round_data(start: int = 0, end: int = 20):\n a = random.randint(start, end)\n b = random.randint(start, end)\n op = random.choice(list(OPERATIONS.keys()))\n correct_answer = OPERATIONS[op](a, b)\n\n return f'{a} {op} {b}', 
f'{correct_answer}'\n","repo_name":"ikrivosheev/python-project-lvl1","sub_path":"brain_games/games/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31649080648","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\n\"\"\"\n怪しい枝刈りで通したのでメモを残しておきます。\n普通にやるとx軸、y軸ごとに二分探索を60回程度やる必要があります。\nが、これだとTLEします。\n(雑な計測ですが、私のコードだと50回ぐらいでTLEになりそうです。)\n\nなので、二分探索する範囲を減らしたいです。\n交点のx,y座標は最悪abs(2*10**8)くらいになると思われるのですが、\nNが大きいケースでの出力解はもっと原点に近かったり、\nあるいは交点が出現する範囲はもっと狭いのではないかと推測します。\n\nそこで交点を乱択します。\nこのコードでは1000個程度の交点座標を計算し、\nその中で上から20番目と下から20番目の点を\n二分探索の探索上下限にしました。\n\nすると良い感じに計算量が減ったらしく、ぎりぎり通るようになっています。\n(たぶん4600~4700msecぐらいで通っているはずです。)\n\n# もしこれを読んでいる方で、\n  このコードの計算量改善点があれば教えていただけると大変喜びます。\n\"\"\"\n\n\nimport sys\nread = sys.stdin.buffer.read\nfrom operator import itemgetter\nimport random\n\nclass FenwickTree:\n def __init__(self, n: int):\n self.__n = n\n self.__data = [0] * self.__n\n\n def add(self, p: int, x: int):\n # assert (0 <= p) & (p < self.__n)\n p += 1\n while(p <= self.__n):\n self.__data[p - 1] += x\n p += p & -p\n\n def sum(self, l: int, r: int):\n # assert (0 <= l) & (l <= r) & (r <= self.__n)\n return self.__sum(r) - self.__sum(l)\n\n\n def _sum(self, r: int):\n s = 0\n while(r > 0):\n s += self.__data[r - 1]\n r -= r & -r\n return s\n\nn,*data = map(int,read().split())\n\nabc = []\nit = iter(data)\nfor a,b,c in zip(it,it,it):\n abc.append([a,b,c])\n\n# 交点乱択\nxs = []\nys = []\nfor _ in range(1000):\n i = random.randint(0,n-1)\n j = (random.randint(1,n-1) + i) % n\n ai,bi,ci = abc[i]\n aj,bj,cj = abc[j]\n x = (bi*cj-ci*bj)/(aj*bi-ai*bj)\n y = (ai*cj-ci*aj)/(bj*ai-bi*aj)\n xs.append(x)\n ys.append(y)\n\nxs.sort()\nys.sort()\n\n\n\ndef check(x, abc, degs, deg_idx):\n ys = []\n for i in range(n):\n a,b,c = abc[i]\n y = (-a*x+c)/b\n ys.append([y,i])\n ys.sort(key=itemgetter(0))\n \n ft = FenwickTree(n)\n\n left = 0\n for i in range(n):\n idx = deg_idx[ys[i][1]]\n left += ft._sum(idx)\n ft.add(idx,1)\n \n # print(x,left,under)\n \n if left <= under:\n return True\n else:\n return False\n \n\ndef calc(abc, upper , downer):\n degs = []\n for i in range(n):\n a,b,c = abc[i]\n degs.append([-a/b,i])\n degs.sort()\n deg_idx = [-1] * n\n for i in range(n):\n _, idx = degs[i]\n deg_idx[idx] = i\n \n \n ok = downer\n ng = upper\n for _ in range(60):\n mid = (ok+ng)/2\n if mid == ok or mid == ng:\n break\n if check(mid, abc, degs, deg_idx):\n ok = mid\n else:\n ng = mid\n dif = ng-ok\n if dif * 10**9 < min(abs(ok),abs(ng)) or dif < 10**-9:\n # print(_)\n break\n\n return ok\n\nunder = (n*(n-1)//2 - 1)//2\nans_x = calc(abc, xs[-20], xs[20])\n\nabc_y = []\nfor a,b,c in abc:\n abc_y.append([-b/a, 1, -c/a])\n\nans_y = calc(abc_y, ys[-20], ys[20])\n\nprint(ans_x, ans_y)\n\n","repo_name":"komajun365/competitive_programming","sub_path":"others/tenka1-2017/e/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70060971688","text":"import cv2 as cv\nimport sys\n\nimg = 
cv.imread(\"./img/dog.jpg\")\n\nif img is None:\n sys.exit(\"이미지를 불러올 수 없습니다.\")\n\n# 이진화\n# threshold -> (이미지(단일채널) , min, max, 이진화 방법)\nt, bin_img = cv.threshold(img[:,:,2], 0,255, cv.THRESH_BINARY + cv.THRESH_OTSU)\nprint(\"오츄 알고리즘이 찾은 최적의 임계값 : \" , t)\n\n# threshold\nt, bin_img2 = cv.threshold(img[:,:,2] , 119.0, 255, cv.THRESH_BINARY)\n\ncv.imshow(\"R channel original\" , img[:,:,2])\ncv.imshow(\"R channel binarization\" , bin_img)\ncv.imshow(\"R channel binarization2\" , bin_img2)\n\ncv.waitKey()\ncv.destroyAllWindows()\n","repo_name":"Mkpong/ComputerVision","sub_path":"CV-2-2/OTSU.py","file_name":"OTSU.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38308267173","text":"print(\"Approximate zero:\")\ndef f(x):\n return x**3 - x + 4\n\n# Initialize endpoints\na = -2\nb = 0\n\n# Tolerance level\nepsilon = 0.0001\n\n# Loop until the interval size is smaller than epsilon\nwhile abs(a - b) > epsilon:\n c = (a + b) / 2.0 # Compute midpoint\n if f(c) == 0: # If c is a root, we are done\n break\n elif f(a) * f(c) < 0: # f(c) and f(a) have opposite signs, replace b with c\n b = c\n else: # f(c) and f(b) have opposite signs, replace a with c\n a = c\n\n# At this point, either c is a root or [a, b] is a very small interval containing a root\nprint(\"Approximate zero:\", c)","repo_name":"waileungl/Data_structure_and_Algorithm","sub_path":"calculus.py","file_name":"calculus.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13409678179","text":"import random\nimport time\n\nimport click\n\nimport terminedia\nfrom terminedia import Effects\nfrom terminedia.values import FULL_BLOCK\n\n\n# RESOLUTION = \"square\" # \"full\", \"high\", \"sextant\", \"braille\"\n\nK = terminedia.KeyCodes\nD = terminedia.Directions\n\nAPPLE = \"\\U0001F34E\"\n\n\nclass GameOver(BaseException):\n pass\n\n\nclass Snake:\n\n length_step = 10\n\n def __init__(self, pos, direction, length=20):\n self.pos = pos\n self.direction = direction\n self.length = length\n self.body = []\n self.remove = []\n\n def update(self, game):\n self.pos += self.direction\n self.body.append(self.pos)\n while len(self.body) > self.length:\n self.remove.append(self.body.pop(0))\n self.check_dead(game)\n self.check_item(game)\n\n def check_item(self, game):\n x, y = game.drawable.at_parent(self.pos)\n # The apple emoji is a double width character\n for rx in (x, x - 1):\n if rx < 0:\n continue\n if (rx, y) in game.items:\n game.eat_item((rx, y))\n\n def check_dead(self, game):\n if game.drawable.get_at(self.pos) in (True, FULL_BLOCK):\n raise GameOver()\n\n def draw(self, scr):\n scr.draw.set(self.pos)\n if self.remove:\n for pos in self.remove:\n scr.draw.reset(pos)\n self.remove[:] = []\n\n\n@click.command()\n@click.option(\n \"resolution\",\n \"--resolution\",\n \"-r\",\n required=False,\n default=\"square\",\n help=\"Game resolution to use: square, high, sextant, braille\",\n)\ndef main(resolution):\n \"\"\"Terminedia snake-game!\"\"\"\n\n snake = Snake((2, 2), direction=D.RIGHT)\n\n with terminedia.Screen() as scr, terminedia.keyboard():\n try:\n game = Game(scr, snake, resolution=resolution)\n game.run()\n except GameOver:\n pass\n\n print(\"You died!\\n\\n\")\n\n\nclass Game:\n def __init__(self, scr, snake, resolution):\n self.scr = scr\n self.drawable = getattr(scr, resolution)\n self.snake = snake\n self.items = {}\n self.tick = 
0\n self.last_item_taken = 0\n self.score = 0\n self.last_score = None\n\n def run(self):\n self.start_scene()\n while True:\n key = terminedia.inkey()\n if key == K.ESC:\n raise GameOver()\n\n if key == K.DOWN:\n self.snake.direction = D.DOWN\n elif key == K.UP:\n self.snake.direction = D.UP\n elif key == K.RIGHT:\n self.snake.direction = D.RIGHT\n elif key == K.LEFT:\n self.snake.direction = D.LEFT\n\n self.snake.update(self)\n self.snake.draw(self.drawable)\n\n self.maybe_create_item()\n self.show_status()\n\n time.sleep(1 / 30)\n self.tick += 1\n\n def start_scene(self):\n width, height = self.drawable.get_size()\n if self.drawable.at_parent((0, height - 2)).y > self.scr.size.y - 2:\n height -= 4\n self.drawable.draw.rect((0, 0, width, height - 2), color=(1, 0, 1))\n\n def show_status(self):\n if self.score == self.last_score:\n return\n width, height = self.scr.get_size()\n center = width // 2\n score_str = f\"{self.score:<6d}\"\n self.scr.print_at(\n (center - 3, height - 1),\n score_str,\n color=(1, 0.5, 0),\n effects=Effects.fullwidth,\n )\n self.last_score = self.score\n\n def maybe_create_item(self):\n if not (self.tick - self.last_item_taken > 30 and random.random() < 0.05):\n return\n if self.items:\n return\n\n width, height = self.scr.get_size()\n pos = random.randrange(1, width - 1), random.randrange(1, height - 2)\n self.items[pos] = True\n self.scr.context.color = 1, 0, 0\n self.scr.print_at(pos, APPLE)\n self.scr.context.color = terminedia.DEFAULT_FG\n\n def eat_item(self, pos):\n item = self.items.pop(pos, None)\n self.scr.reset_at(pos)\n if not item:\n return\n self.snake.length += self.snake.length_step\n self.score += 100\n self.last_item_taken = self.tick\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jsbueno/terminedia","sub_path":"terminedia/examples/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"53"} +{"seq_id":"14863276277","text":"\"\"\"\n@项目名称 : api_test\n@时间 : 2020 2020/9/18 20:40\n@作者 : 周振全\n@文件名 :config.py\n@IDE :PyCharm\n\n\"\"\"\n\nimport os\nimport yaml\nfrom configparser import ConfigParser\nfrom Common.Path import CONFDIR\n\n\nclass config(ConfigParser):\n def __init__(self, filename):\n super().__init__()\n self.filename = filename\n self.read(filename, encoding='utf-8')\n\n def write_data(self, section, option, content):\n self.set(section, option, content)\n self.write(fp=open(self.filename, \"w\"))\n\n\nf = open(CONFDIR + os.sep + 'config.yaml')\ndata = f.read()\nconf = yaml.safe_load(data)\nprint(conf['log']['level'])\n","repo_name":"zhouzhenquan/Api_again","sub_path":"Common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14767138366","text":"from Utils import DataUtils, resnet_block, InstanceNormalization\r\nfrom CycleGAN import CycleGAN\r\nfrom tensorflow.keras.layers import Conv2D, Conv2DTranspose, Activation, Input, LeakyReLU\r\nfrom tensorflow.keras.initializers import RandomNormal\r\nfrom tensorflow.keras.models import Model\r\nimport os\r\n\r\n\r\nclass CycleGAN_TR1_TR2(CycleGAN):\r\n\r\n def __init__(self, input_shape, checkpoint_path):\r\n self.n_resnet = 3\r\n super().__init__(input_shape, checkpoint_path)\r\n\r\n\r\n def build_generator(self, image_shape):\r\n # weight initialization\r\n init = RandomNormal(stddev=0.02)\r\n # image input\r\n in_image = 
Input(shape=image_shape)\r\n # c7s1-64\r\n g = Conv2D(64, (7, 7), padding='same', kernel_initializer=init)(in_image)\r\n g = InstanceNormalization()(g)\r\n g = Activation('relu')(g)\r\n # d128\r\n g = Conv2D(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=init)(g)\r\n g = InstanceNormalization()(g)\r\n g = Activation('relu')(g)\r\n # d256\r\n g = Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=init)(g)\r\n g = InstanceNormalization()(g)\r\n g = Activation('relu')(g)\r\n # R256\r\n for _ in range(self.n_resnet):\r\n g = resnet_block(256, g)\r\n # u128\r\n g = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=init)(g)\r\n g = InstanceNormalization()(g)\r\n g = Activation('relu')(g)\r\n # u64\r\n g = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=init)(g)\r\n g = InstanceNormalization()(g)\r\n g = Activation('relu')(g)\r\n # c7s1-3\r\n g = Conv2D(1, (7, 7), padding='same', kernel_initializer=init)(g)\r\n g = InstanceNormalization()(g)\r\n out_image = Activation('tanh')(g)\r\n # define model\r\n model = Model(in_image, out_image)\r\n return model\r\n\r\n def build_discriminator(self, image_shape):\r\n # weight initialization\r\n init = RandomNormal(stddev=0.02)\r\n # source image input\r\n in_image = Input(shape=image_shape)\r\n # C64\r\n d = Conv2D(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(in_image)\r\n d = LeakyReLU(alpha=0.2)(d)\r\n # C128\r\n d = Conv2D(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d)\r\n d = InstanceNormalization()(d)\r\n d = LeakyReLU(alpha=0.2)(d)\r\n # C256\r\n d = Conv2D(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d)\r\n d = InstanceNormalization()(d)\r\n d = LeakyReLU(alpha=0.2)(d)\r\n # C512\r\n d = Conv2D(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d)\r\n d = InstanceNormalization()(d)\r\n d = LeakyReLU(alpha=0.2)(d)\r\n # second last output layer\r\n d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)\r\n d = InstanceNormalization()(d)\r\n d = LeakyReLU(alpha=0.2)(d)\r\n # patch output\r\n patch_out = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)\r\n # define model\r\n model = Model(in_image, patch_out)\r\n return model\r\n\r\nif __name__ == '__main__':\r\n path2TR1 = os.path.join(os.getcwd(), 'Tr1', 'TrainT1')\r\n path2TR2 = os.path.join(os.getcwd(), 'Tr2', 'TrainT2')\r\n checkpoint_path = os.path.join(os.getcwd(), 'Trained_Model4')\r\n\r\n if not os.path.isdir(checkpoint_path):\r\n os.mkdir(checkpoint_path)\r\n\r\n batch_size = 4\r\n epochs = 100\r\n # load images from\r\n images_x = DataUtils(path2TR1, (220,184)).get_data(batch_size)\r\n images_y = DataUtils(path2TR2, (220,184)).get_data(batch_size)\r\n\r\n sample_x_data = next(iter(images_x))\r\n sample_y_data = next(iter(images_y))\r\n\r\n cycleGan = CycleGAN_TR1_TR2((220,184,1), checkpoint_path)\r\n\r\n cycleGan.train(images_x, images_y, epochs,plot_results=True, sample_data=(sample_x_data, sample_y_data))\r\n","repo_name":"BharathSD/CycleGAN-For-MRI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"5497519313","text":"#!/usr/bin/python3\n\nfrom collections import Counter\nimport matplotlib.pyplot as plt\n\nopen_file = open('./dest/data.txt', 'r')\n\nthe_list = []\n\nfor line in open_file:\n s_line = line.strip()\n l_list = 
s_line.split()\n the_num = int(l_list[-1])\n the_list.append(the_num)\n\nthe_count = Counter(the_list)\n\nthe_max = max(the_list)\n\nmax_plus_one = the_max + 1\n\ndata_height = []\ndata_number = []\nfor the_number in range(1, max_plus_one):\n data_number.append(the_number)\n data_height.append(the_count[the_number])\n\n \nplt.bar(data_number, data_height, color='#098303')\nplt.show()\n\nopen_file.close()\n\n","repo_name":"Undiscovered-Data/covid_poly_a_python","sub_path":"covid_poly_a/separate_commands/i_plot.py","file_name":"i_plot.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23348961976","text":"import json\nimport yaml\nimport csv\nfrom pprint import pprint\n\nFILENAME_JSON='input.json'\nFILENAME_YAML='input.yaml'\nFILENAME_CSV='input.csv'\n\n# with open(FILENAME_JSON, 'r') as json_file:\n# json_data = json.load(json_file)\n# pprint(json_data)\n\n#read in yaml file\nwith open(FILENAME_YAML, 'r') as yaml_file:\n yaml_data = yaml.load(yaml_file)\n\n#QUery list of dicts from yaml input\nyaml_list_of_dicts = yaml_data['data']\n\n#Read in csv file\nwith open (FILENAME_CSV, 'r') as csv_file:\n reader = csv.reader(csv_file)\n names = next(reader)\n lines = [l for l in reader]\n\n#Transform csv data into list of dicts\ncsv_list_of_dicts = [\n {\n names[col_num]: col_val\n for col_num, col_val in enumerate(line)\n } for line in lines\n]\n#Combine yaml list of dicts with csv list of dicts\ncombined_list_of_dicts = yaml_list_of_dicts + csv_list_of_dicts\n#Output combined data into a csv file\nwith open ('output.csv', 'w') as csv_out:\n header = ','.join(names)\n csv_out.write(header + '\\n')\n for dict_val in combined_list_of_dicts:\n row_list = [str(dict_val[name]) for name in names]\n row_string = ','.join(row_list)\n csv_out.write(row_string+ '\\n')\n\n#Outpot combined data into a JSON file\nwith open('output.json', 'w') as json_out:\n json.dump(combined_list_of_dicts, json_out)\n","repo_name":"adrianomucha/npd_c2_a4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73249515688","text":"import cv2\nimport datetime\n\nclass Display:\n def __init__(self):\n self.input_key_array = ['a', 'b', 'c']\n\n def get_file_name(self, input_value):\n file_name = ''\n if input_value == self.input_key_array[0]:\n file_name = 'video/parkmovie.mp4'\n elif input_value == self.input_key_array[1]:\n file_name = 'video/roadmovie.mp4'\n elif input_value == self.input_key_array[2]:\n file_name = 'video/black.mp4'\n return file_name\n\n\n #Display Fullscreen mode\n def imshow_fullscreen(self, winname, file_name):\n cv2.namedWindow(winname, cv2.WINDOW_NORMAL)\n cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n cv2.imshow(winname, file_name)\n\n\n\n #キー入力した際の時間を表示\n def print_typing_time(self):\n dt_now = datetime.datetime.now()\n print(dt_now)\n\n #動画表示\n def play_video(self, file_name):\n video_file = cv2.VideoCapture(file_name)\n file_name = ''\n fps = 30\n flag = False\n while(video_file.isOpened()):\n ret, frame = video_file.read()\n #frame = cv2.resize(frame, dsize=(1920, 2160))\n if ret:\n frame = cv2.resize(frame, dsize=(640, 640))\n\n #If Display on Dicited Screen\n #cv2.imshow(file_name, frame)\n\n #If Display on Full Screen\n self.imshow_fullscreen('screen', frame)\n\n else:\n print('Loop Here')\n 
video_file.set(cv2.CAP_PROP_POS_FRAMES, 0)\n\n key = cv2.waitKey(fps) & 0xFF\n if key == ord('q'):\n break\n elif key == ord(self.input_key_array[0]):\n file_name = self.get_file_name(self.input_key_array[0])\n print('Now parkmovie.mp4 on Air')\n elif key == ord(self.input_key_array[1]):\n file_name = self.get_file_name(self.input_key_array[1])\n print('Now roadmovie.mp4 on Air')\n elif key == ord(self.input_key_array[2]):\n file_name = self.get_file_name(self.input_key_array[2])\n print('Now black.mp4 on Air')\n\n if file_name != '':\n self.print_typing_time()\n flag = True\n break\n\n if flag == True:\n self.play_video(file_name)\n video_file.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n i = 0\n display = Display()\n\n while 1:\n i = i + 1\n print('')\n print('Loop: '+str(i))\n input_key = input()\n flag = None\n file_name = display.get_file_name(input_key)\n if file_name != '':\n flag = True\n\n if flag == True:\n display.play_video(file_name)\n\n\n\n","repo_name":"tkskky-tttoshi/video_player_python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13492363402","text":"import socket\nimport json\n\nSERVER_NAME = 't35977451f.zicp.vip'\nSERVER_PORT = 48041\nclass TCPClient:\n def __init__(self, host = SERVER_NAME, port = SERVER_PORT):\n self.host = host\n self.port = port\n\n def send_message(self, message):\n print(\"发送数据:\",message)\n self.host = socket.gethostbyname(SERVER_NAME)\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n print(self.host, self.port)\n sock.connect((self.host, self.port))\n sock.sendall(message.encode())\n response = sock.recv(1024).decode()\n print(\"接收数据:\",response)\n return response\n\n def send_json(self,json_data):\n return self.send_message(json.dumps(json_data, indent=4))\n\nif __name__ == '__main__':\n prompt = \"我是说,我在那,我在干什么\"\n data = {\n \"prompt\": prompt,\n \"max_tokens\": 300,\n \"n\": 1,\n \"stop\": \".\",\n \"temperature\": 0.1,\n }\n client = TCPClient()\n ret=client.send_json(data)\n\n print(ret)","repo_name":"jonny201/chatgpt","sub_path":"tcp_client3.py","file_name":"tcp_client3.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20620537908","text":"from ROOT import *\nimport os\nimport sys\n\nfrom array import array\n\ndef rescale(f_sys, h_eventinfo):\n hist_list = []\n hist_list = [x.GetName() for x in f_sys.GetListOfKeys()]\n h_eventinfo_sys = f_sys.Get(\"EventInfo\")\n\n bratio = 1.0\n if \"TT_\" in f_sys.GetName() and not \"Bkg\" in f_sys.GetName(): bratio = 356.4/831.76\n\n h_out =[]\n for hist in hist_list:\n if hist == \"EventInfo\" or \"Weights\" in hist: continue\n h_tmp = f_sys.Get(hist)\n h_tmp.Scale(h_eventinfo.GetBinContent(2)/(h_eventinfo_sys.GetBinContent(2)*bratio))\n h_out.append(h_tmp)\n\n return h_out\n\ndef write_envelope(sys_name, h_central, h_sys_list, h_eventinfo, h_weights):\n #Find maximum, minimum bin errors\n h_sys_weighted_list = []\n for index, hist in enumerate(h_sys_list):\n #hist.Scale(h_eventinfo.GetBinContent(2)/(h_weights.GetBinContent(index+1)*bratio))\n h_sys_weighted_list.append(hist)\n\n h_up = h_central.Clone()\n h_down = h_central.Clone()\n \n if h_central.GetDimension() == 1:\n for ibin in range(h_central.GetNbinsX()+2):\n minimum = float(\"inf\")\n maximum = float(\"-inf\")\n\n for hist in h_sys_weighted_list:\n 
minimum = min(minimum, hist.GetBinContent(ibin))\n maximum = max(maximum, hist.GetBinContent(ibin))\n\n h_up.SetBinContent(ibin, maximum)\n h_down.SetBinContent(ibin, minimum)\n elif h_central.GetDimension() == 2:\n for ixbin in range(h_central.GetNbinsX()+2):\n for iybin in range(h_central.GetNbinsY()+2):\n minimum = float(\"inf\")\n maximum = float(\"-inf\")\n\n for hist in h_sys_weighted_list:\n minimum = min(minimum, hist.GetBinContent(ixbin, iybin))\n maximum = max(maximum, hist.GetBinContent(ixbin, iybin))\n\n h_up.SetBinContent(ixbin, iybin, maximum)\n h_down.SetBinContent(ixbin, iybin, minimum)\n\n upname = \"__\"+sys_name+\"up\"\n downname= \"__\"+sys_name+\"down\"\n \n h_up.SetName(h_central.GetName()+upname)\n h_down.SetName(h_central.GetName()+downname)\n\n return [h_up, h_down]\n\ndef bSFnorm(h_bSF, h_tmp):\n hist_name = h_tmp.GetName()\n btag_sys = hist_name.split('__')[-1]\n binnum = 2\n if any(i in hist_name for i in ['__lf', '__hf', '__cferr']):\n binnum = h_bSF.GetXaxis().FindBin(str(btag_sys))\n\n if h_bSF.GetBinContent(binnum) > 0:\n h_tmp.Scale(h_bSF.GetBinContent(1)/h_bSF.GetBinContent(binnum))\n\n return h_tmp\n\ndef postProcess(input_path, proc, year, isCP5=True):\n if not os.path.exists(input_path+'/post'):\n try:\n os.makedirs(input_path+'/post')\n except OSError:\n print(input_path+'/post: already exists')\n print(\"Begin Process \"+str(os.getpid())+\" \"+str(proc))\n \n if any(i in proc for i in ['Data', 'QCD']):\n return\n\n f_sample = TFile.Open(os.path.join(input_path, proc), \"READ\")\n f_update = TFile.Open(os.path.join(input_path,'post', proc), \"RECREATE\")\n \n h_eventinfo = f_sample.Get(\"EventInfo\")\n h_scaleweights = f_sample.Get(\"ScaleWeights\")\n h_pdfweights = f_sample.Get(\"PDFWeights\")\n\n print(\"b-tag SF scaling...\")\n hist_list = [x.GetName() for x in f_sample.GetListOfKeys()]\n\n f_update.cd()\n for hist in hist_list:\n if any(i in hist for i in ['scale','ps','pdf','Info','Weight']): continue\n tmp = hist.split('__')[0]\n tmp = tmp.split('_')[-2]\n if not 'Ch2' in tmp:\n if 'S0' in hist:\n h_bSF = f_sample.Get('h_bSFinfo_'+tmp+'_S0')\n else:\n h_bSF = f_sample.Get('h_bSFinfo_'+tmp+'_S1')\n else:\n if 'S0' in hist:\n h_bSF = f_sample.Get('h_bSFinfo_Ch0_S0')\n h_bSF.Add(f_sample.Get('h_bSFinfo_Ch1_S0'))\n else:\n h_bSF = f_sample.Get('h_bSFinfo_Ch0_S0')\n h_bSF.Add(f_sample.Get('h_bSFinfo_Ch1_S0'))\n\n h_tmp = f_sample.Get(hist)\n h_tmp = bSFnorm(h_bSF, h_tmp)\n h_tmp.Write()\n\n print(\"Merge JER, JEC histograms...\")\n syst_jet = [\"jerup\", \"jerdown\", \"jecup\", \"jecdown\"]\n f_list = []\n for jet in syst_jet:\n f_list.append(TFile.Open(os.path.join(input_path, proc[:-5]+\"__\"+jet+\".root\")))\n\n f_update.cd()\n for hist in hist_list:\n if not any(i in hist for i in ['__', 'Info', 'Weight', 'bSF']):\n for index, value in enumerate(f_list):\n tmp = value.Get(hist+\"__\"+syst_jet[index])\n tmp.Write()\n\n if not isCP5:\n syst_external = ['tune', 'hdamp', 'isr', 'fsr']\n else:\n syst_external = ['tune', 'hdamp']\n\n if 'Filter' in proc: return \n\n if 'TT' in proc:\n print(\"Rescaling external samples...\")\n for syst in syst_external:\n for vari in ['up','down']:\n if isCP5:\n ext_name = proc[:-5]\n else:\n ext_name = proc.replace('TTLJ','TT')[:-5]\n \n tmp = syst+vari \n f_ext = TFile.Open(os.path.join(input_path, ext_name+'__'+tmp+'.root'), \"READ\")\n h_out = []\n h_out = rescale(f_ext, h_eventinfo)\n\n f_update.cd()\n for hist in h_out:\n if not any(i in hist.GetName() for i in ['Info', 'Weight']):\n hist.Write()\n\n 
print(\"Writing envelope...\")\n ps_list = [\"isrup\", \"fsrup\", \"isrdown\", \"fsrdown\"]\n \n if not isCP5:\n if not 'Bkg' in proc:\n ext_name = proc.replace('TTLJ','TT')[:-5]\n else:\n ext_name = proc[:-5]\n\n f_ps_list = []\n for ps in ps_list:\n f_ps_list.append(TFile.Open(os.path.join(input_path, ext_name+\"__\"+ps+\".root\"),\"READ\"))\n\n h_psweights = TH1D(\"PSweights\",\"\",4,0,4)\n for index, value in enumerate(ps_list):\n h_psweights.GetXaxis().SetBinLabel(index+1,value)\n tmp = (f_ps_list[index].Get(\"EventInfo\")).GetBinContent(2)\n h_psweights.SetBinContent(index+1,tmp)\n f_ps_list[index].Close()\n else:\n h_psweights = f_sample.Get(\"PSWeights\")\n\n for hist in hist_list:\n if any(i in hist for i in ['__', 'Info', 'Weight', 'bSF']): continue\n\n h_central = f_sample.Get(hist)\n \n h_sw_list = []\n for i in range(6):\n h_sw_list.append(f_sample.Get(hist+\"__scale\"+str(i)))\n \n h_pdf_list = []\n if year == 16 and not 'CP5' in proc: maxpdf = 102\n else: maxpdf = 104\n for i in range(maxpdf):\n h_pdf_list.append(f_sample.Get(hist+\"__pdf\"+str(i)))\n \n h_ps_list = []\n if year == 16:\n for ps in ps_list:\n h_ps_list.append(f_update.Get(hist+\"__\"+ps))\n else:\n for ps in ps_list:\n h_ps_list.append(f_sample.Get(hist+\"__\"+ps))\n \n f_update.cd()\n h_sw_new = []\n h_sw_new = write_envelope(\"sw\", h_central, h_sw_list, h_eventinfo, h_scaleweights)\n h_sw_new[0].Write()\n h_sw_new[1].Write()\n\n h_pdf_new = []\n h_pdf_new = write_envelope(\"pdf\", h_central, h_pdf_list, h_eventinfo, h_pdfweights)\n h_pdf_new[0].Write()\n h_pdf_new[1].Write() \n \n #h_ps_new = []\n #h_ps_new = write_envelope(\"ps\", h_central, h_ps_list, h_eventinfo, h_psweights)\n #h_ps_new[0].Write()\n #h_ps_new[1].Write()\n \n f_update.cd()\n evtinfo = f_sample.Get(\"EventInfo\")\n evtinfo.Write()\n f_update.Close()\n f_sample.Close()\n print(\"End Process \"+str(os.getpid())+\" \"+str(proc))\n","repo_name":"Somhammer/ttbbRun2","sub_path":"python/postProcess.py","file_name":"postProcess.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33786040923","text":"scriptVersion = \"0.9\"\n\n# This script is the main entrypoint for validation code. To add a validation\n# to this process, subclass Import4Validation in the validations library and\n# add it to the validation library's `__all__` attribute\n\nfrom . import setupLogger\nfrom validations import *\n\nfrom argparse import ArgumentParser\nimport sys\nimport logging\n\nlog = setupLogger(__name__)\n\n\nclass ValidationRunner(object):\n \"\"\"\n This class is the driver for the parsed/chosen validation. 
The main()\n method of this module will instantiate a single runner and then call its\n 'run' method, which will return an integer exit code\n \"\"\"\n\n def __init__(self, argz):\n self.vTask = argz.clazz()\n self.argz = argz\n\n def run(self):\n try:\n log.info(\"Starting validation %s\", self.vTask.__class__.__name__)\n self.vTask.validate(self.argz)\n log.info(\"Validation successful\")\n return 0\n except ValidationException:\n log.error(\"Validation failed\")\n except Exception as e:\n log.exception(e)\n return 1\n\ndef setup_parser(validationSubs):\n parser = ArgumentParser(\n description=\"Validation tool used to run validations prior to \" +\n \"exporting data for a version 5 import\"\n )\n subparsers = parser.add_subparsers()\n for validationSub in validationSubs:\n validationSub.add_parser(subparsers)\n return parser\n\ndef parse_argz(args=None):\n parser = setup_parser(type.__subclasses__(Import4Validation))\n return parser.parse_args(args)\n\ndef main():\n argz = parse_argz()\n runner = ValidationRunner(argz)\n sys.exit(runner.run())\n\nif __name__ == '__main__':\n main()\n","repo_name":"mrogerszenoss/zenoss.toolbox","sub_path":"src/zenoss/toolbox/import4tools/validate4import.py","file_name":"validate4import.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"25664739829","text":"import typing\nfrom functools import partial\n\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QFrame, QLabel, QPushButton\n\nfrom models import init_model, PaletteModel\nfrom tools import resource_path\n\n\nclass WPalette(QFrame):\n @init_model(PaletteModel)\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n uic.loadUi(resource_path(\"ui/palette.ui\"), self)\n\n self._label_code = typing.cast(QLabel, self.findChild(QLabel, \"labelCode\"))\n\n def on_code_change(value: int, _):\n self._label_code.setStyleSheet(f\"color:#{self.model.selected}\")\n self._label_code.setText(f\"{value:02X}\")\n\n def on_palette_change(_, __):\n for i in range(16):\n for j in range(0, 15, 2):\n ij = f\"{i:01X}{j:01X}\"\n code = int(ij, 16)\n color = self.model.color(code)\n\n btn: QPushButton = typing.cast(\n QPushButton, self.findChild(QPushButton, f\"pushButtonColor{ij}\")\n )\n\n btn.setStyleSheet(\n f\"background-color:#{color}; border-style: none; border-radius: 10px;\"\n )\n\n def set_code(value: int):\n self.model.code.value = value\n\n btn.clicked.connect(partial(set_code, code))\n\n on_code_change(self.model.code.value, None)\n\n self.model.color_mapping.observe(on_palette_change)\n self.model.code.observe(on_code_change)\n\n on_palette_change(None, None)\n","repo_name":"moshenahmias/pppp","sub_path":"widgets/palette.py","file_name":"palette.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"13968574142","text":"import pickle\nfrom data_utils5 import *\n\ntext = file_contents()\ndatabase = meta_math_database(text,n=100000, remember_proof_steps=True)\nlanguage_model = LanguageModel(database)\n# forget the training proof steps, freeing up about 10G of memory\nlanguage_model.training_proof_steps = None\nlanguage_model.test_proof_steps = None\nlanguage_model.validation_proof_steps = None\nlanguage_model.all_proof_steps = None\nfor p in language_model.database.propositions_list:\n p.entails_proof_steps = None\nwith open('lm', 'wb') as handle:\n pickle.dump(database, 
handle)","repo_name":"dwhalen/holophrasm","sub_path":"save_language_model.py","file_name":"save_language_model.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"53"} +{"seq_id":"70350554087","text":"import uuid\nfrom pathlib import Path\n\nimport pydash\nimport pytest\n\nfrom promptflow._sdk._constants import SCRUBBED_VALUE, CustomStrongTypeConnectionConfigs\nfrom promptflow._sdk._pf_client import PFClient\nfrom promptflow._sdk.entities import CustomStrongTypeConnection\nfrom promptflow.contracts.types import Secret\n\n\nclass MyCustomConnection(CustomStrongTypeConnection):\n api_key: Secret\n api_base: str\n\n\n_client = PFClient()\n\nTEST_ROOT = Path(__file__).parent.parent.parent\nCONNECTION_ROOT = TEST_ROOT / \"test_configs/connections\"\n\n\n@pytest.mark.cli_test\n@pytest.mark.e2etest\nclass TestCustomStrongTypeConnection:\n def test_connection_operations(self):\n name = f\"Connection_{str(uuid.uuid4())[:4]}\"\n conn = MyCustomConnection(name=name, secrets={\"api_key\": \"test\"}, configs={\"api_base\": \"test\"})\n # Create\n _client.connections.create_or_update(conn)\n # Get\n result = _client.connections.get(name)\n assert pydash.omit(result._to_dict(), [\"created_date\", \"last_modified_date\", \"name\"]) == {\n \"module\": \"promptflow.connections\",\n \"type\": \"custom\",\n \"configs\": {\n \"api_base\": \"test\",\n \"promptflow.connection.custom_type\": \"MyCustomConnection\",\n \"promptflow.connection.module\": \"sdk_cli_test.e2etests.test_custom_strong_type_connection\",\n },\n \"secrets\": {\"api_key\": \"******\"},\n }\n # Update\n conn.configs[\"api_base\"] = \"test2\"\n result = _client.connections.create_or_update(conn)\n assert pydash.omit(result._to_dict(), [\"created_date\", \"last_modified_date\", \"name\"]) == {\n \"module\": \"promptflow.connections\",\n \"type\": \"custom\",\n \"configs\": {\n \"api_base\": \"test2\",\n \"promptflow.connection.custom_type\": \"MyCustomConnection\",\n \"promptflow.connection.module\": \"sdk_cli_test.e2etests.test_custom_strong_type_connection\",\n },\n \"secrets\": {\"api_key\": \"******\"},\n }\n # List\n result = _client.connections.list()\n assert len(result) > 0\n # Delete\n _client.connections.delete(name)\n with pytest.raises(Exception) as e:\n _client.connections.get(name)\n assert \"is not found.\" in str(e.value)\n\n def test_connection_update(self):\n name = f\"Connection_{str(uuid.uuid4())[:4]}\"\n conn = MyCustomConnection(name=name, secrets={\"api_key\": \"test\"}, configs={\"api_base\": \"test\"})\n # Create\n _client.connections.create_or_update(conn)\n # Get\n custom_conn = _client.connections.get(name)\n assert pydash.omit(custom_conn._to_dict(), [\"created_date\", \"last_modified_date\", \"name\"]) == {\n \"module\": \"promptflow.connections\",\n \"type\": \"custom\",\n \"configs\": {\n \"api_base\": \"test\",\n \"promptflow.connection.custom_type\": \"MyCustomConnection\",\n \"promptflow.connection.module\": \"sdk_cli_test.e2etests.test_custom_strong_type_connection\",\n },\n \"secrets\": {\"api_key\": \"******\"},\n }\n # Update\n custom_conn.configs[\"api_base\"] = \"test2\"\n result = _client.connections.create_or_update(custom_conn)\n assert pydash.omit(result._to_dict(), [\"created_date\", \"last_modified_date\", \"name\"]) == {\n \"module\": \"promptflow.connections\",\n \"type\": \"custom\",\n \"configs\": {\n \"api_base\": \"test2\",\n \"promptflow.connection.custom_type\": \"MyCustomConnection\",\n 
\"promptflow.connection.module\": \"sdk_cli_test.e2etests.test_custom_strong_type_connection\",\n },\n \"secrets\": {\"api_key\": \"******\"},\n }\n # List\n result = _client.connections.list()\n assert len(result) > 0\n # Delete\n _client.connections.delete(name)\n with pytest.raises(Exception) as e:\n _client.connections.get(name)\n assert \"is not found.\" in str(e.value)\n\n def test_connection_get_and_update(self):\n # Test api key not updated\n name = f\"Connection_{str(uuid.uuid4())[:4]}\"\n conn = MyCustomConnection(name=name, secrets={\"api_key\": \"test\"}, configs={\"api_base\": \"test\"})\n result = _client.connections.create_or_update(conn)\n assert result.secrets[\"api_key\"] == SCRUBBED_VALUE\n # Update api_base only Assert no exception\n result.configs[\"api_base\"] = \"test2\"\n result = _client.connections.create_or_update(result)\n assert result._to_dict()[\"configs\"][\"api_base\"] == \"test2\"\n # Assert value not scrubbed\n assert result._secrets[\"api_key\"] == \"test\"\n _client.connections.delete(name)\n # Invalid update\n with pytest.raises(Exception) as e:\n result._secrets = {}\n _client.connections.create_or_update(result)\n assert \"secrets ['api_key'] value invalid, please fill them\" in str(e.value)\n\n def test_connection_get_and_update_with_key(self):\n # Test api key not updated\n name = f\"Connection_{str(uuid.uuid4())[:4]}\"\n conn = MyCustomConnection(name=name, secrets={\"api_key\": \"test\"}, configs={\"api_base\": \"test\"})\n assert conn.api_base == \"test\"\n assert conn.configs[\"api_base\"] == \"test\"\n\n result = _client.connections.create_or_update(conn)\n converted_conn = result._convert_to_custom_strong_type()\n\n assert converted_conn.api_base == \"test\"\n converted_conn.api_base = \"test2\"\n assert converted_conn.api_base == \"test2\"\n assert converted_conn.configs[\"api_base\"] == \"test2\"\n\n @pytest.mark.parametrize(\n \"file_name, expected_updated_item, expected_secret_item\",\n [\n (\"custom_strong_type_connection.yaml\", (\"api_base\", \"new_value\"), (\"api_key\", \"\")),\n ],\n )\n def test_upsert_connection_from_file(\n self, install_custom_tool_pkg, file_name, expected_updated_item, expected_secret_item\n ):\n from promptflow._cli._pf._connection import _upsert_connection_from_file\n\n name = f\"Connection_{str(uuid.uuid4())[:4]}\"\n result = _upsert_connection_from_file(file=CONNECTION_ROOT / file_name, params_override=[{\"name\": name}])\n assert result is not None\n assert result.configs[CustomStrongTypeConnectionConfigs.PROMPTFLOW_MODULE_KEY] == \"my_tool_package.connections\"\n update_file_name = f\"update_{file_name}\"\n result = _upsert_connection_from_file(file=CONNECTION_ROOT / update_file_name, params_override=[{\"name\": name}])\n # Test secrets not updated, and configs updated\n assert (\n result.configs[expected_updated_item[0]] == expected_updated_item[1]\n ), \"Assert configs updated failed, expected: {}, actual: {}\".format(\n expected_updated_item[1], result.configs[expected_updated_item[0]]\n )\n assert (\n result._secrets[expected_secret_item[0]] == expected_secret_item[1]\n ), \"Assert secrets not updated failed, expected: {}, actual: {}\".format(\n expected_secret_item[1], result._secrets[expected_secret_item[0]]\n 
)\n","repo_name":"Indie365/promptflow","sub_path":"src/promptflow/tests/sdk_cli_test/e2etests/test_custom_strong_type_connection.py","file_name":"test_custom_strong_type_connection.py","file_ext":"py","file_size_in_byte":7229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13756195951","text":"import sys\n\ninfile = open(sys.argv[1] + \".in\", \"r\")\noutfile = open(sys.argv[1] + \".out\", \"w\")\n\ndef nextTestCase(i):\n words = next(infile).rstrip().split(\" \")\n words.reverse()\n outfile.write(\"Case #%s: %s\\n\" % (i+1, \" \".join(words)))\n\n\nN = int(next(infile))\nfor i in range(0, N):\n nextTestCase(i)\n","repo_name":"owidder/gcj-practice","sub_path":"reverse_words/reverse-words.py","file_name":"reverse-words.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39216838820","text":"file_path ='pi_digits.txt'\n\nwith open(file_path) as file_object:\n lines = file_object.readlines()\n print(lines)\nfor line in lines:\n print(line.rstrip())\n for i in file_object:\n print(i.rstrip())\n contents = file_object.read()\n print(contents.rstrip())\n\nwith open(file_path) as file_object:\n lines = file_object.readlines()\n\npi_string = ''\nfor line in lines:\n pi_string += line.rstrip()\nprint(pi_string)\nprint(len(pi_string))\n\nwith open(file_path) as file_object:\n for a in file_object:\n print(a.rstrip())\n\nwith open(file_path) as file_object:\n lines = file_object.readlines()\n\nprint(lines)\nfor line in lines:\n print(line.rstrip())\n\npi_string = ''\n\nfor line in lines:\n pi_string += line.strip()\n\nbirthday = input(\"Enter your birthday in a format mmddyy: \")\nif birthday in pi_string:\n print(\"Your birthday is in the first million digit of pi\")\n print(birthday)\nelse:\n print(\"Your birthday does not appear in the pi digits\")\nprint(pi_string[:52] + \".....\")\nprint(len(pi_string))\nfile_path = (\"programming.txt\")\nwith open(file_path, 'w') as file_object:\n file_object.write(\"I love programming \\n\")\n file_object.write(\"I love programming and sport \")\n\nfile_path = (\"programming2.txt\")\nwith open(file_path, 'a') as file_object:\n file_object.write(\"I love programming \\n\")\n file_object.write(\"I love programming and sport \")\n file_object.write(\"I love building games \")\n file_object.write(\"\\nI love eating water mellon \")\n\ntry:\n print(5/0)\nexcept ZeroDivisionError:\n print(\"You cannot divide by Zero\")\n\nprint(\"Give me two numbers and I will divide them for you\")\nprint(\"Insert Q/q to quit\")\nwhile True:\n first_number = input(\"Insert the first number: \")\n if first_number == 'q' or first_number == 'Q':\n break\n second_number = input(\"Insert the second number: \")\n if second_number == 'q' or second_number== 'Q':\n break\n try:\n answer = int(first_number)/int(second_number)\n except ZeroDivisionError:\n print(\"You cannot divide by Zero\")\n else:\n print(answer)\nfilepath = 'location.txt'\ntry:\n with open(filepath) as file_object:\n contents = file_object.read()\nexcept FileNotFoundError:\n message = 'Sorry, the filer ' + filepath + ' does not exist'\n print(message)\n\ntitle = \"Alice in Wonderland\"\na = title.split()\nprint(a)\n\nfilename = 'Alice.txt'\ntry:\n with open(filename) as file_object:\n content = file_object.read()\nexcept FileNotFoundError:\n mesg = 'Sorry, we could not find ' + filename\n print(mesg)\nelse:\n count = content.split()\n count_words = 
len(count)\n print(\"The file \" + filename + ' contains about ' + str(count_words) + ' words' )\ndef count_words(filename):\n try:\n with open(filename, encoding=\"utf8\") as file_object:\n content = file_object.read()\n except FileNotFoundError:\n # message = 'Sorry, the file ' + filename + ' does not exist'\n # print(message)\n pass\n except UnicodeDecodeError:\n print(\"could not decode file\" + filename)\n except FileExistsError:\n pass\n else:\n count = content.split()\n count_words = len(count)\n print(\"The file \" + filename + ' has about ' + str(count_words) + ' words')\n\nfilename = ['Alice.txt', 'film1.txt','film12.txt','film3.txt']\nfor i in filename:\n count_words(i)\n","repo_name":"A-khateeb/Full-Stack-Development-Path","sub_path":"Python/PCC/Chapter10/FilesExcptions.py","file_name":"FilesExcptions.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9715479704","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom tests.mockrequest import MockParameter\n\n\n@pytest.fixture\ndef real_estate_data(dbsession, transact):\n from pyramid_oereb.contrib.data_sources.standard.models.main import RealEstate\n del transact\n real_estates = [\n RealEstate(**{\n 'id': '1',\n 'egrid': u'TEST',\n 'number': u'1000',\n 'identdn': u'BLTEST',\n 'type': u'RealEstate',\n 'canton': u'BL',\n 'municipality': u'Liestal',\n 'fosnr': 1234,\n 'land_registry_area': 4,\n 'limit': 'SRID=2056;MULTIPOLYGON(((0 0, 0 2, 2 2, 2 0, 0 0)))'\n })\n ]\n dbsession.add_all(real_estates)\n dbsession.flush()\n yield real_estates\n\n\n@pytest.mark.run(order=2)\ndef test_init(pyramid_oereb_test_config):\n from pyramid_oereb.contrib.data_sources.standard.models.main import RealEstate\n from pyramid_oereb.contrib.data_sources.standard.sources.real_estate import DatabaseSource\n from pyramid_oereb.core.adapter import DatabaseAdapter\n\n source = DatabaseSource(**pyramid_oereb_test_config.get_real_estate_config().get('source').get('params'))\n assert isinstance(source._adapter_, DatabaseAdapter)\n assert source._model_ == RealEstate\n\n\n@pytest.mark.run(order=2)\n@pytest.mark.parametrize(\"param\", [\n {'nb_ident': 'BLTEST', 'number': '1000'},\n {'egrid': 'TEST'},\n {'geometry': 'SRID=2056;POINT(1 1)'}\n])\ndef test_read(pyramid_oereb_test_config, real_estate_data, param):\n from pyramid_oereb.contrib.data_sources.standard.sources.real_estate import DatabaseSource\n from pyramid_oereb.core.records.real_estate import RealEstateRecord\n\n source = DatabaseSource(**pyramid_oereb_test_config.get_real_estate_config().get('source').get('params'))\n source.read(MockParameter(), **param)\n assert len(source.records) == len(real_estate_data)\n record = source.records[0]\n assert isinstance(record, RealEstateRecord)\n assert record.fosnr == real_estate_data[0].fosnr\n\n\n@pytest.mark.run(order=2)\ndef test_missing_parameter(pyramid_oereb_test_config):\n from pyramid_oereb.contrib.data_sources.standard.sources.real_estate import DatabaseSource\n\n source = DatabaseSource(**pyramid_oereb_test_config.get_real_estate_config().get('source').get('params'))\n with pytest.raises(AttributeError):\n source.read(MockParameter())\n","repo_name":"openoereb/pyramid_oereb","sub_path":"tests/core/sources/test_real_estate_database.py","file_name":"test_real_estate_database.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} 
+{"seq_id":"18208888394","text":"import sys\nimport os\n\nimport xml.etree.ElementTree as ET\n\nxml_config_path = \"3000_DSG_CanCcp.xml\"\n\ndef open_config_xml():\n print(\"Loading XML config file...\")\n chan_dict = {}\n\n curr_ecu = \"\"\n curr_eng = \"\"\n\n try:\n if(os.path.exists(xml_config_path)):\n tree = ET.parse(xml_config_path)\n root = tree.getroot()\n for iter_node in root.iter():\n if(iter_node.tag == \"ECU\"):\n curr_ecu = iter_node.attrib[\"ecuPrefix\"].replace(' ', '')\n curr_eng = iter_node.attrib[\"engPrefix\"].replace(' ', '')\n elif(iter_node.tag == \"Measurement\"):\n meas_name = \"\"\n meas_horusid = \"\"\n # Loop over HorusID, Name, Units, Description, DataType\n for child_node in iter_node.iter():\n if(child_node.tag == \"Name\"):\n meas_name = child_node.text\n elif(child_node.tag == \"HorusID\"):\n meas_horusid = child_node.text\n\n chan_dict[meas_horusid] = meas_name + \"_\" + curr_ecu + \"_\" + curr_eng\n \n print(chan_dict)\n \n except Exception as exc:\n print(str(exc))\n\n\nif __name__ == \"__main__\":\n open_config_xml()","repo_name":"alexbillingham/AutomationsAndScripts","sub_path":"Python/HorusIDGrabber/XMLCheck.py","file_name":"XMLCheck.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40805813710","text":"import pandas as pd\nimport numpy as np\n\nimport pandas as pd\n\n# merging two df by key/keys. (may be used in database)\n# simple example\nleft = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']})\nright = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\n\n# print(left)\n# print(right)\n\nres = pd.merge(left,right,on='key')#按照key的元素合并\n# print(res)\n\n\n\n\n#如果有两个key\nleft = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],\n 'key2': ['K0', 'K1', 'K0', 'K1'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']})\nright = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],\n 'key2': ['K0', 'K0', 'K0', 'K0'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\n# print(left)\n# print(right)\n#默认key1,key2元素相等时候合并\nres = pd.merge(left,right,on=['key1','key2'],how='inner')#how = outer,inner,left,right\n# print(res)\n\n\n\n\n\n\n# indicator ---显示合并的状态\ndf1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']})\ndf2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]})\nprint(df1)\nprint(df2)\nres = pd.merge(df1, df2, on='col1', how='outer', indicator=True)\n#给监视器显示自定义名字\nres = pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column')\n\n\n\n\n# merged by index---最左边的序列号\nleft = pd.DataFrame({'A': ['A0', 'A1', 'A2'],\n 'B': ['B0', 'B1', 'B2']},\n index=['K0', 'K1', 'K2'])\nright = pd.DataFrame({'C': ['C0', 'C2', 'C3'],\n 'D': ['D0', 'D2', 'D3']},\n index=['K0', 'K2', 'K3'])\nprint(left)\nprint(right)\n# left_index and right_index\nres = pd.merge(left, right, left_index=True, right_index=True, how='outer')\nres = pd.merge(left, right, left_index=True, right_index=True, how='inner')\n\n\n\n# handle overlapping\nboys = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'age': [1, 2, 3]})\ngirls = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'age': [4, 5, 6]})\nres = pd.merge(boys, girls, on='k', suffixes=['_boy', '_girl'], 
how='inner')\nprint(res)\n","repo_name":"jainszhang/LearnDM","sub_path":"ML/pandas_numpy/pandas/pandas7-merge.py","file_name":"pandas7-merge.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6854115939","text":"import os\n\n# import pyodbc\nimport pymssql\n\nimport class_settings\n\n\nclass MSSQLConnection:\n def __init__(self, settings_class):\n self.__connected = False\n if type(settings_class) == class_settings.Settings:\n self.__server = settings_class.param(\"server\")\n self.__database = settings_class.param(\"database\")\n self.__login = settings_class.param(\"login\")\n self.__password = settings_class.param(\"password\")\n self.__driver = \"{SQL Server Native Client 11.0}\"\n else:\n Exception(\"Некорректный класс настроек!\")\n try:\n self.__connection = pymssql.connect(self.__server, self.__login, self.__password, self.__database)\n self.__connected = True\n except Exception as E:\n print(f\"Исключительная ситуация при подключении к БД: {E}\")\n self.__connected = False\n self.__connection = None\n\n @property\n def connection(self):\n return self.__connection\n\n @property\n def connected(self):\n return self.__connected\n\n def execute(self, query, params=()):\n if self.connection is not None:\n try:\n if len(params) > 0:\n self.connection.cursor().execute(query, params)\n self.connection.commit()\n else:\n self.connection.cursor().execute(query)\n self.connection.commit()\n return True\n except Exception as E:\n print(f\"Исключительная ситуация (execute): {E}\")\n return False\n else:\n return False\n\n def select(self, query, params=()):\n if self.connection is not None:\n try:\n cursor = self.connection.cursor()\n if len(params) > 0:\n cursor.execute(query, params)\n else:\n cursor.execute(query)\n for row in cursor:\n yield [row[el] for el in range(0, len(row))]\n except Exception as E:\n print(f'Исключительная ситуация (select): {E}')\n return ()\n\n @classmethod\n def file_to_binary_data(cls, filename, delete_after_load=True):\n try:\n with open(filename, \"rb\") as f:\n binary_data = f.read()\n if delete_after_load:\n os.remove(filename)\n return binary_data\n except Exception as E:\n print(f\"Исключительная ситуация при загрузке данных из файла: {E}\")\n return 0\n","repo_name":"MyEternityOrg/ofd_evotor","sub_path":"class_mssql.py","file_name":"class_mssql.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28728795084","text":"import os\nimport cv2\nimport numpy as np\nimport json\nfrom collections import OrderedDict\n\nclass AnnotationChange(object):\n def __init__(self, txt_dir = \"FDDB\\\\FDDB-folds\", img_dir = \"FDDB\", new_img_dir = \"data\\\\image\"):\n self.txt_dir = txt_dir\n self.img_dir = img_dir\n self.new_img_dir = new_img_dir\n self._directory_path()\n self.json_list = []\n\n def _directory_path(self):\n \"\"\"\n If directories for saving images and annotations, make it\n \"\"\"\n if not os.path.isdir(self.new_img_dir) : os.mkdir(self.new_img_dir)\n if not os.path.isdir(os.path.join(self.new_img_dir, \"train\")) : os.mkdir(os.path.join(self.new_img_dir, \"train\"))\n if not os.path.isdir(os.path.join(self.new_img_dir, \"test\")) : os.mkdir(os.path.join(self.new_img_dir, \"test\"))\n\n def _save_annotation(self, purpose):\n \"\"\"\n Save annotaion with different purpose\n \"\"\"\n with open('data\\\\annotation_{}.json'.format(purpose), 'w', 
encoding='utf-8') as make_file:\n json.dump(self.json_list, make_file, ensure_ascii= False, indent= 4)\n self.json_list.clear()\n\n def __call__(self):\n \"\"\"\n This changes FDDB annotations to the Yolo annotation format\n \"\"\"\n train_number = 1\n test_number = 1\n for cur_file in os.listdir(self.txt_dir):\n if(cur_file.split(\".\")[0].split(\"-\")[-1] == \"ellipseList\"):\n with open(os.path.join(self.txt_dir, cur_file), 'r') as txt_file:\n while(True):\n line = txt_file.readline().rstrip(\"\\n\")\n if not line : break\n\n # Image load\n img_folder_ = line.split(\"/\") \n img_dir_ = \\\n self.img_dir + \"\\\\\" + img_folder_[0] + \"\\\\\" + img_folder_[1] + \"\\\\\" + img_folder_[2] + \"\\\\\" + img_folder_[3] + \"\\\\\" + img_folder_[4] + \".jpg\"\n img = cv2.imread(img_dir_, cv2.IMREAD_COLOR)\n \n if(train_number > 2500) :\n image_name = \"IMG_{}.jpg\".format(test_number)\n cv2.imwrite(os.path.join(self.new_img_dir, \"test\", image_name), img)\n test_number = test_number + 1\n\n else :\n image_name = \"IMG_{}.jpg\".format(train_number)\n cv2.imwrite(os.path.join(self.new_img_dir, \"train\", image_name), img)\n train_number = train_number + 1\n \n object_num = int(txt_file.readline().rstrip(\"\\n\"))\n file_data = OrderedDict()\n object_data = OrderedDict()\n file_data[\"Img_id\"] = image_name.split('.')[0]\n file_data[\"Objects\"] = []\n\n #Do \"for\" operation as much as the number of objects\n for i in range(object_num):\n coord_ = txt_file.readline().split(\" \")\n rad = abs(float(coord_[2]))\n h = float(coord_[0])*np.sin(rad)\n w = float(coord_[1])*np.sin(rad)\n object_data[\"Object_class\"] = \"Face\"\n object_data[\"Coordinate\"] = { \"Xmin\" : float(coord_[3]) - w, \"Ymin\" : float(coord_[4]) - h, \"Xmax\" : float(coord_[3]) + w, \"Ymax\" : float(coord_[4]) + h }\n file_data[\"Objects\"].append(object_data)\n self.json_list.append(file_data)\n\n # Save train annotation\n if(train_number == 2501):\n self._save_annotation(\"train\")\n train_number = train_number + 1\n self._save_annotation(\"test\")\n\nif __name__ == \"__main__\":\n anno_change = AnnotationChange()\n anno_change()","repo_name":"Natural-Goldfish/FaceDetection-YOLOv2","sub_path":"FaceDetection/annotation_change.py","file_name":"annotation_change.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38733556108","text":"def example1():\n for i in range(3):\n x = int(input(\"enter a number: \"))\n y = int(input(\"enter another number: \"))\n try:\n print(x, '/', y, '=', x / y)\n except ZeroDivisionError:\n print('Cant Divide by 0')\n\n\ndef example2(L):\n print(\"\\n\\nExample 2\")\n summary = 0\n sumOfPairs = []\n try:\n for i in range(len(L)):\n sumOfPairs.append(L[i] + L[i + 1])\n except IndexError:\n print('List index out of range')\n except TypeError:\n print('one of elements is wrong type')\n\n print(\"sumOfPairs = \", sumOfPairs)\n\n\ndef print_upper_file(fileName):\n try:\n file = open(fileName, \"r\")\n except FileNotFoundError:\n print('there is no such h file in this directory')\n else:\n print('else')\n finally:\n print('finally')\n\n\n\n\n try:\n for line in file:\n print(line.upper())\n file.close()\n except UnboundLocalError:\n print('file is not associated with value')\n\n\ndef main():\n example1()\n L = [10, 3, 5, 6, 9, 3]\n example2(L)\n example2([10, 3, 5, 6, \"NA\", 3])\n\n print_upper_file(\"doesNotExistYest.txt\")\n print_upper_file(\"./Dessssktop/misspelled.txt\")\n\n\nif __name__ == 
'__main__':\n main()\n\n","repo_name":"KDebowiec/DevsMentoring","sub_path":"rozszerzenie/Exceptions/Exc2.py","file_name":"Exc2.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25388619437","text":"#!/bin/env python\n\n\"\"\"\nA warhammer 40,000 army list calculator. You feed it a list of units with\ntheir wargear options and get back a html page with a breakdown of the costs\nand force organisation chart. Units are output in the form of quick reference\ncards that can be printed for convenience.\n\nInput lists are expressed in YAML so that they are easy to read and write by\nhand. They go in the 'lists' subdirectory.\n\nData for models, formations and weapons are stored in .csv files in the 'data'\nsubdirectory.\n\nWhen you run this script, a 'docs' subdirectory will be created containing\npages for each army list in 'lists'.\n\"\"\"\n\nimport shutil\nimport os\n\nfrom cogitator.database import read_armies, Database\nfrom cogitator.writers.army import ArmyWriter\nfrom cogitator.writers.armyheader import ArmyHeaderWriter\nfrom cogitator.output import Outfile\n\n\ndef main():\n\n # Make sure we're in the right place.\n directory = os.path.dirname(__file__)\n if len(directory) > 0:\n os.chdir(directory)\n\n # Read in the data.\n forty_k = Database(\"40k\", \"data\")\n kill_team = Database(\"Kill Team\", \"data\")\n\n # The army lists.\n armies = read_armies(\"lists\")\n\n # Create / clean the directory structure.\n if not os.path.exists(\"docs\"):\n os.mkdir(\"docs\")\n if os.path.exists(\"docs/lists\"):\n shutil.rmtree(\"docs/lists\")\n if os.path.exists(\"docs/index.html\"):\n os.remove(\"docs/index.html\")\n\n # Write out each army and list it in the index file.\n os.chdir(\"docs\")\n os.mkdir(\"lists\")\n shutil.copytree(\"../lists/images\", \"lists/images\")\n with open(\"index.html\", \"w\") as f:\n outfile = Outfile(f)\n outfile.start_tag(\"html\")\n outfile.start_tag(\"head\")\n outfile.content(\"\")\n outfile.end_tag()\n outfile.start_tag(\"body\")\n outfile.content(\"

Army Lists
\")\n for army in armies:\n database = kill_team if army[\"Game\"] == \"Kill Team\" else forty_k\n armywriter = ArmyWriter(database)\n armyheaderwriter = ArmyHeaderWriter(database)\n variants = get_variants(\"lists\", army)\n for variant in variants:\n filename = variant[\"filename\"]\n sections = variant[\"sections\"]\n assert not os.path.exists(filename)\n with open(filename, \"w\") as f2:\n outfile2 = Outfile(f2)\n armywriter.write_army(outfile2, army, sections)\n armyheaderwriter.write_army_header(outfile, army, variants)\n outfile.end_tag() # body\n outfile.end_tag() # html\n\n\ndef get_variants(out_dir, army):\n \"\"\"\n Get the variants of an army list to write.\n :param out_dir: Location of output files.\n :param army: Army definition\n :return: List of variant maps.\n \"\"\"\n variants = [\n {\n \"name\": \"full\",\n \"filename\": os.path.join(out_dir, army[\"Basename\"] + \".html\"),\n \"sections\": []\n },\n {\n \"name\": \"cards\",\n \"filename\": os.path.join(out_dir,\n army[\"Basename\"] + \"_cards.html\"),\n \"sections\": [\"units\"]\n },\n {\n \"name\": \"appendices\",\n \"filename\": os.path.join(out_dir,\n army[\"Basename\"] + \"_appendices.html\"),\n \"sections\": [\"header\", \"appendices\"]\n }\n ]\n return variants\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nathanrw/40k_army_lists","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20614442371","text":"import random\nimport keras_preprocessing.image\n\nfrom nvidia_tao_tf1.cv.makenet.utils.helper import color_augmentation\n\n# padding size.\n# We firstly resize to (target_width + CROP_PADDING, target_height + CROP_PADDING)\n# , then crop to (target_width, target_height).\n# for standard ImageNet size: 224x224 the ratio is 0.875(224 / (224 + 32)).\n# but for EfficientNet B1-B7, larger resolution is used, hence this ratio\n# is no longer 0.875\n# ref:\n# https://github.com/tensorflow/tpu/blob/r1.15/models/official/efficientnet/preprocessing.py#L110\nCROP_PADDING = 32\nCOLOR_AUGMENTATION = False\n\n\ndef _set_color_augmentation(flag):\n global COLOR_AUGMENTATION # pylint: disable=global-statement\n COLOR_AUGMENTATION = flag\n\n\ndef load_and_crop_img(path, grayscale=False, color_mode='rgb', target_size=None,\n interpolation='nearest'):\n \"\"\"Wraps keras_preprocessing.image.utils.load_img() and adds cropping.\n\n Cropping method enumarated in interpolation\n # Arguments\n path: Path to image file.\n color_mode: One of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\".\n The desired image format.\n target_size: Either `None` (default to original size)\n or tuple of ints `(img_height, img_width)`.\n interpolation: Interpolation and crop methods used to resample and crop the image\n if the target size is different from that of the loaded image.\n Methods are delimited by \":\" where first part is interpolation and second is crop\n e.g. \"lanczos:random\".\n Supported interpolation methods are \"nearest\", \"bilinear\", \"bicubic\", \"lanczos\",\n \"box\", \"hamming\" By default, \"nearest\" is used.\n Supported crop methods are \"none\", \"center\", \"random\".\n # Returns\n A PIL Image instance.\n # Raises\n ImportError: if PIL is not available.\n ValueError: if interpolation method is not supported.\n \"\"\"\n\n # Decode interpolation string. 
Allowed Crop methods: none, center, random\n interpolation, crop = interpolation.split(\":\") \\\n if \":\" in interpolation else (interpolation, \"none\")\n\n if crop == \"none\":\n return keras_preprocessing.image.utils.load_img(\n path,\n grayscale=grayscale,\n color_mode=color_mode,\n target_size=target_size,\n interpolation=interpolation)\n\n # Load original size image using Keras\n img = keras_preprocessing.image.utils.load_img(\n path,\n grayscale=grayscale,\n color_mode=color_mode,\n target_size=None,\n interpolation=interpolation)\n\n # Crop fraction of total image\n target_width = target_size[1]\n target_height = target_size[0]\n\n if target_size is not None:\n if img.size != (target_width, target_height):\n\n if crop not in [\"center\", \"random\"]:\n raise ValueError('Invalid crop method %s specified.' % crop)\n\n if interpolation not in keras_preprocessing.image.utils._PIL_INTERPOLATION_METHODS:\n raise ValueError(\n 'Invalid interpolation method {} specified. Supported '\n 'methods are {}'.format(\n interpolation,\n \", \".join(\n keras_preprocessing.image.utils._PIL_INTERPOLATION_METHODS.keys())))\n\n resample = keras_preprocessing.image.utils._PIL_INTERPOLATION_METHODS[interpolation]\n\n width, height = img.size\n\n if crop == 'random':\n # Resize keeping aspect ratio\n # result should be no smaller than the targer size, include crop fraction overhead\n crop_fraction = random.uniform(0.45, 1.0)\n target_size_before_crop = (\n target_width / crop_fraction,\n target_height / crop_fraction\n )\n ratio = max(\n target_size_before_crop[0] / width,\n target_size_before_crop[1] / height\n )\n target_size_before_crop_keep_ratio = int(width * ratio), int(height * ratio)\n img = img.resize(target_size_before_crop_keep_ratio, resample=resample)\n\n if crop == 'center':\n # Resize keeping aspect ratio\n # result should be no smaller than the targer size, include crop fraction overhead\n target_size_before_crop = (\n target_width + CROP_PADDING,\n target_height + CROP_PADDING\n )\n ratio = max(\n target_size_before_crop[0] / width,\n target_size_before_crop[1] / height\n )\n target_size_before_crop_keep_ratio = int(width * ratio), int(height * ratio)\n img = img.resize(target_size_before_crop_keep_ratio, resample=resample)\n\n width, height = img.size\n\n if crop == \"center\":\n left_corner = int(round(width/2)) - int(round(target_width/2))\n top_corner = int(round(height/2)) - int(round(target_height/2))\n return img.crop(\n (left_corner,\n top_corner,\n left_corner + target_width,\n top_corner + target_height))\n if crop == \"random\":\n # random crop\n left_shift = random.randint(0, int((width - target_width)))\n down_shift = random.randint(0, int((height - target_height)))\n img = img.crop(\n (left_shift,\n down_shift,\n target_width + left_shift,\n target_height + down_shift))\n # color augmentation\n if COLOR_AUGMENTATION and img.mode == \"RGB\":\n return color_augmentation(img)\n return img\n raise ValueError(\"Crop mode not supported.\")\n\n return img\n\n\n# Monkey patch\nkeras_preprocessing.image.iterator.load_img = load_and_crop_img\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/makenet/utils/preprocess_crop.py","file_name":"preprocess_crop.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"4138130603","text":"# -*- coding: utf-8 -*-\r\n# author: Ryan\r\n# time: 2021/9/28\r\nimport smtplib\r\nimport email\r\n# 负责构造文本\r\nfrom email.mime.text 
import MIMEText\r\n# 负责构造图片\r\nfrom email.mime.image import MIMEImage\r\n# 负责将多个对象集合起来\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.header import Header\r\ndef send_email(server,recv_email):\r\n # SMTP服务器,这里使用163邮箱\r\n mail_host = \"smtp.163.com\"\r\n # 发件人邮箱\r\n mail_sender = \"laat_la@163.com\"\r\n # 邮箱授权码,注意这里不是邮箱密码,如何获取邮箱授权码,请看本文最后教程\r\n mail_license = \"SWEUZVEEOFZBNFLL\"\r\n # 收件人邮箱,可以为多个收件人\r\n mail_receivers = [str(recv_email)]\r\n\r\n mm = MIMEMultipart('related')\r\n\r\n # 邮件主题\r\n subject_content = \"\"\"LAAT 有空闲机器,请及时进行测试\"\"\"\r\n # 设置发送者,注意严格遵守格式,里面邮箱为发件人邮箱\r\n mm[\"From\"] = \"\"\r\n # 设置接受者,注意严格遵守格式,里面邮箱为接受者邮箱\r\n mm[\"To\"] = str(recv_email)\r\n # 设置邮件主题\r\n mm[\"Subject\"] = Header(subject_content,'utf-8')\r\n # 邮件正文内容\r\n body_content = \"\"\"你好,收到本邮件即代表您预约的自动化测试任务可以进行了,本次空闲的自动化测试机器为\"\"\"+server+\"\"\",如有特殊情况无法进行测试,请及时联系我,后续有计划时需重新预约\"\"\"\r\n # 构造文本,参数1:正文内容,参数2:文本格式,参数3:编码方式\r\n message_text = MIMEText(body_content,\"plain\",\"utf-8\")\r\n # 向MIMEMultipart对象中添加文本对象\r\n mm.attach(message_text)\r\n\r\n # 创建SMTP对象\r\n stp = smtplib.SMTP()\r\n # 设置发件人邮箱的域名和端口,端口地址为25\r\n stp.connect(mail_host, 25)\r\n # set_debuglevel(1)可以打印出和SMTP服务器交互的所有信息\r\n stp.set_debuglevel(1)\r\n # 登录邮箱,传递参数1:邮箱地址,参数2:邮箱授权码\r\n stp.login(mail_sender,mail_license)\r\n # 发送邮件,传递参数1:发件人邮箱地址,参数2:收件人邮箱地址,参数3:把邮件内容格式改为str\r\n stp.sendmail(mail_sender, mail_receivers, mm.as_string())\r\n print(\"邮件发送成功\")\r\n # 关闭SMTP对象\r\n stp.quit()\r\nif __name__ == \"__main__\":\r\n send_email(\"LAAT2\",\"wujun12@lenovo.com\")","repo_name":"wj007881/Python-Script2","sub_path":"Python script/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1510761873","text":"import unittest\nfrom selenium import webdriver\nfrom page.bbs.page_home import BBsHome\nimport time\n\nclass Test1_Home(unittest.TestCase):\n\t#打开浏览器\n\t@classmethod\n\tdef setUpClass(self):\n\t\tself.driver = webdriver.Chrome()\n\t\tself.url = \"http://www.360che.com/\"\n\t\tself.bbs_page = BBsHome(self.driver,self.url)\n\t\tself.bbs_page.open()\n\t\t\n\t@classmethod\n\tdef tearDownClass(self):\n\t\tself.driver.quit()\n\n\t#执行case\n\tdef test_01(self):\n\t\tself.bbs_page.bbs_into_home()\n\t\ttime.sleep(5)\n\t\ttry:\n\t\t\tself.bbs_page.swithc_br(self.driver)\n\t\t\ttitle = self.driver.title\n\t\t\tself.assertEqual(title,u\"卡车之家论坛 - 网聚卡车人的力量 我的卡车论坛##\")\n\t\texcept AssertionError as e:\n\t\t\traise\n\n\tdef test_02(self):\n\t\tself.bbs_page.close_br(self.driver)\n\t\tself.bbs_page.bbs_into_page()\n\t\ttime.sleep(5)\n\t\ttry:\n\t\t\tself.bbs_page.swithc_br(self.driver)\n\t\t\ttitle = self.driver.title\n\t\t\tself.assertEqual(title,u\"卡车之家论坛 - 网聚卡车人的力量 我的卡车论坛\")\n\t\texcept AssertionError as e:\n\t\t\traise\n\n\tdef test_03(self):\n\t\tself.bbs_page.close_br(self.driver)\n\t\tself.bbs_page.bbs_into_new()\n\t\ttime.sleep(5)\n\t\ttry:\n\t\t\tself.bbs_page.swithc_br(self.driver)\n\t\t\ttitle = self.driver.title\n\t\t\tself.assertEqual(title,u\"【卡车新帖子】_ 卡车之家论坛\")\n\t\texcept AssertionError as e:\n\t\t\traise\n\n\tdef test_04(self):\n\t\tself.bbs_page.close_br(self.driver)\n\t\tself.bbs_page.bbs_into_hot()\n\t\ttime.sleep(5)\n\t\ttry:\n\t\t\tself.bbs_page.swithc_br(self.driver)\n\t\t\ttitle = self.driver.title\n\t\t\tself.assertEqual(title,u\"卡车十大热帖排行_卡车论坛_卡车之家论坛\")\n\t\texcept AssertionError as e:\n\t\t\traise\n\n\tdef 
test_05(self):\n\t\tself.bbs_page.close_br(self.driver)\n\t\tself.bbs_page.bbs_into_car()\n\t\ttime.sleep(5)\n\t\ttry:\n\t\t\tself.bbs_page.swithc_br(self.driver)\n\t\t\ttitle = self.driver.title\n\t\t\tself.assertEqual(title,u\"【车型论坛大全】_ 卡车之家论坛\")\n\t\texcept AssertionError as e:\n\t\t\traise\n\t\t\n\n","repo_name":"truckhome-test-dev/auto_UI_PO","sub_path":"testcase/bbs/test1_home.py","file_name":"test1_home.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12362797534","text":"import numpy as np\r\nimport glob\r\nimport os\r\nimport cv2\r\nimport json\r\nimport imageio\r\n\r\n# import matplotlib.pyplot as plt\r\nimport frame_utils\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader, Dataset\r\n\r\nfrom collections import OrderedDict\r\n\r\n\r\n# training_set = [2, 6, 7, 8, 14, 16, 18, 19, 20, 22, 30, 31, 36, 39, 41, 42, 44,\r\n# 45, 46, 47, 50, 51, 52, 53, 55, 57, 58, 60, 61, 63, 64, 65, 68, 69, 70, 71, 72,\r\n# 74, 76, 83, 84, 85, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,\r\n# 101, 102, 103, 104, 105, 107, 108, 109, 111, 112, 113, 115, 116, 119, 120,\r\n# 121, 122, 123, 124, 125, 126, 127, 128]\r\n\r\ndef scale_operation(images, intrinsics, s):\r\n ht1 = images.shape[2]\r\n wd1 = images.shape[3]\r\n ht2 = int(s * ht1)\r\n wd2 = int(s * wd1)\r\n intrinsics[:, 0] *= s\r\n intrinsics[:, 1] *= s\r\n images = F.interpolate(images, [ht2, wd2], mode='bilinear', align_corners=True)\r\n return images, intrinsics\r\n\r\n\r\ndef crop_operation(images, intrinsics, crop_h, crop_w):\r\n ht1 = images.shape[2]\r\n wd1 = images.shape[3]\r\n x0 = (wd1 - crop_w) // 2\r\n y0 = (ht1 - crop_h) // 2\r\n x1 = x0 + crop_w\r\n y1 = y0 + crop_h\r\n images = images[:, :, y0:y1, x0:x1]\r\n intrinsics[:, 0, 2] -= x0\r\n intrinsics[:, 1, 2] -= y0\r\n return images, intrinsics\r\n\r\n\r\ndef random_scale_and_crop(images, masks, intrinsics, depths=None, resize=[-1, -1], crop_size=[448, 576]):\r\n s = 2 ** np.random.uniform(-0.1, 0.4)\r\n\r\n ht1 = images.shape[2]\r\n wd1 = images.shape[3]\r\n if resize == [-1, -1]:\r\n ht2 = int(s * ht1)\r\n wd2 = int(s * wd1)\r\n else:\r\n ht2 = int(resize[0])\r\n wd2 = int(resize[1])\r\n\r\n intrinsics[:, 0] *= float(wd2) / wd1\r\n intrinsics[:, 1] *= float(ht2) / ht1\r\n\r\n if depths is not None:\r\n depths = depths.unsqueeze(1)\r\n depths = F.interpolate(depths, [ht2, wd2], mode='nearest')\r\n\r\n images = F.interpolate(images, [ht2, wd2], mode='bilinear', align_corners=True)\r\n\r\n x0 = np.random.randint(0, wd2 - crop_size[1] + 1)\r\n y0 = np.random.randint(0, ht2 - crop_size[0] + 1)\r\n x1 = x0 + crop_size[1]\r\n y1 = y0 + crop_size[0]\r\n\r\n images = images[:, :, y0:y1, x0:x1]\r\n\r\n if depths is not None:\r\n depths = depths[:, :, y0:y1, x0:x1]\r\n depths = depths.squeeze(1)\r\n\r\n intrinsics[:, 0, 2] -= x0\r\n intrinsics[:, 1, 2] -= y0\r\n\r\n if masks is not None:\r\n masks = masks.unsqueeze(1)\r\n masks = F.interpolate(masks, [ht2, wd2], mode='nearest')\r\n masks = masks[:, :, y0:y1, x0:x1]\r\n masks = masks.squeeze(1)\r\n\r\n return images, depths, masks, intrinsics\r\n\r\n\r\ndef load_pair(file: str):\r\n with open(file) as f:\r\n lines = f.readlines()\r\n n_cam = int(lines[0])\r\n pairs = {}\r\n img_ids = []\r\n for i in range(1, 1 + 2 * n_cam, 2):\r\n pair = []\r\n score = []\r\n img_id = lines[i].strip()\r\n pair_str = lines[i + 1].strip().split(' ')\r\n n_pair = int(pair_str[0])\r\n for j in 
range(1, 1 + 2 * n_pair, 2):\r\n pair.append(pair_str[j])\r\n score.append(float(pair_str[j + 1]))\r\n img_ids.append(img_id)\r\n pairs[img_id] = {'id': img_id, 'index': i // 2, 'pair': pair, 'score': score}\r\n pairs['id_list'] = img_ids\r\n return pairs\r\n\r\n\r\nclass SYNViewsynTrain(Dataset):\r\n def __init__(self, dataset_path, pointcloud_path, split='train', crop_size=[800, 800], resize=[-1, -1], return_frame_ids=False, data_augmentation=False, start=0, end=9999):\r\n self.dataset_path = dataset_path\r\n self.pointcloud_path = pointcloud_path\r\n self.split = split\r\n\r\n self.crop_size = crop_size\r\n self.resize = resize\r\n\r\n self.data_augmentation = data_augmentation\r\n self.return_frame_ids = return_frame_ids\r\n\r\n self._build_dataset_index()\r\n self._load_and_rescale_points()\r\n\r\n self.start = start\r\n self.end = end\r\n\r\n def _load_and_rescale_points(self):\r\n # get the orginal depth file for scaling\r\n # first load pointclouds\r\n # pointcloud_list = [os.path.join(self.dataset_path, \"pointclouds\", \"%s.ply\" % os.path.basename(self.dataset_path)), ]\r\n # pointcloud_list = [os.path.join(self.dataset_path, \"pointclouds\", \"%s_v1.ply\" % os.path.basename(self.dataset_path)), ]\r\n pointcloud_list = [os.path.join(self.pointcloud_path, \"%s_v1.ply\" % os.path.basename(self.dataset_path)), ]\r\n pointcloud_factor = 150.0 # ad-hoc\r\n\r\n self.depth_scale = 400. # this may need change later\r\n\r\n all_pointclouds = []\r\n\r\n for file_name in pointcloud_list:\r\n # pointcloud = np.load(file_name) # np array of shape N x 3\r\n pointcloud = frame_utils.load_ply(file_name) # np array of shape N x 3\r\n pointcloud = np.transpose(pointcloud) # 3 x N\r\n\r\n # this is no longer needed, as we have adjusted the orientation when saving the raw data.\r\n # # adjust the world coord\r\n # T = np.array(\r\n # ((1, 0, 0),\r\n # (0, 0, -1),\r\n # (0, 1, 0)))\r\n #\r\n # pointcloud = T @ pointcloud\r\n # pointcloud = pointcloud.astype(np.float32)\r\n \r\n all_pointclouds.append(pointcloud)\r\n\r\n all_pointclouds = np.stack(all_pointclouds, axis=0) # n_poses x 3 x N\r\n\r\n # do the scaling\r\n all_pointclouds *= self.depth_scale\r\n all_pointclouds /= pointcloud_factor\r\n self.poses[:, :3, 3] = self.poses[:, :3, 3] * self.depth_scale\r\n\r\n self.all_pointclouds = all_pointclouds\r\n\r\n def _build_dataset_index(self):\r\n splits = [self.split, ]\r\n\r\n testskip = 1 # same as nerf\r\n\r\n metas = {}\r\n for s in splits:\r\n with open(os.path.join(self.dataset_path, 'transforms_{}.json'.format(s)), 'r') as fp:\r\n metas[s] = json.load(fp)\r\n\r\n all_imgs = []\r\n all_poses = []\r\n all_frame_ids = []\r\n\r\n for s in splits:\r\n meta = metas[s]\r\n if s == 'train' or testskip == 0:\r\n skip = 1\r\n else:\r\n skip = testskip\r\n\r\n for frame in meta['frames'][::skip]:\r\n fname = os.path.join(self.dataset_path, frame['file_path'] + '.png')\r\n # img = frame_utils.read_gen(fname)\r\n # img = imageio.imread(fname)\r\n # img = img[..., [2,1,0]] # to cv2 format\r\n img = cv2.imread(fname, cv2.IMREAD_UNCHANGED)\r\n img = np.where(img[..., 3:]==0, 255*np.ones_like(img), img)\r\n img = img[..., 0:3]\r\n\r\n c2w = np.array(frame['transform_matrix'])\r\n\r\n rotation, location = c2w[0:3, 0:3], c2w[0:3, 3]\r\n R_world2bcam = np.transpose(rotation)\r\n\r\n # Convert camera location to translation vector used in coordinate changes\r\n # T_world2bcam = -1*R_world2bcam*cam.location\r\n # Use location from matrix_world to account for constraints:\r\n T_world2bcam = -1 * 
np.dot(R_world2bcam, location)\r\n\r\n R_bcam2cv = np.array(\r\n ((1, 0, 0),\r\n (0, -1, 0),\r\n (0, 0, -1)))\r\n\r\n # Build the coordinate transform matrix from world to computer vision camera\r\n # NOTE: Use * instead of @ here for older versions of Blender\r\n # TODO: detect Blender version\r\n R_world2cv = np.dot(R_bcam2cv, R_world2bcam)\r\n T_world2cv = np.dot(R_bcam2cv, T_world2bcam)\r\n\r\n # put into 3x4 matrix\r\n RT = np.concatenate((R_world2cv, T_world2cv[:, None]), axis=1)\r\n RT = np.concatenate((RT, np.array([0, 0, 0, 1])[None, :]), axis=0)\r\n\r\n # w2c = np.linalg.inv(c2w)\r\n w2c = RT\r\n\r\n # print(np.array(frame['transform_matrix']))\r\n #\r\n # c2w = np.array(frame['transform_matrix'])\r\n # c2w[0:3, 0:3] = c2w[0:3, 0:3] @ np.array([[1., 0, 0],[0, -1, 0],[0, 0, -1]])\r\n # print(c2w)\r\n #\r\n # c2w = np.array(frame['transform_matrix']) @ np.array([[1., 0, 0, 0],[0, -1, 0, 0],[0, 0, -1, 0],[0, 0, 0, 1]])\r\n # print(c2w)\r\n #\r\n # assert (False)\r\n # w2c = np.linalg.inv(c2w)\r\n\r\n\r\n # w2c = np.array([[1., 0, 0, 0],[0, -1, 0, 0],[0, 0, -1, 0],[0, 0, 0, 1]]) @ w2c\r\n # w2c = w2c @ np.array([[1., 0, 0, 0],[0, -1, 0, 0],[0, 0, -1, 0],[0, 0, 0, 1]])\r\n\r\n all_imgs.append(img)\r\n all_poses.append(w2c)\r\n\r\n if 'frame_id' in frame:\r\n all_frame_ids.append(frame['frame_id'])\r\n else:\r\n all_frame_ids.append(0)\r\n\r\n self.images = np.stack(all_imgs, 0).astype(np.float32) # N x H x W x 3\r\n self.poses = np.stack(all_poses, 0).astype(np.float32)\r\n self.all_frame_ids = np.array(all_frame_ids).astype(np.int64)\r\n\r\n H, W = self.images[0].shape[:2]\r\n camera_angle_x = float(meta['camera_angle_x'])\r\n focal = .5 * W / np.tan(.5 * camera_angle_x)\r\n\r\n self.total_num_views = len(self.images)\r\n\r\n # get the intrinsics\r\n K = np.array([[focal, 0, H/2],[0, focal, W/2],[0, 0, 1]]).reshape(1, 3, 3)\r\n K = np.repeat(K, self.total_num_views, axis=0)\r\n\r\n self.intrinsics = K\r\n\r\n print('Dataset length:', self.total_num_views)\r\n\r\n def __len__(self):\r\n return self.total_num_views\r\n\r\n def __getitem__(self, ix1):\r\n if ix1 < self.start or ix1 >= self.end: return []\r\n # randomly sample neighboring frame\r\n\r\n indices = [ix1, ]\r\n\r\n images, poses, intrinsics, frame_ids = [], [], [], []\r\n for i in indices:\r\n # image = frame_utils.read_gen(self.image_list[i])\r\n # depth = frame_utils.read_gen(self.depth_list[i])\r\n image = self.images[i]\r\n pose = self.poses[i]\r\n calib = self.intrinsics[i].copy()\r\n frame_id = self.all_frame_ids[i]\r\n\r\n images.append(image)\r\n poses.append(pose)\r\n intrinsics.append(calib)\r\n frame_ids.append(frame_id)\r\n\r\n images = np.stack(images, 0).astype(np.float32) # N x H x W x 3\r\n poses = np.stack(poses, 0).astype(np.float32)\r\n intrinsics = np.stack(intrinsics, 0).astype(np.float32)\r\n frame_ids = np.array(frame_ids).astype(np.int64)\r\n\r\n images = torch.from_numpy(images)\r\n poses = torch.from_numpy(poses)\r\n intrinsics = torch.from_numpy(intrinsics)\r\n frame_ids = torch.from_numpy(frame_ids)\r\n\r\n # channels first\r\n images = images.permute(0, 3, 1, 2) # N x 3 x H x W\r\n images = images.contiguous()\r\n\r\n if self.data_augmentation:\r\n images, depths, _, intrinsics = \\\r\n random_scale_and_crop(images, None, intrinsics, None, self.resize, self.crop_size)\r\n\r\n # for op, param in self.size_operations:\r\n # if op == \"scale\":\r\n # images, intrinsics = scale_operation(images, intrinsics, param)\r\n # elif op == \"crop\":\r\n # images, intrinsics = crop_operation(images, 
intrinsics, *param)\r\n\r\n if self.return_frame_ids:\r\n return images, poses, intrinsics, frame_ids\r\n else:\r\n return images, poses, intrinsics\r\n\r\n def get_pointclouds(self):\r\n return torch.from_numpy(self.all_pointclouds)\r\n\r\n def get_render_poses(self):\r\n splits = ['test']\r\n\r\n metas = {}\r\n for s in splits:\r\n with open(os.path.join(self.dataset_path, 'transforms_{}.json'.format(s)), 'r') as fp:\r\n metas[s] = json.load(fp)\r\n\r\n all_poses = []\r\n\r\n for s in splits:\r\n meta = metas[s]\r\n for frame in meta['frames']:\r\n c2w = np.array(frame['transform_matrix'])\r\n\r\n rotation, location = c2w[0:3, 0:3], c2w[0:3, 3]\r\n R_world2bcam = np.transpose(rotation)\r\n\r\n # Convert camera location to translation vector used in coordinate changes\r\n # T_world2bcam = -1*R_world2bcam*cam.location\r\n # Use location from matrix_world to account for constraints:\r\n T_world2bcam = -1 * np.dot(R_world2bcam, location)\r\n\r\n R_bcam2cv = np.array(\r\n ((1, 0, 0),\r\n (0, -1, 0),\r\n (0, 0, -1)))\r\n\r\n # Build the coordinate transform matrix from world to computer vision camera\r\n # NOTE: Use * instead of @ here for older versions of Blender\r\n # TODO: detect Blender version\r\n R_world2cv = np.dot(R_bcam2cv, R_world2bcam)\r\n T_world2cv = np.dot(R_bcam2cv, T_world2bcam)\r\n\r\n # put into 3x4 matrix\r\n RT = np.concatenate((R_world2cv, T_world2cv[:, None]), axis=1)\r\n RT = np.concatenate((RT, np.array([0, 0, 0, 1])[None, :]), axis=0)\r\n\r\n # w2c = np.linalg.inv(c2w)\r\n w2c = RT\r\n\r\n all_poses.append(w2c)\r\n\r\n render_poses = np.stack(all_poses, 0).astype(np.float32)\r\n render_poses[:, :3, 3] = render_poses[:, :3, 3] * self.depth_scale\r\n\r\n return torch.tensor(render_poses).float().cuda()\r\n\r\n","repo_name":"princeton-vl/SNP","sub_path":"datasets/syn.py","file_name":"syn.py","file_ext":"py","file_size_in_byte":13585,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"53"} +{"seq_id":"32120842153","text":"import numpy as np\nimport pandas as pd\n\nclass NeuralNetwork:\n\n\tdef __init__(self, X, y):\n\t\tm, n = X.shape\n\t\tself.X = X\n\t\tself.y = y\n\t\tself.hidden_layer_size = 25\n\t\tself.input_layer_size = n\n\t\tself.output_layer_size = 10\n\n\tdef sigmoid(self, z):\n\t\treturn 1.0 / (1.0 + np.exp(np.negative(z)))\n\n\tdef sigmoidGradient(self, z):\n\t\treturn self.sigmoid(z) * (1 - self.sigmoid(z))\n\n\tdef reshapeParams(self, flattened_array):\n\t\ttheta1 = flattened_array[:(self.input_layer_size+1)*self.hidden_layer_size] \\\n\t\t\t\t.reshape((self.hidden_layer_size,self.input_layer_size+1))\n\t\ttheta2 = flattened_array[(self.input_layer_size+1)*self.hidden_layer_size:] \\\n\t\t\t\t.reshape((self.output_layer_size,self.hidden_layer_size+1))\n\t\t\n\t\treturn [ theta1, theta2 ]\n\n\tdef genRandThetas(self):\n\t\tepsilon_init = 0.12\n\t\ttheta1_shape = (self.hidden_layer_size, self.input_layer_size+1)\n\t\ttheta2_shape = (self.output_layer_size, self.hidden_layer_size+1)\n\t\trand_thetas = [ np.random.rand( *theta1_shape ) * 2 * epsilon_init - epsilon_init, \\\n\t\t\t\t\t\tnp.random.rand( *theta2_shape ) * 2 * epsilon_init - epsilon_init]\n\t\treturn rand_thetas\n\n\n\tdef costFunction(self, X, y, theta1, theta2, lambd):\n\n\t\t## do feed foward propogation to get the h_x vector ##\n\n\t\t# get m & n variables\n\t\tm, n = X.shape\n\n\t\t# first must set a = X. (5000 x 401)\n\t\ta = X\n\t\t#a = np.c_[np.ones(m), X]\n\n\t\t# theta1 is (25 x 401), theta2 is (10 x 26)\n\t\t# then, z2 is theta1 * a1. 
z2 has dimensions (5000 x 25)\n\t\tz2 = a.dot(theta1.T)\n\t\t# a2 is sigmoid(z2), add row of 1s to a2 (5000 x 26)\n\t\ta2 = np.c_[np.ones(m), self.sigmoid(z2)]\n\t\t# z3 is theta2 * a2. z3 has dimensions (5000 x 10)\n\t\tz3 = a2.dot(theta2.T)\n\t\t# a3 is sigmoid(z3). dimensions (5000 x 10) h == a3\n\t\th = self.sigmoid(z3)\n\n\t\t## cost function ##\n\n\t\t# turn y into matrix of logical arrays\n\t\ty_matrix = pd.get_dummies(y.ravel()).as_matrix()\n\n\t\t# cost, the theta1/theta2 slice is to ignore first column for regularization\n\t\tcost = np.sum((-(y_matrix) * np.log(h)) - (1 - y_matrix) * np.log(1 - h))/m + \\\n\t\t\t\t(lambd/(2.0*m)) * (np.sum(theta1[:, 1:]**2) + np.sum(theta2[:, 1:]**2))\n\t\t## gradient ##\n\n\t\terror3 = h - y_matrix # shape is (5000 x 10)\n\n\t\terror2 = theta2.T.dot(error3.T)\n\t\terror2 = error2[1:, :] * self.sigmoidGradient(z2.T)\n\n\t\t# error2 shape is (25 x 5000)\n\n\t\tdelta1 = error2.dot(a)\n\t\tdelta2 = error3.T.dot(a2)\n\t\ttheta1_ = np.c_[np.ones((theta1.shape[0],1)),theta1[:,1:]]\n\t\ttheta2_ = np.c_[np.ones((theta2.shape[0],1)),theta2[:,1:]]\n\t\t\n\t\ttheta1_grad = delta1/m + (theta1_*lambd)/m\n\t\ttheta2_grad = delta2/m + (theta2_*lambd)/m\n\n\n\t\treturn cost, theta1_grad, theta2_grad\n\n\n\tdef gradientDescent(self, X, y, theta1, theta2, iterations):\n\t\tfor i in range(iterations):\n\t\t\tcost, grad1, grad2 = self.costFunction(X, y, theta1, theta2, .01)\n\t\t\ttheta1 = theta1 - grad1\n\t\t\ttheta2 = theta2 - grad2\n\n\t\treturn cost, theta1, theta2\n\n\n\n\tdef predict(self, theta_1, theta_2, features):\n\t\tz2 = theta_1.dot(features.T)\n\t\ta2 = np.c_[np.ones((features.shape[0],1)), self.sigmoid(z2).T]\n\t\t\n\t\tz3 = a2.dot(theta_2.T)\n\t\ta3 = self.sigmoid(z3)\n\n\t\treturn(np.argmax(a3, axis=1)+1)\n\n\tdef fit(self, X, y, final):\n\t\tm, n = X.shape\n\t\tM, N = final.shape\n\t\tX = np.c_[np.ones(m), X]\n\t\ttheta1, theta2 = self.genRandThetas()\n\t\tcost, theta1, theta2 = self.gradientDescent(X, y, theta1, theta2, 100)\n\t\tfinal = np.c_[np.ones(M), final]\n\t\treturn self.predict(theta1, theta2, final)\n","repo_name":"jko0531/Machine-Learning","sub_path":"Handwriting/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22606949576","text":"import unittest\nfrom .utils import *\nfrom .utils.data import *\nfrom answers import get_correct_answer\nfrom itertools import product\n\ndef isSpecial(x):\n if x >= 0:\n return False\n\n x = abs(x)\n \n s = 0\n while x > 0:\n s += x % 3\n x //= 3\n\n return s == 12\n\ndef input_generator91(path):\n with open(path, 'w+') as f:\n n = 15\n k = randint(1, 6)\n d = randint(1, 6)\n f.write(str(n) + ' ' + str(k) + ' ' + str(d) + '\\n')\n\n for i in range(n):\n f.write(str(randint(-100000, 100000)) + '\\n')\n\ndef bruteforce(path):\n with open(path) as f:\n n, k, d = map(int, f.readline().split())\n a = list()\n for i in range(n):\n a.append(int(f.readline()))\n\n result = -10001\n for length in [x * d for x in range(1, n // d + 1)]:\n for l in range(n-length+1):\n r = l+length\n s = sum(a[l:r])\n c = 0\n for x in a[l:r]:\n if isSpecial(x):\n c += 1\n\n if c % k == 0 and s > result:\n result = s\n\n return result\n\ndef efficient(path):\n with open(path) as f:\n n, k, d = map(int, f.readline().split())\n answer = -10001\n s = 0\n count = 0\n\n infty = 10 ** 20\n m = [infty] * k # m[i] - минимальная префикс-сумма, в которой количество особенных кратно k\n c = [0] * 
k # c[i] - номер итерации, на которой встречена префикс-сумма m[i]\n\n for i in range(n):\n x = int(f.readline())\n s += x\n\n if isSpecial(x):\n count += 1\n\n r = count % k\n\n if s > answer and r == 0 and i % d == 0:\n answer = s\n elif m[r] != infty and s - m[r] > answer and c[r] % d == i % d:\n answer = s - m[r]\n \n if s < m[r]:\n m[r] = s\n c[r] = i\n\n return answer\n\ndef solve():\n paths = generate_paths(91)\n solutions = {}\n\n for letter in ['A', 'B']:\n with open(paths[letter]) as f:\n solutions[letter] = efficient(paths[letter])\n\n return '{} {}'.format(*solutions.values())\n\nclass Problem91(unittest.TestCase):\n def test_answer(self):\n assert solve() == get_correct_answer(27, 91)\n\n def test_random(self):\n assert test_with_bruteforce(bruteforce, efficient, input_generator91, verbose=True)\n\nif __name__ == '__main__':\n print(solve())","repo_name":"DmitryKochetkov/polyakov_py","sub_path":"solutions27/problem91.py","file_name":"problem91.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13427009850","text":"'''Exploiting spatial (perceptual) redundancy with the 2D dyadic Discrete Wavelet Transform.'''\n\nimport io\nfrom skimage import io as skimage_io # pip install scikit-image\nimport numpy as np\nimport pywt\nimport os\nimport logging\nimport main\nimport parser\nimport importlib\n\n#from DWT import color_dyadic_DWT as DWT\nfrom DWT2D.color_dyadic_DWT import analyze as space_analyze # pip install \"DWT2D @ git+https://github.com/vicente-gonzalez-ruiz/DWT2D\"\nfrom DWT2D.color_dyadic_DWT import synthesize as space_synthesize\n\nfrom color_transforms.YCoCg import from_RGB # pip install \"color_transforms @ git+https://github.com/vicente-gonzalez-ruiz/color_transforms\"\nfrom color_transforms.YCoCg import to_RGB\n\ndefault_levels = 5\ndefault_DWT = \"db5\"\ndefault_CT = \"YCoCg\"\n\nparser.parser_encode.add_argument(\"-l\", \"--levels\", type=parser.int_or_str, help=f\"Number of decomposition levels (default: {default_levels})\", default=default_levels)\nparser.parser_encode.add_argument(\"-w\", \"--wavelet\", type=parser.int_or_str, help=f\"Wavelet name (default: \\\"{default_DWT}\\\")\", default=default_DWT)\nparser.parser_encode.add_argument(\"-t\", \"--color_transform\", type=parser.int_or_str, help=f\"Color transform (default: \\\"{default_CT}\\\")\", default=default_CT)\nparser.parser_decode.add_argument(\"-l\", \"--levels\", type=parser.int_or_str, help=f\"Number of decomposition levels (default: {default_levels})\", default=default_levels)\nparser.parser_decode.add_argument(\"-w\", \"--wavelet\", type=parser.int_or_str, help=f\"Wavelet name (default: \\\"{default_DWT}\\\")\", default=default_DWT)\nparser.parser_decode.add_argument(\"-t\", \"--color_transform\", type=parser.int_or_str, help=f\"Color transform (default: \\\"{default_CT}\\\")\", default=default_CT)\n\n#import PNG as EC\n#import YCoCg as CT # Color Transform\n\nargs = parser.parser.parse_known_args()[0]\nCT = importlib.import_module(args.color_transform)\n\nclass CoDec(CT.CoDec):\n\n def __init__(self, args):\n super().__init__(args)\n self.levels = args.levels\n logging.info(f\"levels = {self.levels}\")\n #if self.encoding:\n self.wavelet = pywt.Wavelet(args.wavelet)\n # with open(f\"{args.output}_wavelet_name.txt\", \"w\") as f:\n # f.write(f\"{args.wavelet}\")\n # logging.info(f\"Written {args.output}_wavelet_name.txt\")\n logging.info(f\"wavelet={args.wavelet} ({self.wavelet})\")\n #else:\n # 
with open(f\"{args.input}_wavelet_name.txt\", \"r\") as f:\n # wavelet_name = f.read()\n # logging.info(f\"Read wavelet = \\\"{wavelet_name}\\\" from {args.input}_wavelet_name.txt\")\n # self.wavelet = pywt.Wavelet(wavelet_name)\n # logging.info(f\"wavelet={wavelet_name} ({self.wavelet})\")\n\n def encode(self):\n img = self.encode_read().astype(np.int16)\n img_128 = img #- 128 # To use the deadzone\n CT_img = from_RGB(img_128)\n \n decom_img = space_analyze(CT_img, self.wavelet, self.levels)\n logging.debug(f\"len(decom_img)={len(decom_img)}\")\n decom_k = self.quantize_decom(decom_img)\n self.write_decom(decom_k)\n\n #k = self.quantize(CT_img)\n #logging.debug(f\"k.shape={k.shape}, k.type={k.dtype}\")\n #k[..., 1] += 128\n #k[..., 2] += 128\n #compressed_k = self.compress(k.astype(np.uint8))\n #self.encode_write(compressed_k)\n\n self.BPP = (self.output_bytes*8)/(img.shape[0]*img.shape[1])\n #return rate\n\n def decode(self):\n decom_k = self.read_decom()\n decom_y = decom_k\n decom_y = self.dequantize_decom(decom_k)\n CT_y = space_synthesize(decom_y, self.wavelet, self.levels)\n\n #compressed_k = self.decode_read()\n #k = self.decompress(compressed_k).astype(np.int16)\n #logging.debug(f\"k.shape={k.shape}, k.type={k.dtype}\")\n #k[..., 1] -= 128\n #k[..., 2] -= 128\n #CT_y = self.dequantize(k)\n \n y_128 = to_RGB(CT_y)\n y = y_128# + 128\n y = np.clip(y, 0, 255).astype(np.uint8)\n self.decode_write(y)\n self.BPP = (self.input_bytes*8)/(y.shape[0]*y.shape[1])\n #return rate\n\n def quantize_decom(self, decom):\n LL_k = super().quantize(decom[0])\n LL_k[..., 1] += 128\n LL_k[..., 2] += 128\n decom_k = [LL_k]\n for spatial_resolution in decom[1:]:\n spatial_resolution_k = []\n for subband in spatial_resolution:\n subband_k = super().quantize(subband)\n subband_k += 128\n spatial_resolution_k.append(subband_k)\n decom_k.append(tuple(spatial_resolution_k))\n return decom_k\n\n def dequantize_decom(self, decom_k):\n LL_k = decom_k[0]\n LL_k[..., 1] -= 128\n LL_k[..., 2] -= 128\n decom_y = [super().dequantize(LL_k)]\n for spatial_resolution_k in decom_k[1:]:\n spatial_resolution_y = []\n for subband_k in spatial_resolution_k:\n subband_k -= 128\n subband_y = super().dequantize(subband_k)\n spatial_resolution_y.append(subband_y)\n decom_y.append(tuple(spatial_resolution_y))\n return decom_y\n\n def _quantize_decom(self, decom):\n decom_k = [self.quantize(decom[0])] # LL subband\n for spatial_resolution in decom[1:]:\n spatial_resolution_k = []\n for subband in spatial_resolution:\n subband_k = self.quantize(subband)\n spatial_resolution_k.append(subband_k)\n decom_k.append(tuple(spatial_resolution_k))\n return decom_k\n\n def _dequantize_decom(self, decom_k):\n decom_y = [self.dequantize(decom_k[0])]\n for spatial_resolution_k in decom_k[1:]:\n spatial_resolution_y = []\n for subband_k in spatial_resolution_k:\n subband_y = self.dequantize(subband_k)\n spatial_resolution_y.append(subband_y)\n decom_y.append(tuple(spatial_resolution_y))\n return decom_y\n\n def _quantize(self, subband):\n '''Quantize the image.'''\n #k = self.Q.encode(subband)\n #k = super().quantize(subband)\n k = subband\n k += 32768\n k = k.astype(np.uint16)\n logging.debug(f\"k.shape={k.shape} k.dtype={k.dtype}\")\n return k\n\n def _dequantize(self, k):\n '''\"Dequantize\" an image.'''\n k = k.astype(np.int16)\n k -= 32768\n #self.Q = Quantizer(Q_step=QSS, min_val=min_index_val, max_val=max_index_val)\n #y = self.Q.decode(k)\n #y = super().dequantize(k)\n y = k\n logging.debug(f\"y.shape={y.shape} y.dtype={y.dtype}\")\n 
return y\n\n def write_decom(self, decom):\n LL = decom[0]\n fn_without_extension = self.args.output.split('.')[0]\n fn_subband = f\"{fn_without_extension}_LL_{self.levels}\"\n #LL = io.BytesIO(LL)\n LL = self.compress(LL.astype(np.uint8))\n self.encode_write_fn(LL, fn_subband)\n resolution_index = self.levels\n #aux_decom = [decom[0][..., 0]] # Used for computing slices\n for spatial_resolution in decom[1:]:\n subband_names = [\"LH\", \"HL\", \"HH\"]\n subband_index = 0\n #aux_resol = [] # Used for computing slices\n for subband_name in subband_names:\n fn_subband = f\"{fn_without_extension}_{subband_name}_{resolution_index}\"\n #SP = io.BytesIO(spatial_resolution[subband_index])\n SP = self.compress(spatial_resolution[subband_index].astype(np.uint8))\n self.encode_write_fn(SP, fn_subband)\n #aux_resol.append(spatial_resolution[subband_index][..., 0])\n subband_index += 1\n resolution_index -= 1\n #aux_decom.append(tuple(aux_resol))\n #self.slices = pywt.coeffs_to_array(aux_decom)[1]\n #return slices\n\n def read_decom(self):\n fn_without_extension = self.args.input.split('.')[0]\n fn_subband = f\"{fn_without_extension}_LL_{self.levels}\"\n LL = self.decode_read_fn(fn_subband)\n LL = self.decompress(LL).astype(np.int16)\n decom = [LL]\n resolution_index = self.levels\n for l in range(self.levels, 0, -1):\n subband_names = [\"LH\", \"HL\", \"HH\"]\n spatial_resolution = []\n for subband_name in subband_names:\n fn_subband = f\"{fn_without_extension}_{subband_name}_{resolution_index}\"\n subband = self.decode_read_fn(fn_subband)\n subband = self.decompress(subband).astype(np.int16)\n spatial_resolution.append(subband)\n decom.append(tuple(spatial_resolution))\n resolution_index -= 1\n return decom\n\n '''\n def __save_fn(self, img, fn):\n io.imsave(fn, img, check_contrast=False)\n self.required_bytes = os.path.getsize(fn)\n logging.info(f\"Written {self.required_bytes} bytes in {fn}\")\n\n def __read_fn(self, fn):\n img = io.imread(fn)\n logging.info(f\"Read {fn} of shape {img.shape}\")\n return img\n '''\n\nif __name__ == \"__main__\":\n main.main(parser.parser, logging, CoDec)\n","repo_name":"Sistemas-Multimedia/VCF","sub_path":"src/DWT.py","file_name":"DWT.py","file_ext":"py","file_size_in_byte":9130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21770561373","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport logging\nimport input, model\nfrom datetime import datetime\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('ckpt_dir', 'data/checkpoints/',\n \"\"\"Directory where to restore a model\"\"\")\ntf.app.flags.DEFINE_string('save_dir', 'data/train/flowers',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('log_dir', 'data/train/log',\n \"\"\"Directory where to write event logs.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 500,\n \"\"\"Number of epochs to run.\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 32,\n \"\"\"Size of batches.\"\"\")\ntf.app.flags.DEFINE_float('learning_rate', 0.005,\n \"\"\"Learning rate for the optimizer\"\"\")\n\n\ndef train():\n with tf.Graph().as_default() as g:\n global_step = tf.train.get_or_create_global_step()\n\n # Get the iterator from the TFRecord files.\n iterator = input.consume_tfrecord()\n images_batch, labels_batch = iterator.get_next()\n\n # Num_classes is None for fine tuning. 
You need to have the proper scope.\n # From the original model we only need the bottlenecks.\n with tf.contrib.slim.arg_scope(model.inception_v3_arg_scope()):\n bottleneck, end_points = model.inception_v3(images_batch, num_classes=None, is_training=False)\n\n # We pass the bottleneck generated in the step before to the new classifier.\n logits = model.fine_tuning(bottleneck, end_points)\n\n # We compute the loss between the predictions and the labels\n loss = model.loss(logits, labels_batch)\n\n # We use ADAM as a optimizer. You could use whichever you want, like Gradient Descent.\n # It's important to indicate that we only want to retrain the 'fine_tuning' variables.\n optimizer = tf.train.AdamOptimizer(0.005)\n train_op = optimizer.minimize(loss, global_step=global_step, var_list=tf.global_variables('fine_tuning'))\n\n # We create two savers. The first one for the InceptionV3 variables and the second one for the variables of\n # the new classifier.\n saver = tf.train.Saver(tf.global_variables('InceptionV3'))\n saver_ft = tf.train.Saver(tf.global_variables('fine_tuning'))\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n\n # Restore the checkpoints of the InceptionV3 model.\n saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))\n\n # This will let you see the images in tensorboard\n tf.summary.image(tensor=images_batch, name=\"Image\")\n\n # Tensorborad options\n train_writer = tf.summary.FileWriter(FLAGS.log_dir, g)\n\n logger = init_logger()\n logger.info(\"Training starts...\")\n\n # Training loop. Set the max number of steps.\n for i in range(0, FLAGS.max_steps):\n # We compute the image and label batch\n sess.run([images_batch, labels_batch])\n\n # Merge all summary variables for Tensorborad\n merge = tf.summary.merge_all()\n\n # We do the training and compute the loss and the summaries\n _, loss_val, summary = sess.run([train_op, loss, merge])\n\n if i % 10 is 0:\n logger.info('Time: %s Loss: %f Step: %i', datetime.now(), loss_val, i)\n # Write the summaries in the log file\n train_writer.add_summary(summary, i)\n\n # We save the progress every 500 steps\n if i % 500 is 0 and i is not 0:\n saver_ft.save(sess, FLAGS.save_dir, global_step=global_step)\n logger.info(\"***** Saving model in: %s *****\", FLAGS.save_dir)\n\n logger.info(\"Training ends...\")\n saver_ft.save(sess, FLAGS.save_dir, global_step=global_step)\n logger.info(\"***** Saving model in: %s *****\", FLAGS.save_dir)\n\n\ndef main(argv=None):\n train()\n\n\ndef init_logger():\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n return logger\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","repo_name":"juanabascal/tf-flower-classificator","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"41788433094","text":"import os\nimport copy\nimport torch\nimport random\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformers import AutoModel\nimport transformers.adapters.composition as ac\nfrom info_nce import InfoNCE\nfrom transformers import AdapterConfig\n\nseed = 123\nrandom.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nclass MLKGLM(nn.Module):\n \"\"\"docstring for ClassName\"\"\"\n def __init__(self, args):\n super(MLKGLM, self).__init__()\n # load pretrained MLLM\n 
self.MLLM = AutoModel.from_pretrained(args.model_dir, \n return_dict=True,\n output_hidden_states=True)\n hidden_num = self.MLLM.get_input_embeddings().embedding_dim\n self.training = True\n self.lm_mask_token_id = args.lm_mask_token_id\n # set three extra modules\n self.knowledge_mapping = nn.Sequential(nn.Linear(hidden_num, int(hidden_num / 2)),\n nn.ELU(),\n nn.Dropout(0.1), # project down\n nn.Linear(int(hidden_num / 2), hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1), # project up\n nn.Linear(hidden_num, 4*hidden_num),\n nn.ELU(),\n nn.Dropout(0.1), # project up\n nn.Linear(4*hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1), # project down\n nn.Linear(hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1))\n if not self.training:\n # for testing\n self.all_aggregator = nn.Linear(2*hidden_num, hidden_num, bias=False)\n self.all_aggregator.weight.data = self.weight_init_sum(self.all_aggregator.weight.data)\n\n def weight_init_sum(self, t):\n hidden_num = int(t.shape[-1]/2)\n nn.init.xavier_normal_(t)\n return t*0.05 + torch.cat((0.5*torch.eye(hidden_num,hidden_num),\n 0.5*torch.eye(hidden_num,hidden_num)),dim=1)\n\n def forward(self, **inputs):\n # get MLLM output\n outputs_MLLM = self.MLLM(**inputs).hidden_states\n # take last layer hidden state: (batch_size, sequence_length, hidden_size)\n outputs_MLLM = outputs_MLLM[-1]\n # add adversarial noise\n if self.training:\n outputs_MLLM = outputs_MLLM + 0.1*torch.abs(outputs_MLLM).mean()*torch.randn_like(outputs_MLLM)\n outputs_both = self.knowledge_mapping(outputs_MLLM)\n if self.training:\n return (outputs_both + outputs_MLLM) / 2, outputs_MLLM.clone()\n else:\n outputs_both = self.all_aggregator(torch.cat((outputs_MLLM, outputs_both), dim=-1))\n return outputs_both\n\n\nclass fusion_adapter(nn.Module):\n \"\"\"docstring for ClassName\"\"\"\n def __init__(self, args):\n super(fusion_adapter, self).__init__()\n # load pretrained MLLM\n self.MLLM = AutoModel.from_pretrained(args.model_dir)\n hidden_num = self.MLLM.get_input_embeddings().embedding_dim\n self.training = True\n self.lm_mask_token_id = args.lm_mask_token_id\n self.stage = \"none\" # none, ep, tp, es, ts\n self.fuse = False\n if self.training:\n # adapters\n self.MLLM.add_adapter(\"ep\")\n self.MLLM.add_adapter(\"tp\")\n self.MLLM.add_adapter(\"es\")\n self.MLLM.add_adapter(\"ts\")\n self.MLLM.add_adapter_fusion([\"ep\", \"tp\", \"es\", \"ts\"])\n self.MLLM.active_adapters = ac.Fuse(\"ep\", \"tp\", \"es\", \"ts\")\n\n def forward(self, **inputs):\n return self.MLLM(**inputs)['last_hidden_state']\n\n def checking(self):\n print(self.stage)\n\n'''\nORG, FA, SA\nmBERT: 177853440, 202683648, 206201856\nXLM: 278043648, 302873856, 306392064\nXLMR: 559890432, 648124416, 660652032\n'''\nclass simple_adapter(nn.Module):\n \"\"\"docstring for ClassName\"\"\"\n def __init__(self, args):\n super(simple_adapter, self).__init__()\n # load pretrained MLLM\n self.MLLM = AutoModel.from_pretrained(args.model_dir)\n hidden_num = self.MLLM.get_input_embeddings().embedding_dim\n self.training = True\n self.lm_mask_token_id = args.lm_mask_token_id\n self.stage = \"none\"\n self.fuse = False\n if self.training:\n # adapters\n config = AdapterConfig(mh_adapter=True, output_adapter=True, reduction_factor=1, non_linearity=\"relu\", \n original_ln_before=False, original_ln_after=True, \n ln_before=False, ln_after=False, \n residual_before_ln=False, adapter_residual_before_ln=False)\n 
self.MLLM.add_adapter(\"baseline\", config=config)\n\n def forward(self, **inputs):\n return self.MLLM(**inputs)['last_hidden_state']\n\n\ndef loss_universal(args, outputs, lossfcn, input_ids=None, el2=True):\n # transform set-level to sample-level\n # outputs = torch.mean(outputs, dim=1)\n outputs_pos = outputs[:int(outputs.shape[0]/2)]\n if input_ids is not None:\n outputs_pos = get_mask(outputs_pos, input_ids, args.lm_mask_token_id)\n outputs_neg = outputs[int(outputs.shape[0]/2):]\n # average\n outputs_pos = torch.mean(outputs_pos, dim=1)\n outputs_neg = torch.mean(outputs_neg, dim=1)\n idx_query, idx_pos = [], []\n for i in range(int(outputs.shape[0]/2)):\n for j in range(int(outputs.shape[0]/2)):\n if i > j:\n idx_query.append(i)\n idx_pos.append(j)\n '''\n if len(idx_query) > args.batch_num:\n idx_all = [i for i in range(len(idx_query))]\n idx_random = random.sample(idx_all, args.batch_num)\n idx_query = [idx_query[i] for i in idx_random]\n idx_pos = [idx_pos[i] for i in idx_random]\n '''\n loss_dp = lossfcn(outputs_pos[idx_query], outputs_pos[idx_pos], outputs_neg)\n if el2 == True:\n lossfcn_el2 = nn.SmoothL1Loss()\n loss_el2 = lossfcn_el2(outputs_pos[idx_query], outputs_pos[idx_pos]) - lossfcn_el2(outputs_pos[idx_query], outputs_neg[idx_pos])\n return loss_dp + loss_el2\n else:\n return loss_dp\n\n\ndef loss_triple(args, outputs, lossfcn, input_ids=None, el2=True):\n # transform set-level to sample-level\n # outputs = torch.mean(outputs, dim=1)\n outputs_query = outputs[:int(outputs.shape[0]/3)]\n if input_ids is not None:\n outputs_query = get_mask(outputs_query, input_ids, args.lm_mask_token_id)\n outputs_pos = outputs[int(outputs.shape[0]/3):int(outputs.shape[0]/3*2)]\n outputs_neg = outputs[int(outputs.shape[0]/3*2):]\n # average\n outputs_query = torch.mean(outputs_query, dim=1)\n outputs_pos = torch.mean(outputs_pos, dim=1)\n outputs_neg = torch.mean(outputs_neg, dim=1)\n loss_dp = lossfcn(outputs_query, outputs_pos, outputs_neg)\n if el2 == True:\n lossfcn_el2 = nn.SmoothL1Loss()\n loss_el2 = lossfcn_el2(outputs_query, outputs_pos) - lossfcn_el2(outputs_query, outputs_neg)\n return loss_dp + loss_el2\n else:\n return loss_dp\n\n\ndef loss_wocontext(args, outputs_query, outputs_pos, input_ids=None, lm_emb=None, el2=True):\n # lossfcn = InfoNCE(negative_mode='unpaired')\n lossfcn = InfoNCE()\n # lossfcn_el2 = nn.MSELoss()\n # lossfcn_re = nn.MSELoss()\n # outputs_query = outputs[:int(outputs.shape[0]/3)]\n # outputs_pos = outputs[int(outputs.shape[0]/3):int(outputs.shape[0]/3*2)]\n # outputs_neg = outputs[int(outputs.shape[0]/3*2):]\n # outputs_query = outputs[:int(outputs.shape[0]/2)]\n # outputs_pos = outputs[int(outputs.shape[0]/2):]\n # remove mask token\n if input_ids is not None:\n outputs_query = get_mask(outputs_query, input_ids[:int(input_ids.shape[0]/3)], args.lm_mask_token_id)\n outputs_pos = get_mask(outputs_pos, input_ids[int(input_ids.shape[0]/3):int(input_ids.shape[0]/3*2)], args.lm_mask_token_id)\n # outputs_neg = get_mask(outputs_neg, input_ids[int(input_ids.shape[0]/3*2):], args.lm_mask_token_id)\n '''\n # remove entity token\n if lm_emb is not None:\n lm_emb = lm_emb[:int(lm_emb.shape[0]/3)]\n context_query = outputs[:int(outputs.shape[0]/3)] - outputs_query\n lm_emb = get_mask(lm_emb, input_ids, args.lm_mask_token_id, reverse=True)\n '''\n # average\n outputs_query = torch.mean(outputs_query, dim=1)\n outputs_pos = torch.mean(outputs_pos, dim=1)\n # outputs_neg = torch.mean(outputs_neg, dim=1)\n # cosine loss\n loss_dp = lossfcn(outputs_query, 
outputs_pos)\n '''\n # l2-norm loss\n loss_el2 = 0\n if el2 == True:\n loss_el2 = lossfcn_el2(outputs_query, outputs_pos) / (lossfcn_el2(outputs_query, outputs_pos) + lossfcn_el2(outputs_query, outputs_neg))\n # reconstruction loss\n loss_re = 0\n if lm_emb is not None:\n loss_re = lossfcn_re(context_query, lm_emb)\n return loss_dp + loss_el2 + loss_re\n '''\n return loss_dp\n\ndef get_mask(outputs, input_ids, lm_mask_token_id, reverse=False):\n tmp_batch_num = outputs.shape[0]\n if not reverse: # keep entity\n for i in range(int(tmp_batch_num)):\n if lm_mask_token_id not in input_ids[i]: continue\n mask_idx = ((input_ids[i] == lm_mask_token_id).nonzero(as_tuple=True)[0])\n if len(mask_idx) == 1:\n outputs[i,mask_idx[0]:,:] = 0\n else: ## len(mask_idx) >= 2\n outputs[i,:mask_idx[0],:] = 0\n outputs[i,mask_idx[1]:,:] = 0\n else: # keep context\n for i in range(int(tmp_batch_num)):\n if lm_mask_token_id not in input_ids[i]: continue\n mask_idx = ((input_ids[i] == lm_mask_token_id).nonzero(as_tuple=True)[0])\n if len(mask_idx) == 1:\n outputs[i,mask_idx[0]:,:] = 0\n else: ## len(mask_idx) >= 2\n outputs[i,mask_idx[0]:mask_idx[1],:] = 0\n return outputs\n\n'''\nclass MLKGLM(nn.Module):\n \"\"\"docstring for ClassName\"\"\"\n def __init__(self, args):\n super(MLKGLM, self).__init__()\n # load pretrained MLLM\n self.MLLM = AutoModel.from_pretrained(args.model_dir, \n return_dict=True,\n output_hidden_states=True)\n hidden_num = self.MLLM.get_input_embeddings().embedding_dim\n self.training = False\n # set three extra modules\n self.universal_mapping = nn.Sequential(Conv1D(hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1),\n Conv1D(4*hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(4*hidden_num, eps=1e-12),\n nn.Dropout(0.1),\n Conv1D(hidden_num, 4*hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1),\n Conv1D(hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1))\n self.universal_aggregator = nn.Sequential(Conv1D(hidden_num, 2*hidden_num),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1))\n self.triple_mapping = nn.Sequential(Conv1D(hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1),\n Conv1D(4*hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(4*hidden_num, eps=1e-12),\n nn.Dropout(0.1),\n Conv1D(hidden_num, 4*hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1),\n Conv1D(hidden_num, hidden_num),\n nn.ELU(),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1))\n self.triple_aggregator = nn.Sequential(Conv1D(hidden_num, 2*hidden_num),\n nn.LayerNorm(hidden_num, eps=1e-12),\n nn.Dropout(0.1))\n # for testing\n self.all_aggregator = nn.Sequential(nn.Linear(3*hidden_num, hidden_num, bias=False),\n nn.Dropout(0.2))\n # self.all_aggregator.weight.data = self.weight_init_sum(self.all_aggregator.weight.data)\n\n def weight_init_sum(self, t):\n hidden_num = int(t.shape[-1]/3)\n return 0.0003*torch.randn(3*hidden_num, ) + torch.cat((0.333*torch.eye(hidden_num,hidden_num),\n 0.333*torch.eye(hidden_num,hidden_num),\n 0.333*torch.eye(hidden_num,hidden_num)),dim=1)\n\n def forward(self, **inputs):\n # get MLLM output\n outputs_MLLM = self.MLLM(**inputs).hidden_states\n # take last layer hidden state: (batch_size, sequence_length, hidden_size)\n outputs_MLLM = outputs_MLLM[-1]\n # objective 1: universal space\n outputs_universal = self.universal_mapping(outputs_MLLM)\n outputs_universal = 
self.universal_aggregator(torch.cat((outputs_MLLM, outputs_universal), dim=-1))\n # objective 2: transformer layers\n outputs_MLKGLM = self.triple_mapping(outputs_universal)\n outputs_MLKGLM = self.triple_aggregator(torch.cat((outputs_MLLM, outputs_MLKGLM), dim=-1))\n return self.all_aggregator(torch.cat((outputs_MLLM, outputs_universal, outputs_MLKGLM), dim=-1))\n # return (outputs_MLLM + outputs_universal + outputs_MLKGLM)/3\n'''\n","repo_name":"yifan-h/Multilingual_Space","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15463,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"71991414569","text":"import socket as skt\n\nserverPort = 50102\n\nsSocket = skt.socket(skt.AF_INET, skt.SOCK_STREAM)\n\nsSocket.bind((\"\", serverPort))\n\nsSocket.listen(1)\n\nprint(\"Servidor TCP escuchando en \", serverPort)\n\ncSocket, cAddr = sSocket.accept()\nmsg = cSocket.recv(2048).decode()\n\nresponse = \"Largo de msg es de\" + str(len(msg)) + \" y el msg es\" + str(msg)\ncSocket.send(response.encode())\ncSocket.close()","repo_name":"C0t300/Lab-1-Redes","sub_path":"testTCP.py","file_name":"testTCP.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74713746729","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 17 11:27:45 2022\n\n@author: Jason\n\n分別畫出有細胞的玻片 以及,空白玻片的光譜圖像並且疊加\n\"\"\"\nimport os\nfrom spectral.io import envi\nimport spectral \n# os.chdir(r'C:\\Users\\Jason\\Documents\\HinaLea\\SDKtest\\bin\\processed\\20200521_101240')\n\nspectral.settings.envi_support_nonlowercase_params ='TRUE'\n\n#光譜將具有CxB形狀,其中C是庫中的光譜數,B是每個光譜的譜帶數。\n\nos.chdir(r\"H:\\臺大醫院測試\\第9次拍攝\\700\\sample 700\")\n\nimg = envi.open(\"sample 700.hdr\", \"sample 700.dat\")\n\nprint(img.__class__)\nprint(img)\nprint('===================================')\n\narr = img.load()\narr.__class__\nprint (arr.info())\nprint(arr.shape) #608 x968 x299\n\n#%%\n\nsample700 =[]\n\nfor i in range(0,608):\n for j in range(0,968):\n sample700.append(arr[i,j,:].reshape(299))\n \n\ndata = np.zeros(299)\nfor i in range(len(sample700)):\n data = data +sample700[i]\n\ndata700 = data/299\nplt.plot(data700,label='data')\n\n \nz = 70\n#%%\n\nos.chdir(r\"H:\\臺大醫院測試\\第9次拍攝\\700\\empty 700\")\n\nimg1 = envi.open(\"empty 700.hdr\", \"empty 700.dat\")\n\narr1 = img1.load()\n\nempty700 =[]\n\nfor i in range(0,608):\n for j in range(0,968):\n empty700.append(arr1[i,j,:].reshape(299))\n \n\ndata1 = np.zeros(299)\nfor i in range(len(empty700)):\n data1 = data1 +empty700[i]\n\nempty700 = data1/299\n\nplt.plot(empty700,label='empty')\nplt.legend()\n\nplt.xlabel('Number of bands')\nplt.ylabel('Intensity')\nplt.title('500ms')\n\nvlines(z, 0,data700.max(),'r')\n\n#%%\n\nplt.figure()\n\nindex = data700/empty700\n\nplt.plot(index)\nplt.xlabel('Number of bands')\nplt.ylabel('Intensity')\nplt.title('500ms')\n\nvlines(z, index.min(),index.max(),'r')\n\n","repo_name":"lohas821019/hyperspectral","sub_path":"cal_data+empty .py","file_name":"cal_data+empty .py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74370844968","text":"\"\"\"config.py\n\nThis file contains functionality for managing ThemeFlip's\nconfiguration directory.\n\"\"\"\n\nfrom pathlib import Path\n\nDEFAULT_CONFIG_DIR = Path(\"~/.config/themeflip\")\nTHEME_DIR_NAME = \"themes\"\n\n\ndef 
create_config_dir(path=DEFAULT_CONFIG_DIR):\n \"\"\"Creates a config directory at the given path.\n\n Parameters\n ----------\n path : pathlib.Path object\n Path for the new directory.\n \"\"\"\n config_full_path = path.expanduser()\n if not config_full_path.exists():\n config_full_path.mkdir(parents=True)\n theme_dir = config_full_path / THEME_DIR_NAME\n theme_dir.mkdir()\n","repo_name":"LeftySolara/themeflip","sub_path":"themeflip/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23671323438","text":"from enthought.traits.api \\\n import HasTraits, List\n \nfrom enthought.traits.ui.api \\\n import View, Item, TableEditor\n \nfrom enthought.traits.ui.wx.color_column \\\n import ColorColumn\n\nfrom enthought.enable.api \\\n import ColorTrait \n \nclass Thingy ( HasTraits ):\n color = ColorTrait( 'black' )\n \n#-------------------------------------------------------------------------------\n# Sample data: \n#-------------------------------------------------------------------------------\n \ncolors = [\n Thingy( color = 'red'),\n Thingy( color = 'orange'),\n Thingy( color = 'yellow'),\n Thingy( color = 'green'),\n Thingy( color = 'blue'),\n Thingy( color = 'indigo'),\n Thingy( color = 'violet'),\n Thingy( color = 'black'),\n Thingy( color = 'white'),\n]\n\nclass TableTest ( HasTraits ):\n \n #---------------------------------------------------------------------------\n # Trait definitions: \n #---------------------------------------------------------------------------\n \n colors = List( Thingy )\n \n table_editor = TableEditor(\n columns = [ ColorColumn( name = 'color' ),\n ],\n \n editable = True,\n deletable = True,\n sortable = True, #\n sort_model = True,\n show_lines = True, #\n orientation = 'vertical',\n show_column_labels = True, #\n row_factory = Thingy\n )\n \n \n traits_view = View(\n [ Item( 'colors',\n id = 'colors',\n editor = table_editor ),\n '|[]<>' ],\n title = 'Table Editor Test',\n id = 'enthought.traits.ui.tests.table_editor_color_test',\n dock = 'horizontal',\n width = .4,\n height = .3,\n resizable = True,\n kind = 'live' )\n\n#-------------------------------------------------------------------------------\n# Run the tests: \n#-------------------------------------------------------------------------------\n \nif __name__ == '__main__':\n tt = TableTest( colors = colors )\n tt.configure_traits()\n","repo_name":"fspaolo/misc-code","sub_path":"maps/build/Traits/integrationtests/ui/table_editor_color_test.py","file_name":"table_editor_color_test.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"7756543754","text":"from itertools import combinations_with_replacement # 조합\nimport math\n\n\ndef solution(n, s):\n answer = []\n # 각 원소의 합이 S가되는 수의 집합\n # 위 조건을 만족하면서 각 원소의 곱이 최대가 되는 집합\n # n : 자연수의 개수\n # s : 자연수 집합의 합\n if n > s:\n return [-1]\n ll = [i for i in range(1, 9)]\n lst = sorted([i for i in combinations_with_replacement(ll, n) if sum(i) == s], key=lambda x: max(x[0], x[1]),\n reverse=False)\n print(lst)\n if len(lst) == 0:\n return [-1]\n return sorted(list(map(int, list(lst[0]))))\n\n\ndef solution2(n, s):\n if n > s:\n return [-1]\n answer = []\n\n p, dv = divmod(s, n)\n answer = [p] * n\n for i in range(dv):\n answer[i] += 1\n return sorted(answer)\n\n\nn, s = 2, 10002321\nprint(solution2(n, 
s))\n","repo_name":"commin-pg/coding_test","sub_path":"com/practice/programmers/최고의집합.py","file_name":"최고의집합.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9657574718","text":"from django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\n\nfrom core.models import Ingredient, Recipe\nfrom recipe.serializers import IngredientSerializer\n\nINGREDIENT_URL = reverse('recipe:ingredient-list')\n\n\nclass PublicIngredientAPITest(TestCase):\n \"\"\"Test public api ingredient\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n\n def test_authentication_is_required(self):\n \"\"\"Test that authentication is required to access the api\"\"\"\n res = self.client.get(INGREDIENT_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateIngredientAPITest(TestCase):\n \"\"\"Test private access to the api\"\"\"\n\n def setUp(self):\n self.user = get_user_model().objects.create_user(\n 'user@user.com',\n 'User123#'\n )\n\n self.client = APIClient()\n self.client.force_authenticate(self.user)\n\n def test_retrieve_ingredients_list(self):\n \"\"\"Test retrieving ingredient for authenticated user\"\"\"\n Ingredient.objects.create(name='ing-1', user=self.user)\n Ingredient.objects.create(name='ing-2', user=self.user)\n\n res = self.client.get(INGREDIENT_URL)\n\n ingredients = Ingredient.objects.all().order_by('-name')\n serializer = IngredientSerializer(ingredients, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, serializer.data)\n\n def test_retrieve_ingredients_list_for_current_user(self):\n \"\"\"Test retrieving ingredients only for the current user\"\"\"\n user2 = get_user_model().objects.create_user(\n 'user1@user.com',\n 'User123#'\n )\n\n Ingredient.objects.create(name='ing-1', user=user2)\n\n ingredient = Ingredient.objects.create(name='ing', user=self.user)\n\n res = self.client.get(INGREDIENT_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], ingredient.name)\n\n def test_create_ingredient(self):\n \"\"\"Test create a new ingredient\"\"\"\n payload = {\n 'name': 'ing-1'\n }\n\n self.client.post(INGREDIENT_URL, payload)\n\n exists = Ingredient.objects.filter(user=self.user,\n name=payload['name']).exists()\n self.assertTrue(exists)\n\n def test_create_ingredient_with_invalid_data(self):\n \"\"\"Test create ingredient with invalid name\"\"\"\n payload = {\n 'name': ''\n }\n\n res = self.client.post(INGREDIENT_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_retrieve_ingredients_assigned_to_recipe(self):\n \"\"\"Test retrieve ingredients assigned to recipe\"\"\"\n\n ingredient1 = Ingredient.objects.create(\n user=self.user, name='ingredient-1')\n ingredient2 = Ingredient.objects.create(\n user=self.user, name='ingredient-2')\n recipe = Recipe.objects.create(\n title='recipe',\n time_minutes=20,\n price=5.0,\n user=self.user\n )\n\n recipe.ingredients.add(ingredient1)\n\n res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})\n\n serializer1 = IngredientSerializer(ingredient1)\n serializer2 = IngredientSerializer(ingredient2)\n self.assertIn(serializer1.data, res.data)\n self.assertNotIn(serializer2.data, 
res.data)\n\n def test_retrieve_ingredients_assigned_unique(self):\n \"\"\"Test filtering ingredients by assigned returns unique items\"\"\"\n ingredient = Ingredient.objects.create(\n user=self.user, name='ingredient-1')\n Ingredient.objects.create(user=self.user, name='ingredient-2')\n recipe1 = Recipe.objects.create(\n title='recipe-1',\n time_minutes=20,\n price=5.0,\n user=self.user\n )\n recipe1.ingredients.add(ingredient)\n recipe2 = Recipe.objects.create(\n title='recipe-2',\n time_minutes=3,\n price=5.0,\n user=self.user\n )\n recipe2.ingredients.add(ingredient)\n\n res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})\n\n self.assertEqual(len(res.data), 1)\n","repo_name":"m-cherni/recipe-app","sub_path":"app/recipe/tests/test_ingredients_api.py","file_name":"test_ingredients_api.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39358461895","text":"from pygame import quit as PyGameQuit\nfrom pygame import USEREVENT\nfrom pygame.freetype import Font\nfrom pygame.event import get\nfrom pygame.time import Clock, set_timer\nfrom pygame.image import load as PyGameImageLoad\nfrom pygame.locals import QUIT, K_SPACE, KEYDOWN, K_KP_ENTER\nfrom pygame.display import update\nfrom pygame.surface import Surface\nfrom pygame.mixer import init, music, Sound\n\nfrom itertools import cycle\nfrom src.utils import BACKGROUND_START, TITLE_FONT, PRESS_START_FONT, INFORMATION_FONT, GAMELAN_BACKSOUND, CLICK_BACKSOUND\n\nclass StartGame(object):\n def __init__(self, screen, rect, background):\n init()\n self.screen = screen\n self.rect = rect\n self.background = background\n self.font = Font(TITLE_FONT, 40)\n self.font_start = Font(PRESS_START_FONT, 15)\n self.font_author = Font(PRESS_START_FONT, 12)\n self.font_information = Font(INFORMATION_FONT, 8)\n self.gamelan_music = music.load(GAMELAN_BACKSOUND)\n self.click_sound = Sound(CLICK_BACKSOUND)\n\n def load_image(self, path, convert_alpha=False, convert=True):\n try:\n resource = PyGameImageLoad(path)\n except PyGameErrorException:\n print (\"duarrr\")\n exit(0)\n if convert:\n return resource.convert()\n elif convert_alpha:\n return resource.convert_alpha()\n else:\n return resource\n\n def run(self):\n clock = Clock()\n music.play()\n text_surface, rect = self.font.render(\"Pocong Runner\", (150,51,51))\n author_surface, author_rect = self.font_author.render(\"@billalxcode\", (255,255,255))\n author_rect.center = self.rect.center\n author_rect.bottom += 165\n \n start_text_surface, start_rect = self.font_start.render(\"Tekan enter untuk memulai...\", (255,255,255))\n info_surface, info_rect = self.font_information.render(\"* Jika anda mengklaim pemilik dari font/gambar silahkan chat pembuat. 
Terima kasih telah mendukung saya.\", (255,255,255))\n info_rect.bottom += self.rect.height - 20\n blink_rect = start_rect\n blink_rect.center = self.rect.center\n blink_rect.bottom += 150\n\n while True:\n for event in get():\n if event.type == QUIT:\n return True\n elif event.type == KEYDOWN:\n self.click_sound.play()\n if event.key == K_SPACE or event.key == 13:\n return False\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(author_surface, author_rect)\n self.screen.blit(text_surface.convert_alpha(), (self.rect.width/2, 100))\n self.screen.blit(start_text_surface, blink_rect)\n self.screen.blit(info_surface, info_rect)\n clock.tick(60)\n\n update()","repo_name":"billalxcode/PocongRunner","sub_path":"src/startGame.py","file_name":"startGame.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"72878918248","text":"#link : https://codeforces.com/problemset/problem/1538/C\n#author : Mohamed Ibrahim\n\n\nimport bisect\nfor _ in range(int(input())):\n\tn,l,r = map(int,input().split())\n\ta = sorted(list(map(int, input().split())))\n\trslt = 0\n\tfor i in range(n):\n\t\tx = bisect.bisect_left(a, l-a[i])\n\t\tx = max(i+1, x)\n\t\ty = bisect.bisect_right(a, r-a[i])\n\t\tif y>x:\n\t\t\trslt +=(y-x)\n\tprint(rslt)\n\n\n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"Data Structure Problems/C) Number of Pairs,.py","file_name":"C) Number of Pairs,.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"20084192305","text":"from os import environ\n\nfrom src.utils.response import to_response\nfrom .service import API, APIService\n\n\nclass ModelAPI:\n api_url = environ.get('MODEL_API')\n request = APIService(API(api_url))\n\n @classmethod\n def find_simulation_expired(cls, days_expired: int):\n parameters = {\n 'to_expire': days_expired,\n }\n response = cls.request.get(\n f'/root/simulation/expired',\n parameters=parameters\n )\n if not response.ok:\n return to_response(response), True\n return response.json(), False\n\n @classmethod\n def root_delete_simulation(cls, simulation_id: str):\n response = cls.request.get(f'/root/simulation/{simulation_id}')\n if not response.ok:\n return to_response(response), True\n return response.json(), False\n","repo_name":"fenfisdi/cdslab_management","sub_path":"src/services/model_api.py","file_name":"model_api.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1007104997","text":"from django.contrib.auth.models import User\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APITestCase\n\n\nclass TurkleAPITestCase(APITestCase):\n def setUp(self):\n self.root, created = User.objects.get_or_create(username='root')\n if created:\n self.root.is_admin = True\n self.root.save()\n Token.objects.create(user=self.root)\n self.token = Token.objects.get(user__username='root')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)\n","repo_name":"hltcoe/turkle","sub_path":"turkle/api/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"53"} +{"seq_id":"20366704169","text":"\"\"\"\r\nExpresiones regulares con Python\r\n\"\"\"\r\n\r\nimport re, string\r\n\r\ndef 
validar(patron, cadena):\r\n return True if re.match(patron, cadena) else False\r\n\r\n# Pruebas con DNIs: 8 dig. + 1 letra mayúscula\r\nL = ['AAAA','62F','12345678a','00045678A','10045678A','12345678Bhola'] \r\npatron = r\"\\d{1,8}[A-Z]$\"\r\npatron = r\"[0-9]{1,8}[A-Z]$\"\r\npatron = r\"[1-9][0-9]{,7}[A-Z]$\" # Otro igual pero sin empezar por cero.\r\nR = [validar(patron, i) for i in L]\r\nprint(L)\r\nprint(R)\r\n\r\n\r\n\r\n# Otras pruebas: \r\npatron = r\".+\\.pdf$\"\r\nL = ['re.pdf', 'operadores.pdf','libro.xls','doc.docx','clases.pdf','holapdf','apdf','b.pdf','c.pdfAAAA','hola b.pdf']\r\nR = [validar(patron, i) for i in L]\r\nprint(L)\r\nprint(R)\r\n\r\n\r\nprint('Horas:')\r\nL = ['00:34:08','8:30:01','12:3:2','30:59:59','0:60:60']\r\npatron = r\"([0-2]?[0-9]):([0-5][0-9]):([0-5][0-9])$\"\r\nR = [validar(patron, i) for i in L]\r\nprint(L)\r\nprint(R)\r\n\r\nobj = re.match(patron, L[0])\r\nprint(obj)\r\nhh,mm,ss= obj.groups()\r\nprint(hh,mm,ss)\r\n\r\n# Pruebas con matriculas europeas: 2345GGT:\r\nprint('Matriculas:')\r\nL = ['1234DRF','12FFF','WWW4567','1234RRE','4455GTH','1244AED','1234WWDR']\r\n#consonantes = \"[\" + \"\".join([i for i in string.ascii_uppercase if i not in \"AEIOU\"]) + \"]\"\r\n#print(consonantes)\r\n#patron = r\"\\d{4}\" + consonantes + \"{3}$\"\r\npatron = r\"\\d{4}[A-Z]{3}$\"\r\npatron2 = r\"\\d{4}[^AEIOU]{3}$\"\r\nprint(patron)\r\nR = [validar(patron, i) and validar(patron2, i) for i in L]\r\nprint(L)\r\nprint(R)\r\n\r\n\r\n# Prueba con findall:\r\npatron = r\"[1-9][0-9]{,7}[A-Z]\" \r\ntxt = \"El dni es: 12345678D y el otro dni era: 10004023X\"\r\nL = re.findall(patron, txt)\r\nprint(L)\r\n\r\n\r\n","repo_name":"aldebarran22/curso_santander_1","sub_path":"expresiones_regulares.py","file_name":"expresiones_regulares.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"200605383","text":"class Constants:\n\tINVALID_BOX_NAME = 'Invalid box name'\n\tINVALID_PARAM = 'Invalid parameter'\n\n\nclass Apalara:\n\tdef __init__(self):\n\t\tself.position = [\n\t\t\t[0, 0, 0, 0, 0, 0],\n\t\t\t[0, 0, 0, 0, 0, 0],\n\t\t\t[0, 0, 0, 0, 0, 0],\n\t\t\t[0, 0, 0, 0, 0, 0],\n\t\t\t[0, 0, 0, 0, 0, 0],\n\t\t\t['A', 'B', 'C', 'D', 'E', 'F']\n\t\t]\n\n\t\tself.A = [5, 0]\n\t\tself.B = [5, 1]\n\t\tself.C = [5, 2]\n\t\tself.D = [5, 3]\n\t\tself.E = [5, 4]\n\t\tself.F = [5, 5]\n\t\tself.box_names = ['A', 'B', 'C', 'D', 'E', 'F']\n\t\tself.robot_arm = [4, 0]\n\n\tdef __str__(self):\n\t\toutput = '\\n'.join(' '.join(map(str, i)) for i in self.position)\n\t\treturn output\n\n\tdef get_box(self, letter):\n\t\tbox = None\n\t\tletter = letter.upper()\n\t\tif letter == 'A':\n\t\t\tbox = self.A\n\t\telif letter == 'B':\n\t\t\tbox = self.B\n\t\telif letter == 'C':\n\t\t\tbox = self.C\n\t\telif letter == 'D':\n\t\t\tbox = self.D\n\t\telif letter == 'E':\n\t\t\tbox = self.E\n\t\telif letter == 'F':\n\t\t\tbox = self.F\n\n\t\treturn box\n\n\t\"\"\"\n\tAction proposition over the robot world\n\t\"\"\"\n\n\tdef move_arm_to(self, x):\n\t\tbox = x\n\t\tif type(x) != list:\n\t\t\tbox = self.get_box(x)\n\t\t_index_0 = box[0]\n\t\t_index_1 = box[1]\n\t\tself.robot_arm = [_index_0 - 1, _index_1]\n\n\tdef arm_grasp(self):\n\t\tarm_0 = self.robot_arm[0] + 1\n\t\tarm_1 = self.robot_arm[1]\n\t\tself.robot_arm = [[arm_0], [arm_1]]\n\n\tdef arm_free(self):\n\t\tarm = self.robot_arm\n\t\tarm_0 = arm[0] - 1\n\t\tarm_1 = arm[1]\n\t\tself.robot_arm = [arm_0, arm_1]\n\n\tdef arm_place_on_table(self, x):\n\t\tposition = 
self.position\n\t\tfor index, i in enumerate(position[5], start=0):\n\t\t\tif str(i) in self.box_names:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tx_ = self.get_box(x)\n\t\t\t\tself.robot_arm = [5, index]\n\t\t\t\tself.move(x_, self.robot_arm)\n\t\t\t\tself.update_position(x_, self.robot_arm)\n\t\t\t\tprint('Box placed on table')\n\n\tdef nop(self):\n\t\tpass\n\n\t\"\"\"\n\tRelation proposition over the robot world objects\n\t\"\"\"\n\n\tdef on(self, x, y):\n\t\tx, y = str(x).upper(), str(y).upper()\n\n\t\tif (x or y) not in self.box_names:\n\t\t\treturn Constants.INVALID_BOX_NAME\n\n\t\tbox_x = self.get_box(x)\n\t\tbox_y = self.get_box(y)\n\n\t\tif (box_x[0] != box_y[0]) and abs(box_x[0] - box_y[0]) == 1 and (box_x[1] == box_y[1]):\n\t\t\treturn True\n\n\t\treturn False\n\n\tdef clear(self, x):\n\t\tx = str(x).upper()\n\t\tif x not in self.box_names:\n\t\t\treturn Constants.INVALID_BOX_NAME\n\n\t\tbox = self.get_box(x)\n\t\ttruth = True\n\t\tprint(box)\n\t\tfor _box in self.box_names:\n\t\t\t_box = (self, _box)\n\t\t\tif _box == box:\n\t\t\t\tcontinue\n\t\t\tif (_box[0] != box[0]) and (_box[1] == box[1]):\n\t\t\t\ttruth = False\n\n\t\treturn truth\n\n\tdef on_table(self, x):\n\t\tx = str(x).upper()\n\t\tif x not in self.box_names:\n\t\t\treturn Constants.INVALID_BOX_NAME\n\n\t\tbox = self.get_box(x)\n\t\tif box[0] != 5:\n\t\t\treturn False\n\n\t\treturn True\n\n\tdef on_left(self, x, y):\n\t\tx, y = str(x).upper(), str(y).upper()\n\n\t\tif (x or y) not in self.box_names:\n\t\t\treturn Constants.INVALID_BOX_NAME\n\n\t\tbox_x = self.get_box(x)\n\t\tbox_y = self.get_box(y)\n\n\t\tif (box_x[0] == box_y[0]) and (box_y[1] - box_x[1] == 1):\n\t\t\treturn True\n\n\t\treturn False\n\n\tdef on_right(self, x, y):\n\t\tx, y = str(x).upper(), str(y).upper()\n\n\t\tif (x or y) not in self.box_names:\n\t\t\treturn Constants.INVALID_BOX_NAME\n\n\t\tbox_x = self.get_box(x)\n\t\tbox_y = self.get_box(y)\n\n\t\tif (box_x[0] == box_y[0]) and (box_x[1] - box_y[1] == 1):\n\t\t\treturn True\n\n\t\treturn False\n\n\tdef under(self, x, y):\n\t\tx, y = str(x).upper(), str(y).upper()\n\n\t\tif (x or y) not in self.box_names:\n\t\t\treturn Constants.INVALID_BOX_NAME\n\n\t\tbox_x = self.get_box(x)\n\t\tbox_y = self.get_box(y)\n\t\tif (box_x[0] != box_y[0]) and (box_x[0] - box_y[0] == 1) and (box_x[1] == box_y[1]):\n\t\t\treturn True\n\t\treturn False\n\n\tdef box(self, pos):\n\t\tif type(pos) != list:\n\t\t\treturn Constants.INVALID_PARAM\n\n\t\tfor _box in self.box_names:\n\t\t\tif self.get_box(_box) == pos:\n\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef grasping(self, x):\n\t\t# Do shit here\n\t\tif self.robot_arm == self.get_box(x):\n\t\t\treturn True\n\t\treturn False\n\n\t\"\"\"\n\tBox move controls\n\t\"\"\"\n\n\tdef update_position(self, old, new):\n\t\told_index_0 = old[0]\n\t\told_index_1 = old[1]\n\t\tnew_index_0 = new[0]\n\t\tnew_index_1 = new[1]\n\t\tself.position[new_index_0][new_index_1] = self.position[old_index_0][old_index_1]\n\t\tself.position[old_index_0][old_index_1] = 0\n\t\t# new_position_name = self.position[new[0]][new[1]]\n\t\t# self.update_box(new_position_name, [old_index_0, old_index_1])\n\n\tdef swap_position_and_update(self, old, new):\n\t\told_index_0, old_index_1 = old[0], old[1]\n\t\tnew_index_0, new_index_1 = new[0], new[1]\n\t\tbox_position_old = self.position[old_index_0][old_index_1]\n\t\tbox_position_new = self.position[new_index_0][new_index_1]\n\t\tself.position[new_index_0][new_index_1] = box_position_old\n\t\tself.position[old_index_0][old_index_1] = box_position_new\n\t\t# 
new_position_name = self.position[new[0]][new[1]]\n\t\t# old_position_name = self.position[old[0]][old[1]]\n\t\t# self.update_box(old_position_name, [new_index_0, new_index_1])\n\t\t# self.update_box(new_position_name, [old_index_0, old_index_1])\n\n\tdef update_box(self, x, value):\n\t\tx_ = x.upper()\n\t\tif x_ == 'A':\n\t\t\tself.A = value\n\t\telif x_ == 'B':\n\t\t\tself.B = value\n\t\telif x_ == 'C':\n\t\t\tself.C = value\n\t\telif x_ == 'D':\n\t\t\tself.D = value\n\t\telif x_ == 'E':\n\t\t\tself.E = value\n\t\telif x_ == 'F':\n\t\t\tself.F = value\n\n\t# To move from source to destination\n\tdef move(self, box, destination):\n\t\tself.move_arm_to(box)\n\t\tself.arm_grasp()\n\t\tself.move_arm_to(destination)\n\t\tself.arm_free()\n\t\tself.update_position(box, destination)\n\n\tdef put_box_on(self, x, y):\n\t\t# Put x on top of y\n\t\t_x = self.get_box(x)\n\t\t_y = self.get_box(y)\n\t\t_y = [_y[0] - 1, _y[1]]\n\t\tself.move(_x, _y)\n\t\tself.update_box(x, _y)\n\n\tdef swap_boxes(self, x, y):\n\t\tx_ = self.get_box(x)\n\t\ty_ = self.get_box(y)\n\t\tself.swap_position_and_update(x_, y_)\n\n\t\"\"\"\n\tOther\n\t\"\"\"\n\n\tdef can_arm_grasp(self, x):\n\t\tbox = self.get_box(x)\n\t\tif self.position[box[0] - 1][box[1]] == 0:\n\t\t\treturn True\n\t\treturn False\n\n\tdef is_box_in_middle(self, x):\n\t\tbox = self.get_box(x)\n\t\tif box[0] == 5:\n\t\t\treturn False\n\t\telif self.position[box[0] - 1][box[1]] != 0 and self.position[box[0] + 1][box[1]] != 0:\n\t\t\treturn True\n\t\treturn False\n\n\tdef is_box_under_another(self, x):\n\t\tbox = self.get_box(x)\n\t\tif self.position[box[0] - 1][box[1]] != 0:\n\t\t\treturn True\n\t\treturn False\n\n\tdef is_box_on_table(self, x):\n\t\tbox = self.get_box(x)\n\t\tif box[0] == 5:\n\t\t\treturn True\n\t\treturn False\n\n\tdef get_box_position(self, x):\n\t\tx = x.upper()\n\t\tbox = self.get_box(x)\n\t\tif box[0] == 5:\n\t\t\treturn str(x) + \" is on the table\"\n\t\telse:\n\t\t\tbox_under = [box[0] + 1, box[1]]\n\t\t\tfor i in self.box_names:\n\t\t\t\tif self.get_box(i) == box_under:\n\t\t\t\t\treturn \"Box \" + str(x) + \" is on top of box \" + str(i)\n\n\tdef get_neighbours(self, x):\n\t\tx = x.upper()\n\t\tbox = self.get_box(x)\n\t\tneighbours = []\n\t\tfor each_box in self.box_names:\n\t\t\teach__box = self.get_box(each_box)\n\t\t\tif each__box[0] == box[0] and (abs(each__box[1] - box[1]) == 1):\n\t\t\t\tneighbours.append(each_box)\n\n\t\tif len(neighbours) == 1:\n\t\t\toutput = \"The neighbour of \" + str(x) + \" is \" + neighbours[0]\n\t\telif len(neighbours) == 2:\n\t\t\toutput = \"The neighbours of \" + str(x) + \" are \" + neighbours[0] + \" and \" + neighbours[1]\n\t\telse:\n\t\t\toutput = str(x) + \" has no neighbours\"\n\n\t\treturn output\n\n\n\"\"\"\n===========================================================================================\n\"\"\"\n\n\ndef get_command():\n\tinstruction = input('Enter command or query below:\\n').lower()\n\treturn instruction\n\n\ndef handle_action_interactions(self, command):\n\tif 'swap boxes' in command:\n\t\tif len(command) != 18:\n\t\t\tprint('Invalid command')\n\n\t\tbox_1 = command[-7]\n\t\tbox_2 = command[-1]\n\t\tself.swap_boxes(box_1, box_2)\n\t\tprint(self)\n\n\telif 'place box' in command and 'on the table' not in command and 'under box' not in command:\n\t\tif len(command) != 20:\n\t\t\tprint('Invalid command')\n\n\t\tbox_top_name = command[10]\n\t\tbox_under_name = command[-1]\n\t\tself.put_box_on(box_top_name, box_under_name)\n\t\tprint(self)\n\n\telif 'place box' in command and 'under 
box' in command:\n\t\tif len(command) != 23:\n\t\t\tprint('Invalid command')\n\n\t\tbox_under_name = command[10]\n\t\tbox_top_name = command[-1].upper()\n\t\tbox_top = self.get_box(box_top_name)\n\n\t\tif self.is_box_under_another(box_top_name):\n\t\t\tother_box = self.position[box_top[0] - 1][box_top[1]]\n\t\t\tresponses = {\n\t\t\t\t\"resp\": \"This is not possible \\n\",\n\t\t\t\t\"why\": \"Because box \" + str(box_top_name) + \" is under box \" + str(other_box) + \"\\n\",\n\t\t\t\t\"and_then\": \"I cannot remove a box under another box \\n\",\n\t\t\t\t\"why_again\": \"The stacked boxes will fall \\n\"\n\t\t\t}\n\t\t\tresp = input(responses['resp'])\n\n\t\t\tif 'why' in resp.lower():\n\t\t\t\tresp = input(responses['why'])\n\t\t\t\tif \"and then\" in resp.lower():\n\t\t\t\t\tresp = input(responses['and_then'])\n\t\t\t\t\tif \"why\" in resp.lower():\n\t\t\t\t\t\tprint(responses['why_again'])\n\n\t\telse:\n\t\t\tself.put_box_on(box_top_name, box_under_name)\n\t\t\tprint(self)\n\n\telif 'place box' in command and 'on the table' in command:\n\t\tif len(command) != 24:\n\t\t\tprint('Invalid command')\n\n\t\tbox_for_table = command[10]\n\t\tself.arm_place_on_table(box_for_table)\n\t\tprint(self)\n\n\telif 'generate the' in command:\n\t\tif 'three' in command:\n\t\t\tself.put_box_on('B', 'A')\n\t\t\tself.put_box_on('C', 'B')\n\t\t\tself.put_box_on('E', 'D')\n\t\t\tself.put_box_on('F', 'E')\n\t\t\tprint(self)\n\n\t\tif 'six' in command:\n\t\t\tself.put_box_on('B', 'A')\n\t\t\tself.put_box_on('C', 'B')\n\t\t\tself.put_box_on('D', 'C')\n\t\t\tself.put_box_on('E', 'D')\n\t\t\tself.put_box_on('F', 'E')\n\t\t\tprint(self)\n\n\telif 'stack' in command:\n\t\tif len(command) != 48:\n\t\t\tprint('Invalid command')\n\n\t\tbase_box = command[-1]\n\t\tfirst_box = command[6]\n\t\tsecond_box = command[9]\n\t\tthird_box = command[12]\n\t\tfourth_box = command[19]\n\n\t\tself.put_box_on(fourth_box, base_box)\n\t\tself.put_box_on(third_box, fourth_box)\n\t\tself.put_box_on(second_box, third_box)\n\t\tself.put_box_on(first_box, second_box)\n\t\tprint(self)\n\n\telif 'grasp' in command:\n\t\tbox_name = command[-2]\n\t\tbox = self.get_box(box_name)\n\t\tif self.is_box_under_another(box_name):\n\t\t\tother_box = self.position[box[0] - 1][box[1]]\n\t\t\tresponses = {\n\t\t\t\t\"resp\": \"This is not possible \\n\",\n\t\t\t\t\"why\": \"Because box \" + str(box_name) + \" is under box \" + str(other_box) + \"\\n\",\n\t\t\t\t\"and_then\": \"I cannot remove a box under another box \\n\",\n\t\t\t\t\"why_again\": \"The stacked boxes will fall \\n\"\n\t\t\t}\n\t\t\tresp = input(responses['resp'])\n\t\t\tif 'why' in resp.lower():\n\t\t\t\tresp = input(responses['why'])\n\t\t\t\tif \"and then\" in resp.lower():\n\t\t\t\t\tresp = input(responses['and_then'])\n\t\t\t\t\tif \"why\" in resp.lower():\n\t\t\t\t\t\tprint(responses['why_again'])\n\n\t\telse:\n\t\t\tself.move_arm_to(box)\n\t\t\tself.arm_grasp()\n\t\t\tprint('Grasped')\n\n\telif 'where is' in command:\n\t\tif command[-1] == '?':\n\t\t\tbox_name = command[-2]\n\t\telse:\n\t\t\tbox_name = command[-1]\n\t\tlocation = self.get_box_position(box_name)\n\t\tprint(location)\n\n\telif 'is box' in command and 'on the table' in command:\n\t\tbox_name = command[7]\n\t\tif self.is_box_on_table(box_name):\n\t\t\tprint('Yes, box ' + str(box_name) + ' is on the table')\n\t\telse:\n\t\t\tprint(self.get_box_position(box_name))\n\n\telif 'what boxes are' in command and 'neighbour' in command:\n\t\tif command[-1] == '?':\n\t\t\tbox_name = command[-2]\n\t\telse:\n\t\t\tbox_name = 
command[-1]\n\t\tprint(self.get_neighbours(box_name))\n\n\telse:\n\t\tprint('Invalid input')\n\t\texit()\n\n\ndef run_apalara(self):\n\tcommand = get_command()\n\tif command.lower() == 'exit':\n\t\treturn exit()\n\n\thandle_action_interactions(self, command)\n\trun_apalara(self)\n\n\nprint('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')\nprint('Welcome to Apalara!')\nprint('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')\n\nprint('Below is the default robot world')\napalara = Apalara()\nprint(apalara)\nprint('--------------------------------\\n')\nprint('Kindly enter \"exit\" to exit your session')\nrun_apalara(apalara)\n","repo_name":"IsraelGboluwaga/apalara","sub_path":"apalara.py","file_name":"apalara.py","file_ext":"py","file_size_in_byte":11443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28110711202","text":"from cs50 import get_float\nfrom math import floor\n\n\ndef main():\n # ask the user for the amount owed\n while True:\n dollars_owed = get_float(\"Change owed: \")\n cents_owed = floor(dollars_owed * 100)\n\n if cents_owed > 0:\n break\n # breakdown of all the coins, see cash.c\n quarters = cents_owed // 25\n dimes = (cents_owed % 25) // 10\n nickels = ((cents_owed % 25) % 10) // 5\n pennies = ((cents_owed % 25) % 10) % 5\n # sum of all the coins we owed\n print(f\"{quarters + dimes + nickels + pennies}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"musikito/CS50","sub_path":"cash/cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36923982304","text":"import sqlite3\ndb = sqlite3.connect('data/ebookstore_db')\ncursor = db.cursor()\n\n# Create a table called books in the database\ncursor.execute('''\n CREATE TABLE books(id INTEGER PRIMARY KEY, Title TEXT, Author TEXT, Qty INTEGER)\n'''\n)\n# Create variables storing the id, title and author and quantity for each book\nid1 = 3001\ntitle1 = 'A Tale of Two Cities'\nauthor1 = 'Charles Dickens'\nqty1 = 30\n\nid2 = 3002\ntitle2 = 'Harry Potter and the Philosophers Stone'\nauthor2 = 'J.K. Rowling'\nqty2 = 40\n\nid3 = 3003\ntitle3 = 'The Lion, the Witch and the Wardrobe'\nauthor3 = 'C. S. 
Lewis'\nqty3 = 25\n\nid4 = 3004\ntitle4 = 'The Lord of the Rings'\nauthor4 = 'J.R.R Tolkien'\nqty4 = 37\n\nid5 = 3005\ntitle5 = 'Alice in Wonderland'\nauthor5 = 'Lewis Carroll'\nqty5 = 12\n\n# Create a list containing the id, title, author and quantity of each book\nbookinfo = [(id1,title1,author1,qty1),(id2,title2,author2,qty2),(id3,title3,author3,qty3),(id4,title4,author4,qty4),(id5,title5,author5,qty5)]\n\n# Insert the book information in the list into the table\ncursor.executemany(''' INSERT INTO books(id, Title, Author, Qty) VALUES(?,?,?,?)''',\nbookinfo)\ndb.commit()\n\nwhile True:\n # Create a menu for the bookstore clerk\n # Request the clerk to choose an option from the menu\n menu = input(\"\"\"Select one of the following options below:\n 1 - Enter a new book\n 2 - Update exisiting book information\n 3 - Delete a book \n 4 - Seach for a specific book\n 0 - Exit\n : \"\"\").lower()\n\n # Create a programme to enable to the clerk to add new books to the database\n if menu == '1':\n pass\n id = input(\"Enter the id of the book: \").lower()\n title = input(\"Enter the title of the book: \").lower()\n author = input(\"Enter the author of the book: \").lower()\n qty = input(\"Enter the quantity of the book: \").lower()\n cursor.execute (''' INSERT INTO books(id, Title, Author, Qty) VALUES(?,?,?,?)''',\n (id, title, author, qty))\n print(\"Your new book has been added to the database\")\n db.commit()\n\n # Create a programme to enable to the clerk to update book information\n if menu == '2':\n pass\n if bookid:\n bookid = bookid[0][0]\n updatebook = input(\"Which field would you like to update, the id, title, author or quantity? \").lower()\n updatefield = input(\"Please enter the updated information: \").lower()\n cursor.execute('''UPDATE books SET id =? ''')\n if updatebook == \"title\":\n cursor.execute('''UPDATE books SET title = ? WHERE id = ?''', (updatefield, bookid))\n elif updatebook == \"author\":\n cursor.execute('''UPDATE books SET author = ? WHERE id = ?''', (updatefield, bookid))\n elif updatebook == \"quantity\":\n cursor.execute('''UPDATE books SET qty = ? WHERE id = ?''', (updatefield, bookid))\n elif updatebook == \"id\":\n cursor.execute('''UPDATE books SET id = ? WHERE id = ?''', (updatefield, bookid))\n print(\"The selected book has been updated \")\n db.commit()\n\n # Create a programme to enable to the clerk to delete books from the database\n if menu == '3':\n pass\n deletebook_title = input(\"Please enter the title of the book you would like to delete: \").lower()\n cursor.execute('''DELETE FROM books WHERE title = ? ''', (deletebook_title,))\n cursor.execute('''DROP TABLE books''')\n print(\"The selected book has been deleted\")\n db.commit\n\n # Create a programme to enable to the clerk to search the database to find a specific book\n if menu == '4':\n pass\n id = input(\"Enter the id of the book you would like to search for\").lower()\n cursor.execute('''SELECT title, author, qty FROM books WHERE id=? 
''', (id,))\n book = cursor.fetchall()\n print(book)\n db.commit\n \n if menu == '0':\n print(\"Thank you for using this database, goodbye!\")\n exit()\n else:\n print(\"You have unfortunately made the incorrect choice, please try again\")\n db.close()\n","repo_name":"nasehamalik/Bookstore-SQL","sub_path":"bookstore.py","file_name":"bookstore.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41112228961","text":"from marvellous import *;\n\n\ndef ListPrime():\n def Accept(num):\n arr = list()\n brr = list()\n sum = 0\n print(\"Enter Numbers In Array :\")\n for i in range(0, num):\n no = int(input(\"Num :\"))\n arr.append(no)\n ans = CheckPrime(no)\n if ans == True:\n # print(\"prime\")\n sum += no\n brr.append(no)\n\n return arr,brr,sum\n\n num = int(input(\"Enter How Many Numbers You Want :\"))\n arr,brr,sum = Accept(num)\n print(\"Entered Numbers Are :\", arr)\n print(\"prime numbers are :\", brr)\n print(\"Addition of Prime Numbers is :\", sum)\n\n\ndef main():\n ListPrime()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SachinRameshGore/Python-Programs","sub_path":"Assignment3/Assignment3_5.py","file_name":"Assignment3_5.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37318295955","text":"import imaplib\nimport email\nimport re\nfrom dateutil import parser\nimport datetime\n\n\naction = \"read.email\"\n\ndef getSender(email):\n sender = email['from']\n m = re.match(r'(.*)\\s<.*>', sender)\n if m:\n return m.group(1)\n return sender\n\ndef getDate(email):\n return parser.parse(email.get('date'))\n\ndef getMostRecentDate(emails):\n dates = [getDate(e) for e in emails]\n dates.sort(reverse=True)\n if dates:\n return dates[0]\n return None\n\ndef fetchUnreadEmails(profile, since=None, markRead=False, limit=None):\n conn = imaplib.IMAP4_SSL('imap.gmail.com')\n conn.debug = 0\n conn.login(profile['gmail_address'], profile['gmail_password'])\n conn.select(readonly=(not markRead))\n\n msgs = []\n (retcode, messages) = conn.search(None, '(UNSEEN)')\n\n if retcode == 'OK' and messages != ['']:\n numUnread = len(messages[0].split(' '))\n if limit and numUnread > limit:\n return numUnread\n\n for num in messages[0].split(' '):\n ret, data = conn.fetch(num, '(RFC822)')\n msg = email.message_from_string(data[0][1])\n\n if not since or getDate(msg) > since:\n msgs.append(msg)\n\n conn.close()\n conn.logout()\n\n return msgs\n\ndef isValid(text):\n if text['result']['action'] == action:\n return True\n\n return False\n\ndef build_JSON(resp, code):\n mes = {}\n mes['id'] = \"self-made\"\n mes['timestamp'] = str(datetime.datetime.utcnow().isoformat('T')) + 'Z'\n mes['result'] = {}\n mes['result']['source'] = \"self\"\n mes['result']['resolvedQuery'] = resp\n mes['status'] = {}\n mes['status']['code'] = code\n mes['status']['errorType'] = \"success\" if code==200 else \"failure\"\n\n return mes\n\ndef handle(text, speaker, profile):\n try:\n msgs = fetchUnreadEmails(profile, limit=5)\n\n if isinstance(msgs, int):\n resp = \"You have %d unread emails.\" % msgs\n speaker.say(resp.split())\n return\n\n senders = [getSender(e) for e in msgs]\n except imaplib.IMAP4.error:\n resp = \"I'm sorry. 
I'm not authenticated to work with your Gmail.\"\n speaker.say(resp)\n return build_JSON(resp, 404)\n\n if not senders:\n resp = \"You have no unread emails.\"\n elif len(senders) == 1:\n resp = \"You have one unread email from \" + senders[0] + \".\"\n else:\n resp = \"You have %d unread emails\" % len(senders)\n unique_senders = list(set(senders))\n if len(unique_senders) > 1:\n unique_senders[-1] = 'and ' + unique_senders[-1]\n resp += \". Senders include: \"\n resp += '...'.join(senders)\n else:\n resp += \" from \" + unique_senders[0]\n\n speaker.say(resp)\n return build_JSON(resp, 200)\n\n","repo_name":"saarthaks/just-another-AI","sub_path":"current_mac_version/modules/Gmail.py","file_name":"Gmail.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11210909946","text":"import string\n\nfile = \"HH20160905 Betelgeuse II - 5-10 - Play Money No Limit Hold'em.txt\"\npath = ''#r\"/Users/MZ/Library/Application Support/PokerStartsUK/HandHistory/B@TM0N/\"\nfilepath = path+file\n\n\ntext = []\n\nwith open(filepath) as inputfile:\n for line in inputfile:\n text.append(line)\n\n\n#print(len(text[1]))\n\nfor i in range(len(text)):\n if 'PokerStars Hand' in text[i]:\n ## hand initiator\n print('Hand: ',text[i][17:30],' at pos:' ,i )\n if 'Seat #' in text[i]:\n print(\"dealer is seat:\" ,text[i][text[i].find('#')+1])\n if ': B@TM0N' in text[i] and 'chips' in text[i]:\n print('B@TM0N seat: ',text[i][text[i].find(': B@TM0N')-1])\n print('B@TM0N has ', int(text[i][text[i].find('(')+1:text[i].find('in chips')]),' chips')\n if ' big blind ' in text[i]:\n print('big blind is: ',text[i][text[i].find('big blind')+9:])\n\n\n\n\n\n#xes\n#No. Players\n#MyPosition\n#stack size\n#pot size\n#Hand Dealt\n #rank of hand dealt\n#blind size to scale in terms of blinds\n#pot size\n#pre flop/ post-flop/ post turn/\n#sum calls\n# sum of raises\n\n\n#y = (pot won - future in)/","repo_name":"mzed86/PokerBot","sub_path":"pokergame.py","file_name":"pokergame.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23414393468","text":"\r\nimport numpy as np\r\nimport os\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nfrom keras.optimizers import Adam\r\nfrom keras.datasets import mnist\r\nfrom keras.models import Sequential, Model\r\nfrom keras.layers import Input\r\n\r\nif not os.path.isdir('gen_images/'):\r\n\tos.mkdir('gen_images/')\r\n\r\ndef discriminator_model(optimizer, img_rows, img_cols, channels):\r\n\r\n\tmodel = Sequential()\r\n\t###YOUR CODE HERE###\r\n\t# Architecture & Compile\r\n\r\n\treturn model\r\n\r\ndef generator_model(optimizer, img_rows, img_cols, channels):\r\n\r\n\tmodel = Sequential()\r\n\t###YOUR CODE HERE###\r\n\t# Architecture & Compile\r\n\r\n\treturn model\r\n\r\ndef gan_model(img_rows, img_cols, channels):\r\n\t\r\n\t# Adam Optimizer\r\n\tlearning_rate = 0.001\r\n\tbeta = 0.5\r\n\toptimizer = Adam(learning_rate, beta)\r\n\r\n\t# Discriminator & Generator Models\r\n\tdiscriminator = discriminator_model(optimizer, img_rows, img_cols, channels)\r\n\tgenerator = generator_model(optimizer, img_rows, img_cols, channels)\r\n\r\n\tdiscriminator.trainable = False\r\n\r\n\t# GAN Model\r\n\timage_input = Input(shape=(100,))\r\n\tgan = generator(image_input)\r\n\tgan = discriminator(gan)\r\n\tgan = Model(image_input, 
gan)\r\n\tgan.compile(loss='binary_crossentropy', optimizer=optimizer)\r\n\t\r\n\t# Return Models\r\n\treturn generator, discriminator, gan\r\n\r\ndef save_imgs(generator, epoch):\r\n\trow, column = 5, 5\r\n\tnoise = np.random.normal(0, 1, (row * column, 100))\r\n\tgen_imgs = generator.predict(noise)\r\n\r\n\tgen_imgs = 0.5 * gen_imgs + 0.5\r\n\r\n\tfig, axs = plt.subplots(row, column)\r\n\tcnt = 0\r\n\tfor i in range(row):\r\n\t\tfor j in range(column):\r\n\t\t\taxs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')\r\n\t\t\taxs[i,j].axis('off')\r\n\t\t\tcnt += 1\r\n\tfig.savefig(\"gen_images/epoch_%d.png\" % epoch)\r\n\tplt.close()\r\n\r\ndef train(generator, discriminator, gan, epochs, batch_size=128, save_interval=50):\r\n\t\r\n\t# Load the dataset\r\n\t(X_train, _), (_, _) = mnist.load_data()\r\n\r\n\t# Rescale -1 to 1\r\n\tX_train = (X_train.astype(np.float32) - 127.5) / 127.5\r\n\tX_train = np.expand_dims(X_train, axis=3)\r\n\r\n\thalf_batch = int(batch_size / 2)\r\n\r\n\tfor epoch in range(epochs):\r\n\t idx = np.random.randint(0, X_train.shape[0], half_batch)\r\n\t imgs = X_train[idx]\r\n\r\n\t noise = np.random.normal(0, 1, (half_batch, 100))\r\n\t gen_imgs = generator.predict(noise)\r\n\r\n\t # Train the Discriminator\r\n\t discriminator_loss_real = discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))\r\n\t discriminator_loss_fake = discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))\r\n\t discriminator_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_fake)\r\n\r\n\t # Train Generator\r\n\t noise = np.random.normal(0, 1, (batch_size, 100))\r\n\r\n\t valid_y = np.array([1] * batch_size)\r\n\t generator_loss = gan.train_on_batch(noise, valid_y)\r\n\r\n\t print (\"%d [Discriminator loss: %f, acc.: %.2f%%] [Generator loss: %f]\" % (epoch, discriminator_loss[0], 100*discriminator_loss[1], generator_loss))\r\n\r\n\t if epoch % save_interval == 0:\r\n\t save_imgs(generator, epoch)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\timg_rows, img_cols, channels = 28, 28, 1\r\n\t\r\n\tepochs = 10000\r\n\tbatch_size = 32\r\n\tsave_interval = 1000\r\n\t\r\n\tgenerator, discriminator, gan = gan_model(img_rows, img_cols, channels)\r\n\t\r\n\ttrain(generator, discriminator, gan, epochs+1, batch_size, save_interval)\r\n","repo_name":"amirh-bakhtiari/MNIST-GAN","sub_path":"MNIST-GAN.py","file_name":"MNIST-GAN.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15183187152","text":"import shapefile\nimport math\nimport copy\nimport random\nimport sys\nimport os\n\nfrom flask import render_template\n# from flask import current_app as app\nfrom flask.views import MethodView\nfrom flask import Flask, make_response\nimport numpy as np\n\nimport sys\nsys.path.append(\"..\")\n\nfrom sql_dal.township_influence import township_influence_townships, township_influence_neighbours\nfrom ..core.session import db_session\nfrom datetime import date\nfrom ..core.model import DataConsistency, Township, TownshipReproductionRateCache\nfrom sqlalchemy import and_\n\nclass TownshipMap:\n def __init__(self, code, name, points):\n self.code = code\n self.name = name\n\n self.x_min = math.inf\n self.y_min = math.inf\n\n self.x_max = -math.inf\n self.y_max = -math.inf\n\n self.x_middle = 0\n self.y_middle = 0\n\n self.points = []\n self.neighbours_inf = dict()\n\n self.months = dict()\n\n for i in range(0, len(points), 50):\n x = round(points[i][0] / 300)\n y = round((points[i][1] * (-1)) / 
500)\n\n if x < self.x_min:\n self.x_min = x\n\n if y < self.y_min:\n self.y_min = y\n\n if x > self.x_max:\n self.x_max = x\n\n if y > self.y_max:\n self.y_max = y\n\n self.x_middle = self.x_middle + x\n self.y_middle = self.y_middle + y\n\n self.points.append((x, y))\n\n self.x_middle = self.x_middle / len(self.points)\n self.y_middle = self.y_middle / len(self.points)\n\n def move(self, x_move, y_move):\n self.x_min = self.x_min + x_move\n self.y_min = self.y_min + y_move \n \n self.x_max = self.x_max + x_move\n self.y_max = self.y_max + y_move \n\n self.x_middle = self.x_middle + x_move\n self.y_middle = self.y_middle + y_move \n\n for i, point in enumerate(self.points):\n self.points[i] = (point[0] + x_move, point[1] + y_move)\n\n def get_path(self):\n return \" \".join(f\"{point[0]},{point[1]}\" for point in self.points)\n\ndef get_map(month_from='2020-01-01', month_to='2020-12-01'):\n if type(month_from) is str:\n month_from = date.fromisoformat(month_from)\n elif type(month_from) is not date:\n raise Exception('invalid date')\n\n if type(month_to) is str:\n month_to = date.fromisoformat(month_to)\n elif type(month_to) is not date:\n raise Exception('invalid date')\n\n with shapefile.Reader(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'data', 'SPH_OKRES'), encoding='windows-1250') as shp:\n townships = [TownshipMap(record[2], record[3], shape.points) for record, shape in zip(shp.records(), shp.shapes())]\n \n min_x = min(ts.x_min for ts in townships)\n min_y = min(ts.y_min for ts in townships)\n \n for ts in townships:\n ts.move((-min_x) + 20, (-min_y) + 20)\n\n max_x = max(ts.x_max for ts in townships)\n max_y = max(ts.y_max for ts in townships)\n \n def _get_defs(i):\n return \"\"\"\n \n \n \n \n \n \"\"\"\n\n townships = { ts.code : ts for ts in townships }\n\n def _val_to_color(val, step_size):\n colours = ['white', '#fff7ec', '#fee8c8', '#fdd49e', '#fdbb84',\n '#fc8d59', '#ef6548', '#d7301f', '#b30000', '#7f0000']\n i = 1\n while i <= len(colours):\n if val < step_size * i:\n return colours[i-1]\n i += 1\n return colours[len(colours) - 1]\n\n with db_session() as db:\n cached_ts = db.query(TownshipReproductionRateCache).filter(and_(TownshipReproductionRateCache.month >= month_from, TownshipReproductionRateCache.month <= month_to)).all()\n\n for cache_ts in cached_ts:\n townships[cache_ts.code].months[cache_ts.month.month] = cache_ts\n\n neighbours_infl = township_influence_neighbours()\n\n svg_townships = {i : [f'', _get_defs(i)] for i in range(month_from.month, month_to.month + 1)}\n\n for ts in townships.values():\n path = ts.get_path()\n \n for key, cached in ts.months.items():\n repr_rate_cached = cached.reproduction_rate\n\n svg_townships[key].append(f'')\n svg_townships[key].append(f'{ts.name}') \n\n def _get_infl_arrow(ts, nb):\n same = list(set(ts.points) & set(nb.points))\n\n if len(same) == 0:\n tx = (nb.x_middle - ts.x_middle) / 3\n ty = (nb.y_middle - ts.y_middle) / 3\n\n x1 = ts.x_middle\n y1 = ts.y_middle\n x2 = ts.x_middle + tx\n y2 = ts.y_middle + ty\n else:\n same.sort(key=lambda x: math.sqrt(x[0]**2 + x[1]**2))\n\n med = np.median(same, axis=0)\n\n tx = round((med[0] - ts.x_middle) / 1.25)\n ty = round((med[1] - ts.y_middle) / 1.25)\n\n x1 = ts.x_middle\n y1 = ts.y_middle\n x2 = ts.x_middle + tx\n y2 = ts.y_middle + ty\n\n return f' ts2_cached.reproduction_rate:\n if t1_t2 is None:\n t1_t2 = _get_infl_arrow(ts1, ts2)\n\n svg.append(t1_t2 + f'{key})\" />')\n elif ts1_cached.reproduction_rate < ts2_cached.reproduction_rate:\n if t2_t1 is None:\n 
t2_t1 = _get_infl_arrow(ts2, ts1)\n\n svg.append(t2_t1 + f'{key})\" />')\n \n all_svg = []\n \n for svg in svg_townships.values():\n svg.append('')\n all_svg.extend(svg)\n\n return \"\\n\".join(all_svg)","repo_name":"TheEzo/UPA","sub_path":"src/web/views/second_query_map.py","file_name":"second_query_map.py","file_ext":"py","file_size_in_byte":6895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35082163557","text":"import io\nimport os\nfrom time import sleep\n\nf = io.open('my_data', 'w+')\npid = os.fork()\nif pid:\n print(\"I'm the master: sending data\")\n f.write(\"hello\")\nelse:\n print(\"I'm the slave: waiting for data\")\n sleep(1)\n f.seek(0)\n print(\"Received\", f.read())\n","repo_name":"waveform80/presentations","sub_path":"ipc/fork2.py","file_name":"fork2.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13920573781","text":"import torch\nfrom torch import nn\n\nfrom mmdeploy.core import MODULE_REWRITER\n\n\n@MODULE_REWRITER.register_rewrite_module(\n 'mmocr.models.common.modules.PositionalEncoding', backend='default')\nclass PositionalEncoding(nn.Module):\n \"\"\"Rewrite Position Encoding module in `ABINet.\"\"\"\n\n def __init__(self, module, deploy_cfg, **kwargs):\n super(PositionalEncoding, self).__init__()\n self._module = module\n self.deploy_cfg = deploy_cfg\n self.n_position = module.position_table.size(1)\n self.d_hid = module.position_table.size(2)\n\n def _get_sinusoid_encoding_table(self, n_position, d_hid, device):\n \"\"\"Sinusoid position encoding table.\"\"\"\n denominator = torch.Tensor([\n 1.0 / torch.tensor(10000).to(device).pow(\n torch.tensor(2 * (hid_j // 2) / d_hid)).to(device)\n for hid_j in range(d_hid)\n ]).to(device)\n denominator = denominator.view(1, -1)\n pos_tensor = torch.arange(n_position).to(device).unsqueeze(-1).float()\n sinusoid_table = pos_tensor * denominator\n sinusoid_table[:, 0::2] = torch.sin(sinusoid_table[:, 0::2])\n sinusoid_table[:, 1::2] = torch.cos(sinusoid_table[:, 1::2])\n\n return sinusoid_table.unsqueeze(0)\n\n def forward(self, x):\n \"\"\"\n Args:\n x (Tensor): Tensor of shape (batch_size, pos_len, d_hid, ...)\n \"\"\"\n device = x.device\n position_table = self._get_sinusoid_encoding_table(\n self.n_position, self.d_hid, device)\n x = x + position_table[:, :x.size(1), ...]\n return x\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/codebase/mmocr/models/text_recognition/transformer_module.py","file_name":"transformer_module.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"33945520264","text":"from random import randrange\nfrom datetime import date\nimport os, base64\nimport json\n\nfrom __init__ import app, db\nfrom sqlalchemy.exc import IntegrityError\n\n\nclass Facts(db.Model):\n __tablename__ = 'facts' # table industry is plural, class industry is singular\n\n # Define the User schema with \"vars\" from object\n id = db.Column(db.Integer, primary_key=True)\n _car = db.Column(db.String(255), unique=False, nullable=False)\n _industry = db.Column(db.String(255), unique=False, nullable=False)\n \n # Defines a relationship between User record and Notes table, one-to-many (one user to many notes)\n # constructor of a User object, initializes the instance variables within object (self)\n def __init__(self, car, industry):\n\n self._industry = 
industry # variables with self prefix become part of the object, \n self._car = car\n\n @property\n def industry(self):\n return self._industry\n \n # a setter function, allows industry to be updated after initial object creation\n @industry.setter\n def industry(self, industry):\n self._industry = industry\n\n \n\n @property\n def car(self):\n return self._car\n \n # a setter function, allows industry to be updated after initial object creation\n @car.setter\n def car(self, car):\n self._car = car\n\n def __str__(self):\n return json.dumps(self.read())\n\n def create(self):\n try:\n # creates a person object from User(db.Model) class, passes initializers\n db.session.add(self) # add prepares to persist person object to Users table\n db.session.commit() # SqlAlchemy \"unit of work pattern\" requires a manual commit\n return self\n except IntegrityError:\n db.session.remove()\n return None\n\n # CRUD read converts self to dictionary\n # returns dictionary\n def read(self):\n return {\n \"id\": self.id,\n \"industry\": self.industry,\n \"car\": self.car,\n \n }\n\n # CRUD update: updates user industry, knew, phone\n # returns self\n def update(self, industry=\"\", car=\"\"):\n \"\"\"only updates values with length\"\"\"\n if len(industry) > 0:\n self.industry = industry\n if len(car) > 0:\n self.car = car\n db.session.commit()\n return self\n\n # CRUD delete: remove self\n # None\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n return None\n\"\"\"CRUD DONE\"\"\"\n\ndef initFacts():\n\n \"\"\"Builds sample user/note(s) data\"\"\"\n with app.app_context():\n \"\"\"Create database and tables\"\"\"\n db.init_app(app)\n db.create_all()\n \"\"\"Tester data for table\"\"\"\n u1 = Facts( industry='Nearly half the EVs in the world are in China.', car='Tesla was originally named after Nikola Tesla, the inventor of alternating current.', )\n u2 = Facts( industry='Roughly 96 percent of EV owners would buy or lease another one', car='Tesla built its Gigafactory 3 in China to produce the Tesla Model 3 and Tesla Model Y for the Chinese market.', )\n u3 = Facts( industry='EVs are more efficient. Up to 80 percent of the battery energy powers the vehicle, compared to 14% to 26 percent of the energy from a gasoline-powered car.', car='Nio has innovative battery swap solutions for charging your EV.', )\n u4 = Facts( industry='Hybrid-Electric Vehicles (HEVs): HEVs combine a gas-powered engine with one (or more) electric motors. 
An HEV does not plug in; it collects energy through regenerative braking', car='Company name Rivian is inspired from CEO RJ Scaringes time growing up in Florida', )\n u5 = Facts( industry='Battery Electric Vehicles (BEVs): Also known as an all-electric car, it needs to be plugged in to recharge', car='Lucid air manufacturer Lucid motors was previously called Atieva', )\n \n facts = [u1, u2, u3, u4, u5]\n\n \"\"\"Builds sample user/note(s) data\"\"\"\n for fact in facts:\n try:\n '''add a few 1 to 4 notes per user'''\n fact.create()\n except IntegrityError:\n '''fails with bad or duplicate data'''\n db.session.remove()\n print(f\"Records exist, duplicate email, or error:\")","repo_name":"Firestorm0986/myflask_portfolio","sub_path":"model/facts.py","file_name":"facts.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11386257597","text":"from libs.pubsub import get_ps_1\nimport matplotlib.pyplot as plt\nfrom custom_package import candle\nfrom libs.instruments import get_instruments\n# from libs.calculation import Calculation\nfrom libs.calculationfft import Calculation\n\nr = get_ps_1()\n\ninsts = get_instruments()\n'TODO: multiple charts for each instruments'\ninsts = [{'symbol': 'SUNPHARMA'}]\ntfs = [5, 15]\n\n# ohlc_channels = [f'OHLC-{x[\"symbol\"]}-{tf}' for tf in tfs\n# for x in insts]\n\n'TODO: need to change'\nsunpharma_ohlc_channel = 'INSTRUMENT-SUNPHARMA-OHLC-LTF'\nsunpharma_calc_channel = 'CALC-SUNPHARMA-LTF'\nplt.ion()\nplt.suptitle(sunpharma_ohlc_channel)\nplt.show(block=False)\nax1 = plt.subplot(2, 1, 1)\nax2 = plt.subplot(2, 1, 2)\n'TODO register datetime converter explicitly'\n'TODO plot the initial data (set initial data from instruments)'\n\n\nclass Monitor:\n def __init__(self) -> None:\n self.ohlc_ltf = None\n self.ohlc_htf = None\n self.sups = None\n self.ress = None\n self.shistory = []\n self.rhistory = []\n self.psup = None\n self.pres = None\n\n def on_data(self, channel, data):\n if(channel == sunpharma_ohlc_channel):\n if(data['tftype'] == 'HTF'):\n self.ohlc_htf = data['ohlcs']\n if(data['tftype'] == 'LTF'):\n self.ohlc_ltf = data['ohlcs']\n if(channel == sunpharma_calc_channel):\n if(data['tftype'] == 'LTF'):\n # print(data.keys())\n # print('history', len(self.shistory), len(self.rhistory))\n calc: Calculation = data['calc']['LTF']\n self.shistory = calc.shistory\n self.rhistory = calc.rhistory\n # (self.shistory, self.rhistory) = data['srhistory']\n self.psup, self.pres = calc.psup, calc.pres\n\n self.plot()\n\n def plot(self):\n plt.cla()\n if(self.ohlc_htf is not None):\n candle.timeplot(ax1, self.ohlc_htf)\n if(self.ohlc_ltf is not None): \n candle.timeplot(ax2, self.ohlc_ltf)\n xsup = [x[0] for x in self.shistory]\n sups = [x[1] for x in self.shistory]\n ax2.plot(xsup, sups, color='#0000ff', linestyle='--', linewidth=1)\n xres = [x[0] for x in self.rhistory]\n ress = [x[1] for x in self.rhistory]\n ax2.plot(xres, ress, color='#ff0000', linestyle='--', linewidth=1)\n if(self.psup and self.ohlc_ltf is not None): \n csups = [self.psup(x+1) for x in range(-len(self.ohlc_ltf), 0)]\n print('sr check')\n print(self.shistory[-2:])\n print(self.ohlc_ltf.index[-2:], csups[-2:])\n ax2.plot(self.ohlc_ltf.index, csups, color='#00ff00')\n if(self.pres and self.ohlc_ltf is not None):\n cress = [self.pres(x+1) for x in range(-len(self.ohlc_ltf), 0)]\n ax2.plot(self.ohlc_ltf.index, cress, color='#9d0000')\n plt.gcf().canvas.draw_idle()\n 
plt.gcf().canvas.start_event_loop(0.001)\n\n\nif(__name__ == '__main__'):\n m = Monitor()\n 'sunpharma_ohlc_channel data will be in sunpharma_calc_channel also'\n r.subscribe([sunpharma_ohlc_channel, sunpharma_calc_channel], m.on_data)\n print('done')\n","repo_name":"ronscoder/systrade","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10085903418","text":"from config import get_dataset_config as config\nimport os\nimport glob\nfrom zipfile import ZipFile\nimport re\nimport shutil\n\n# Retrieve datasets\ndatasets_path = config.DATASETS_PATH\n\ndatasets_list = glob.glob(os.path.sep.join([datasets_path, \"*.zip\"]))\n\n# Unzip datasets in directory\nfor dataset in datasets_list:\n # Read zipped directory\n zip_data = ZipFile(dataset, 'r')\n\n # Get directory name for extraction\n dataset_dir = dataset.split('.zip')[0]\n\n # Extract files\n zip_data.extractall(dataset_dir)\n zip_data.close()\n\n # If there is only one folder, move everything one folder up\n if len(os.listdir(dataset_dir)) == 1:\n # Read folders\n paths = []\n for (root, folder, files) in os.walk(dataset_dir):\n paths.append(root)\n # Move files\n for path in paths:\n # Create new folder\n new_path = path.replace('/' + os.path.basename(dataset_dir), '')\n os.makedirs(new_path, exist_ok=True)\n # Move files\n for files in glob.glob(path + \"/*.*\"):\n if len(files) > 0:\n for file in [files]:\n shutil.move(file,\n file.replace('/' + os.path.basename(dataset_dir), ''))\n # Drop previous tree\n shutil.rmtree(dataset_dir)\n\n # Remove the zip file\n os.remove(dataset)\n","repo_name":"avaimar/INEGI_Revolution","sub_path":"02_Scripts/01_Process_data/01_Unzip_files.py","file_name":"01_Unzip_files.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15645676115","text":"import nose.tools\n\nimport cb.util.free_src as fs\n\n\n\nclass FreeSrc_test(object):\n def overflow_test(self):\n nose.tools.assert_raises(fs.InsufficientNetAddresses, lambda: fs.FreeSrc('10.0.128.0/30', 1000000))\n assert(fs.FreeSrc('10.0.128.0/30', 30000))\n \n def itotuple_test(self):\n c = fs.FreeSrc('10.0.128.0/24', 100000)\n (ip,port) = c.itotuple(0)\n assert((ip,port) == ('10.0.128.1', 1025))\n \n (ip,port) = c.itotuple(1) \n assert((ip,port) == ('10.0.128.1', 1026))\n \n (ip,port) = c.itotuple(len(c.ports))\n assert((ip,port) == ('10.0.128.2', 1025))\n\n (ip,port) = c.itotuple(len(c.ports)-1)\n assert((ip,port) == ('10.0.128.1', 65535))\n \n def tupletoi_test(self):\n c = fs.FreeSrc('10.0.128.0/24', 100000)\n assert(c.tupletoi(('10.0.128.1', 1025)) == 0) \n assert(c.tupletoi(('10.0.128.1', 1026)) == 1) \n assert(c.tupletoi(('10.0.128.2', 1025)) == len(c.ports)) \n assert(c.tupletoi(('10.0.128.1', 65535)) == len(c.ports)-1)\n \n assert(c.tupletoi(c.itotuple(12314)) == 12314)\n \n def alloc_src_test(self):\n c = fs.FreeSrc('10.0.128.0/24', 100000)\n bef = len(c.free_list)\n (ip1,port1) = c.alloc_src()\n (ip2,port2) = c.alloc_src()\n aft = len(c.free_list)\n assert(bef == aft + 2)\n \n \n def free_src_test(self):\n c = fs.FreeSrc('10.0.128.0/24', 100000)\n bef = len(c.free_list)\n (ip1,port1) = c.alloc_src()\n (ip2,port2) = c.alloc_src()\n c.free_src((ip1,port1))\n c.free_src((ip2,port2))\n \n aft = len(c.free_list)\n assert(bef == 
aft)\n","repo_name":"DanielEllard/curveball","sub_path":"src/python/cb/util/tests/free_src_test.py","file_name":"free_src_test.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26551487571","text":"import sqlite3\nimport time \nfrom gpiozero import LED\n\n# Using the predefined class LED instead of Output Device as it requires less defining\npump = LED(17)\n\ntimes = {\n 5 : '5:00',\n 6 : '6:00',\n 7 : '7:00',\n 8 : '8:00'\n}\n\ndays = {\n 0 : \"Monday\",\n 1 : \"Tuesday\",\n 2 : \"Wednesday\",\n 3 : \"Thursday\",\n 4 : \"Friday\",\n 5 : \"Saturday\",\n 6 : \"Sunday\" \n}\n\ndef get_db_connection():\n conn = sqlite3.connect('waterbot.db')\n conn.row_factory = sqlite3.Row\n return conn\n\ndef load_db():\n conn = get_db_connection()\n schedule = conn.execute('SELECT * FROM schedule').fetchall()\n conn.close()\n return schedule\n\ndef select_day(current_day,sched):\n '''Iterates through sched then returns the row matching the current day'''\n for i in range(7):\n if days[current_day] == sched[i][1]:\n return sched[i]\n\ndef hoursleep():\n '''Sleeps until the next hour kicks over'''\n now = time.localtime()\n time.sleep((59-now[4])*60+(60-now[5]))\n return\n\ndef monitor():\n while True:\n schedule = load_db()\n now = time.localtime()\n today = select_day(now[6],schedule)\n if today[2] == \"YES\":\n try:\n if today[3] == times[now[3]]:\n pump.on()\n time.sleep(today[4]*60)\n pump.off()\n except:\n pass\n hoursleep()\n\nif __name__=='__main__':\n pump.off()\n monitor()\n","repo_name":"surroundsound5000/waterbot","sub_path":"water.py","file_name":"water.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32396820013","text":"from attacks import Attack, AttackInfo, AttackOptions\n\n\nclass EmailURLAttackOptions(AttackOptions):\n name = \"Recipient name\"\n addr = \"Recipient email address\"\n lhost = \"Reverse HTTP target host or IP address\"\n lport = \"Reverse HTTP target port\"\n\n def _set_defaults(self):\n self.name = \"Jane Doe\"\n self.addr = \"client1@localdomain\"\n self.lhost = \"172.18.0.3\"\n self.lport = \"80\"\n\n\nclass EmailURLAttack(Attack):\n info = AttackInfo(\n name=\"infect_email_url\",\n description=\"Sends an email containing an infected URL\")\n options_class = EmailURLAttackOptions\n\n def run(self):\n with self.check_printed(indicator=\"Email was sent successfully\"):\n self.exec_command_on_target(self._sendemail_command(self._email_body()))\n\n def _sendemail_command(self, message):\n return \" \".join([\n \"sendemail\",\n \"-f attacker@localdomain\",\n \"-t {addr}\".format(addr=self.options.addr),\n \"-s 172.18.0.2\",\n \"-u 'Frozen User Account'\",\n \"-m '{msg}'\".format(msg=message),\n \"-o tls=no\",\n \"-o message-content-type=html\",\n \"-o message-charset=UTF-8\",\n \"\"])\n\n def _email_body(self):\n return (\n \"
Dear {name}, our Technical Support Team unfortunately had to freeze your bank account. Please download and read the file provided in the attachment for further information. We apologize for the inconvenience caused, and we are really grateful for your collaboration. This is an automated e-mail. Please do not respond. For further information please visit the following Link: Bank Of Scotland FAQ
\"\n \"

© 2017 bankofscotland.co.uk. All Rights Reserved.

\"\n \"\".format(name=self.options.name))\n","repo_name":"L015H4CK/socbed-acsac-2021","sub_path":"src/attacks/attack_email_url.py","file_name":"attack_email_url.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72767321769","text":"import pyglet\nimport pyglet.gl\nimport pyglet.window as pw\nimport math\nimport random\n\nclass graphicsWindow(pyglet.window.Window):\n def __init__(self):\n self.dimensions = [640, 480]\n super(graphicsWindow, self).__init__(width=self.dimensions[0], height=self.dimensions[1], vsync=True) # constructor for graphicsWindow class\n self.step = 20\n self.length = 10\n self.vertices = [self.step, self.step, self.step*(1+self.length), self.step]\n self.wrap = False\n self.direction = \"R\"\n self.thickness = 10\n self.food = self.generate_food();\n self._called = 0\n self.game_over = False\n\n #need arr of len 4\n def determine_direction(self, arr):\n if(arr[0] < arr[2]): return \"R\"\n elif(arr[0] > arr[2]): return \"L\"\n elif(arr[1] < arr[3]): return \"U\"\n elif(arr[1] > arr[3]): return \"D\"\n\n def wrap_snake(self):\n for i in range(len(self.vertices)):\n if(i % 2 == 0):\n if(self.vertices[i] > self.dimensions[0]):\n self.vertices[i] -= self.dimensions[0]\n elif(self.vertices[i] < 0):\n self.vertices[i] += self.dimensions[0]\n else:\n if(self.vertices[i] > self.dimensions[1]):\n self.vertices[i] -= self.dimensions[1]\n elif(self.vertices[i] < 0):\n self.vertices[i] += self.dimensions[1]\n\n def generate_food(self):\n while(True):\n rand_x = random.randrange(self.step, self.dimensions[0]-self.step, self.step)\n rand_y = random.randrange(self.step, self.dimensions[1]-self.step, self.step)\n if(not self.in_snake([rand_x, rand_y])):\n break\n return [rand_x, rand_y]\n\n def check_bounds(self):\n self.game_over = self.in_snake(self.vertices[-2:])\n if(not self.game_over):\n for i in range(len(self.vertices)):\n if(i % 2 == 0):\n if(self.vertices[i] > self.dimensions[0] - self.step or self.vertices[i] < self.step):\n self.game_over = True\n elif(self.vertices[i] > self.dimensions[1] - self.step or self.vertices[i] < self.step):\n self.game_over = True\n\n #arr of dim 2\n def in_snake(self, arr):\n for i in range(2, len(self.vertices), 2):\n x2 = self.vertices[i]\n x1 = self.vertices[i-2]\n y2 = self.vertices[i+1]\n y1 = self.vertices[i-1]\n diff_x = x2 - x1\n diff_y = y2 - y1\n if((diff_x == 0 and abs(arr[0]-x1) < self.thickness and arr[1] > min(y2, y1) and arr[1] < max(y2, y1)) or\n (diff_y == 0 and abs(arr[1]-y1) < self.thickness and arr[0] > min(x2, x1) and arr[0] < max(x2, x1))\n and (arr != [x1, y1] and arr != [x2, y2])):\n return True\n return False\n\n def update_tail(self):\n dir_old = self.determine_direction(self.vertices[:4])\n if(dir_old == \"L\"):\n self.vertices[0] -= self.step\n if(dir_old == \"R\"):\n self.vertices[0] += self.step\n if(dir_old == \"D\"):\n self.vertices[1] -= self.step\n if(dir_old == \"U\"):\n self.vertices[1] += self.step\n if (self.vertices[:2] == self.vertices[2:4]):\n self.vertices = self.vertices[2:]\n\n def update_head(self):\n dir_new = self.determine_direction(self.vertices[-4:])\n if(dir_new == \"L\"):\n self.vertices[-2] -= self.step\n if(dir_new == \"R\"):\n self.vertices[-2] += self.step\n if(dir_new == \"D\"):\n self.vertices[-1] -= self.step\n if(dir_new == \"U\"):\n self.vertices[-1] += self.step\n\n def on_key_press(self, symbol, modifiers):\n if(self.game_over):\n if(symbol == pw.key.ENTER):\n self.__init__()\n 
elif(symbol == pw.key.ESCAPE):\n pyglet.app.exit()\n else:\n if(symbol == pw.key.DOWN and self.direction not in [\"U\", \"D\"]):\n self.vertices += [self.vertices[-2], self.vertices[-1] - self.step]\n self.check_bounds()\n self.update_tail()\n self.direction = \"D\"\n elif (symbol == pw.key.UP and self.direction not in [\"U\", \"D\"]):\n self.vertices += [self.vertices[-2], self.vertices[-1] + self.step]\n self.check_bounds()\n self.update_tail()\n self.direction = \"U\"\n elif (symbol == pw.key.LEFT and self.direction not in [\"R\", \"L\"]):\n self.vertices += [self.vertices[-2] - self.step, self.vertices[-1]]\n self.check_bounds()\n self.update_tail()\n self.direction = \"L\"\n elif (symbol == pw.key.RIGHT and self.direction not in [\"R\", \"L\"]):\n self.vertices += [self.vertices[-2] + self.step, self.vertices[-1]]\n self.check_bounds()\n self.update_tail()\n self.direction = \"R\"\n\n def on_draw(self):\n # clear the graphics buffer\n pyglet.gl.glClear(pyglet.gl.GL_COLOR_BUFFER_BIT)\n\n if(not self.game_over):\n # convert the vertices list to pyGlet vertices format\n vertexList = pyglet.graphics.vertex_list(len(self.vertices) // 2, ('v2f', self.vertices))\n pointList = pyglet.graphics.vertex_list(1, ('v2f', self.food))\n head = pyglet.graphics.vertex_list(1, ('v2f', self.vertices[-2:]))\n score = pyglet.text.Label('Logans Eaten: %d' % (self.length - 10),\n font_name='Arial',\n font_size=13,\n x=0, y = self.dimensions[1],\n anchor_x='left', anchor_y='top')\n score.draw()\n pyglet.gl.glLineWidth(self.thickness)\n pyglet.gl.glPointSize(self.thickness)\n # now use pyGlet commands to draw lines between the vertices\n pyglet.gl.glColor3f(0, 1, 0) # specify colors\n vertexList.draw(pyglet.gl.GL_LINE_STRIP) # draw\n pyglet.gl.glColor3f(1, 0, 0)\n pointList.draw(pyglet.gl.GL_POINTS)\n pyglet.gl.glColor3f(1, 1, 1)\n head.draw(pyglet.gl.GL_POINTS)\n pic = pyglet.image.load('david-logan-posing-left.png')\n pic.anchor_x = pic.width // 2\n pic.anchor_y = pic.height // 2\n pic.blit(self.food[0], self.food[1])\n else:\n label = pyglet.text.Label('GAME OVER',\n font_name='Arial',\n font_size=36,\n x=self.dimensions[0] // 2, y = 2 * self.dimensions[1] // 3,\n anchor_x='center', anchor_y='center')\n prompt = pyglet.text.Label('Press ENTER to eat more Logans, ESC if this game is too trivial',\n font_name='Arial',\n font_size=12,\n x=self.dimensions[0] // 2, y=self.dimensions[1] // 3,\n anchor_x='center', anchor_y='center')\n score = pyglet.text.Label(\"You renormalised %d Logans. 
Well done!\" % (self.length - 10),\n font_name='Arial',\n font_size=12,\n x=self.dimensions[0] // 2, y=self.dimensions[1] // 2,\n anchor_x='center', anchor_y='center')\n self.clear()\n label.draw()\n prompt.draw()\n score.draw()\n #pyglet.app.exit()\n\n def update(self, dt):\n self._called += 1\n if(self.length >= 30 or self._called % (30 - self.length) == 0):\n self.called = 0;\n if(tuple(self.food) in zip(*[iter(self.vertices)]*2)):\n self.update_head()\n self.food = self.generate_food()\n self.length += 1\n else:\n self.update_tail()\n self.update_head()\n if(self.wrap):\n self.wrap_snake()\n if(not self.game_over):\n self.check_bounds()\n self.on_draw()\n\n# this is the main game engine loop\nif __name__ == '__main__':\n snake = graphicsWindow() # initialize a window class\n pyglet.clock.schedule_interval(snake.update, 1 / 60) # tell pyglet the on_draw() & update() timestep\n pyglet.app.run() # run pyglet\n","repo_name":"davidglo/TMCS-2018-freshStart","sub_path":"game_projects/miroslav351-game/source/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":8659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39350408146","text":"from time import sleep\nimport requests, re\nfrom bs4 import BeautifulSoup\nimport random\nfrom pymongo import MongoClient\n\n'''\n @MongoDB 접속 URI / DB명\n'''\nclient = MongoClient('mongodb://203.255.92.141:27017', authSource='admin')\ndb = client.DBPIA\npubDB = client.PUBLIC\nAuthor = db.Author\nStatus = pubDB.DBPIA_CRAWLER\nsl_start = 5.0\n\n'''\n대학테이블\n'''\ndef isEnglishOrKorean(input_s):\n k_count = 0\n e_count = 0\n try:\n for c in input_s:\n if ord('가') <= ord(c) <= ord('힣'):\n k_count+=1 \n elif ord('a') <= ord(c.lower()) <= ord('z'):\n e_count+=1\n return \"k\" if k_count>1 else \"e\"\n \n except TypeError as e:\n print(input_s)\n return \"e\"\n\ndef check_college(univ0):\n branch_set = ['성균관대학교', '건국대학교', '한양대학교']\n univName = client['PUBLIC']['CollegeName']\n univ1 = re.sub(\"산학협력단|병원\",\"\",univ0)\n univ2 = re.sub(\"대학교\",\"대학교 \",univ1)\n \n try:\n if univ0 == \"\":\n return univ0\n\n if isEnglishOrKorean(univ0) == 'e':\n univ0 = univ0.upper()\n univ0 = univ0.replace('.', ',')\n univ = univ0.split(', ')\n else:\n univ = univ2.replace(\",\", \"\").split()\n univ = list(set(univ)) \n \n for uni in univ:\n if uni in branch_set:\n if (\"ERICA\" or \"에리카\") in univ0:\n univ[univ.index(\"한양대학교\")] = \"한양대학교(ERICA캠퍼스)\"\n elif (\"글로컬\" or \"GLOCAL\") in univ0:\n if \"건국대학교\" in univ0:\n univ[univ.index(\"건국대학교\")] = \"건국대학교 GLOCAL(글로컬)캠퍼스\"\n else :\n univ[univ.index(\"성균관대학교\")] = \"성균관대학교\"\n \n elif \"자연과학캠퍼스\" in univ0:\n univ[univ.index(\"성균관대학교\")] = \"성균관대학교(자연과학캠퍼스)\"\n\n univs = '{\"$or\": ['\n for u in range(len(univ)):\n if univ[-1] == univ[u]:\n univs += '{\"inputName\": \"' + univ[u] + '\"}'\n else:\n univs += '{\"inputName\": \"' + univ[u] + '\"}, '\n univs += ']}'\n\n univ_query = univName.find_one(eval(univs))\n\n if univ_query is None:\n return univ0\n else:\n return univ_query['originalName']\n \n except SyntaxError as e:\n return univ0\n\n'''\n @DBPIA ID를 이용해서 소속 수집 Crawling 개발 (BeautifulSoup 활용) \n'''\nwhile True:\n soup = \"\"\n i = \"\"\n try:\n for doc in Author.find({\"hasInst\" : False}).batch_size(1):\n\n print(\"DBPIA Inst. 
Crawler :\", doc['name'], doc['_id'])\n i = int( doc['_id'])\n #url 변동 되면 해당 부분 수정 필요 \n url = 'https://www.dbpia.co.kr/author/authorDetail?ancId={}'.format(i)\n conn = requests.get(url, timeout=60).text\n soup = BeautifulSoup(conn, 'html.parser')\n\n #Parsing 태그 정보 (변경 시 수정 필요)\n division_extract = soup.select('dd')\n name_extract = soup.select('h2')\n test_extract = name_extract[0].text.strip().split(\"\\n\")\n\n division = division_extract[3].text.strip()\n department = division_extract[4].text.strip()\n new_name = test_extract[0].strip()\n print(new_name)\n\n if '논문수' == new_name:\n Author.delete_one({\"_id\": doc['_id']})\n Status.find_one_and_update({\"_id\":4865},{\"$inc\":{\"total\":-1}})\n continue \n \n if division == '-' and department == '-':\n inst = ''\n \n elif department=='-':\n department=''\n inst = division + department\n \n else:\n inst = division + ' ' + department\n\n if len(test_extract) < 8:\n if len(test_extract) == 3:\n papers_count = test_extract[0].strip(\"논문수 \")\n used_count = test_extract[2].strip(\"이용수 \")\n else:\n papers_count = test_extract[3].strip(\"논문수 \")\n used_count = test_extract[5].strip(\"이용수 \")\n citation_count = 0\n else:\n citation_count = test_extract[7].strip(\"피인용수 \")\n\n original_inst = check_college(inst)\n\n Author.update_one({\"_id\":doc['_id']},{'$set':{\"plusName\" :new_name, \"inst\": inst , \"hasInst\" : True , \"papers_count\" : papers_count.strip() , \"used_count\": used_count.strip(),'citation_count': citation_count, 'originalName': original_inst}})\n Status.find_one_and_update({\"_id\":4865},{\"$inc\":{\"crawled\":1}})\n requests.session().close()\n print(\"DBPIA Inst. Crawler :\", doc['name'], \", Crawled, [inst , (paper, used, citation count)] : {} , ({}, {}, {})\".format(inst.strip(), papers_count.strip(), used_count.strip(), citation_count))\n sleep(random.uniform(sl_start, sl_start + 5.0))\n\n Status.find_one_and_update({\"_id\":4865},{\"$set\":{\"status\":0}})\n print(\"Inst All Crawled\")\n break\n\n except Exception as e :\n sl_start = sl_start * 1.1\n \n try :\n if '불편' in soup.select('.tit')[0].text:\n Author.update_one({\"_id\":doc['_id']},{'$set':{\"plusName\" :'', \"inst\": '' , \"hasInst\" : True , \"papers_count\" : 0 , \"used_count\": 0,'citation_count': 0}})\n Status.find_one_and_update({\"_id\":4865},{\"$inc\":{\"crawled\":1}})\n print('error no id')\n except Exception as e : \n Author.delete_one({\"_id\": doc['_id']})\n Status.find_one_and_update({\"_id\":4865},{\"$inc\":{\"total\":-1}})\n print('Another Error', e)\n print(\"Inst 예외\", e)\n print(\"Sleep Time Increased by \", sl_start)\n sleep(random.uniform(sl_start, sl_start+5.0))","repo_name":"tiensh12369/Expert-search-project","sub_path":"Crawling/DBPIA_CRAWLER.py","file_name":"DBPIA_CRAWLER.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35222823722","text":"\nfrom pyphoplacecellanalysis.External.pyqtgraph.Qt import QtGui, QtCore\n\nimport numpy as np\nimport pyphoplacecellanalysis.External.pyqtgraph as pg\n\n\nX_1 = 1\nY_1 = 0\nX_2 = 0\nY_2 = 1\n\ndef txtChanged():\n\tglobal X_1, Y_1, X_2, Y_2\n\ttry:\n\t\tX_1 = (int)(line_edit1.text())\n\t\tY_1 = (int)(line_edit2.text())\n\t\tX_2 = (int)(line_edit3.text())\n\t\tY_2 = (int)(line_edit4.text())\n\t\tvalueChanged()\n\t\t\n\texcept:\n\t\tprint(\"exception\")\n\t\n\ndef resize_function():\n\tprint(\"p\")\n\n#QtGui.QApplication.setGraphicsSystem('raster')\napp = QtGui.QApplication([])\n#mw = 
QtGui.QMainWindow()\n#mw.resize(800,800)\n\npg.setConfigOptions(antialias=True)\nwin = pg.GraphicsWindow(title=\"Basic plotting examples\")\nwin.resize(700, 700)\nwin.setWindowTitle('pyqtgraph example: Plotting')\n\n\nx = np.linspace(-1, 1, 100)\ny1 = np.sqrt(1-np.square(x))\ny2 = -1 * np.sqrt(1-np.square(x))\n\n\nwidget = QtGui.QWidget()\nwidget.setMaximumHeight(100)\n\np = widget.palette()\np.setColor(widget.backgroundRole(), QtCore.Qt.red)\nwidget.setPalette(p)\n\nwidgetMatrix = QtGui.QWidget()\nwidgetMatrix.setMaximumWidth(150)\n\ngridLayout = QtGui.QGridLayout(widgetMatrix)\nline_edit1 = QtGui.QLineEdit(\"1\");line_edit1.setFixedWidth(50);line_edit1.textChanged.connect(txtChanged);\nline_edit2 = QtGui.QLineEdit(\"0\");line_edit2.setFixedWidth(50);line_edit2.textChanged.connect(txtChanged);\nline_edit3 = QtGui.QLineEdit(\"0\");line_edit3.setFixedWidth(50);line_edit3.textChanged.connect(txtChanged);\nline_edit4 = QtGui.QLineEdit(\"1\");line_edit4.setFixedWidth(50);line_edit4.textChanged.connect(txtChanged);\ngridLayout.addWidget(line_edit1, 0, 0)\ngridLayout.addWidget(line_edit2, 1, 0)\ngridLayout.addWidget(line_edit3, 0, 1)\ngridLayout.addWidget(line_edit4, 1, 1)\n\nboxLayout = QtGui.QHBoxLayout(widget)\n\nboxLayout.addWidget(widgetMatrix)\nboxLayout.addStretch()\n\npp = QtGui.QGraphicsProxyWidget()\n\npp.setWidget(widget)\n\nwin.nextRow()\n\np = win.addLayout(row = 0, col = 0)\np.addItem(pp,row=0,col=0)\n\n\n\np4 = win.addPlot(title=\"Parametric, grid enabled\", row = 1, col =0)\np4.disableAutoRange()\np4.setXRange(-4, 4, padding = 0)\np4.setYRange(-4, 4, padding = 0)\nwin.resizeEvent(resize_function())\n\np4.setAspectLocked()\n\n#roi = pg.PolyLineROI([0, 1], [2, 1], pen=(1,9))\n#p4.addItem(roi)\n\nroi = pg.RectROI([0, 20], [2, 1], pen=(0,9))\np4.addItem(roi)\n\n\np4.showGrid(x=True, y=True)\n\nh_lines = []\nv_lines = []\n\nfor i in range(10):\n\tline_1 = pg.InfiniteLine(pen=pg.mkPen((0,80, 110), width=2))\n\tline_1.setAngle(0)\n\tline_1.setValue((0, i))\n\tp4.addItem(line_1)\n\tv_lines.append(line_1)\n\t\t\n\tline_2 = pg.InfiniteLine(pen=pg.mkPen((0,80, 110), width=2))\n\tline_2.setAngle(0)\n\tline_2.setValue((0, -1 * i))\n\tp4.addItem(line_2)\n\tv_lines.append(line_2)\n\t\n\tline_3 = pg.InfiniteLine(pen=pg.mkPen((0,80, 110), width=2))\n\tline_3.setAngle(90)\n\tline_3.setValue((i, 0))\n\tp4.addItem(line_3)\n\th_lines.append(line_3)\n\t\t\n\tline_4 = pg.InfiniteLine(pen=pg.mkPen((0,80, 110), width=2))\n\tline_4.setAngle(90)\n\tline_4.setValue((-i, 0))\n\tp4.addItem(line_4)\n\th_lines.append(line_4)\n\n\tprint(i)\n\n\n\np4.plot(x, y1, pen=pg.mkPen('g', width=2))\np4.plot(x, y2, pen=pg.mkPen('g', width=2))\n\nvector1 = pg.ArrowItem(pos = (1,0), angle = 180, brush = (0, 255,0), pen=pg.mkPen('g', width=2))\nvector1.opts['pos'] = (1,1)\nprint(vector1.opts['pos'])\np4.addItem(vector1)\np4.plot([0, 1],[0, 0] ,pen=pg.mkPen('g', width=3))\n\nvector2 = pg.ArrowItem(pos = (0,1), angle = 90, brush = (255, 0,0), pen=pg.mkPen('r', width=2))\np4.addItem(vector2)\np4.plot([0,0],[0,1],pen=pg.mkPen('r', width=3))\n\n\ndef valueChanged():\n\n\ttheta = slider.value() / 50\n\t\n\tx_1 = (X_1 * theta) + (1 - theta)\n\ty_1 = (Y_1 * theta)\n\t\n\tx_2 = (X_2 * theta)\n\ty_2 = (Y_2 * theta) + (1 - theta)\n\t\n\t\t\n\tX1 = (x_1 * x) + (x_2 * y1)\n\tX2 = (x_1 * x) + (x_2 * y2)\n\tY1 = (y_1 * x) + (y_2 * y1)\n\tY2 = (y_1 * x) + (y_2 * y2)\n\n\n\t\n\tslope1 = 0\n\tslope2 = 0\n\tslope2_inv = 0\n\tang1 = 0\n\tang2 = 0\n\t\n\t\n\tif(x_1 == 0):\n\t\tif(y_1 > 0):\n\t\t\tang1 = 90\t\n\t\telse:\n\t\t\tang1 = 
270\n\telif(y_1 == 0):\n\t\tif(x_1 > 0):\n\t\t\tang1 = 0\n\t\telse:\n\t\t\tang1 = 180\n\telse:\n\t\tslope1 = y_1 / x_1\n\t\tang1 = (np.arctan(slope1) * 180) / np.pi\n\t\t\n\tif(x_2 == 0):\n\t\tif(y_2 > 0):\n\t\t\tang2 = 90\n\t\telse:\n\t\t\tang2 = 270\n\telif(y_2 == 0):\n\t\tif(x_2 > 0):\n\t\t\tang2 = 0\n\t\telse:\n\t\t\tang2 = 180\n\telse:\n\t\tslope2 = y_2 / x_2\n\t\tslope2_inv = x_2 / y_2\n\t\t\n\t\tang2 = (np.arctan(slope2) * 180) / np.pi\n\t\n\tp4.plot([0],[0],clear = True )\n\t\n\tfor i in range(10):\n\t\n\t\ty_intercept = y_2 - (x_2 * slope1)\n\t\t\n\t\tline_1 = h_lines[i * 2]\n\t\tline_1.setAngle(ang1)\n\t\tline_1.setValue((0, i * y_intercept))\n\t\t\n\t\t\n\t\tline_2 = h_lines[(i * 2) + 1]\n\t\tline_2.setAngle(ang1)\n\t\tline_2.setValue((0, -i * y_intercept))\n\t\t\n\t\tx_intercept = x_1 - (y_1 * slope2_inv)\n\t\t\n\n\t\tline_3 = v_lines[i * 2]\n\t\tline_3.setAngle(ang2)\n\t\tline_3.setValue((i * x_intercept, 0))\n\t\t\n\t\tline_4 = v_lines[(i * 2) + 1]\n\t\tline_4.setAngle(ang2)\n\t\tline_4.setValue((-i * x_intercept, 0))\n\n\t\tp4.addItem(line_1)\n\t\tp4.addItem(line_2)\t\n\t\tp4.addItem(line_3)\n\t\tp4.addItem(line_4)\n\t\n\t#vector1 = pg.ArrowItem()\n\n\n\tvector1.resetTransform()\n\tvector1.setPos(x_1,y_1)\n\tvector1.rotate(180 - ang1)\n\tprint(\"ang1 \" , ang1)\n\t\n\tvector2.resetTransform()\n\tvector2.setPos(x_2,y_2)\n\tvector2.rotate(180 - ang2)\n\tprint(\"ang2 \", ang2)\n\t\n\tp4.addItem(vector1)\n\tp4.addItem(vector2)\t\n\t\n\t\n\tp4.plot([0, x_1],[0, y_1], pen=pg.mkPen('g', width=3))\n\tp4.plot([0, x_2],[0, y_2], pen=pg.mkPen('r', width=3))\n\t\n\tp4.plot(X1, Y1, pen=pg.mkPen('g', width=2))\n\tp4.plot(X2, Y2, pen=pg.mkPen('g', width=2))\n\n\nwin.nextRow()\nslider_max = 50\nslider = QtGui.QSlider(QtCore.Qt.Horizontal)\nslider.setMinimum(0)\nslider.setMaximum(slider_max)\nslider.setTickInterval(1)\nslider.valueChanged.connect(valueChanged)\n\nproxy = QtGui.QGraphicsProxyWidget()\n\nproxy.setWidget(slider)\n\np3 = win.addLayout(row=2, col=0)\np3.addItem(proxy,row=0,col=0)\n\n\n\nif __name__ == '__main__':\n\timport sys\n\tif (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n\t\tQtGui.QApplication.instance().exec_() \n","repo_name":"CommanderPho/Spike3D","sub_path":"LibrariesExamples/PyQtPlot/Working/pyqtgraph_MatrixVisualize.py","file_name":"pyqtgraph_MatrixVisualize.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22195769668","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nBridges mongo to ros, providing services to query the database.\nNot efficient, so should be only used when no other option is available; if\nyou can use a mongodb client library like pymongo, use it, if you need to query\nfrom client side javascript on a webpage with rosbridge/librosjs then use this.\n\n\"\"\"\n\nimport rospy\nimport mongodb_store.util as dc_util\nfrom mongodb_store.srv import *\nimport pymongo\nimport bson.json_util\nimport json\n\nMongoClient = dc_util.import_MongoClient()\n\nclass MongoBridge(object):\n def __init__(self):\n rospy.init_node(\"mongo_bridge\")\n\n have_dc = dc_util.wait_for_mongo()\n if not have_dc:\n raise Exception(\"No Datacentre?\")\n\n self._mongo_client=pymongo.MongoClient(rospy.get_param(\"mongodb_host\"),\n rospy.get_param(\"mongodb_port\") )\n\n # advertise ros services\n for attr in dir(self):\n if attr.endswith(\"_ros_srv\"):\n service=getattr(self, attr)\n rospy.Service(\"/mongo_bridge/\"+attr[:-8], 
service.type, service)\n\n\n def find_ros_srv(self, req):\n collection = self._mongo_client[req.db][req.collection]\n res=collection.find(json.loads(req.query, object_hook=json_util.object_hook))\n docs=[i for i in res]\n return json.dumps(docs, default=json_util.default)\n find_ros_srv.type=MongoFind\n\n def update_ros_srv(self,req):\n collection = self._mongo_client[req.db][req.collection]\n res=collection.update(json.loads(req.query, object_hook=json_util.object_hook),\n json.loads(req.update, object_hook=json_util.object_hook))\n return json.dumps(docs, default=json_util.default)\n update_ros_srv.type=MongoUpdate\n\n def insert_ros_srv(self,req):\n collection = self._mongo_client[req.db][req.collection]\n res=collection.insert(json.loads(req.document, object_hook=json_util.object_hook))\n return json.dumps(res, default=json_util.default)\n insert_ros_srv.type=MongoInsert\n\n\nif __name__ == '__main__':\n bridge = MongoBridge()\n\n rospy.spin()\n","repo_name":"strands-project/mongodb_store","sub_path":"mongodb_store/scripts/mongo_bridge.py","file_name":"mongo_bridge.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"53"} +{"seq_id":"12875732118","text":"from datetime import datetime\r\nimport webbrowser, time\r\nclass AbeHirosi:\r\n def __init__(self):\r\n year = datetime.now().year + 1\r\n self.open_time = datetime(year, 1, 1, 0, 0, 0)\r\n self.url = \"http://abehiroshi.la.coocan.jp/\"\r\n def loop(self):\r\n print(self.open_time,\"になったら\",self.url,\"を開きます。\")\r\n while True:\r\n now = datetime.now().replace(microsecond=0)\r\n print('現在時刻:', now)\r\n if now == self.open_time:\r\n print('--------------------')\r\n print('指定の時間になったのでブラウザを開きます')\r\n print('--------------------')\r\n start = datetime.now()\r\n print('開始時刻:', start)\r\n webbrowser.open(self.url)\r\n end = datetime.now()\r\n print('終了時刻:', end)\r\n print('誤差:', end - start)\r\n break\r\n else:\r\n time.sleep(1)\r\n\r\nif \"__main__\" == __name__:\r\n abe = AbeHirosi()\r\n abe.loop()","repo_name":"sailnov/hirosi","sub_path":"hirosi.py","file_name":"hirosi.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38123310741","text":"\"\"\"Partner access by project\n\nRevision ID: d350524bfb8a\nRevises: 9aefe68f8d78\nCreate Date: 2021-08-15 14:42:13.321568\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"d350524bfb8a\"\ndown_revision = \"9aefe68f8d78\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n \"partnerproject\",\n sa.Column(\n \"partner_id\",\n sa.Unicode(length=64, collation=\"utf8mb4_unicode_ci\"),\n nullable=False,\n ),\n sa.Column(\n \"project_id\",\n sa.Unicode(length=64, collation=\"utf8mb4_unicode_ci\"),\n nullable=False,\n ),\n sa.Column(\"access_date\", sa.DateTime(), nullable=True),\n sa.Column(\n \"granted_by\",\n sa.Unicode(length=120, collation=\"utf8mb4_unicode_ci\"),\n nullable=False,\n ),\n sa.ForeignKeyConstraint(\n [\"granted_by\"],\n [\"fsuser.user_id\"],\n name=op.f(\"fk_partnerproject_granted_by_fsuser\"),\n ondelete=\"CASCADE\",\n ),\n sa.ForeignKeyConstraint(\n [\"partner_id\"],\n [\"partner.partner_id\"],\n name=op.f(\"fk_partnerproject_partner_id_partner\"),\n ondelete=\"CASCADE\",\n ),\n sa.ForeignKeyConstraint(\n [\"project_id\"],\n [\"project.project_id\"],\n 
name=op.f(\"fk_partnerproject_project_id_project\"),\n ondelete=\"CASCADE\",\n ),\n sa.PrimaryKeyConstraint(\n \"partner_id\", \"project_id\", name=op.f(\"pk_partnerproject\")\n ),\n mysql_charset=\"utf8mb4\",\n mysql_engine=\"InnoDB\",\n mysql_collate=\"utf8mb4_unicode_ci\",\n )\n op.create_index(\n op.f(\"ix_partnerproject_project_id\"),\n \"partnerproject\",\n [\"project_id\"],\n unique=False,\n )\n\n\ndef downgrade():\n op.drop_table(\"partnerproject\")\n","repo_name":"qlands/FormShare","sub_path":"alembic/versions/d350524bfb8a_partner_access_by_project.py","file_name":"d350524bfb8a_partner_access_by_project.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"26526903701","text":"from sapp.decorators import WithContext\nfrom sapp.plugins.pyramid.controller import RestfulController\n\nfrom cashflow import app\nfrom cashflow.application.forms import FormSerializer\nfrom cashflow.auth.view_mixins import AuthMixin\nfrom cashflow.wallet.drivers import WalletReadDriver\nfrom cashflow.wallet.drivers import WalletWriteDriver\nfrom cashflow.wallet.schemas import CreateWalletSchema\nfrom cashflow.wallet.schemas import WalletSchema\n\n\nclass WalletListView(RestfulController, AuthMixin):\n permission = 'edit'\n\n @WithContext(app, args=['dbsession'])\n def get(self, dbsession):\n schema = WalletSchema()\n driver = WalletReadDriver(dbsession)\n wallets = driver.list_for_user(user_id=self.get_user_id())\n data = schema.dump(wallets, many=True).data\n\n return dict(elements=data)\n\n @WithContext(app, args=['dbsession'])\n def post(self, dbsession):\n wallet_wd = WalletWriteDriver(dbsession)\n form = FormSerializer(CreateWalletSchema())\n form.parse_json(self.request.json_body)\n\n result = {}\n\n if form.validate():\n data = form.fields()\n data['user_id'] = self.get_user().id\n\n wallet = wallet_wd.create(**data)\n result['uuid'] = wallet.uuid\n else:\n self.request.response.status_code = 400\n\n result['form'] = form.fullform\n return result\n\n\n","repo_name":"socek/cashflow","sub_path":"backend/code/cashflow/wallet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19821376568","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Note',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('body', models.TextField()),\n ('create_ts', models.DateTimeField(auto_now_add=True)),\n ('update_ts', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=100)),\n ('last_name', models.CharField(max_length=100)),\n ('email', models.CharField(max_length=100)),\n ],\n ),\n migrations.AddField(\n model_name='note',\n name='user_id',\n field=models.ForeignKey(to='lifetracker.User'),\n ),\n 
]\n","repo_name":"pijopenchev/lifetracker","sub_path":"lifetracker/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37672768063","text":"# Created by Henry O'Scannlain-Miller\n# This is a simple server that returns the first level of the Google Product Taxonomy\n# that is predicted from a Torch model.\n# Google Product Taxonomy: (https://www.google.com/basepages/producttype/taxonomy.en-US.txt)\n\nfrom flask import Flask, request\nimport torch\nfrom PIL import Image\nimport torchvision.transforms.functional as TF\n\nmodel = torch.load('21categories_model.torch')\nclass_names = ['Animals & Pet Supplies',\n 'Apparel & Accessories',\n 'Arts & Entertainment',\n 'Baby & Toddler',\n 'Business & Industrial',\n 'Cameras & Optics',\n 'Electronics',\n 'Food, Beverages & Tobacco',\n 'Furniture',\n 'Hardware',\n 'Health & Beauty',\n 'Home & Garden',\n 'Luggage & Bags',\n 'Mature',\n 'Media',\n 'Office Supplies',\n 'Religious & Ceremonial',\n 'Software',\n 'Sporting Goods',\n 'Toys & Games',\n 'Vehicles & Parts']\n\napp = Flask(__name__)\n\n# handle request\n# bad requests automaticaly return 400\n@app.route('/class', methods=['GET'])\ndef classify():\n file = request.files['image']\n image = Image.open(file)\n x = TF.to_tensor(image)\n x.unsqueeze_(0)\n _, output = torch.max(model(x), 1)\n predicted_category = class_names[output]\n return predicted_category\n\n# start the server\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=105)\n\n","repo_name":"henryom/ProductAutoClassifier","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8627229443","text":"import random\r\nfrom threading import *\r\nimport time\r\n\r\n\r\ndef normal_quicksort(array, start, end):\r\n if start >= end:\r\n return\r\n\r\n pivot = array[start]\r\n low = start + 1\r\n high = end\r\n\r\n while True:\r\n while low <= high and array[high] >= pivot:\r\n high = high - 1\r\n\r\n while low <= high and array[low] <= pivot:\r\n low = low + 1\r\n\r\n if low <= high:\r\n array[low], array[high] = array[high], array[low]\r\n #\r\n else:\r\n break\r\n\r\n array[start], array[high] = array[high], array[start]\r\n\r\n p = high\r\n normal_quicksort(array, start, p - 1)\r\n normal_quicksort(array, p + 1, end)\r\n\r\n\r\ndef thread_sort(sets, left, right):\r\n i = left\r\n j = right\r\n\r\n pivot = sets[int((left + right) / 2)]\r\n temp = 0\r\n while i <= j:\r\n while pivot > sets[i]:\r\n i = i + 1\r\n\r\n while pivot < sets[j]:\r\n j = j - 1\r\n\r\n if i <= j:\r\n temp = sets[i]\r\n sets[i] = sets[j]\r\n sets[j] = temp\r\n i = i + 1\r\n j = j - 1\r\n\r\n left_thread = None\r\n right_thread = None\r\n\r\n if left < j:\r\n left_thread = Thread(target=lambda: thread_sort2(sets, left, j))\r\n left_thread.start()\r\n\r\n if i < right:\r\n right_thread = Thread(target=lambda: thread_sort2(sets, i, right))\r\n right_thread.start()\r\n\r\n if left_thread is not None:\r\n left_thread.join()\r\n if right_thread is not None:\r\n right_thread.join()\r\n return sets\r\n\r\n\r\ndef thread_sort2(sets, left, right):\r\n i = left\r\n j = right\r\n\r\n pivot = sets[int((left + right) / 2)]\r\n temp = 0\r\n while i <= j:\r\n while pivot > sets[i]:\r\n i = i + 1\r\n\r\n while pivot < sets[j]:\r\n j = j - 1\r\n\r\n if i <= j:\r\n temp = sets[i]\r\n sets[i] 
= sets[j]\r\n sets[j] = temp\r\n i = i + 1\r\n j = j - 1\r\n\r\n left_thread = None\r\n right_thread = None\r\n\r\n if left < j:\r\n left_thread = Thread(target=normal_quicksort(sets, left, j))\r\n left_thread.start()\r\n\r\n if i < right:\r\n right_thread = Thread(target=normal_quicksort(sets, i, right))\r\n right_thread.start()\r\n\r\n if left_thread is not None:\r\n left_thread.join()\r\n if right_thread is not None:\r\n right_thread.join()\r\n return sets\r\n\r\n\r\nls = []\r\nfor i in range(10000000):\r\n ls.append(random.randint(9, 99999999999))\r\n\r\nstart = time.time()\r\nthread_sort(ls, 0, len(ls) - 1)\r\nend = time.time()\r\n\r\ntime1 = end - start\r\nprint(f\"thread sort:{time1}\")\r\n\r\nrandom.shuffle(ls)\r\n\r\nstart2 = time.time()\r\nnormal_quicksort(ls, 0, len(ls) - 1)\r\nend2 = time.time()\r\ntime2 = end2 - start2\r\nprint(f\"normal sort:{time2}\")\r\nprint(time2 - time1)\r\nprint(f\"thread Vs. normal:{round(-(time1 - time2) * 100 / time2)}%\")\r\n\r\n","repo_name":"ahMADASSadi/Thread-Quicksort","sub_path":"MT.py","file_name":"MT.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20410357447","text":"# Databricks notebook source\nfrom pyspark import SparkConf , SparkContext\nconf = SparkConf().setAppName('quiz') \nsc = SparkContext.getOrCreate(conf=conf)\n\n# COMMAND ----------\n\ntext = sc.textFile('/FileStore/tables/groupby.txt')\n\n# COMMAND ----------\n\nrdd = text.flatMap(lambda x : x.split())\n\n# COMMAND ----------\n\n#getNumberof Partitions in rdd\nrdd.getNumPartitions()\n\n# COMMAND ----------\n\n#repartition to given value\nrdd2 = rdd.repartition(5)\n\n# COMMAND ----------\n\n#gives the number of partitions assigned to rdd2\nrdd2.getNumPartitions()\n\n# COMMAND ----------\n\nrdd2.saveAsTextFile('/FileStore/tables/Output/5partitions')\n\n# COMMAND ----------\n\n#coalesce decreate the numberof partitions by given value\nrdd3 = rdd2.coalesce(3)\n\n# COMMAND ----------\n\nrdd3.getNumPartitions()\n\n# COMMAND ----------\n\nrdd3.saveAsTextFile('/FileStore/tables/Output/3partitions')\n\n# COMMAND ----------\n\n#to see no of partitions in particular file\nrdd5 =sc.textFile('/FileStore/tables/Output/3partitions')\nrdd5.getNumPartitions()\n","repo_name":"Mailendiran98/Pyspark-Learnings","sub_path":"RDD_Examples/partition&coalesce.py","file_name":"partition&coalesce.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41571559977","text":"# this variable has no parent function, but is actually NOT a global variable.\n# it just so happens that it is committed to memory before the function is called\n# so we are able to iterate, or call it out, but we cannot do much else.\n\nx = 6\n\ndef example():\n print(x)\n # z, however, is a local variable. \n z = 5\n # this works\n print(z)\n \nexample()\n# this does not, which often confuses people, because z has been defined\n# and successfully even was called... the problem is that it is a local\n# variable only, and you are attempting to access it globally.\n\nprint(z)\n\nx = 6\n\ndef example2():\n # works\n print(x)\n print(x+5)\n\n # but then what happens when we go to modify:\n x+=6\n\n # so there we attempted to take the x var and add 6 to it... but now\n # we are told that we cannot, as we're referencing the variable before\n # its assignment.\n\n# Here, again, we are able to reference x, we are even able to print x+6... 
\n# but we are not allowed to modify x.\n\n# What if we'd like to modify x? Well, then we need to use global!\nx = 6\n\ndef example3():\n # what we do here is defined x as a global variable. \n global x\n # now we can:\n print(x)\n x+=5\n print(x)\n# Now we're cooking! The problem here is that some people do not like the idea at all of \n# using global variables. How do we get around using them and referencing them locally?\nx = 6\n\ndef example4():\n globx = x\n # now we can:\n print(globx)\n globx+=5\n print(globx)\n# We are able to do the above, by assigning the value that we can reference to a local\n# variable, then doing what we want with it from there.\n\n# Another choice you might have, as suggested by one of my viewers is the following:\nx = 6\ndef example(x):\n \n print(x)\n x+=5\n print(x)\n return x\n \nx = example(x)\nprint(x)\n# In the above example, we have the function modifying x. \n\n# It may appear somewhat confusing since x is being used in multiple locations, \n# so maybe a more clear example is something like:\nx = 6\ndef example(modify):\n \n print(modify)\n modify+=5\n print(modify)\n return modify\n \nx = example(x)\nprint(x)","repo_name":"SaretMagnoslove/Python_3_Basics_Tutorial_Series-Sentdex","sub_path":"Lesson12_global_variables.py","file_name":"Lesson12_global_variables.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20226996334","text":"import urllib.request\nimport time\nfrom openpyxl import load_workbook\nfrom bs4 import BeautifulSoup\n\n#excel file versions of inventories\nsenx = load_workbook(\"detailsSEN.xlsx\")\nsen = senx.active\nsenlinks = []\n\ncpx = load_workbook(\"detailsC4P.xlsx\")\ncp = cpx.active\ncplinks = []\n\nhlx = load_workbook(\"detailsHL.xlsx\")\nhl = hlx.active\nhllinks = []\n\nwsx = load_workbook(\"detailsWS.xlsx\")\nws = wsx.active\nwslinks = []\n\nworkbooks = [cp, hl, sen, ws]\nurllists = [cplinks, hllinks, senlinks, wslinks]\n\nURL = \"A\"\nstart = 1\n\ndef list_of_links(workbook, llist):\n for row in workbook.range(\"%s%s:%s%s\" % (URL, start, URL, workbook.get_highest_row())):\n for cell in row:\n llist.append(cell.value)\n\n#returns list of non-text symbols\ndef multiplefilechecker(llist):\n newlist = []\n for link in llist:\n soup = BeautifulSoup(urllib.request.urlopen(link))\n add = filechecker(soup)\n if len(add) > 1:\n newlist.append(add)\n print(len(newlist), \" resources containing at least one non-text symbol\")\n return newlist\n\ndef filechecker(soup):\n fields = soup.find_all(\"td\", limit = 4) #returns text of title, brief description, url, and abstract or purpose, respectively\n newstring = str(soup.find(\"td\"))\n for item in fields:\n string = str(item)\n if fields.index(item) != 2:\n i = -5\n for char in string:\n i += 1\n if ord(char) > 126:\n if fields.index(item) == 0:\n newstring += \"-title\"\n elif fields.index(item) == 1:\n newstring += \"-description\"\n elif fields.index(item) == 3:\n newstring += \"-abstract\"\n newstring += str(i) \n if len(newstring) == len(str(soup.find(\"td\"))):\n newstring = \"\"\n newstring = newstring.replace(\"\", \"\")\n newstring = newstring.replace(\"\", \"\")\n return newstring\n\nstart_time = time.time()\n\ncount = 0\nwhile count < 4:\n list_of_links(workbooks[count], urllists[count])\n count += 1\n\nprint(\"Non-text symbols in C4P:\")\nfor item in multiplefilechecker(cplinks):\n print(item)\nprint()\nprint(\"Non-text symbols in HLI:\")\nfor item in 
multiplefilechecker(hllinks):\n print(item)\nprint()\nprint(\"Non-text symbols in SEN:\")\nfor item in multiplefilechecker(senlinks):\n print(item)\nprint()\nprint(\"Non-text symbols in WSI:\")\nfor item in multiplefilechecker(wslinks):\n print(item)\nprint()\n\nelapsed_time = time.time() - start_time\nprint(elapsed_time)","repo_name":"RockyCal/CINERGIDataCuration","sub_path":"NonTextChecker.py","file_name":"NonTextChecker.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8532366596","text":"import sys\nimport csv\nimport subprocess\n\nargv = sys.argv\nif len(argv) != 3:\n print(\"Incorrect usage: cmd audio.mp3 template.csv\")\n print(len(argv))\n sys.exit(1)\n\n# get the file to disassemble\naudio_file = argv[1]\n\n# get the template file and configuration\nt_file = open(argv[2])\nt_reader = csv.reader(t_file)\n# t_file.close()\n\n# for every row in t_reader,\nfor row in t_reader:\n # check if the id is a number\n if row[3].isdigit():\n # create a list object with the row value\n name = str(row[0])\n start_time = str(row[1])\n duration = str(row[5])\n id = int(row[3])\n directory = str(row[4])\n\n filename = \"'%d. %s.mp3'\" % (id, name)\n cmd = \"ffmpeg -i %s -ss %s -t %s -c copy %s\" % (audio_file, start_time, duration, filename)\n subprocess.run(cmd, shell=True)\n\n# track_name = \"id \" + \"name\" + \".mp3\"\n# ffmpeg -i audio_file -ss start_time -t duration -c copy filename\n","repo_name":"LogistyxCat/crash-course-python","sub_path":"song-scalpel.py","file_name":"song-scalpel.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15206014446","text":"#!/usr/bin/env python3\nimport sys\n\nfile = open(sys.argv[1])\nlines = file.readlines()\nlength = len(lines)\n\n\ni = 0\nthreshold = 3\n\n\nwhile ibegining:\n print(begining-threshold)\n print(ending)\n","repo_name":"tekakutli/tortoise-scripts","sub_path":"collapse_dir/collapse_list_2.py","file_name":"collapse_list_2.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"35238374552","text":"import time\r\nimport os\r\nimport pandas as pd\r\nfrom sklearn import tree\r\n\r\ndef slp(a):\r\n time.sleep(a)\r\n\r\ndef copyright():\r\n print(time.ctime())\r\n print(\"(c) 2020 Kelompok 3 . All rights reserved.\")\r\n\r\ndef opsi1():\r\n data = pd.read_csv(\"hasil.csv\")\r\n print(\"\")\r\n print(\"=\"*53,\" DATA DISTRIBUSI JARINGAN \",\"=\"*53)\r\n print(data)\r\n print(\"=\"*135)\r\n\r\n y = data.iloc[:, 0]\r\n X = data.iloc[:,1:12:2]\r\n clf = tree.DecisionTreeClassifier()\r\n clf = clf.fit(X,y)\r\n\r\n print(\"\\n[*] Analisis tipe gangguan\")\r\n Ea = float(input(\"\\n[?] Masukan nilai Ea : \"))\r\n Eb = float(input(\"[?] Masukan nilai Eb : \"))\r\n Ec = float(input(\"[?] Masukan nilai Ec : \"))\r\n Ia = float(input(\"[?] Masukan nilai Ia : \"))\r\n Ib = float(input(\"[?] Masukan nilai Ib : \"))\r\n Ic = float(input(\"[?] 
Masukan nilai Ic : \"))\r\n predicition = clf.predict([[Ea,Eb,Ec,Ia,Ib,Ic]])\r\n\r\n def a():\r\n tulis = (\"[-] Menurut data yang telah di input, data tersebut termasuk ke dalam gangguan : \")\r\n return tulis\r\n\r\n print(\"\\n[*] Output Metode Machine Learning\")\r\n print(a(),predicition)\r\n print(\"\\n[*] Output Metode Kondisi\")\r\n if Ea0.5 and Ea-Eb>0.001 or Ib0.5 and Eb-Ea>0.001 or Ic0.5 and Eb-Ea>0.001 or Ia 10:\n raise ValueError(\n 'Please guess a number within the given range')\n attempts += 1\n attempts_list.append(attempts)\n\n if guess == rand_num:\n print('Nice! You got it!')\n print(f'It took you {attempts} attempts')\n wanna_play = input(\n 'Would you like to play again? (Enter Yes/No): ')\n if wanna_play.lower() != 'yes':\n print('That\\'s cool, have a good one!') # The slash in ' That's ' is just to allow the apostrophe because python only understands alphanumeric values. So the slash just helps the computer to accept the apostrophe.\n break\n else:\n attempts = 0\n rand_num = random.randint(1, 10)\n show_score()\n continue\n else:\n if guess > rand_num:\n print('It\\'s lower') # same here with the slash :)\n elif guess < rand_num:\n print('It\\'s higher') # also the same here with the slash... you get idea :)\n \n\n\n except ValueError as err:\n print('Oh no!, that is not a valid value. Try again...')\n print(err)\n\n\nif __name__ == '__main__':\n start_game()\n\n","repo_name":"Achyut-Labs/python-seva","sub_path":"excercise3/aairanumberguessing.Py","file_name":"aairanumberguessing.Py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"41876896389","text":"N = int(input())\n\nline = []\nfor i in range(N):\n x, y = map(int, input().split())\n line.append((x,y))\n\nline.sort()\nans = 0\ntmpS = -2000000000\ntmpE = -2000000000\nfor d in line:\n if d[0] <= tmpE:\n tmpE = max(tmpE, d[1])\n else:\n ans += tmpE - tmpS\n tmpS = d[0]\n tmpE = d[1]\nans += tmpE - tmpS \nprint(ans)","repo_name":"KAN-RYU/Algorithm","sub_path":"Line.py","file_name":"Line.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17959541593","text":"from odoo import api, fields, models\n\n\nclass FSMStage(models.Model):\n _inherit = \"fsm.stage\"\n\n validate_field_ids = fields.Many2many(\n \"ir.model.fields\",\n string=\"Fields to Validate\",\n help=\"Select fields which must be set on the document in this stage\",\n )\n\n stage_type_model_id = fields.Many2one(\n \"ir.model\",\n compute=\"_compute_stage_model\",\n string=\"Model for Stage\",\n help=\"Technical field to hold model type\",\n )\n\n @api.depends(\"stage_type\")\n def _compute_stage_model(self):\n Model = self.env[\"ir.model\"]\n for rec in self:\n model_id = False\n if rec.stage_type:\n model_string = \"fsm.\" + rec.stage_type\n model_id = Model.search([(\"model\", \"=\", model_string)], limit=1).id\n rec.stage_type_model_id = model_id\n","repo_name":"OCA/field-service","sub_path":"fieldservice_stage_validation/models/fsm_stage.py","file_name":"fsm_stage.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"53"} +{"seq_id":"22176452844","text":"import openai\nimport re\nfrom decouple import config\n\nfrom fastapi import FastAPI\nfrom fastapi.param_functions import Depends\nfrom pydantic import BaseModel, validator\n\nfrom src.config.db import 
get_session\nfrom src.vector_db import client\nfrom src.prompts.safety_prompt import SAFETY_PROMPT\n\nfrom sqlmodel import select\nfrom better_profanity import profanity\nfrom qdrant_client.http.models import PointStruct\nfrom qdrant_client.http import models\n\nprofanity.load_censor_words()\n\napp = FastAPI()\n\nopenai.api_key = config(\"OPENAI_API_KEY\")\n\n\nclass UserInput(BaseModel):\n input: str\n\n\n@app.post(\"/complete_prompt\")\nasync def complete_prompt(\n user_input: UserInput,\n session=Depends(get_session)\n):\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=user_input.input,\n temperature=0.8,\n max_tokens=300\n )\n\n return response\n\n\n@app.post(\"/complete_chat\")\nasync def complete_chat_prompt(\n user_input: UserInput,\n session=Depends(get_session)\n):\n\n response = openai.Embedding.create(\n input=user_input.input,\n model=\"text-embedding-ada-002\"\n )\n\n input_embedding = response['data'][0]['embedding']\n\n operation_info = client.search(\n collection_name=\"test_collection\",\n search_params=models.SearchParams(\n hnsw_ef=128,\n exact=False\n ),\n query_vector=input_embedding,\n limit=150,\n )\n prompt = \"\"\n if operation_info:\n prompt += \"Here are some context from previous messages:\"\n for op in operation_info:\n prompt += f\"{op.payload['role'].upper()}: {op.payload['message']}\"\n\n prompt += f\"USER: {user_input.input}\"\n prompt = prompt.replace('\\n', '')\n print(prompt)\n completion = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=prompt,\n temperature=0.5,\n max_tokens=500\n )\n\n operation_info = client.upsert(\n collection_name=\"test_collection\",\n wait=True,\n points=[\n PointStruct(id=1, vector=input_embedding, payload={\"role\": \"user\", \"message\": user_input.input}),\n PointStruct(id=2, vector=input_embedding, payload={\"role\": \"assistant\", \"message\": completion['choices'][0].text}),\n ]\n )\n\n return completion\n","repo_name":"renatonerijr/AI-API","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32902776673","text":"from abc import ABC\nfrom dataclasses import dataclass\nimport re\nimport sys\nfrom typing import Any, Dict, List, Optional, Set, Tuple\n\nfrom python.generators.sql_processing.docs_extractor import DocsExtractor\nfrom python.generators.sql_processing.utils import ObjKind\nfrom python.generators.sql_processing.utils import ARG_ANNOTATION_PATTERN\nfrom python.generators.sql_processing.utils import NAME_AND_TYPE_PATTERN\nfrom python.generators.sql_processing.utils import FUNCTION_RETURN_PATTERN\nfrom python.generators.sql_processing.utils import COLUMN_ANNOTATION_PATTERN\n\n\ndef is_internal(name: str) -> bool:\n return re.match(r'^internal_.*', name, re.IGNORECASE) is not None\n\n\ndef is_snake_case(s: str) -> bool:\n \"\"\"Returns true if the string is snake_case.\"\"\"\n return re.fullmatch(r'^[a-z_0-9]*$', s) is not None\n\n\nclass AbstractDocParser(ABC):\n\n @dataclass\n class Column:\n pass\n\n def __init__(self, path: str, module: str):\n self.path = path\n self.module = module\n self.name = None\n self.errors = []\n\n def _parse_name(self, upper: bool = False):\n assert self.name\n assert isinstance(self.name, str)\n module_pattern = f\"^{self.module}_.*\"\n if upper:\n module_pattern = module_pattern.upper()\n starts_with_module_name = re.match(module_pattern, self.name, re.IGNORECASE)\n if self.module == 
\"common\":\n if starts_with_module_name:\n self._error('Names of tables/views/functions in the \"common\" module '\n f'should not start with {module_pattern}')\n return self.name\n if not starts_with_module_name:\n self._error('Names of tables/views/functions should be prefixed with the '\n f'module name (i.e. should start with {module_pattern})')\n return self.name.strip()\n\n def _parse_desc_not_empty(self, desc: str):\n if not desc:\n self._error('Description of the table/view/function is missing')\n return desc.strip()\n\n def _validate_only_contains_annotations(self,\n ans: List[DocsExtractor.Annotation],\n ans_types: Set[str]):\n used_ans_types = set(a.key for a in ans)\n for type in used_ans_types.difference(ans_types):\n self._error(f'Unknown documentation annotation {type}')\n\n def _parse_columns(self, ans: List[DocsExtractor.Annotation],\n sql_cols_str: str) -> Dict[str, str]:\n cols = {}\n for t in ans:\n if t.key != '@column':\n continue\n m = re.match(COLUMN_ANNOTATION_PATTERN, t.value)\n if not m:\n self._error(f'@column annotation value {t.value} does not match '\n f'pattern {COLUMN_ANNOTATION_PATTERN}')\n continue\n cols[m.group(1)] = m.group(2).strip()\n\n sql_cols = self._parse_name_and_types_str(sql_cols_str)\n if sql_cols:\n for col in set(cols.keys()).difference(sql_cols.keys()):\n self._error(f'@column \"{col}\" documented but does not exist in '\n 'function definition')\n for col in set(sql_cols.keys()).difference(cols):\n self._error(f'Column \"{col}\" defined in SQL but is not documented with '\n '@column')\n return cols\n\n def _parse_args(self, ans: List[DocsExtractor.Annotation],\n sql_args_str: str) -> Dict[str, Any]:\n args = {}\n for an in ans:\n if an.key != '@arg':\n continue\n m = re.match(ARG_ANNOTATION_PATTERN, an.value)\n if m is None:\n self._error(f'Expected arg documentation \"{an.value}\" to match pattern '\n f'{ARG_ANNOTATION_PATTERN}')\n continue\n args[m.group(1)] = {'type': m.group(2), 'desc': m.group(3).strip()}\n\n sql_args = self._parse_name_and_types_str(sql_args_str)\n if sql_args:\n for col in set(args.keys()).difference(sql_args.keys()):\n self._error(f'Arg \"{col}\" documented with @arg but does not exist '\n 'in function definition')\n for arg in set(sql_args.keys()).difference(args.keys()):\n self._error(f'Arg \"{arg}\" defined in SQL but is not documented with '\n '@arg')\n return args\n\n def _parse_ret(self, ans: List[DocsExtractor.Annotation],\n sql_ret_type: str) -> Tuple[str, str]:\n rets = [a.value for a in ans if a.key == '@ret']\n if len(rets) != 1:\n self._error('Return value is not documentated with @ret')\n return '', ''\n\n ret = rets[0]\n m = re.match(FUNCTION_RETURN_PATTERN, ret)\n if not m:\n self._error(\n f'@ret {ret} does not match pattern {FUNCTION_RETURN_PATTERN}')\n return '', ''\n\n ret_type, ret_desc = m.group(1), m.group(2)\n if ret_type != sql_ret_type:\n self._error(\n f'@ret {ret_type} does not match SQL return type {sql_ret_type}')\n return '', ''\n return ret_type, ret_desc.strip()\n\n def _parse_name_and_types_str(self, args_str: str) -> Dict[str, str]:\n if not args_str:\n return {}\n\n args = {}\n for arg_str in args_str.split(\",\"):\n m = re.match(NAME_AND_TYPE_PATTERN, arg_str)\n if m is None:\n self._error(f'Expected \"{arg_str}\" to match pattern '\n f'{NAME_AND_TYPE_PATTERN}')\n continue\n args[m.group(1)] = m.group(2).strip()\n return args\n\n def _error(self, error: str):\n self.errors.append(\n f'Error while parsing documentation for {self.name} in {self.path}: '\n f'{error}')\n\n\nclass 
TableOrView:\n name: str\n type: str\n desc: str\n cols: Dict[str, str]\n\n def __init__(self, name, type, desc, cols):\n self.name = name\n self.type = type\n self.desc = desc\n self.cols = cols\n\n\nclass TableViewDocParser(AbstractDocParser):\n \"\"\"Parses documentation for CREATE TABLE and CREATE VIEW statements.\"\"\"\n\n def __init__(self, path: str, module: str):\n super().__init__(path, module)\n\n def parse(self, doc: DocsExtractor.Extract) -> Optional[TableOrView]:\n assert doc.obj_kind == ObjKind.table_view\n\n self.name = doc.obj_match[1]\n if is_internal(self.name):\n return None\n\n self._validate_only_contains_annotations(doc.annotations, {'@column'})\n return TableOrView(\n name=self._parse_name(),\n type=doc.obj_match[0],\n desc=self._parse_desc_not_empty(doc.description),\n cols=self._parse_columns(doc.annotations, ''),\n )\n\n\nclass Function:\n name: str\n desc: str\n args: Dict[str, Any]\n return_type: str\n return_desc: str\n\n def __init__(self, name, desc, args, return_type, return_desc):\n self.name = name\n self.desc = desc\n self.args = args\n self.return_type = return_type\n self.return_desc = return_desc\n\n\nclass FunctionDocParser(AbstractDocParser):\n \"\"\"Parses documentation for CREATE_FUNCTION statements.\"\"\"\n\n def __init__(self, path: str, module: str):\n super().__init__(path, module)\n\n def parse(self, doc: DocsExtractor.Extract) -> Optional[Function]:\n self.name, args, ret, _ = doc.obj_match\n\n # Ignore internal functions.\n if is_internal(self.name):\n return None\n\n self._validate_only_contains_annotations(doc.annotations, {'@arg', '@ret'})\n\n ret_type, ret_desc = self._parse_ret(doc.annotations, ret)\n name = self._parse_name(upper=True)\n\n if not is_snake_case(name):\n self._error('Function name %s is not snake_case (should be %s) ' %\n (name, name.casefold()))\n\n return Function(\n name=self._parse_name(upper=True),\n desc=self._parse_desc_not_empty(doc.description),\n args=self._parse_args(doc.annotations, args),\n return_type=ret_type,\n return_desc=ret_desc,\n )\n\n\nclass TableFunction:\n name: str\n desc: str\n cols: Dict[str, str]\n args: Dict[str, Any]\n\n def __init__(self, name, desc, cols, args):\n self.name = name\n self.desc = desc\n self.cols = cols\n self.args = args\n\n\nclass ViewFunctionDocParser(AbstractDocParser):\n \"\"\"Parses documentation for CREATE_VIEW_FUNCTION statements.\"\"\"\n\n def __init__(self, path: str, module: str):\n super().__init__(path, module)\n\n def parse(self, doc: DocsExtractor.Extract) -> Optional[TableFunction]:\n self.name, args, columns, _ = doc.obj_match\n\n # Ignore internal functions.\n if is_internal(self.name):\n return None\n\n self._validate_only_contains_annotations(doc.annotations,\n {'@arg', '@column'})\n return TableFunction(\n name=self._parse_name(upper=True),\n desc=self._parse_desc_not_empty(doc.description),\n cols=self._parse_columns(doc.annotations, columns),\n args=self._parse_args(doc.annotations, args),\n )\n\n\nclass ParsedFile:\n errors: List[str] = []\n table_views: List[TableOrView] = []\n functions: List[Function] = []\n table_functions: List[TableFunction] = []\n\n def __init__(self, errors, table_views, functions, view_functions):\n self.errors = errors\n self.table_views = table_views\n self.functions = functions\n self.table_functions = view_functions\n\n\n# Reads the provided SQL and, if possible, generates a dictionary with data\n# from documentation together with errors from validation of the schema.\ndef parse_file(path: str, sql: str) -> ParsedFile:\n 
if sys.platform.startswith('win'):\n path = path.replace('\\\\', '/')\n\n # Get module name\n module_name = path.split('/stdlib/')[-1].split('/')[0]\n\n # Extract all the docs from the SQL.\n extractor = DocsExtractor(path, module_name, sql)\n docs = extractor.extract()\n if extractor.errors:\n return ParsedFile(extractor.errors, [], [], [])\n\n # Parse the extracted docs.\n errors = []\n table_views = []\n functions = []\n view_functions = []\n for doc in docs:\n if doc.obj_kind == ObjKind.table_view:\n parser = TableViewDocParser(path, module_name)\n res = parser.parse(doc)\n if res:\n table_views.append(res)\n errors += parser.errors\n if doc.obj_kind == ObjKind.function:\n parser = FunctionDocParser(path, module_name)\n res = parser.parse(doc)\n if res:\n functions.append(res)\n errors += parser.errors\n if doc.obj_kind == ObjKind.view_function:\n parser = ViewFunctionDocParser(path, module_name)\n res = parser.parse(doc)\n if res:\n view_functions.append(res)\n errors += parser.errors\n\n return ParsedFile(errors, table_views, functions, view_functions)\n","repo_name":"iridium-browser/iridium-browser","sub_path":"third_party/perfetto/python/generators/sql_processing/docs_parse.py","file_name":"docs_parse.py","file_ext":"py","file_size_in_byte":10282,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"53"} +{"seq_id":"22211312557","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n head1 = l1\n head2 = l2\n l = []\n num1,num2 = \"\",\"\"\n \n while head1:\n num1 += str(head1.val)\n head1 = head1.next\n \n while head2:\n num2 += str(head2.val)\n head2 = head2.next\n \n sum1 = int(num1[::-1]) + int(num2[::-1])\n \n l = [int(i) for i in str(sum1)]\n \n for index,value in enumerate(l):\n if index==0:\n l[index] = ListNode(val=value,next=None)\n else:\n l[index] = ListNode(val=value,next=l[index-1])\n \n return l[len(l)-1]","repo_name":"AyushSingh-github/Leetcode","sub_path":"0002-add-two-numbers/0002-add-two-numbers.py","file_name":"0002-add-two-numbers.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27016951824","text":"class Solution(object):\n def search(self, l, t, k):\n po = 0\n while po < len(l):\n i = po + 1\n while i < len(l):\n if abs(l[i][0] - l[po][0]) <= t and abs(l[i][1] - l[po][1]) <= k:\n return True\n else:\n if abs(l[i][0] - l[po][0]) > t:\n break\n else:\n i +=1\n po += 1\n return False \n \n def containsNearbyAlmostDuplicate(self, nums, k, t):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :type t: int\n :rtype: bool\n \"\"\"\n pp = sorted(zip(nums, range(len(nums))), key= lambda x:x[0])\n return self.search(pp,t ,k )\n","repo_name":"rohitgit1/September-Leetcode-Challenge","sub_path":"Day2/Contains_duplicate_3.py","file_name":"Contains_duplicate_3.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"37205544712","text":"#!/usr/bin/env python\n\"\"\"\n@author: Francis Obiagwu\n@software: SecureDocumentSharing\n@file: DSClient.py\n@time: 6/6/18 7:16 PM\n\"\"\"\n\nimport socket\nimport sys\nimport threading\nimport os\n\nfrom DSCodes import DSCode\nfrom DSInput import DSInput\nfrom DSPdu import DSPdu\nfrom 
DSClientServerResponseProcessor import DSClientServerResponseProcessor\n\n\nclass DSClient:\n \"\"\"\n The DSClient class is used to create a client connection. To avoid error, the server object has\n to be started first before starting the client object\n \"\"\"\n __BUFFER_SIZE = None\n __PORT = 5005\n __TCP_IP = '127.0.0.1'\n __CLIENT__SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n placement = 0\n\n def __init__(self):\n self.client_pdu = DSPdu() # create new pdu object\n self.allowed_data_size = self.client_pdu.get_data_size()\n self.__BUFFER_SIZE = self.client_pdu.get_buffer_size() # set the size of the pdu\n self.done_processing = False\n self.client_ds_code = DSCode()\n self.message_type_index, self.timestamp_index, self.error_code_index, self.flag_index, self.changed_section_index, self.section_id_index, self.reserved_1_index, self.reserved_2_index, self.reserved_3_index, self.data,self.data_size_index, self.checksum_index = DSPdu().get_pdu_parts_index()\n self.null_byte = b'\\x00' # used in place of data when is not need to send data during S_DENIED, CAUTH etc\n self.input_processor = DSInput()\n self.server_response_processor = DSClientServerResponseProcessor()\n\n self.first_message_recvd = False # used to track if the client have received the first 'CONNECT' message\n self.placement = 0\n self.is_authenticated = False\n self.server_alive = False\n\n def start( self ):\n \"\"\"\n Used to start the client\n :return: None\n \"\"\"\n try:\n self.__CLIENT__SOCKET.connect((self.__TCP_IP, self.__PORT))\n self.server_alive = True\n # print('client successfully connected')\n except ConnectionRefusedError as err:\n print(err.args)\n os._exit(1)\n\n #################################################\n # receive and send the first message\n #################################################\n\n while not self.first_message_recvd:\n # wait here until the server sends you the first message\n pdu = self.__CLIENT__SOCKET.recv(self.__BUFFER_SIZE)\n # print(pdu)\n array = self.client_pdu.remove_padding(self.client_pdu.unpack(pdu))\n if self.server_response_processor.process_response(array, self.__CLIENT__SOCKET, self):\n break\n else:\n print('The server doesn\\'t want to talk to you')\n os._exit(1)\n\n # run the thread to receive pdu from the server, this thread runs forever\n recv_thread = threading.Thread(target=self.receiving_thread).start()\n\n while self.server_alive:\n array, string_array = self.input_processor.get_user_input() # return the user input as array and as string\n if array[0] == 'COMMIT':\n # process differently\n # print(array, string_array)\n commit_pdu_array = self.input_processor.process_user_input(array, string_array)\n for item in commit_pdu_array:\n print(item)\n pdu = self.client_pdu.pack(item)\n self.__CLIENT__SOCKET.send(pdu)\n print('packed pdu sent: {}'.format(pdu))\n\n else:\n pdu_array = self.input_processor.process_user_input(array, string_array) # obtain the pdu as byte\n print(pdu_array)\n pdu = self.client_pdu.pack(pdu_array)\n self.__CLIENT__SOCKET.send(pdu) # send\n\n if array[0] == \"LOGOFF\":\n self.server_alive = False\n os._exit(-1)\n\n def receiving_thread( self ):\n \"\"\"\n Used to run the receiving thread forever\n :return: None\n \"\"\"\n while self.server_alive: # if the server is still listening for this client\n try:\n pdu = self.__CLIENT__SOCKET.recv(self.__BUFFER_SIZE)\n # print(pdu)\n unpacked_pdu = self.client_pdu.unpack(pdu)\n unpacked_no_pad = self.client_pdu.remove_padding(unpacked_pdu)\n 
self.server_response_processor.process_response(unpacked_no_pad, self.__CLIENT__SOCKET, self)\n\n except ConnectionResetError as err:\n print(err.args)\n self.server_alive = False\n\n os._exit(1)\n\n\n\nif __name__ == '__main__':\n try:\n a = DSClient()\n a.start()\n except (KeyboardInterrupt, EOFError, OSError, IOError) as err:\n print(err.args)\n os._exit(1)\n","repo_name":"Francisobiagwu/DocumentSharing","sub_path":"DSClient.py","file_name":"DSClient.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74213031527","text":"import numpy as np\nimport random\nfrom latest_board_state import latest_board_state\nfrom apply_move import apply_move\n\n\nclass AdjacentAgent:\n def __init__(self, name, id):\n self.id = id\n self.time = 0\n self.name = name\n\n def find_empty_places(self, board):\n empty_places = []\n for i, j in zip(np.where(board == 0)[0], np.where(board == 0)[1]):\n empty_places.append((i, j))\n return empty_places\n\n def __str__(self):\n return self.name\n\n def play(self):\n # returns True if the neightbor marble has the same color as the marble\n def adjancent_with_same_id(board, player_id, x, y):\n return (\n board[x + 1][y] == player_id\n and board[x - 1][y] == player_id\n and board[x][y + 1] == player_id\n and board[x][y - 1] == player_id\n )\n\n # REMOVES the padding\n def return_to_original(all_arr):\n ans = []\n for arr in all_arr:\n ans.append(((arr[0] - 1), (arr[1] - 1)))\n return ans\n\n board = latest_board_state()\n if self.time == 0:\n empty_places = []\n for i in range(6):\n for j in range(6):\n if board[i][j] == 0:\n empty_places.append((i, j))\n x, y = random.choice(empty_places)\n quarter = random.choice([1, 2, 3, 4])\n direction = random.choice([-1, 1])\n self.time += 1\n return x, y, quarter, direction\n\n else:\n last_filtered_choices = []\n board_with_padding = np.pad(\n board, 1, mode=\"constant\", constant_values=(-1,)\n )\n available_choices = self.find_empty_places(board_with_padding)\n for x, y in available_choices:\n if adjancent_with_same_id(board_with_padding, self.id, x, y):\n last_filtered_choices.append((x, y))\n last_filtered_choices = return_to_original(last_filtered_choices)\n if last_filtered_choices != []:\n x, y = random.choice(last_filtered_choices)\n else:\n x = random.choice([1, 2, 3, 4, 5])\n y = random.choice([1, 2, 3, 4, 5])\n quarter = random.choice([1, 2, 3, 4])\n direction = random.choice([-1, 1])\n\n # apply_move(board, self.id, x, y, quarter, direction)\n return (x, y, quarter, direction)\n","repo_name":"Hadraniel/pentago","sub_path":"adjacent_agent.py","file_name":"adjacent_agent.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19748520649","text":"import time\nimport select\nimport socket\nfrom typing import Any\nfrom unittest import TestCase\n\nfrom tmtccmd.com.tcpip_utils import EthAddr\nfrom tmtccmd.com.udp import UdpComIF\n\nLOCALHOST = \"127.0.0.1\"\n\n\nclass TestUdpIf(TestCase):\n def setUp(self) -> None:\n self.udp_server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.addr = (LOCALHOST, 7777)\n self.udp_server.bind(self.addr)\n self.udp_client = UdpComIF(\"udp\", send_address=EthAddr.from_tuple(self.addr))\n self.udp_client.initialize()\n\n def test_basic(self):\n self.assertEqual(self.udp_client.id, \"udp\")\n self._open()\n\n def test_send(self):\n self._open()\n self._simple_send(bytes([0, 1, 2, 
3]))\n\n def test_recv(self):\n self._open()\n data = bytes([0, 1, 2, 3])\n sender_addr = self._simple_send(data)\n self.udp_server.sendto(data, sender_addr)\n time.sleep(0.05)\n self.assertTrue(self.udp_client.data_available())\n data_recv = self.udp_client.receive()\n self.assertEqual(len(data_recv), 1)\n self.assertEqual(data_recv[0], data)\n\n def _simple_send(self, data: bytes) -> Any:\n self.udp_client.send(data)\n ready = select.select([self.udp_server], [], [], 0.1)\n self.assertTrue(ready[0])\n data_recv, sender_addr = self.udp_server.recvfrom(4096)\n self.assertEqual(data, data_recv)\n return sender_addr\n\n def _open(self):\n self.udp_client.open()\n self.assertTrue(self.udp_client.is_open())\n\n def tearDown(self) -> None:\n self.udp_client.close()\n self.udp_server.close()\n","repo_name":"robamu-org/tmtccmd","sub_path":"tests/com/test_udp.py","file_name":"test_udp.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"4922260161","text":"from pyecharts.charts import Bar\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Calendar\n\n#使用主题\nfrom pyecharts.globals import ThemeType\n\nimport datetime\nimport random\ndef calendar_base() -> Calendar:\n begin = datetime.date(2017, 1, 1)\n end = datetime.date(2017, 12, 31)\n data = [\n [str(begin + datetime.timedelta(days=i)), random.randint(1000, 25000)]\n for i in range((end - begin).days + 1)\n ]\n\n c = (\n Calendar()\n .add(\"\", data, calendar_opts=opts.CalendarOpts(range_=\"2017\"))\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"Calendar-2017年微信步数情况\"),\n visualmap_opts=opts.VisualMapOpts(\n max_=20000,\n min_=500,\n orient=\"horizontal\",\n is_piecewise=True,\n pos_top=\"230px\",\n pos_left=\"100px\",\n ),\n )\n )\n return c\n\nc = calendar_base()\nc.render()\n\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Gauge, Page\n\n\ndef gauge_base() -> Gauge:\n c = (\n Gauge()\n .add(\"\", [(\"完成率\", 66.6)])\n .set_global_opts(title_opts=opts.TitleOpts(title=\"Gauge-基本示例\"))\n )\n return c\ng = gauge_base()\ng.render()\n\nbar = (\n Bar()\n #Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))\n .add_xaxis([\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", \"高跟鞋\", \"袜子\"])\n .add_yaxis(\"商家A\", [5, 20, 36, 10, 75, 90])\n .add_yaxis(\"商家B\", [15, 6, 45, 20, 35, 66])\n .set_global_opts(title_opts=opts.TitleOpts(title=\"主标题\", subtitle=\"副标题\"))\n)\nbar.render()\n\nfrom pyecharts.charts import Geo\nfrom pyecharts.faker import Faker\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Geo\nfrom pyecharts.globals import ChartType, SymbolType\nfrom pyecharts.charts import Map\ndef map_visualmap() -> Map:\n c = (\n Map()\n .add(\"商家A\", [list(z) for z in zip(Faker.provinces, Faker.values())], \"china\")\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"Map-VisualMap(连续型)\"),\n visualmap_opts=opts.VisualMapOpts(max_=200),\n )\n )\n return c\n\ng = map_visualmap()\ng.render()","repo_name":"WD20160314/spider","sub_path":"pyecharts练习.py","file_name":"pyecharts练习.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10077629088","text":"import uvicorn\nfrom fastapi import FastAPI\nfrom slowapi import _rate_limit_exceeded_handler\nfrom slowapi.errors import RateLimitExceeded\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom config.logging_setup import setup_logging\nfrom router 
import gpt_router\nfrom router import grammar_router\n\nsetup_logging()\n\napp = FastAPI()\napp.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)\n\norigins = [\n \"http://localhost:8000\",\n \"http://localhost:3000\",\n \"http://34.64.87.27\",\n \"https://34.64.87.27\",\n \"http://kdt-ai-8-team01-1.elicecoding.com\",\n \"https://kdt-ai-8-team01-1.elicecoding.com\",\n \"http://172.19.0.3:5001\",\n \"http://172.19.0.4\"\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(gpt_router.router)\napp.include_router(grammar_router.router)\n\n\n@app.on_event(\"startup\")\nasync def init_app():\n print(\"🚀 GPT API START 🚀\")\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8777, reload=True)\n","repo_name":"hs20180519/aiProject","sub_path":"gpt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32837775515","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib import rnn\nimport math\nimport random\nimport os\n\ntf.set_random_seed(777)\n\nbatch_size = 100\n# sequence_length = 25\nchop_len = 198\nmel_bin = 256\nnum_classes = 4\nlearning_rate = 0.0005\niterations = 100\nckpt_path = 'ckpt/'\ndata_path = ''\n\ntrain = np.load(data_path+'train_256mel.npy')\n# train = train[:200]\nnp.random.shuffle(train)\ntest = np.load(data_path+'test_256mel.npy')\ntestx,testy = np.hsplit(test,[-1])\ntestx = np.reshape(testx,[-1,256,chop_len,1])\n\ndata_size = train.shape[0]\n\ntf.reset_default_graph()\n\nwith tf.Graph().as_default():\n x = tf.placeholder(tf.float32,[None,mel_bin,chop_len])\n x = tf.image.per_image_standardization(x)\n x = tf.reshape(x,[-1,mel_bin,chop_len,1])\n y = tf.placeholder(tf.float32,[None,4])\n phase = tf.placeholder(tf.bool,[])\n\n w1 = tf.Variable(tf.random_normal([3,3,1,64],stddev=0.01))\n l1 = tf.nn.conv2d(x,w1,strides=[1,1,1,1],padding='SAME')\n # l1 = tf.contrib.layers.batch_norm(l1,center=True,scale=True,is_training=phase)\n l1 = tf.nn.relu(l1)\n l1 = tf.nn.max_pool(l1,ksize=[1,3,3,1],strides=[1,3,3,1],padding='SAME')\n l1 = tf.layers.dropout(l1,0.1,training=phase)\n\n w2 = tf.Variable(tf.random_normal([3,3,64,64],stddev=0.01))\n l2 = tf.nn.conv2d(l1,w2,strides=[1,1,1,1],padding='SAME')\n # l2 = tf.contrib.layers.batch_norm(l2,center=True,scale=True,is_training=phase)\n l2 = tf.nn.relu(l2)\n l2 = tf.nn.max_pool(l2,ksize=[1,3,3,1],strides=[1,3,3,1],padding='SAME')\n # l2 = tf.layers.dropout(l2,0.1,training=phase)\n \n w3 = tf.Variable(tf.random_normal([3,3,64,64],stddev=0.01))\n l3 = tf.nn.conv2d(l2,w3,strides=[1,1,1,1],padding='SAME')\n # l3 = tf.contrib.layers.batch_norm(l3,center=True,scale=True,is_training=phase)\n l3 = tf.nn.max_pool(l3,ksize=[1,6,1,1],strides=[1,6,1,1],padding='SAME')\n # l3 = tf.layers.dropout(l3,0.2,training=phase)\n l3_transpose = tf.transpose(l3,perm=[0,1,3,2])\n temp_shape = l3_transpose.get_shape().as_list()\n l3_reshaped = tf.reshape(l3_transpose,[-1,temp_shape[1]*temp_shape[2],temp_shape[3]])\n l3_reshaped = tf.transpose(l3_reshaped,perm=[0,2,1])\n \n fw_hidden_size=temp_shape[1]*temp_shape[2]//4\n bw_hidden_size=fw_hidden_size//2\n cell_fw = rnn.BasicLSTMCell(num_units=fw_hidden_size,state_is_tuple=True)\n cell_bw = rnn.BasicLSTMCell(num_units=bw_hidden_size,state_is_tuple=True)\n\n # l4_squeeze = 
tf.squeeze(l4_max_pool,squeeze_dims=1)\n outputs, states= tf.nn.bidirectional_dynamic_rnn(cell_fw,cell_bw,l3_reshaped,dtype=tf.float32) #[N,25,8]\n\n temp_shape1 = outputs[1].get_shape().as_list()\n X_for_fc = tf.reshape(outputs[1],[-1,temp_shape1[1]*temp_shape1[2]])\n\n # FC layer\n y_pred = tf.contrib.layers.fully_connected(X_for_fc,num_classes, activation_fn=None)\n y_pred = tf.layers.dropout(y_pred,0.1,training=phase)\n y_pred = tf.reshape(y_pred, [-1,num_classes])\n result = tf.argmax(y_pred,axis=1)\n target = tf.argmax(y,axis=1)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(result,target),\"float\"))\n\n # crossentropy loss\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred,labels=y))\n train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n last_epoch = tf.Variable(0, name='last_epoch')\n\n\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(ckpt_path)\n if ckpt and ckpt.model_checkpoint_path:\n try:\n saver.restore(sess,ckpt.model_checkpoint_path)\n print(\"Successfully loaded:\", ckpt.model_checkpoint_path)\n except:\n print(\"Error on loading old network weights\")\n else:\n print(\"Could not find old network weights\")\n start_from = sess.run(last_epoch)\n\n for epoch in range(start_from,iterations):\n for j in range(int(data_size/batch_size)):\n batch = train[j*batch_size:(j+1)*batch_size]\n batchx, batchy = np.hsplit(batch,[-1])\n batchx = np.reshape(batchx,[-1,256,198,1])\n batchy = sess.run(tf.one_hot(batchy[:,0,0]-1,4)) \n _, step_loss, step_accuracy = sess.run([train_op,loss,accuracy],feed_dict={x:batchx,y:batchy,phase:True})\n if j%25==0:\n print(\"[step: {}/{}] loss: {} accuracy: {}\".format(j,epoch, step_loss,step_accuracy))\n if j%25==0:\n testyoh = sess.run(tf.one_hot(testy[:,0,0]-1,4))\n test_accuracy,test_loss,test_result, test_target = sess.run([accuracy,loss,result,target],feed_dict={x:testx,y:testyoh,phase:False})\n confusionMatrix = np.zeros((4,4),dtype=int)\n for i in range(len(test_result)):\n confusionMatrix[test_target[i],test_result[i]]=confusionMatrix[test_target[i],test_result[i]]+1\n print('test accuracy',test_accuracy,'\\n','test_loss',test_loss,'\\n',confusionMatrix)\n if j%300==0:\n if not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n saver.save(sess,ckpt_path+\"/model\",global_step=epoch)\n if epoch%2 == 0:\n np.random.shuffle(train)\n sess.run(last_epoch.assign(epoch))\n\nprint('learning_finished!') \n","repo_name":"yena53/BE","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7539530633","text":"import os\nimport json\nimport pymysql\nimport traceback\nimport warnings\nimport re as preg\n\n# 导入 警告类 并把 警告 转换成 错误\nwarnings.filterwarnings('error')\n\nclass Db:\n\n __connect = 'local'\n __instance = {}\n __startTrans = False\n __configFile = 'config.json'\n __MysqlLink = {}\n __cursor = {}\n __setting = { # 设置\n 'cursorType' : pymysql.cursors.DictCursor #默认的 游标类型 为 key value\n }\n\n \"\"\"\n 单列 实例化的 时候 new 自己\n \"\"\"\n def __new__(cls ,*args,**kwargs):\n\n cls.__DataBaseConfigJson = None\n # 获取当前包的 根路径\n __pathlist = os.path.dirname(__file__).split('\\\\')\n __pathlist.pop()\n # 当前包的根目录\n packetRoot = '\\\\'.join(__pathlist)\n\n # 当前驱动的名称\n driverName = 'Mysql'\n\n # 配置文件的路径\n configFilePath = packetRoot + '\\\\DataBase\\\\' + cls.__configFile\n\n\n 
try:\n # 读取配置文件 并把json 转化成 字典 (索引数组)\n if not os.path.exists(configFilePath):\n raise ValueError('{} table is not exists'.format(configFilePath))\n\n fileIo = open(configFilePath, 'r', 1, 'UTF-8')\n cls.__DataBaseConfigJson = json.load(fileIo)[driverName]\n fileIo.close()\n\n if 'connect' in kwargs:\n cls.__connect = kwargs['connect']\n\n # 连接数据库 多次实例化 只连接 一次\n if cls.__connect not in cls.__MysqlLink :\n connect = cls.__DataBaseConfigJson[cls.__connect]\n cls.__MysqlLink[cls.__connect] = pymysql.Connect(**connect)\n cls.__cursor[cls.__connect] = cls.__MysqlLink[cls.__connect].cursor(cls.__setting['cursorType'])\n\n\n\n except IOError as e:\n print(traceback.print_exc())\n return False\n except Exception as e:\n print(traceback.print_exc())\n return False\n\n\n\n if cls.__connect not in cls.__instance.keys() :\n\n cls.__instance[cls.__connect] = object.__new__(cls)\n\n\n return cls.__instance[cls.__connect]\n\n \"\"\"\n :param tabelName 表的名称\n \"\"\"\n def __init__(self ,connect='local'):\n\n # 当前连接的key\n self.__connect = connect\n self.__sql = {\n 'allow_type': ['SELECT', 'FIND', 'UPDATE', 'INSERT', 'DELETE','COUNT'],\n 'field': '',\n 'where': [],\n 'order': '',\n 'limit': [],\n 'group': ''\n }\n self.sql = ''\n\n\n def table(self ,tableName=''):\n self.tableName = tableName\n if tableName != False and self.__checkTableExists(tableName) == 0 :\n raise ValueError('表不存在')\n \n return self.__instance[self.__connect]\n\n \"\"\"\n 检查表是否存在\n \"\"\"\n def __checkTableExists(self ,tableName):\n return self.__cursor[self.__connect].execute('show tables like \"%s\"' % (tableName))\n\n \"\"\"\n 获取SQL语句类型\n \"\"\"\n def __getSqlType(self):\n # 获取被哪个方法调用的\n sqlPrefix = traceback.extract_stack()[-3][-2]\n if(sqlPrefix == 'find' ):\n sqlPrefix = 'select'\n\n\n # 检查 被调用的方法 是否在 允许的 范围\n try:\n # list.index() 不再 列表里 会 抛出异常\n self.__sql['allow_type'].index(sqlPrefix.upper())\n except ValueError as e:\n raise ValueError(sqlPrefix + ' SQL 类型不被允许')\n\n self.__sql['sqlType'] = sqlPrefix\n\n if(sqlPrefix == 'select'):\n self.__sql['prifix'] = sqlPrefix.upper()\n\n if(sqlPrefix == 'insert'):\n self.__sql['prifix'] = sqlPrefix.upper() + ' INTO '\n\n if(sqlPrefix == 'delete'):\n self.__sql['prifix'] = sqlPrefix.upper() + ' FROM '\n\n if(sqlPrefix == 'update'):\n self.__sql['prifix'] = sqlPrefix.upper() + ' '\n\n if (sqlPrefix == 'count'):\n self.__sql['prifix'] = 'SELECT' + ' '\n\n\n return\n\n \"\"\"\n 组装sql\n \"\"\"\n def __buildSql(self ,*args):\n\n self.__getSqlType()\n\n fieldStr = self.__filterField()\n\n if(self.__parseWhere(self.__sql['where']).__len__()):\n whereStr = ' WHERE ' + self.__parseWhere(self.__sql['where'])\n else:\n whereStr = ''\n\n limitStr = self.__limitToStr()\n\n\n try:\n\n if (['insert','update'].index(self.__sql['sqlType']) >= 0 ):\n return self.__sql['prifix'] + ' `'+self.tableName+'`' + self.__dataToSql(args[0]) + whereStr + limitStr\n\n except ValueError as e:\n return self.__sql['prifix'] + fieldStr + ' `' + self.tableName + '`' + whereStr + self.__sql['group'] + self.__sql[\n 'order'] + limitStr\n\n \"\"\"\n 多条查询\n \"\"\"\n def select(self):\n self.lastSql = self.__buildSql()\n result = self.__query(self.lastSql)\n self.__clearSelfAttr()\n if (result.__len__() == 0):\n return False\n else:\n return result\n\n def find(self):\n self.__sql['limit'] = [1]\n self.lastSql = self.__buildSql()\n result = self.__query(self.lastSql)\n\n self.__clearSelfAttr()\n\n if(result.__len__() == 0):\n return False;\n else:\n return result[0]\n\n def insert(self ,data = []):\n sql = 
self.__buildSql(data)\n self.lastSql = sql\n self.__clearSelfAttr()\n return self.__query(sql)\n\n def count(self):\n sql = self.__buildSql()\n self.lastSql = sql\n self.__clearSelfAttr()\n return self.__query(sql)\n\n def update(self ,data = []):\n\n sql = self.__buildSql(data)\n self.lastSql = sql\n self.__clearSelfAttr()\n return self.__query(sql)\n\n\n def startTrans(self):\n self.__startTrans = True\n self.__MysqlLink[self.__connect].begin()\n\n def rollback(self):\n self.__MysqlLink[self.__connect].rollback()\n def commit(self):\n self.__MysqlLink[self.__connect].commit()\n\n\n def __query(self ,sql = ''):\n try:\n # 没有开启事物的情况下 自动提交\n if(self.__startTrans == False):\n self.__MysqlLink[self.__connect].autocommit(1)\n\n flag = self.__cursor[self.__connect].execute(sql)\n if(self.__sql['sqlType'] == 'insert'):\n return self.__cursor[self.__connect].lastrowid\n if (self.__sql['sqlType'] == 'update'):\n return flag\n if(self.__sql['sqlType'] == 'select'):\n return self.__cursor[self.__connect].fetchall()\n\n if (self.__sql['sqlType'] == 'count'):\n res = self.__cursor[self.__connect].fetchall();\n return res[0]['count(*)']\n\n except Warning as e:\n self.__MysqlLink[self.__connect].rollback()\n raise Exception('sql warining :'+ str(e) + ' sql :'+ sql)\n\n except Exception as e:\n self.__MysqlLink[self.__connect].rollback()\n raise Exception('sql execute Error check your sql : '+ sql)\n\n def field(self ,fieldStr = ''):\n self.__sql['field'] = fieldStr if(fieldStr) else fieldStr;\n return self.__instance[self.__connect]\n\n def where(self ,whereList = []):\n if(type(whereList) != list):\n raise ValueError('where 值必须是list类型')\n self.__sql['where'] = (whereList if(whereList) else whereList)\n\n return self.__instance[self.__connect]\n\n def order(self , orderStr = ''):\n self.__sql['order'] = ' ORDER BY %s ' % (orderStr)\n return self.__instance[self.__connect]\n\n def group(self , groupStr = ''):\n self.__sql['group'] = ' GROUP BY %s ' % (groupStr)\n return self.__instance[self.__connect]\n\n def limit(self , limitList = []):\n self.__sql['limit'] = (limitList if (limitList) else [])\n return self.__instance[self.__connect]\n\n \"\"\"\n 数组组装成对应的 sql 字符串\n \"\"\"\n def __dataToSql(self ,data = []):\n\n field = '('\n fieldList = []\n values = ' VALUES'\n if (type(data) != list):\n raise ValueError('insert 值必须是 list 列表 类型')\n\n if (self.__sql['sqlType'] == 'insert'):\n for k ,v in enumerate(data):\n if(type(v) != dict):\n raise ValueError('insert 列表中的值,只可以是 dict 字典类型')\n values += '('\n for key ,value in v.items():\n if(fieldList.count(key) == 0):\n fieldList.append(key)\n if isinstance(value ,str):\n values += \"\\\"%s\\\",\" % (pymysql.escape_string(value))\n elif value == None:\n values += '%s ,' % 'NULL'\n else:\n values += '\\'%s\\' ,' % (value)\n\n values = values.strip(',')\n values += '),'\n\n for f in fieldList:\n f = \"`%s`\" % f\n field += f + ','\n\n # print(field.strip(',') + ') ' + values.strip(','))\n\n return field.strip(',') + ') ' + values.strip(',')\n\n\n if (self.__sql['sqlType'] == 'update'):\n for k ,v in enumerate(data):\n if (type(v) != dict):\n raise ValueError('insert 列表中的值,只可以是 dict 字典类型')\n\n for key ,value in v.items():\n _str = ' `%s` = \"%s\" ' % (key ,value)\n fieldList.append(_str)\n return ' SET '+ ','.join(fieldList)\n\n\n \"\"\"\n 解析where参数 转换成 where 字符串\n \"\"\"\n def __parseWhere(self ,whereExp = []):\n\n if(type(whereExp) != list and whereExp.__len__()):\n return ''\n\n whereList = []\n for k ,v in enumerate(whereExp):\n if (type(v) == list):\n _str = 
self.__parseWhere(v)\n whereList.append(_str)\n else:\n # ['exp' ,'instr(field,str)'] 这种格式\n if(whereExp[0].upper() == 'EXP' and len(whereExp[1]) > 0):\n _str = whereExp[1]\n if(whereList.count(whereExp[1]) == 0):\n whereList.append(_str)\n # ['id' ,'=' ,1] 这种格式\n elif(len(whereExp[0]) > 0 and len(whereExp[1]) > 0 and len( str(whereExp[2]) ) > 0):\n\n if whereExp[1] == 'in':\n strList = []\n for i in whereExp[2]:\n strList.append(str(i))\n\n strList = ','.join(strList)\n\n _str = whereExp[0] +' '+ whereExp[1] + \"(%s)\" % (strList)\n\n else:\n _str = whereExp[0] +' '+ whereExp[1] + \"'%s'\" % (str(whereExp[2]))\n if (whereList.count(_str) == 0):\n whereList.append(_str)\n\n else:\n continue\n\n return ''.join(whereList)\n\n\n return ' AND '.join(whereList)\n\n def __limitToStr(self):\n\n if(self.__sql['sqlType'] == 'insert'):\n return ''\n if( isinstance(self.__sql['limit'],list) != True ):\n raise ValueError('limit 值得类型必须是 list 列表')\n if(self.__sql['limit'].__len__() == 0):\n return ''\n if(self.__sql['limit'].__len__() > 2):\n raise ValueError('limit 列表对多两个值')\n\n if (self.__sql['limit'].__len__() == 1):\n return ' LIMIT %s ' % (self.__sql['limit'][0])\n\n if (self.__sql['limit'].__len__() == 2):\n return ' LIMIT %s,%s' % (self.__sql['limit'][0] ,self.__sql['limit'][1])\n\n\n def __filterField(self ):\n if self.__sql['sqlType'] == 'count':\n return 'count(*) FROM'\n\n field = ''\n for v in self.__sql['field'].split(','):\n if(v.__len__() > 0):\n field += ' `%s`,' % (v)\n if(field):\n return field.strip(',') + ' FROM'\n else:\n return ' * FROM'\n\n\n\n def query(self ,sql):\n try:\n\n if(preg.match('select',sql ,preg.I)):\n self.__cursor[self.__connect].execute(sql)\n return self.__cursor[self.__connect].fetchall()\n elif (preg.match('show', sql, preg.I)):\n self.__cursor[self.__connect].execute(sql)\n return self.__cursor[self.__connect].fetchall()\n else:\n result = self.__cursor[self.__connect].execute(sql)\n self.__MysqlLink[self.__connect][self.__connect].commit()\n return result\n except:\n print(traceback.print_exc())\n\n\n def __clearSelfAttr(self):\n self.__sql['field'] = ''\n self.__sql['where'] = []\n self.__sql['order'] = ''\n self.__sql['limit'] = []\n self.__sql['group'] = ''\n\n\n\n\n\n\n\n \"\"\"\n 获取刚刚执行的sql\n \"\"\"\n def getLastSql(self):\n return self.lastSql\n\n \"\"\"\n 脚本执行完成 释放|删除 对象的时候\n \"\"\"\n def __del__(self):\n\n self.__cursor[self.__connect].close()\n self.__MysqlLink[self.__connect].close()\n del self\n #print('--------------------del object close Mysql link--------------------')\n\n\n\n\n\"\"\"\n直接调用直接pass\n\"\"\"\nif __name__ == \"main\":\n pass\n\n\n\n\n\n","repo_name":"jyolo/nginxWatcher","sub_path":"DataBase/Mysql.py","file_name":"Mysql.py","file_ext":"py","file_size_in_byte":13509,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"12243770418","text":"\"\"\"Routes for lab manipulation (start, stop, get status, see events).\"\"\"\n\nfrom fastapi import APIRouter, Depends, Header, HTTPException, Response, status\nfrom safir.models import ErrorLocation, ErrorModel\nfrom safir.slack.webhook import SlackRouteErrorHandler\nfrom sse_starlette import EventSourceResponse\n\nfrom ..dependencies.context import RequestContext, context_dependency\nfrom ..dependencies.user import user_dependency\nfrom ..exceptions import (\n InvalidDockerReferenceError,\n PermissionDeniedError,\n UnknownDockerImageError,\n UnknownUserError,\n)\nfrom ..models.domain.gafaelfawr import GafaelfawrUser\nfrom ..models.v1.lab 
import LabSpecification, UserLabState\n\nrouter = APIRouter(route_class=SlackRouteErrorHandler)\n\"\"\"Router to mount into the application.\"\"\"\n\n__all__ = [\"router\"]\n\n\n@router.get(\n \"/spawner/v1/labs\",\n responses={403: {\"description\": \"Forbidden\", \"model\": ErrorModel}},\n summary=\"List all users with running labs\",\n)\nasync def get_lab_users(\n context: RequestContext = Depends(context_dependency),\n) -> list[str]:\n \"\"\"Returns a list of all users with running labs.\"\"\"\n return await context.lab_state.list_lab_users(only_running=True)\n\n\n@router.get(\n \"/spawner/v1/labs/{username}\",\n response_model=UserLabState,\n responses={\n 403: {\"description\": \"Forbidden\", \"model\": ErrorModel},\n 404: {\"description\": \"Lab not found\", \"model\": ErrorModel},\n },\n summary=\"Status of user's lab\",\n)\nasync def get_lab_state(\n username: str,\n context: RequestContext = Depends(context_dependency),\n) -> UserLabState:\n try:\n return await context.lab_state.get_lab_state(username)\n except UnknownUserError as e:\n e.location = ErrorLocation.path\n e.field_path = [\"username\"]\n raise\n\n\n@router.post(\n \"/spawner/v1/labs/{username}/create\",\n responses={\n 403: {\"description\": \"Forbidden\", \"model\": ErrorModel},\n 409: {\"description\": \"Lab exists\", \"model\": ErrorModel},\n },\n status_code=201,\n summary=\"Create user lab\",\n)\nasync def post_new_lab(\n username: str,\n lab: LabSpecification,\n response: Response,\n context: RequestContext = Depends(context_dependency),\n user: GafaelfawrUser = Depends(user_dependency),\n) -> None:\n context.rebind_logger(user=username)\n if username != user.username:\n raise PermissionDeniedError(\"Permission denied\")\n\n # The user is valid and matches the route. Attempt the lab creation.\n lab_manager = context.factory.create_lab_manager()\n try:\n await lab_manager.create_lab(user, lab)\n except (InvalidDockerReferenceError, UnknownDockerImageError) as e:\n e.location = ErrorLocation.body\n e.field_path = [\"options\", lab.options.image_attribute]\n raise\n url = context.request.url_for(\"get_lab_state\", username=username)\n response.headers[\"Location\"] = str(url)\n\n\n@router.delete(\n \"/spawner/v1/labs/{username}\",\n summary=\"Delete user lab\",\n responses={\n 403: {\"description\": \"Forbidden\", \"model\": ErrorModel},\n 404: {\"description\": \"Lab not found\", \"model\": ErrorModel},\n },\n status_code=204,\n)\nasync def delete_user_lab(\n username: str,\n context: RequestContext = Depends(context_dependency),\n) -> None:\n context.rebind_logger(user=username)\n lab_manager = context.factory.create_lab_manager()\n try:\n await lab_manager.delete_lab(username)\n except UnknownUserError as e:\n e.location = ErrorLocation.path\n e.field_path = [\"username\"]\n raise\n except Exception as e:\n # The exception was already reported to Slack at the service layer, so\n # convert it to a standard error message instead of letting it\n # propagate as an uncaught exception.\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=[{\"msg\": \"Failed to delete lab\", \"type\": \"delete_failed\"}],\n ) from e\n\n\n@router.get(\n \"/spawner/v1/labs/{username}/events\",\n summary=\"Get event stream for user's lab\",\n description=(\n \"Returns a stream of server-sent events representing progress in\"\n \" creating the user's lab. 
The stream ends when the lab creation\"\n \" succeeds or fails.\"\n ),\n responses={\n 403: {\"description\": \"Forbidden\", \"model\": ErrorModel},\n 404: {\"description\": \"Lab not found\", \"model\": ErrorModel},\n },\n)\nasync def get_lab_events(\n username: str,\n x_auth_request_user: str = Header(..., include_in_schema=False),\n context: RequestContext = Depends(context_dependency),\n) -> EventSourceResponse:\n \"\"\"Returns the events for the lab of the given user.\"\"\"\n if username != x_auth_request_user:\n raise PermissionDeniedError(\"Permission denied\")\n context.rebind_logger(user=username)\n try:\n generator = context.lab_state.events_for_user(username)\n return EventSourceResponse(generator)\n except UnknownUserError as e:\n e.location = ErrorLocation.path\n e.field_path = [\"username\"]\n raise\n","repo_name":"lsst-sqre/jupyterlab-controller","sub_path":"src/jupyterlabcontroller/handlers/labs.py","file_name":"labs.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8849165265","text":"'''TimeDistributed classes'''\n\nfrom typing import Optional, Union, Tuple\n\nimport torch\n\n\nclass TimeDistributed(torch.nn.Module):\n '''TimeDistributed Container module'''\n\n def __init__(self, module: torch.nn.Module) -> None:\n super().__init__()\n self.module = module\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n '''Runs forward pass'''\n # Squash samples and timesteps into a single axis\n x_reshape = x.view(-1, *x.shape[2:]) # (samples * timesteps, ...)\n y = self.module(x_reshape)\n\n # reshape Y\n y = y.view(*x.shape[:2], *y.shape[1:]) # (samples, timesteps, ...)\n return y\n\n\nclass DistributedConv3D(torch.nn.Module):\n '''TimeDistributed Conv3D'''\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Tuple[int, int, int]],\n instance_norm: bool = False,\n batch_norm: bool = False,\n activation: Optional[str] = 'swish',\n ) -> None:\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.instance_norm = instance_norm\n self.batch_norm = batch_norm\n self.activation = activation\n\n # Initialise layers\n conv_layer = self._get_conv_layer()\n activation_layer = self._get_activation_layer()\n norm_layer = self._get_norm_layer()\n sequential = torch.nn.Sequential(conv_layer)\n if activation_layer is not None:\n sequential.add_module(str(len(sequential)), activation_layer)\n if norm_layer is not None:\n sequential.add_module(str(len(sequential)), norm_layer)\n\n self.layers = TimeDistributed(sequential)\n\n def _get_conv_layer(self) -> torch.nn.Module:\n return torch.nn.Conv3d(\n self.in_channels, self.out_channels, self.kernel_size, padding='same'\n )\n\n def _get_activation_layer(self) -> Union[None, torch.nn.Module]:\n if self.activation is None:\n return None\n act_layers = {\n 'swish': torch.nn.SiLU,\n 'relu': torch.nn.ReLU,\n }\n assert self.activation in act_layers, f'{self.activation} not in allowed activations.'\n return act_layers[self.activation]()\n\n def _get_norm_layer(self) -> Union[None, torch.nn.Module]:\n if self.instance_norm:\n return torch.nn.InstanceNorm3d(self.out_channels)\n if self.batch_norm:\n return torch.nn.BatchNorm3d(self.out_channels)\n return None\n\n def forward(self, tensor: torch.Tensor) -> torch.Tensor:\n '''Runs forward pass'''\n return 
self.layers(tensor)\n","repo_name":"m-lyon/dmri-pcconv","sub_path":"dmri_pcconv/core/model/layers/distributed.py","file_name":"distributed.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6741751908","text":"import maya.cmds as cmds\r\nimport maya.mel as mel\r\nimport time\r\n\r\nclass Dialog(object):\r\n def successDialog(self):\r\n cmds.confirmDialog( t=\"Success\", m=\"Function Success! \", button=[\"Ok\"]) \r\n print(\"\")\r\n\r\n def warningDialog(self, msg):\r\n cmds.confirmDialog(t=\"Warning\", m=msg, button=[\"Return\"]) \r\n\r\n def sameNameOrNoExistDialog(self, obj, sear, repl, noPop=[], exist=1):\r\n conti= 1\r\n sameName,noExist,success,finalTar,dupName= [],[],[],[],[]\r\n for item in obj:\r\n #Super long because for same name object, just replace the [-1] and needa put back the front parent name\r\n realName= item.split(\"|\")[-1]\r\n parName= (\"|\").join(item.split(\"|\")[:-1])\r\n if realName.replace(sear, repl)==realName:\r\n sameName.append(item)\r\n else: \r\n if parName:\r\n newName= \"%s|%s\"%(parName, realName.replace(sear,repl))\r\n else:\r\n newName= realName.replace(sear,repl)\r\n\r\n #Checking noExist/dupName/success/finalTar \r\n if cmds.objExists(newName)==0:\r\n noExist.append(\"%s ( %s )\"%(item, newName))\r\n else:\r\n if len(cmds.ls(newName))>1:\r\n dupName.append(\"%s ( %s )\"%(item, newName))\r\n else:\r\n success.append(item)\r\n finalTar.append(newName)\r\n \r\n #exist is for renamer because it no need to search for a target\r\n if exist==[]:\r\n noExist= [] \r\n\r\n #Pop out layoutDialog \r\n if sameName or noExist or dupName: \r\n if noPop==[]:\r\n ans= cmds.layoutDialog(t=\"Continue Or Not\", ui=lambda :self.sameNameOrNoExistDialogUI1(sameName, noExist, dupName, success))\r\n if ans==\"Continue\":\r\n conti= 1\r\n elif ans==\"No + Print\":\r\n conti=[]\r\n self.sameNameOrNoExistDialogUI2(sameName, noExist, dupName, success)\r\n else:\r\n conti=[]\r\n return conti, finalTar \r\n \r\n #This is just for copyWeight's Search Replace Influence because need to stack \r\n def sameNameOrNoExistSrDialog(self, obj, sear, repl, noPop=[], sameName=[], noExist=[], success=[], dupName=[]):\r\n conti= 1\r\n finalTar,newSuccess,newFinalTar,newSameName= [],[],[],[]\r\n if obj:\r\n for item in obj:\r\n #Super long because for same name object, just replace the [-1] and needa put back the front parent name\r\n realName= item.split(\"|\")[-1]\r\n parName= (\"|\").join(item.split(\"|\")[:-1])\r\n if realName.replace(sear, repl)==realName:\r\n sameName.append(item)\r\n else: \r\n if parName:\r\n newName= \"%s|%s\"%(parName, realName.replace(sear,repl))\r\n else:\r\n newName= realName.replace(sear,repl)\r\n\r\n #Checking noExist/dupName/success/finalTar \r\n if cmds.objExists(newName)==0:\r\n noExist.append(\"%s ( %s )\"%(item, newName))\r\n else:\r\n if len(cmds.ls(newName))>1:\r\n dupName.append(\"%s ( %s )\"%(item, newName))\r\n else:\r\n #finalTar is after item become search replace\r\n finalTar.append(newName)\r\n if newName not in obj:\r\n newSuccess.append(\"%s ( %s )\"%(item, newName))\r\n newFinalTar.append(newName)\r\n for stuff in sameName:\r\n if stuff not in finalTar:\r\n newSameName.append(stuff)\r\n success,finalTar,sameName= newSuccess,newFinalTar,newSameName\r\n\r\n #Pop out layoutDialog \r\n if sameName or noExist or dupName: \r\n if noPop==[]:\r\n ans= cmds.layoutDialog(t=\"Continue Or Not\", ui=lambda :self.sameNameOrNoExistDialogUI1(sameName, 
noExist, dupName, success))\r\n if ans==\"Continue\":\r\n conti= 1\r\n elif ans==\"No + Print\":\r\n conti=[]\r\n self.sameNameOrNoExistDialogUI2(sameName, noExist, dupName, success)\r\n else:\r\n conti=[]\r\n return conti, finalTar, sameName, noExist, success, dupName \r\n\r\n def sameNameOrNoExistDialogUI1(self, tar1, tar2, tar3, tar4):\r\n form= cmds.setParent(q=1)\r\n cmds.formLayout(form, e=1, nd=100, w=500)\r\n txt1= cmds.text(l=\"< %s > Same Name As Source\"%len(tar1))\r\n txt2= cmds.text(l=\"< %s > Name Does Not Exist\"%len(tar2))\r\n txt3= cmds.text(l=\"< %s > Duplicated Target\"%len(tar3))\r\n txt4= cmds.text(l=\"< %s > Success\"%len(tar4))\r\n sf1= cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"- %s\"%(\"\\n- \".join(tar1)))\r\n sf2= cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"- %s\"%(\"\\n- \".join(tar2)))\r\n sf3= cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"- %s\"%(\"\\n- \".join(tar3)))\r\n sf4= cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"- %s\"%(\"\\n- \".join(tar4)))\r\n but1= cmds.button(l=\"Continue\", c=\"cmds.layoutDialog(dis='Continue')\")\r\n but2= cmds.button(l=\"No\", c=\"cmds.layoutDialog(dis='No')\")\r\n but3= cmds.button(l=\"No + Print\", c=\"cmds.layoutDialog(dis='No + Print')\")\r\n cmds.formLayout(form, e=1,\r\n af=[(txt1, \"top\", 10),\r\n (txt2, \"top\", 10),\r\n (txt3, \"top\", 10),\r\n (txt4, \"top\", 10),\r\n (sf1, \"top\", 31),\r\n (sf2, \"top\", 31),\r\n (sf3, \"top\", 31),\r\n (sf4, \"top\", 31),\r\n (but1, \"bottom\", 5),\r\n (but2, \"bottom\", 5),\r\n (but3, \"bottom\", 5)],\r\n ac=[(sf1, \"bottom\", 10, but1),\r\n (sf2, \"bottom\", 10, but1),\r\n (sf3, \"bottom\", 10, but1),\r\n (sf4, \"bottom\", 10, but1)], \r\n ap=[(txt1, \"left\", 0, 0),\r\n (txt1, \"right\", 0, 25),\r\n (txt2, \"left\", 0, 26),\r\n (txt2, \"right\", 0, 50),\r\n (txt3, \"left\", 0, 51),\r\n (txt3, \"right\", 0, 75),\r\n (txt4, \"left\", 0, 76),\r\n (txt4, \"right\", 0, 100),\r\n (sf1, \"left\", 0, 0),\r\n (sf1, \"right\", 0, 25),\r\n (sf2, \"left\", 0, 26),\r\n (sf2, \"right\", 0, 50),\r\n (sf3, \"left\", 0, 51),\r\n (sf3, \"right\", 0, 75),\r\n (sf4, \"left\", 0, 76),\r\n (sf4, \"right\", 0, 100),\r\n (but1, \"left\", 0, 0),\r\n (but1, \"right\", 0, 33),\r\n (but2, \"left\", 0, 34),\r\n (but2, \"right\", 0, 66),\r\n (but3, \"left\", 0, 67),\r\n (but3, \"right\", 0, 100)])\r\n cmds.setFocus(cmds.text(l=\"\"))\r\n\r\n def sameNameOrNoExistDialogUI2(self, tar1, tar2, tar3, tar4):\r\n try: \r\n cmds.deleteUI(\"snone\") \r\n except:\r\n pass \r\n cmds.window(\"snone\")\r\n cmds.window(\"snone\", e=1, t=\"Targets With Naming Problem\", s=1, wh=(700,300))\r\n snone1= cmds.paneLayout(cn=\"vertical4\", w=700)\r\n cmds.paneLayout(snone1, e=1, ps=[1,25,25])\r\n cmds.paneLayout(snone1, e=1, ps=[2,25,25])\r\n cmds.paneLayout(snone1, e=1, ps=[3,25,25])\r\n cmds.paneLayout(snone1, e=1, ps=[4,25,25])\r\n cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"< %s > Same Name As Source\\n-----------------------------------\\n\\n%s\"%(len(tar1),\"- %s\"%(\"\\n- \".join(tar1))))\r\n cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"< %s > Name Does Not Exist\\n----------------------------------\\n\\n%s\"%(len(tar2),\"- %s\"%(\"\\n- \".join(tar2))))\r\n cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"< %s > Duplicated Target\\n------------------------------\\n\\n%s\"%(len(tar3),\"- %s\"%(\"\\n- \".join(tar3))))\r\n cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"< %s > Succeeded\\n---------------------\\n\\n%s\"%(len(tar4),\"- %s\"%(\"\\n- \".join(tar4))))\r\n 
cmds.showWindow(\"snone\")\r\n\r\n def continueDialog(self, tar, msg):\r\n if tar:\r\n conti= 1\r\n else:\r\n ans= cmds.confirmDialog(t=\"Continue Or Not\", m=\"%s\"%msg, button=[\"Continue\", \"No\"]) \r\n if ans==\"Continue\":\r\n conti= 1\r\n else:\r\n conti= []\r\n return conti\r\n\r\n def printingDialog(self, tar, msg):\r\n conti= 1\r\n if tar:\r\n ans= cmds.layoutDialog(t=\"Continue Or Not\", ui=lambda :self.printingDialogUI1(msg, tar))\r\n if ans==\"Continue\":\r\n conti= 1\r\n elif ans==\"No + Print\":\r\n self.printingDialogUI2(msg, tar)\r\n conti= []\r\n else:\r\n conti= []\r\n return conti \r\n\r\n def printingDialogUI1(self, msg, tar):\r\n form= cmds.setParent(q=1)\r\n t1= cmds.text(l=msg)\r\n sf= cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"\\n\".join(tar), ip=100)\r\n but1= cmds.button(l=\"Continue\", c=\"cmds.layoutDialog(dis='Continue')\")\r\n but2= cmds.button(l=\"No\", c=\"cmds.layoutDialog(dis='No')\")\r\n but3= cmds.button(l=\"No + Print\", c=\"cmds.layoutDialog(dis='No + Print')\")\r\n cmds.setFocus(cmds.text(l=\"\"))\r\n cmds.formLayout(form, e=1, nd=100, w=400,\r\n af=[(t1, \"top\", 10),\r\n (sf, \"top\", 50),\r\n (but1, \"bottom\", 5),\r\n (but2, \"bottom\", 5),\r\n (but3, \"bottom\", 5)],\r\n ac=[(sf, \"bottom\", 10, but1)], \r\n ap=[(t1, \"left\", 0, 0),\r\n (t1, \"right\", 0, 100),\r\n (sf, \"left\", 0, 0),\r\n (sf, \"right\", 0, 100),\r\n (but1, \"left\", 0, 0),\r\n (but1, \"right\", 0, 33),\r\n (but2, \"left\", 0, 34),\r\n (but2, \"right\", 0, 66),\r\n (but3, \"left\", 0, 67),\r\n (but3, \"right\", 0, 100)])\r\n\r\n def printingDialogUI2(self, msg, tar):\r\n try: \r\n cmds.deleteUI(\"pd\") \r\n except:\r\n pass \r\n cmds.window(\"pd\")\r\n cmds.window(\"pd\", e=1, t=\"Target List\", s=1, wh=(200,300))\r\n cmds.frameLayout(\"pd1\", lv=0)\r\n cmds.separator(h=5, st=\"none\") \r\n cmds.setFocus(cmds.text(l=msg))\r\n cmds.separator(h=5, st=\"none\")\r\n cmds.scrollField(ed=0, fn=\"plainLabelFont\", tx=\"\\n\".join(tar))\r\n cmds.showWindow(\"pd\")\r\n\r\n\r\n","repo_name":"Lvnmou/LvnTools","sub_path":"Mod/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":11628,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"11045969798","text":"#!/usr/bin/env python\n\nimport psycopg2\n\n\ndef question_1(cursor):\n cursor.execute('''\n SELECT a.title, (string_to_array(path,'/'))[3] as article, count(*) as c\n FROM log as l JOIN articles as a on a.slug=(string_to_array(path,'/'))[3]\n WHERE (string_to_array(path,'/'))[3] is not null\n GROUP BY article, a.title ORDER BY c desc;''')\n results = cursor.fetchall()\n print(\"-\" * 80)\n for r in results:\n print('\"{}\" - {} views'.format(r[0], r[2]))\n\n\ndef question_2(cursor):\n sql_query = '''\n SELECT au.name, count(*) as c\n FROM log as l JOIN articles as a on a.slug=(string_to_array(path,'/'))[3]\n JOIN authors as au ON a.author=au.id\n WHERE (string_to_array(path,'/'))[3] is not null\n GROUP BY au.id, au.name\n ORDER BY c desc;\n'''\n cursor.execute(sql_query)\n results = cursor.fetchall()\n print(\"-\" * 80)\n for r in results:\n print('{} - {} views'.format(r[0], r[1]))\n\n\ndef question_3(cursor):\n sql_query = '''\n select t1.day, CAST (t2.error_views as FLOAT) / t1.views as rate\n FROM ( SELECT date_trunc('day', time) as day, count(*) as views\n from log group by day ) as t1\n JOIN ( SELECT date_trunc('day', time) as day, count(*) as error_views\n from log where CAST((string_to_array(status,' '))[1] AS INTEGER) >=400\n group by day ) as 
t2 on t1.day = t2.day\n WHERE CAST (t2.error_views as FLOAT) / t1.views > 0.01\n '''\n cursor.execute(sql_query)\n results = cursor.fetchall()\n print(\"-\" * 80)\n for r in results:\n print('{} - {:.2%} errors'.format(r[0].date(), r[1]))\n\n\ndef main():\n conn = psycopg2.connect('dbname=news')\n cursor = conn.cursor()\n question_1(cursor)\n question_2(cursor)\n question_3(cursor)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wxhh61/project_1","sub_path":"log_analysis.py","file_name":"log_analysis.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20422892851","text":"from flask import Blueprint, request\nfrom init import db\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom datetime import datetime\nfrom models.application import Application, application_schema\nfrom models.job import Job\n\napplications_bp = Blueprint(\n \"applications\", __name__)\n\n# CRUD functionality for the Application model\n\n\n# Create Application Route\n@applications_bp.route('/', methods=[\"POST\"])\n@jwt_required()\ndef create_application(job_id):\n\n # Creates a new application for a job. Retrieves JSON data from the request body and checks if the job exits, if it does, it creates a new application model instance and adds it to the database.\n body_data = request.get_json()\n stmt = db.select(Job).where(Job.id == job_id)\n job = db.session.scalar(stmt)\n if job:\n # Convert the date_applied string to a date object\n date_applied = datetime.strptime(\n body_data.get(\"date_applied\"), \"%Y-%m-%d\").date()\n\n # Status ID is provided in the request data\n status_id = body_data.get(\"status_id\")\n if status_id is None:\n return {\"error\": \"Status ID is required in the request body\"}, 400\n\n application = Application(\n date_applied=date_applied,\n # This passes the applicant id from the JWT token.\n applicant_id=get_jwt_identity(),\n # This passes the model instance to the model relationship.\n job=job,\n status_id=status_id # Set the status_id to the provided value\n\n )\n\n db.session.add(application)\n db.session.commit()\n return application_schema.dump(application), 201\n else:\n return {\"error\": f\"Job not found with id {job_id}\"}, 404\n\n\n# Delete Application Route.\n@applications_bp.route('/', methods=[\"DELETE\"])\ndef delete_application(job_id, application_id):\n # Deletes an application from the database. Checks if the application exists, if it does, it deletes it from the database.\n stmt = db.select(Application).where(\n Application.id == application_id)\n application = db.session.scalar(stmt)\n if application:\n db.session.delete(application)\n db.session.commit()\n return {\"message\": f\"Application with id {application_id} has been deleted\"}\n else:\n return {\"error\": f\"Application not found with id {application_id}\"}, 404\n\n\n# Update Application Route\n@applications_bp.route('/', methods=[\"PUT\", \"PATCH\"])\n# Update an application in the database. 
Checks if the application exists, if it does, it updates it in the database.\n@jwt_required()\ndef update_application(job_id, application_id):\n body_data = request.get_json()\n stmt = db.select(Application).where(Application.id == application_id)\n application = db.session.scalar(stmt)\n if application:\n application.date_applied = body_data.get(\n \"date_applied\") or application.date_applied\n db.session.commit()\n return application_schema.dump(application)\n else:\n return {\"error\": f\"Application not found with id {application_id}\"}, 404\n","repo_name":"Looch8/T2A2-Webserver_API","sub_path":"src/controllers/application_controller.py","file_name":"application_controller.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7514005222","text":"from Crypto.Random import get_random_bytes\nfrom Crypto.Cipher import DES, AES\nfrom Crypto.Util.Padding import pad,unpad\n\nclass DES_CIPHER:\n BLOCK_SIZE_DES = 8 \n def __init__(self, key):\n self.key = key\n\n def cifrar(self, cadena, IV):\n cadena = cadena.encode(\"UTF-8\")\n cipher=DES.new(self.key, DES.MODE_CBC, IV)\n ciphertext = cipher.encrypt(pad(cadena,self.BLOCK_SIZE_DES))\n return ciphertext\n\n def descifrar(self, cifrado, IV):\n decipher_des = DES.new(self.key, DES.MODE_CBC, IV)\n new_data = unpad(decipher_des.decrypt(cifrado), self.BLOCK_SIZE_DES).decode(\"UTF-8\", \"ignore\")\n return new_data\n\n\nIV = get_random_bytes(8) # IV aleatorio de 64 bits para CBC\nkey = get_random_bytes(8) # Clave aleatoria de 64 bits\ndatos = \"Hola Mundo con DES en modo ECB\"\nprint(datos)\nd = DES_CIPHER(key)\ncifrado = d.cifrar(datos, IV)\nprint(cifrado)\ndescifrado = d.descifrar(cifrado, IV)\nprint(descifrado)\n","repo_name":"x1n4px/Seguridad-de-la-informacion","sub_path":"2023-2024/práctica2-criptografíaSimétrica/ej1_DES_CBC.py","file_name":"ej1_DES_CBC.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12895624602","text":"# encoding: utf-8\n\"\"\"\nTests of io.axonio\n\"\"\"\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nfrom neo.io import AxonIO\nfrom neo.test.io.common_io_test import BaseTestIO\n\n\nclass TestAxonIO(BaseTestIO, unittest.TestCase):\n files_to_test = ['File_axon_1.abf',\n 'File_axon_2.abf',\n 'File_axon_3.abf',\n 'File_axon_4.abf',\n 'File_axon_5.abf',\n 'File_axon_6.abf',\n \n \n ]\n files_to_download = files_to_test\n ioclass = AxonIO\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"tkf/neo","sub_path":"neo/test/io/test_axonio.py","file_name":"test_axonio.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17441194734","text":"# 국회의원 선거\ndef maesu(n, cand):\n num = 0\n while True:\n count = 0\n for i in range(n-2, -1, -1):\n other = cand[i]\n me = cand[-1]\n if me < other:\n c = (other - me) // 2 + 1\n if c == 0:\n c = 1\n cand[-1] += c\n cand[i] -= c\n num += c\n count += c\n elif me == other:\n cand[-1] += 1\n cand[i] -= 1\n num += 1\n count += 1\n if count == 0:\n return num\n\nfrom collections import deque\ndef maesu_2(n, cand):\n q = deque()\n num = 0\n for i in range(n-2, -1, -1):\n if cand[-1] <= cand[i]:\n q.append(i)\n while q:\n i = q.popleft()\n other = cand[i]\n me = cand[-1]\n if me <= other:\n c = (other - me) // 2\n if c == 0:\n c = 1\n cand[-1] += c\n 
cand[i] -= c\n num += c\n if cand[i] > cand[-1]:\n q.append(i)\n return num\n\ndef maesu_3(n, me, cand):\n num = 0\n if len(cand)==0:\n return 0\n while cand[-1]>=me:\n num += 1\n me += 1\n cand[-1] -= 1\n cand.sort()\n return num\n\nn = int(input())\nme = int(input())\ncand = []\nfor _ in range(n-1):\n cand.append(int(input()))\ncand.sort()\nprint(maesu_3(n, me, cand))","repo_name":"dodoyeon/SW_Academy","sub_path":"greedy/1417_congress.py","file_name":"1417_congress.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22370210752","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 13:21:01 2018\n\n@author: avelinojaver\n\"\"\"\nimport numpy as np\nimport cv2\nimport tqdm\nfrom pathlib import Path\n#%%\nif __name__ == '__main__':\n root_dir = Path.home() / 'Vesicles/training_data/'\n frames_root = root_dir / 'initial_frames'\n \n save_root = root_dir / 'dat2label'\n train_root = save_root / 'train'\n train_root.mkdir(parents=True, exist_ok=True)\n \n test_root = save_root / 'test'\n test_root.mkdir(parents=True, exist_ok=True)\n #%%\n \n \n fnames = list(frames_root.rglob('*_0.png'))\n #%%\n for fname in tqdm.tqdm(fnames):\n \n img = cv2.imread(str(fname), -1)\n if img is None:\n continue\n \n img_l = np.log(img+1)\n \n bot, top = np.min(img_l), np.max(img_l)\n img_n = (img_l-bot)/(top-bot)*255\n img_n = img_n.astype(np.uint8)\n \n if 'train' in str(fname.parent):\n save_dir = train_root\n else:\n save_dir = test_root\n \n bn = fname.name.replace('_0.','.')\n \n save_name = save_dir / bn\n cv2.imwrite(str(save_name), img_n)\n \n \n \n ","repo_name":"ver228/vesicle_contours","sub_path":"collect/transform2label.py","file_name":"transform2label.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2257513841","text":"from surprise import CoClustering\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise import accuracy\nfrom surprise.model_selection import GridSearchCV\nimport pandas as pd\nimport time\n\nroot = \"./folds_score/\"\ngs = False\n\nif gs:\n fold = ' ' + str(0)\n train_df, test_df = pd.read_csv(root + fold +'/train.csv'), pd.read_csv(root + fold + '/test.csv')\n\n reader = Reader(rating_scale=(0, 10))\n\n# train_data = Dataset.load_from_df(train_df, reader).build_full_trainset()\n# data = Dataset.load_from_folds([(root + fold +'/train.csv', root + fold + '/test.csv')], reader)\n# test_data = Dataset.load_from_df(test_df, reader).build_full_trainset().build_testset()\n data = Dataset.load_from_df(train_df, reader)\n \n param_grid = {\n \"n_cltr_u\": [20, 30, 40],\n \"n_cltr_i\": [20, 30, 40],\n \"n_epochs\": [50, 75, 100],\n \"random_state\": [42]\n }\n \n gs = GridSearchCV(CoClustering, param_grid, measures=[\"rmse\", \"mae\"], cv=5, n_jobs=4)\n\n gs.fit(data)\n\n print(gs.best_score[\"rmse\"])\n print(gs.best_params[\"rmse\"])\n \nelse:\n for i in range(5):\n fold = ' ' + str(i)\n train_df, test_df = pd.read_csv(root + fold +'/train_score.csv').drop([\"Unnamed: 0\",\"playtime\"], axis=1), pd.read_csv(root + fold + '/test_score.csv').drop([\"Unnamed: 0\",\"playtime\"], axis=1)\n\n reader = Reader(rating_scale=(0, 10))\n\n train_data = Dataset.load_from_df(train_df, reader).build_full_trainset()\n test_data = Dataset.load_from_df(test_df, reader).build_full_trainset().build_testset()\n\n\n\n print(\"\\nCo-CLustering - 
Testing fold{}:\".format(fold))\n\n algo = CoClustering(n_cltr_u=40, n_cltr_i=40, n_epochs=100, random_state=42, verbose=True)\n\n tic = time.time()\n algo.fit(train_data)\n toc = time.time()\n\n print('Finished fit model in {}s'.format(toc-tic))\n\n predictions = algo.test(test_data)\n\n accuracy.rmse(predictions)\n # print(\"RMSE: \", rmse)","repo_name":"camilalaranjeira/RecSteam","sub_path":"cocluster.py","file_name":"cocluster.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15318437618","text":"def find_missing(full, par):\n missing_items = set(full) - set(par)\n assert(len(missing_items) == 1)\n return list(missing_items)[0]\n \n\nprint(\"The missing number from the partial list, using linear time:\",find_missing([4,12,9,5,6], [4,9,12,6]))\n\n\ndef find_missing_xor(full, par):\n xor_sum = 0\n for num in full:\n xor_sum ^= num\n for num in par:\n xor_sum ^= num\n\n return xor_sum\n\nprint(\"The misssing xor_sum is, which is constant time:\",find_missing_xor([4,12,9,5,6], [4,9,12,6]))\n\n#https://www.youtube.com/watch?v=cdCeU8DJvPM&list=PL_557Q1uZ7gLfEajI2TZDU80Y3kkpAxti&index=67\n\n#https://www.youtube.com/watch?v=r0CAz6MdgEg&list=PL5tcWHG-UPH1D-JVSiZI_8I8LPUJtoHdg&index=5\n","repo_name":"adamphopal/more-interview-code","sub_path":"find_missing.py","file_name":"find_missing.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2809897376","text":"from clustertop.poller import Poller\nfrom ConfigParser import ConfigParser\nimport argparse\nimport importlib\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Control the cluster top backend')\n parser.add_argument('command', choices=['check', 'run'])\n parser.add_argument('--config', type=str, default='/etc/clustertop')\n args = parser.parse_args()\n cf = ConfigParser()\n cf.read(args.config)\n the_poller = Poller\n if cf.has_option('main', 'poller'):\n mod_path, cls = cf.get('main', 'poller').split(':')\n module = importlib.import_module(mod_path)\n the_poller = getattr(module, cls)\n if args.command == 'run':\n poller = the_poller(cf)\n poller.poll_loop()\n elif args.command == 'check':\n poller = the_poller(cf)\n poller.poll()\n","repo_name":"rossdylan/clustertop","sub_path":"clustertop/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72854168808","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 6 21:26:20 2021\r\n\"\"\"\r\n\r\ndef SelectionSort(l):\r\n \"\"\"Returns None. 
Provided list is sorted in place\"\"\"\r\n for start in range(len(l)):\r\n minpos = start\r\n for i in range(start+1, len(l)):\r\n if l[i] 0:\n assert False, \"Positional arguments for queries are not supported yet\"\n else:\n assert len(kwargs) == len(query_args), f\"Expected {len(query_args)} keyword arguments for query, got {len(kwargs)}: Expected: {query_args}, got: {kwargs}\"\n\n for query_kw in kwargs.keys():\n assert query_kw in query_args, f\"Unknown query argument '{query_kw}'\"\n \n return await module.query(**kwargs)\n\nasync def run(code, *args, output_writer=None, **kwargs):\n temp_lmql_file = tempfile.mktemp(suffix=\".lmql\")\n with open(temp_lmql_file, \"w\") as f:\n f.write(code)\n \n os.chdir(os.path.join(os.path.dirname(__file__), \"../../\")) \n return await run_file(temp_lmql_file, *args, output_writer=output_writer, **kwargs)\n \ndef _query_from_string(s):\n temp_lmql_file = tempfile.mktemp(suffix=\".lmql\")\n with open(temp_lmql_file, \"w\") as f:\n f.write(s)\n module = load(temp_lmql_file, autoconnect=True, output_writer=silent)\n return module.query\n\ndef query(fct):\n import inspect\n\n if type(fct) is LMQLQueryFunction: return fct\n\n # support for lmql.query()\n if type(fct) is str: return _query_from_string(fct)\n \n calling_frame = inspect.stack()[1]\n scope = LMQLInputVariableScope(fct, calling_frame)\n code = get_decorated_function_code(fct)\n\n temp_lmql_file = tempfile.mktemp(suffix=\".lmql\")\n with open(temp_lmql_file, \"w\") as f:\n f.write(code)\n module = load(temp_lmql_file, autoconnect=True, output_writer=silent)\n \n assert inspect.iscoroutinefunction(fct), f\"@lmql.query {fct.__name__} must be declared async.\"\n \n argnames = inspect.getfullargspec(fct).args\n \n args_of_query = [a for a in inspect.getfullargspec(module.query.fct).args if a != \"context\"]\n # print code of module.query.fct\n for a in argnames:\n if a not in args_of_query:\n print(f\"warning: @lmql.query {fct.__name__} has an argument '{a}' that is not used in the query.\")\n \n # set the function context of the query based on the function context of the decorated function\n module.query.function_context = FunctionContext(argnames, args_of_query, scope)\n \n return module.query\n\nasync def static_prompt(query_fct, *args, **kwargs):\n \"\"\"\n Returns the static prompt prefix that is generated by the given query function up until the first variable.\n \"\"\"\n res = await query_fct(*args, **kwargs, return_prompt_string=True)\n return res[0]\n\ndef main(query_fct):\n \"\"\"\n Runs the provided query function in the main thread\n and returns the result.\n\n This call is blocking.\n \"\"\"\n import asyncio\n return asyncio.run(query_fct())\n","repo_name":"SilacciA/lmql","sub_path":"src/lmql/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"71261476648","text":"# 用于构建界面代码 app_启动窗口.py\nimport json\nimport os\nimport re\n\nfrom 中文对照组件常量 import 取组件名称中英文对照\nfrom 中文对照组件常量 import 通过组件名称取组件库对象\nfrom 界面代码生成类 import 界面代码生成类\nfrom 组件库.组件单行编辑框 import 组件单行编辑框\nfrom 组件库.组件富文本编辑框 import 组件富文本编辑框\nfrom 组件库.组件按钮 import 组件按钮\nfrom 组件库.组件标签 import 组件标签\nfrom 组件库.组件窗口 import 组件窗口\nfrom 组件库.组件纯文本编辑框 import 组件纯文本编辑框\n\n\nclass 代码生成UiPy文件(object):\n 界面代码生成: 界面代码生成类 = None\n 组件数: object = None\n 依赖组件列表 = []\n\n def __init__(self, json界面数据文件: str):\n self.界面代码生成 = 界面代码生成类()\n self.组件树 = json.loads(json界面数据文件)\n\n self.界面代码生成.加载已存在的文件内容 = \"\"\n self.界面代码生成.末尾代码 = \"\"\"\n 
QMetaObject.connectSlotsByName(MainWindow)\n\nclass MainWin(QMainWindow):\n def __init__(self):\n super().__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.show()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWin()\n sys.exit(app.exec())\n\"\"\"\n\n def 取头部依赖组件(self, 依赖组件=None):\n return f\"\"\"\n# -*- coding: utf-8 -*-\nimport sys\n\nfrom PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,\n QMetaObject, QObject, QPoint, QRect,\n QSize, QTime, QUrl, Qt)\nfrom PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor,\n QFont, QFontDatabase, QGradient, QIcon,\n QImage, QKeySequence, QLinearGradient, QPainter,\n QPalette, QPixmap, QRadialGradient, QTransform)\nfrom PySide6.QtWidgets import (QApplication, {依赖组件})\n\"\"\"\n\n def 生成代码(self):\n pass\n self.递归(组件树=self.组件树)\n # set(self.依赖组件列表)\n 依赖组件 = \", \".join(set(self.依赖组件列表))\n self.界面代码生成.头部导包代码 = self.取头部依赖组件(依赖组件)\n\n 代码 = self.界面代码生成.生成代码UiPy()\n return 代码\n\n def 递归(self, 递归深度=0, 组件树=None):\n self.获取代码(组件树)\n for 子组件 in 组件树['子组件']:\n self.递归(递归深度=递归深度 + 1, 组件树=子组件)\n\n def 获取代码(self, 组件对象):\n 窗口代码 = \"\"\n 组件名称 = 组件对象['组件名称']\n 组件类型 = 组件对象['组件类型']\n 组件属性 = 组件对象['组件属性']\n # print(组件名称, 组件类型, 组件属性)\n self.依赖组件列表.append(组件类型) # 依赖列表\n\n if 组件类型 == \"QMainWindow\":\n 组件信息 = 组件窗口()\n 窗口代码 = 组件信息.导出为代码(组件对象)\n self.界面代码生成.类初始化代码 = 窗口代码\n return \"\"\n\n 父组件 = 组件对象['父组件']\n 父组件类型 = 组件对象['父组件类型']\n# if 组件类型 == \"QWidget\": # 窗口组件\n# self.界面代码生成.加入事件绑定代码(f\"\"\"\n# self.{组件名称} = QWidget(MainWindow)\n# self.{组件名称}.setObjectName(u\"{组件名称}\")\n# \"\"\")\n if 父组件类型 == \"QTabWidget\" and 组件类型 == \"QWidget\": # 选择夹 组件\n self.界面代码生成.加入事件绑定代码(f\"\"\"\nself.{组件名称} = QWidget()\nself.{组件名称}.setObjectName(u\"{组件名称}\")\n \"\"\")\n self.界面代码生成.加入事件绑定代码(f\"\"\"\nself.{父组件}.addTab(self.{组件名称}, \"\")\n \"\"\", True)\n\n self.界面代码生成.加入事件绑定代码(f\"\"\"\nself.{父组件}.setTabText(self.tabWidget.indexOf(self.{组件名称}), u\"{组件属性['标题']}\")\n \"\"\")\n\n if 组件类型 == \"QTabWidget\": # 选项卡组件\n self.界面代码生成.加入事件绑定代码(f\"\"\"\nself.{组件名称} = QTabWidget(self.{父组件})\nself.{组件名称}.setObjectName(u\"{组件名称}\")\nself.{组件名称}.setGeometry(QRect({组件属性['左边']}, {组件属性['顶边']}, {组件属性['宽度']}, {组件属性['高度']}))\n \"\"\")\n self.界面代码生成.加入事件绑定代码(f\"\"\"\nself.{组件名称}.setCurrentIndex(1)\n \"\"\", True)\n #创建组件步骤 2 生成代码的地方\n # if 组件类型 == \"QPushButton\": # 按钮组件\n # 组件信息 = 组件按钮()\n # 窗口代码 = 组件信息.导出为代码(组件对象)\n # if 组件类型 == \"QLineEdit\": # 组件单行编辑框\n # 组件信息 = 组件单行编辑框()\n # 窗口代码 = 组件信息.导出为代码(组件对象)\n # if 组件类型 == \"QPlainTextEdit\": # 组件单行编辑框\n # 组件信息 = 组件纯文本编辑框()\n # 窗口代码 = 组件信息.导出为代码(组件对象)\n # if 组件类型 == \"QTextEdit\": # 组件单行编辑框\n # 组件信息 = 组件富文本编辑框()\n # 窗口代码 = 组件信息.导出为代码(组件对象)\n # if 组件类型 == \"QLabel\": # 组件单行编辑框\n # 组件信息 = 组件标签()\n # 窗口代码 = 组件信息.导出为代码(组件对象)\n\n 组件库对象 = 通过组件名称取组件库对象(组件类型, None)\n if 组件库对象 is None:\n print(\"请补充组件 \"+组件类型)\n return\n\n 窗口代码 = 组件库对象.导出为代码(组件对象)\n\n if 窗口代码 == \"\":\n raise Exception(\"未知组件类型\", 组件对象)\n\n self.界面代码生成.加入事件绑定代码(窗口代码)\n\n\nif __name__ == \"__main__\":\n 数据文件路径 = r\"/Users/chensuilong/Desktop/pythonproject/pythonProject3/main.json\"\n # 数据文件路径 = r\"C:\\pyefun\\QtEasyDesigner\\test\\启动窗口.json\"\n\n # os 取文件路径的目录\n 项目目录 = os.path.dirname(数据文件路径)\n # 获取文件名不要扩展名\n 窗口名称 = os.path.splitext(os.path.basename(数据文件路径))[0]\n\n with open(f\"{项目目录}/{窗口名称}.json\", \"r\", encoding=\"utf-8\") as f:\n 导入数据 = f.read()\n\n # 生成入口文件绑定事件\n python = 代码生成UiPy文件(导入数据).生成代码()\n print(\"生成的代码:\", python)\n with open(f\"{项目目录}/ui_{窗口名称}.py\", \"w\", encoding=\"utf-8\") as f:\n 
f.write(python)\n","repo_name":"duolabmeng6/QtEasyDesigner","sub_path":"qt_esay_model/代码生成UiPy文件.py","file_name":"代码生成UiPy文件.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"zh","doc_type":"code","stars":33,"dataset":"github-code","pt":"53"} +{"seq_id":"39445031584","text":"import requests\nimport bs4\n\ntoken = 'https://ev-database.org/#sort:path~type~order=.rank~number~desc|range-slider-range:prev~next=0~1200|range-slider-acceleration:prev~next=2~23|range-slider-topspeed:prev~next=110~450|range-slider-battery:prev~next=10~200|range-slider-eff:prev~next=100~300|range-slider-fastcharge:prev~next=0~1500|paging:currentPage=0|paging:number=all'\n\n\nresponse = requests.get(token)\n\nsoup = bs4.BeautifulSoup(response.text, 'html.parser')\ndiv = soup.find_all(\"div\", {\"class\":\"data-wrapper\"})\n\n\nf = open(\"data.json\", \"a\");\nf.write(\"[\\n\");\n\nfor item in div:\n\tf.write(\"{ \\n\")\n\tf.write(\"\\\"model\\\" : \\\"\" + item.find(\"a\", {\"class\": \"title\"}).text + \"\\\",\\n\")\n\tf.write(\"\\\"acceleration\\\" : \\\"\" + item.find(\"span\", {\"class\": \"acceleration\"}).text + \"\\\",\\n\")\n\tf.write(\"\\\"topspeed\\\" : \\\"\" + item.find(\"span\", {\"class\": \"topspeed\"}).text + \"\\\",\\n\")\n\tf.write(\"\\\"efficiency\\\" : \\\"\" + item.find(\"span\", {\"class\": \"efficiency\"}).text + \"\\\",\\n\")\n\tf.write(\"\\\"range\\\" : \\\"\" + item.find(\"span\", {\"class\": \"erange_real\"}).text + \"\\\",\\n\")\n\tf.write(\"\\\"charge\\\" : \\\"\" + item.find(\"span\", {\"class\": \"fastcharge_speed_print\"}).text + \"\\\",\\n\")\n\tf.write(\"},\\n\")\n\nf.write(\"]\")\n\n\n\n\n","repo_name":"r1beguin/dataviz","sub_path":"scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20504553286","text":"import time\n\nfrom CARLA import icarla\nfrom data.datakey import DataKey\nfrom support.logger import logger\nfrom threads.haltabledatathread import HaltableDataThread\n\n\nclass ControllerThread(HaltableDataThread):\n vehicle = None\n\n def __init__(self, data):\n super().__init__(data)\n\n def set_vehicle(self, vehicle):\n self.vehicle = vehicle\n\n def loop(self):\n if self.vehicle is not None:\n time.sleep(0.05)\n d = self.data.get(DataKey.CONTROL_OUT)\n if d is not None:\n self.control(d[0], d[1])\n else:\n self.control(0, 0)\n else:\n time.sleep(1.0)\n\n def control(self, throttle, steering):\n if self.vehicle is not None:\n if throttle >= 0.0:\n try:\n self.vehicle.apply_control(icarla.vehicle_control(throttle=throttle, steer=steering))\n except RuntimeError as r:\n logger.error(f'Error: {r}')\n logger.warning(f'Setting vehicle to None')\n self.vehicle = None\n else:\n try:\n throttle *= -1.0\n self.vehicle.apply_control(icarla.vehicle_control(reverse=True, throttle=throttle, steer=steering))\n except RuntimeError as r:\n logger.error(f'Error: {r}')\n logger.warning(f'Setting vehicle to None')\n self.vehicle = None\n else:\n raise RuntimeError('Vehicle not set in ControllerThread')\n","repo_name":"istvanaut/TK-MachineLearning","sub_path":"AGVCar/Simulation/Main/threads/controllerthread.py","file_name":"controllerthread.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14163983768","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 28 19:50:38 2021\n\n@author: saul\n\"\"\"\n\nimport 
torch\nfrom torch import nn\nfrom torch.autograd import grad\n\nactivations = {\n 'tanh': nn.Tanh,\n 'relu': nn.ReLU,\n 'elu': nn.ELU,\n 'softplus': nn.Softplus,\n 'sigmoid': nn.Sigmoid,\n 'leaky_relu': nn.LeakyReLU,\n 'selu': nn.SELU,\n 'identity': nn.Identity\n}\n\ndef init(input_size, output_size, n_hidden_layers, n_neurons, activation, ln):\n activation=activations[activation]\n model = [torch.nn.Linear(input_size, n_neurons)]\n model.append(activation())\n if ln==True:\n model.append(torch.nn.LayerNorm(n_neurons))\n \n for i in range(n_hidden_layers-1):\n model.append(torch.nn.Linear(n_neurons, n_neurons))\n model.append(activation())\n if ln:\n model.append(torch.nn.LayerNorm(n_neurons))\n if output_size is not None:\n model.append(torch.nn.Linear(n_neurons, output_size))\n return model\nclass FF_Network(nn.Module):\n def __init__(self, input_size, output_size, n_layers, n_neurons, device, activation=None, ln=False):\n super().__init__()\n model=init(input_size, output_size, n_layers, n_neurons, activation, ln)\n # for i in range(0,len(model)):\n # self.weights_init(model[i])\n self.ff_nn = torch.nn.Sequential(*model)\n # def weights_init(self,model):\n # classname = model.__class__.__name__\n # if classname.find('Linear') != -1:\n # # nn.init.constant_(model.bias, 0)\n # # nn.init.normal_(model.bias)\n # # nn.init.orthogonal_(model.weight, gain=nn.init.calculate_gain('tanh'))\n # # torch.nn.init.uniform_(model.weight, -lim, lim)\n # # torch.nn.init.xavier_normal_(model.weight)\n\n\n def forward(self,qqd):\n return self.ff_nn(qqd) \n \nclass LNN(nn.Module):\n def __init__(self, input_size, output_size, n_layers, n_neurons, h, activation=None, custom_init=None, ln=True, rk4_step=False):\n self.h=h\n self.custom_init=custom_init\n self.input_size=input_size\n self.n_layers=n_layers\n self.n_neurons=n_neurons\n self.rk4_step=rk4_step\n \n super().__init__()\n model=init(input_size, output_size, n_layers, n_neurons, activation, ln)\n \n if self.custom_init:\n step=2\n if ln:\n step=3\n for i in range(0,len(model), step):\n self.weights_init(model[i],i//step)\n nn.init.constant_(model[i].bias, 0)\n self.ff_nn = torch.nn.Sequential(*model)\n \n \n def weights_init(self,model, i):\n classname = model.__class__.__name__\n if classname.find('Linear') != -1:\n if i==0:\n nn.init.normal_(model.weight, 0, 2.2/torch.sqrt(torch.tensor(self.n_neurons,dtype=float)))\n elif i==(self.n_layers-1):\n nn.init.normal_(model.weight, 0, self.n_neurons/torch.sqrt(torch.tensor(self.n_neurons,dtype=float)))\n else :\n nn.init.normal_(model.weight, 0, (0.58*i)/torch.sqrt(torch.tensor(self.n_neurons,dtype=float)))\n \n def forward(self, x):\n with torch.set_grad_enabled(True):\n qqd = x.requires_grad_(True)\n if self.rk4_step:\n out=self._rk4_step(qqd)\n else:\n out=self.euler_lagrange(qqd)\n return out\n \n \n def _lagrangian(self, qqd):\n return self.ff_nn(qqd) \n \n def euler_lagrange(self,qqd):\n self.n = n = qqd.shape[1]//2\n L = self._lagrangian(qqd).sum()\n J = grad(L, qqd, create_graph=True)[0] ;\n DL_q, DL_qd = J[:,:n], J[:,n:]\n DDL_qd = []\n for i in range(n):\n J_qd_i = DL_qd[:,i][:,None]\n H_i = grad(J_qd_i.sum(), qqd, create_graph=True)[0][:,:,None]\n DDL_qd.append(H_i)\n DDL_qd = torch.cat(DDL_qd, 2)\n DDL_qqd, DDL_qdqd = DDL_qd[:,:n,:], DDL_qd[:,n:,:]\n T = torch.einsum('ijk, ij -> ik', DDL_qqd, qqd[:,n:])\n qdd = torch.einsum('ijk, ij -> ik', DDL_qdqd.inverse(), DL_q - T)\n return qdd\n \n \n def _rk4_step(self, qqd):\n k1 = self.h * self.euler_lagrange(qqd)\n k2 = self.h * 
self.euler_lagrange(qqd + k1/2)\n k3 = self.h * self.euler_lagrange(qqd + k2/2)\n k4 = self.h *self.euler_lagrange(qqd + k3)\n return qqd + 1/6 * (k1 + 2 * k2 + 2 * k3 + k4)\n \nclass PINN(nn.Module):\n \"\"\"\n Input shape: torch.tensor([[q1_past, q2_past, q1, a2, q1_next, q2_next]]): \n N x 3*d - Where N is the mini batch size and d the number of degrees of freedom \n \"\"\"\n def __init__(self, input_size, n_layers, n_neurons, device, activation=None, ln=False):\n super().__init__()\n self.d = int(input_size/(2*3))\n self.d_input = int(self.d*2)\n output_size = 1\n model=init(self.d_input, output_size, n_layers, n_neurons, activation, ln)\n self.lnn = torch.nn.Sequential(*model)\n model=init(int(self.d*5), self.d, n_layers, n_neurons, activation, ln)\n self.q = torch.nn.Sequential(*model)\n \n def forward(self, x):\n with torch.set_grad_enabled(True):\n q_past, qk,_, u_past, u, u_next = torch.split(x,self.d,1)\n q = qk.requires_grad_(True)\n q_next = self._position(q_past, q, u_past, u, u_next)\n qq_next = torch.cat((q,q_next),1)\n qq_past = torch.cat((q_past, q),1)\n L2 = self._lagrangian(qq_past)\n L1 = self._lagrangian(qq_next)\n f = L1 + L2\n DEL = grad(f.sum(), q, create_graph=True)[0]\n out = DEL + 0.25*0.01*(u_past + u) + 0.25*0.01*(u + u_next)\n # print(q_next)\n # print(out)\n return out, q_next\n\n def _lagrangian(self, q):\n return self.lnn(q)\n \n def _position(self, q_past, q, u_past, u, u_next):\n input = torch.cat((q_past,q, u_past, u, u_next ),1)\n return self.q(input)\n \n def loss(self, output, target):\n N = len(output[0])\n loss = (output[0]**2)/N + ((output[1] - target)**2)/N\n \n return loss.sum() \n \nclass HNN(nn.Module):\n def __init__(self, input_size, output_size, n_layers, n_neurons, activation=None, ln=False):\n super().__init__()\n model=init(input_size, output_size, n_layers, n_neurons, activation, ln)\n self.hnn = torch.nn.Sequential(*model)\n def forward(self, x):\n with torch.set_grad_enabled(True):\n self.n = x.shape[1]//2\n x = x.requires_grad_(True)\n gradH = torch.autograd.grad(self._hamiltonean(x).sum(), x, allow_unused=False, create_graph=True)[0]\n return torch.cat([gradH[:,self.n:], -gradH[:,:self.n]], 1).to(x)\n\n def _hamiltonean(self, qqd):\n return self.hnn(qqd)\n \n \nclass DELAN(nn.Module):\n def __init__(self, input_size, n_layers, n_neurons, device, mask, activation=None, ln=False):\n super().__init__()\n self.input_dim = input_size//3\n self.device = device\n self.mask = mask.to(self.device)\n self.activation = activation\n self.n_neurons = n_neurons\n output_size=None\n self.model = init(self.input_dim, output_size, n_layers, n_neurons, activation, ln=False) \n self.delan = torch.nn.Sequential(*self.model)\n self.neg_slope=0.01\n self.diagonal_layer = nn.functional.softplus\n \n # gravity layer\n self.fc2 = nn.Linear(self.n_neurons, self.input_dim) \n \n # ld layer\n self.fc3 = nn.Linear(self.n_neurons, self.input_dim)\n \n # lo layer\n self.d_n_terms=((self.input_dim**2)-self.input_dim)/2\n self.fc4 = nn.Linear(self.n_neurons, int(self.d_n_terms))\n \n def get_analytical_derivatives(self, hi, activation):\n dact_dhi ={\n 'leaky_relu' : lambda: torch.where(hi > 0, torch.ones(hi.shape,device=self.device), self.neg_slope * torch.ones(hi.shape,device=self.device)),\n 'relu' : lambda: torch.where(hi > 0, torch.ones(hi.shape,device=self.device), torch.zeros(hi.shape,device=self.device)),\n 'softplus' : lambda: torch.sigmoid(hi),\n 'elu' : lambda: torch.where(hi > 0, torch.ones(hi.shape,device=self.device), hi + 1)\n }\n return 
dact_dhi[activation]()\n \n def embed_angle(self, q):\n theta = torch.masked_select(q, self.mask)\n x = torch.cos(theta)\n y = torch.sin(theta)\n p = torch.masked_select(q, ~self.mask)\n cartesian = torch.stack([x,y, p], axis = -1)\n return cartesian\n \n def reshape(self, ld, lo, q_dot, dld_dhi, dlo_dhi):\n # Get L, dL matrices without inplace operations\n n=self.n\n d=self.d\n dld_dqi = dld_dhi.permute(0,2,1).view(n,d,d,1)\n dlo_dqi = dlo_dhi.permute(0,2,1).view(n,d,-1,1)\n\n dld_dt = dld_dhi @ q_dot.view(n,d,1)\n dlo_dt = dlo_dhi @ q_dot.view(n,d,1)\n L = []\n dL_dt = []\n dL_dqi = []\n zeros = torch.zeros_like(ld)\n zeros_2 = torch.zeros_like(dld_dqi)\n lo_start = 0\n lo_end = d - 1\n for i in range(d):\n l = torch.cat((zeros[:, :i].view(n, -1), ld[:, i].view(-1, 1), lo[:, lo_start:lo_end]), dim=1)\n dl_dt = torch.cat((zeros[:, :i].view(n, -1), dld_dt[:, i].view(-1, 1),\n dlo_dt[:, lo_start:lo_end].view(n, -1)), dim=1)\n \n dl_dqi = torch.cat((zeros_2[:, :, :i].view(n, d, -1), dld_dqi[:, :, i].view(n, -1, 1),\n dlo_dqi[:, :, lo_start:lo_end].view(n, d, -1)), dim=2)\n \n lo_start = lo_start + lo_end\n lo_end = lo_end + d - 2 - i\n L.append(l)\n dL_dt.append(dl_dt)\n dL_dqi.append(dl_dqi)\n \n L = torch.stack(L, dim=2)\n dL_dt = torch.stack(dL_dt, dim=2)\n \n dL_dqi = torch.stack(dL_dqi, dim=3).permute(0, 2, 3, 1)\n \n return L, dL_dt, dL_dqi\n \n def inertia_matrix(self, L):\n epsilon = 1e-9 #small number to ensure positive definiteness of H\n\n return L @ L.transpose(1, 2) + epsilon * torch.eye(self.d, device=self.device)\n def coriolis_matrix(self, L, dL_dqi, q_dot, dL_dt):\n d=self.d\n n=self.n\n # Time derivative of Mass Matrix\n dH_dt = L @ dL_dt.permute(0,2,1) + dL_dt @ L.permute(0,2,1)\n quadratic_term = []\n for i in range(d):\n qterm = q_dot.view(n, 1, d) @ (dL_dqi[:, :, :, i] @ L.transpose(1, 2) +\n L @ dL_dqi[:, :, :, i].transpose(1, 2)) @ q_dot.view(n, d, 1)\n quadratic_term.append(qterm)\n\n quadratic_term = torch.stack(quadratic_term, dim=1)\n \n return dH_dt @ q_dot.view(n,d,1) - 0.5 * quadratic_term.view(n,d,1)\n \n def matrix_layers(self, hi, dhii_dhi):\n # Gravity torque\n g = self.fc2(hi)\n \n # ld is vector of diagonal L terms, lo is vector of off-diagonal L terms\n h3 = self.fc3(hi)\n #Positive activation function to guarantee positive definitess of H\n ld = self.diagonal_layer(h3)\n lo = self.fc4(hi)\n \n #Analytical derivatives of matrix layers\n dld_dhi = torch.diag_embed(self.get_analytical_derivatives(h3,'softplus')) @ self.fc3.weight\n dlo_dhi = self.fc4.weight\n dld_dhi =dld_dhi @ dhii_dhi\n dlo_dhi = dlo_dhi @ dhii_dhi\n \n return g, ld, lo, dld_dhi, dlo_dhi\n \n def forward(self, x):\n if (x.shape[1]%3) != 0:\n self.d = d = x.shape[1] // 2\n q, q_dot = torch.split(x,[d,d], dim = 1)\n forced = False\n else:\n self.d = d = x.shape[1] // 3\n q, q_dot, tau = torch.split(x,[d,d,d], dim = 1)\n forced = True\n self.n = n = x.shape[0]\n self.embed_angle(q)\n #common layers analytical derivatives and forward:\n hi=q\n dhii_dhi=torch.eye(d, device = self.device)\n dhii_dhi = dhii_dhi.reshape((1, d, d))\n dhii_dhi = dhii_dhi.repeat(n, 1, 1)\n for layer in self.delan:\n if isinstance(layer, nn.Linear):\n affine=layer\n ai = affine(hi)\n else:\n hi = layer(ai)\n dact_dfci=self.get_analytical_derivatives(hi, self.activation)\n dhii_dhi = torch.diag_embed(dact_dfci) @ affine.weight @ dhii_dhi\n \n #Analytical derivatives and matrices \n g, ld, lo, dld_dhi, dlo_dhi = self.matrix_layers(hi, dhii_dhi)\n \n \n #Get reshaped terms\n L, dL_dt, dL_dqi=self.reshape(ld, lo, q_dot, 
dld_dhi, dlo_dhi)\n \n #Get Inertia Matrix\n H = self.inertia_matrix(L)\n\n #Get coriolis\n c=self.coriolis_matrix(L, dL_dqi, q_dot, dL_dt)\n \n #Inverse Euler_lagrange\n if forced:\n q_ddot = torch.solve(tau.view(n,d,1) - c - g.view(n,d,1), H)[0]\n else:\n q_ddot = torch.solve(-c - g.view(n,d,1), H)[0]\n # The loss layer will be applied outside Network class\n return q_ddot.squeeze()","repo_name":"ssantos97/SyMo","sub_path":"Code/Baseline.py","file_name":"Baseline.py","file_ext":"py","file_size_in_byte":13253,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15636653962","text":"from rest_framework import serializers\n\n\nclass UserLoginSuccessResponse(serializers.Serializer):\n token = serializers.CharField(\n label='토큰'\n )\n\n\nclass UserLoginFailResponse(serializers.Serializer):\n message = serializers.CharField(\n label='메세지',\n default='아이디와 비밀번호를 다시 한번 확인해주세요.'\n )\n code = serializers.CharField(\n label='에러 코드',\n default='WRONG CREDENTIALS'\n )\n\n @classmethod\n def json(cls):\n return {\n 'message': '아이디와 비밀번호를 다시 한번 확인해주세요.',\n 'code': 'WRONG CREDENTIALS'\n }\n\n\nclass UserDuplicateResponse(serializers.Serializer):\n is_duplicate = serializers.BooleanField(\n label='아이디 중복 여부'\n )","repo_name":"amathon-2019/team13-backend","sub_path":"api/user/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34427038338","text":"import random\nimport os\n\ndef main():\n def clear_screen():\n os.system('clear')\n\n def the_options():\n \n options = ('piedra', 'papel', 'tijera')\n my_option = input('Elige PIEDRA, PAPEL O TIJERA: ').lower().strip()\n if not my_option in options:\n print('Opcion incorrecta, prueba de nuevo.')\n return None, None\n \n cpu_option = random.choice(options)\n\n print(f'Mi opcion: {my_option}')\n print(f'Opcion de la CPU: {cpu_option}')\n\n return my_option, cpu_option\n\n\n def game_logic(my_option, cpu_option, my_wins, cpu_wins):\n\n if my_option == cpu_option:\n print('EMPATE!')\n\n elif my_option == 'piedra':\n if cpu_option == 'tijera':\n print('Ganaste!!')\n my_wins += 1\n else:\n print('La CPU ganó!')\n cpu_wins += 1\n \n elif my_option == 'papel':\n if cpu_option == 'piedra':\n print('Ganaste!!')\n my_wins += 1\n else:\n print('La CPU ganó!')\n cpu_wins += 1\n\n elif my_option == 'tijera':\n if cpu_option == 'papel':\n print('Ganaste!!')\n my_wins += 1\n else:\n print('La CPU ganó!')\n cpu_wins += 1\n \n return my_wins, cpu_wins\n\n\n def game_start():\n my_wins = 0\n cpu_wins = 0\n round = 0\n \n while True:\n\n clear_screen()\n print('BIENVENIDO AL JUEGO DE PIEDRA, PAPEL Y TIJERA!')\n print()\n print('TURNO', round)\n print()\n print('computer wins:', cpu_wins)\n print('my wins:', my_wins)\n\n my_option, cpu_option = the_options()\n\n my_wins, cpu_wins = game_logic(my_option, cpu_option, my_wins, cpu_wins)\n\n if cpu_wins == 3:\n print('El ganador es la CPU')\n input('Presiona Enter para continuar.')\n clear_screen()\n break\n\n elif my_wins == 3:\n print('El ganador es el usuario')\n input('Presiona Enter para continuar.')\n clear_screen()\n break\n \n input('Presiona Enter para continuar')\n round += 1\n\n\n game_start() \n\n \n \n\n\n\nif __name__ == '__main__':\n 
main()","repo_name":"ZeroRoyX/curso-python-pip","sub_path":"game/jueguito.py","file_name":"jueguito.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14163999908","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 29 23:01:38 2021\n\n@author: saul\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom scipy.misc import derivative\nimport numpy as np\nfrom Code.data import generate_trajectory\nfrom numpy.linalg import inv, pinv\nimport torch\nimport importlib\n\nclass discrete_model(object):\n def __init__(self, model, a, h):\n class_ = getattr(importlib.import_module(\"Code.models\"), model)\n self.model = class_ \n self.h = h\n self.a = a\n \n def Lagrangian_integration(self, q0, q1):\n q0, q1 = np.array(q0), np.array(q1)\n theta = (1-self.a)*q0 + self.a*q1\n dtheta = (q1 - q0)/self.h\n return theta, dtheta\n \n def DEL(self, q_past, q, q_next, u_past, u, u_next):\n self.d = len(q_past)\n q_past1, q_past2 = q_past[0]\n q1, q2 = q[0]\n q_next1, q_next2 = q_next[0]\n \n inp = [q_past1, q_past2, q1, q2, q_next1, q_next2]\n DEL0 = self.partial_derivative(self.func, 2, inp) \n DEL1 = self.partial_derivative(self.func, 3, inp) \n \n return np.array([[DEL0, DEL1]])\n \n def partial_derivative(self, func, var=0, point=[]):\n args = point[:]\n def wraps(x):\n args[var] = x\n return func(*args)\n return derivative(wraps, point[var], dx = 1e-6, order = 3)\n \n def func(self, *args):\n q_past1, q_past2 = args[0], args[1]\n q1, q2 = args[2], args[3]\n q_next1, q_next2 = args[4], args[5]\n q_past = np.array([q_past1, q_past2])\n q = np.array([q1, q2])\n q_next = np.array([q_next1, q_next2])\n \n \n q1, qd1 = self.Lagrangian_integration(q_past, q)\n q2, qd2 = self.Lagrangian_integration(q, q_next)\n state1 = np.concatenate((q1, qd1), 0)\n state2 = np.concatenate((q2, qd2), 0)\n \n L1 = self.model().kinetic_energy(state1).item() - self.model().potential_energy(state1).item()\n L2 = self.model().kinetic_energy(state2).item() - self.model().potential_energy(state2).item()\n \n return L1*self.h + L2*self.h\n \n def Jacobian(self, q_past, q, q_next, u_past, u, u_next):\n q_past1, q_past2 = q_past[0]\n q1, q2 = q[0]\n q_next1, q_next2 = q_next[0]\n inp1 = [q_past1, q_past2, q1, q2, q_next1, q_next2, 1]\n inp2 = [q_past1, q_past2, q1, q2, q_next1, q_next2, 2]\n \n J11 = self.partial_derivative(self.Jac_fun, 4, inp1) \n J12 = self.partial_derivative(self.Jac_fun, 5, inp1) \n J21 = self.partial_derivative(self.Jac_fun, 4, inp2) \n J22 = self.partial_derivative(self.Jac_fun, 5, inp2) \n \n return np.array([[J11, J12], [J21, J22]])\n \n def Jac_fun(self, *args):\n \"The Jacobian of the DEL for the root finding algorithm is correspondent with the D2D1L(q, q_next) term\"\n q_past1, q_past2 = args[0], args[1]\n q1, q2 = args[2], args[3]\n q_next1, q_next2 = args[4], args[5]\n \n d_f = args[6]\n inp = [q_past1, q_past2, q1, q2, q_next1, q_next2]\n if d_f == 1: \n DEL = self.partial_derivative(self.func, 2, inp)\n else:\n DEL = self.partial_derivative(self.func, 3, inp) \n \n return DEL\n \n \nclass VI_trajectory(object):\n def __init__(self, model, tol, x0, N, h, a):\n self.model = model\n self.tol = tol\n self.x0 = x0\n self.N = N\n self.h = h\n self.a = a\n self.f = discrete_model(self.model, self.a, self.h)\n def Iteration(self, init, q_past, q, u_past, u, u_next): \n lastX = init\n error = 9e9\n while (error > self.tol): # this is how you terminate the loop - note use of abs()\n 
newY = self.f.DEL(q_past, q, lastX, u_past, u, u_next)\n J = self.f.Jacobian(q_past, q, lastX, u_past, u, u_next)\n newX = lastX.T - inv(J)@ newY.T # update estimate using N-R\n error = np.max(np.abs(lastX.T - newX ))\n lastX = newX.T\n return newX\n\n def get_root(self, q_past, q, u_past, u, u_next):\n #Initial guess:\n q_past = np.array([q_past], dtype=np.double)\n q = np.array([q])\n dq = (q - q_past)\n x0 = q + dq\n q_next = self.Iteration(x0, q_past, q, u_past, u, u_next)\n return q_next\n \n def trajectory(self, u):\n x0 = self.init(u)\n u = np.array(u)\n n = int(len(x0)/2)\n traj = np.empty(shape = (self.N,n), dtype = np.float64)\n q_past, q = np.split(x0,2,0)\n traj[0], traj[1] = q_past, q\n for i in range(2, self.N, 1):\n q_past = traj[i-2]\n q = traj[i-1]\n q_next = self.get_root(q_past, q, u[i-2], u[i-1], u[i])\n traj[i] = q_next.T\n print('Iteration:{} '.format(i-1))\n q0 = traj[:-1]\n q1 = traj[1:]\n CD = self.f.Lagrangian_integration(q0, q1)\n coor = np.concatenate((CD[0], CD[1]),1)\n return traj, coor\n \n def init(self, u):\n x0, h = self.x0, self.h\n u = torch.tensor(u)\n _, q =generate_trajectory(self.model, x0, 1, h, u).get_trajectory()\n x = np.array(q)\n x = x[0,:2]\n x0 = np.concatenate([np.array(x0)[0:2],x],0)\n return x0\n \n","repo_name":"ssantos97/SyMo","sub_path":"Code/VI.py","file_name":"VI.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33548715611","text":"import copy, threading, time\nimport lp_colors\n\nRUN_DELAY = 0.005 # 0.005 == 200 FPS\n\n\ndef unbound_press(x, y):\n print(\"[lp_events] (\" + str(x) + \", \" + str(y) + \") Unbound button...\")\n\n\npress_funcs = [[unbound_press for y in range(9)] for x in range(9)]\npressed = [[False for y in range(9)] for x in range(9)]\n\ntimer = None\n\n\ndef init(lp_object):\n global timer\n global press_funcs\n timer = threading.Timer(RUN_DELAY, run, (lp_object,))\n\n\ndef run(lp_object):\n global timer\n while True:\n event = lp_object.ButtonStateXY()\n if event:\n x = event[0]\n y = event[1]\n try:\n if event[2] == 0:\n pressed[x][y] = False\n else:\n pressed[x][y] = True\n press_funcs[x][y](x, y)\n lp_colors.updateXY(x, y)\n except IndexError:\n pass\n else:\n break\n init(lp_object)\n timer.start()\n\n\ndef start(lp_object):\n lp_colors.init(lp_object)\n init(lp_object)\n run(lp_object)\n lp_colors.update_all()\n\n\ndef bind_func_with_colors(x, y, func, off_color):\n global press_funcs\n press_funcs[x][y] = func\n lp_colors.setXY(x, y, off_color)\n\n\ndef unbind(x, y):\n global press_funcs\n press_funcs[x][y] = unbound_press\n lp_colors.setXY(x, y, [0, 0, 0])\n lp_colors.updateXY(x, y)\n\n\ndef unbind_all():\n global press_funcs\n press_funcs = [[unbound_press for y in range(9)] for x in range(9)]\n for x in range(9):\n for y in range(9):\n lp_colors.setXY(x, y, [0, 0, 0])\n lp_colors.raw_clear()\n","repo_name":"nimaid/LPHK","sub_path":"lp_events.py","file_name":"lp_events.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"53"} +{"seq_id":"35590312692","text":"import unittest\nimport os\nimport identifiers\nfrom .common import BaseTestWithEmptyDB\n\ntest_db_base = 'test'\ntest_db_fname = '%s.db' % test_db_base\n\nclass BaseTest(unittest.TestCase):\n\n def setUp(self):\n self.i1 = identifiers.Identifier('a', 'b')\n self.i2 = identifiers.Identifier('a', 'b')\n self.i3 = identifiers.Identifier('a', 'c')\n self.i4 = 
identifiers.Identifier('A', 'b')\n return\n\nclass TestIdentifier(BaseTest):\n\n def test_str(self):\n self.assertEqual(str(self.i1), 'a:b')\n return\n\n def test_repr(self):\n self.assertEqual(repr(self.i1), \"Identifier('a', 'b')\")\n return\n\n def test_equal(self):\n self.assertEqual(self.i1, self.i2)\n return\n\n def test_hash_equal(self):\n self.assertEqual(hash(self.i1), hash(self.i2))\n return\n\n def test_not_equal(self):\n self.assertNotEqual(self.i1, self.i3)\n return\n\n def test_hash_not_equal(self):\n self.assertNotEqual(hash(self.i1), hash(self.i3))\n return\n\n def test_types(self):\n with self.assertRaises(TypeError):\n identifiers.Identifier(1, 'b')\n with self.assertRaises(TypeError):\n identifiers.Identifier('a', 2)\n return\n\n def test_case(self):\n self.assertEqual(self.i1, self.i4)\n return\n\n def test_from_key_type(self):\n with self.assertRaises(TypeError):\n identifiers.Identifier.from_key(1)\n return\n\n def test_from_key_value(self):\n with self.assertRaises(ValueError):\n identifiers.Identifier.from_key('a')\n return\n\n def test_from_key(self):\n i = identifiers.Identifier.from_key(self.i1.key)\n self.assertEquals(i, self.i1)\n return\n\n def test_from_key_case(self):\n i = identifiers.Identifier.from_key('A:b')\n self.assertEquals(i, self.i1)\n return\n\nclass TestLinksBasics(BaseTest):\n\n def test_link_types(self):\n with self.assertRaises(TypeError):\n identifiers.link(0, self.i2, 'ch')\n with self.assertRaises(TypeError):\n identifiers.link(self.i1, 0, 'ch')\n with self.assertRaises(TypeError):\n identifiers.link(self.i1, self.i3, 0)\n return\n\n def test_link_values(self):\n with self.assertRaises(ValueError):\n identifiers.link(self.i1, self.i2, 'ch')\n return\n\n def test_get_links_type(self):\n with self.assertRaises(TypeError):\n identifiers.get_links(0)\n return\n\n def test_get_links_return(self):\n self.assertEqual(identifiers.get_links(self.i1), {})\n return\n\n def test_unlink_types(self):\n with self.assertRaises(TypeError):\n identifiers.unlink(0, self.i2, 'ch')\n with self.assertRaises(TypeError):\n identifiers.unlink(self.i1, 0, 'ch')\n with self.assertRaises(TypeError):\n identifiers.unlink(self.i1, self.i3, 0)\n return\n\n def test_unlink_values(self):\n with self.assertRaises(ValueError):\n identifiers.unlink(self.i1, self.i2, 'ch')\n return\n\nclass BaseTestLinks(BaseTestWithEmptyDB):\n\n def setUp(self):\n BaseTestWithEmptyDB.setUp(self)\n self.i1 = identifiers.Identifier('a', 'b')\n self.i2 = identifiers.Identifier('a', 'c')\n identifiers.link(self.i1, self.i2, 'ch')\n return\n\nclass TestLinks(BaseTestLinks):\n\n def test_link(self):\n links = identifiers.get_links(self.i1)\n self.assertEquals(links.keys(), [self.i2])\n self.assertEquals(links[self.i2], {'ch'})\n links = identifiers.get_links(self.i2)\n self.assertEquals(links.keys(), [self.i1])\n self.assertEquals(links[self.i1], {'ch'})\n return\n\n def test_unlink(self):\n identifiers.unlink(self.i1, self.i2, 'ch')\n links = identifiers.get_links(self.i1)\n self.assertEqual(links, {})\n links = identifiers.get_links(self.i2)\n self.assertEqual(links, {})\n return\n\nclass TestLinksDuplicateAsserter(BaseTestLinks):\n\n def test_duplicate_asserter(self):\n identifiers.link(self.i1, self.i2, 'ch')\n self.assertEquals(identifiers.get_links(self.i1), {self.i2: {'ch'}})\n self.assertEquals(identifiers.get_links(self.i2), {self.i1: {'ch'}})\n return\n\nclass TestLinksAdditionalAsserter(BaseTestLinks):\n\n def setUp(self):\n BaseTestLinks.setUp(self)\n identifiers.link(self.i1, self.i2, 
'jb')\n return\n\n def test_additional_asserter(self):\n asserters = {'ch', 'jb'}\n self.assertEquals(identifiers.get_links(self.i1), {self.i2: asserters})\n self.assertEquals(identifiers.get_links(self.i2), {self.i1: asserters})\n return\n\n def test_unlink(self):\n identifiers.unlink(self.i1, self.i2, 'ch')\n asserters = {'jb'}\n self.assertEquals(identifiers.get_links(self.i1), {self.i2: asserters})\n self.assertEquals(identifiers.get_links(self.i2), {self.i1: asserters})\n return\n\n# eof\n","repo_name":"imagecite/identifiers","sub_path":"tests/identifiers_.py","file_name":"identifiers_.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33646139341","text":"from django.urls import path \nfrom . import views \n\n#URL Conf\nurlpatterns=[\n # path('',views.mhb_response),\n path('register/', views.register),\n path('request-name',views.request_name),\n path('', views.contact_form,name='form1')\n]\n","repo_name":"pouya-mhb/My-Py-projects","sub_path":"DjangoPractice/hello-world/contactForm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16401372542","text":"##ID DFS algo\ngraph ={\n 'a' : ['b','c'],\n 'b' : ['d','e'],\n 'c' : ['g'],\n 'd' : [],\n 'e' : ['f'],\n 'f' : [],\n 'g' : []\n}\ndef dfs(currNode, goalNode, depth, graph):\n print(\"Checking node : \",currNode)\n if currNode == goalNode:\n return True\n if depth<=0:\n return False\n for node in graph[currNode]:\n if dfs(node, goalNode, depth-1, graph):\n return True\n return False\n\ndef iddfs(currNode, goalNode, depth, graph):\n for i in range(depth):\n if dfs(currNode, goalNode, i, graph):\n return True\n return False\n\nif iddfs('a', 'g', 5, graph):\n print(\"path exists\")\nelse:\n print(\"path not found\")\n \n","repo_name":"Umgoel/AI_LAB","sub_path":"iddfs_algo.py","file_name":"iddfs_algo.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23687310066","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 5 03:55:14 2016\n\n@author: viveksagar\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pylab as plt\nO3_t=np.empty([600,5])\nnum_col=35 \n\nfor ii in range(5):\n with np.load('Deep_data_simple'+str(ii+1)+'.npz') as data:\n O3_t[:,ii] = data['O3']\n \nwith np.load('Fim_week1.npz') as data2:\n fim = data2['Fim']\n\nwith np.load('Pre_processed.npz') as data2:\n duration = data2['X3']\n\n \nO3_t=O3_t.reshape(3000)\nfim= np.delete(fim, np.s_[3000:],axis=0)\n#t_2 = 100-100*(len(np.where(fim==0)[0]))/(len(fim))\nduration = np.delete(duration, np.s_[3000:],axis=0)\n\ndef ghadha_ghoda(vec, week_l):\n week_l = week_l.astype(np.int64)\n ind = np.argsort(week_l)\n sorted_week = week_l[ind]\n sorted_diff = sorted_week-np.roll(sorted_week,1)\n start_ind = np.where(sorted_diff!=0)[0]\n sorted_diff2 = np.roll(sorted_week,-1)-sorted_week\n end_ind = np.where(sorted_diff2!=0)[0]\n x_axis = sorted_week[end_ind]\n sorted_data = vec[ind]\n mean_list = []\n stdv_list = []\n for ii in range(len(x_axis)): \n if start_ind[ii] Jwt:\n member = self.member_repository.find_by_login_id(login_dto.login_id)\n if not member or not member.verify_password(login_dto.password):\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"존재하지 않는 id 또는 password 입니다\",\n headers={\"WWW-Authenticate\": 
\"Bearer\"},\n )\n\n return self._make_jwt(member)\n\n def create(self, member_create_dto: MemberCreateDto) -> MemberGetDto:\n self._validate_email(member_create_dto.email)\n self._validate_login_id(member_create_dto.login_id)\n member = Member.from_create_dto(member_create_dto)\n return MemberGetDto.from_orm(self.member_repository.create(member))\n\n def delete(self, login_id: str):\n member = self._get_current_member(login_id)\n self.member_repository.delete(member)\n\n def find_by_id(self, member_id: int) -> MemberGetDto:\n member = self.member_repository.find_by_id(member_id)\n self._validate_member(member)\n return MemberGetDto.from_orm(member)\n\n def find_me(self, login_id: str) -> MemberGetDto:\n member = self._get_current_member(login_id)\n return MemberGetDto.from_orm(member)\n\n def update(self, login_id: str, member_update_dto: MemberUpdateDto) -> Jwt:\n member = self._get_current_member(login_id)\n self._validate_for_update(member, member_update_dto)\n member.update(member_update_dto)\n self.member_repository.update(member)\n return self._make_jwt(member)\n\n def _get_current_member(self, login_id):\n member = self.member_repository.find_by_login_id(login_id)\n self._validate_member(member)\n return member\n\n def _validate_for_update(self, member, member_update_dto):\n if member.login_id != member_update_dto.login_id:\n self._validate_login_id(member_update_dto.login_id)\n if member.email != member_update_dto.email:\n self._validate_email(member_update_dto.email)\n\n def _validate_member(self, member):\n if not member:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED, detail=\"등록된 사용자가 아닙니다\"\n )\n\n def _make_jwt(self, member) -> Jwt:\n access_token = self._make_access_token(member.login_id)\n return Jwt(\n access_token=access_token,\n token_type=TOKEN_TYPE,\n login_id=member.login_id,\n type=member.type,\n )\n\n def _make_access_token(self, login_id: str):\n data = {\n \"sub\": login_id,\n \"exp\": datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES),\n }\n return jwt.encode(data, SECRET_KEY, algorithm=ALGORITHM)\n\n def _validate_email(self, email: str):\n if self.member_repository.find_by_email(email):\n raise HTTPException(status_code=409, detail=\"이미 존재하는 email 입니다\")\n\n def _validate_login_id(self, login_id: str):\n if self.member_repository.find_by_login_id(login_id):\n raise HTTPException(status_code=409, detail=\"이미 존재하는 id 입니다\")\n","repo_name":"Mo0nl19ht/schedular-onboarding","sub_path":"member/service/member_service.py","file_name":"member_service.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10271744805","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .forms import UploadFileForm\nfrom .process import handle_file_upload\n\n# Create your views here.\n\n@csrf_exempt\ndef main(request):\n form = UploadFileForm()\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n print('Post method reveived')\n if form.is_valid():\n redirect_name = handle_file_upload(request.FILES['text_input'])\n return HttpResponse(redirect_name)\n context = {\n 'form':form\n }\n return render(request, 'postprocessing/main.html', context)\n\ndef redir(request):\n return 
redirect('postprocessing/')","repo_name":"fiorentinogiuseppe/aiboxsummerschool-OCR","sub_path":"py/projects/testes/SpellingCheck_Natas/Temporario/servidor/postprocessing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18289537236","text":"import threading\nimport time\n\n'''\n多线程基础\n'''\n\n\nexitFlag = 0\n\n\nclass myThread(threading.Thread): # 继承父类threading.Thread\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def run(self): # 把要执行的代码写到run函数里面 线程在创建后会直接运行run函数\n print(\"Starting \" + self.name)\n print_time(self.name, self.counter, 5)\n print(\"Exiting \" + self.name)\n\n\ndef print_time(threadName, delay, counter):\n while counter:\n if exitFlag:\n (threading.Thread).exit()\n time.sleep(delay)\n print(\"%s: %s\" % (threadName, time.ctime(time.time())))\n counter -= 1\n\n\n# 创建新线程\nthread1 = myThread(1, \"Thread-1\", 1)\nthread2 = myThread(2, \"Thread-2\", 2)\n\n# 开启线程\nthread1.start()\nthread2.start()\nprint(\"Exiting Main Thread\")\n\n\n\n# class A(object):\n# def __init__(self, name):\n# self.name = name\n# print(\"name:\", self.name)\n#\n# def getName(self):\n# return 'A ' + self.name\n#\n#\n# class B(A):\n# def __init__(self, name):\n# super(B, self).__init__(name)\n# print(\"hi\")\n# self.name = name\n#\n# def getName(self):\n# return 'B ' + self.name\n#\n# if __name__ == '__main__':\n# b = B('hello world')\n# print(b.getName())\n","repo_name":"feichouchou/AutoTestXyz","sub_path":"testcase/testThread.py","file_name":"testThread.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73906123048","text":"\"\"\" AOC Day 7 \"\"\"\nimport re\nfrom collections import defaultdict, deque\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Union\n\n\ndef parse_rules(filename: Union[str, Path]) -> Dict[str, List[Tuple[str, int]]]:\n \"\"\"\n Parse the input text into a nice formatted set of rules. We assume:\n * Each type of bag appears on the left hand side of a rule\n * The rule list does not contain (directed) cycles\n * Each bag appears only once on the left hand side of a rule\n\n Input format:\n [BAG TYPE] bags contain (no other bags|[N] [BAG TYPE] bags?[, [N] [BAG TYPE] bags?...]).\n\n Output format:\n key: Left hand side bag type\n value: List of pairs (bag type, number of bags contained in\n parent bag of this type)\n \"\"\"\n rules: Dict[str, List[Tuple[str, int]]] = {}\n with open(filename, \"rt\") as infile:\n for line in infile:\n left, right = line.strip(\"\\n.\").split(\"contain\")\n left = left.strip()[: -len(\" bags\")]\n right = right.strip()\n if right == \"no other bags\":\n rules[left] = []\n else:\n children = [\n (bag_type, int(bag_num))\n for bag_num, bag_type in re.findall(r\"(\\d+) ([a-z ]+) bags?\", right)\n ]\n if not children:\n raise ValueError(f\"Something is wrong in regex for {right}\")\n rules[left] = children\n\n return rules\n\n\ndef first(filename: Union[str, Path]) -> int:\n \"\"\"\n How many types of bags can contain a shiny gold bag?\n \"\"\"\n rules = parse_rules(filename)\n\n # Reverse rules so children point to parents\n child_to_parent = defaultdict(list)\n for key, value in rules.items():\n for child, _ in value:\n child_to_parent[child].append(key)\n\n # BFS up the tree. 
Note we're not counting the bag itself and we assume no\n # directed cycles so this is fine for the resulting BFS\n seen_grandparents = set()\n queue = deque([\"shiny gold\"])\n\n # Execute BFS\n while queue:\n bag = queue.popleft()\n for parent in child_to_parent[bag]:\n if parent not in seen_grandparents:\n seen_grandparents.add(parent)\n queue.append(parent)\n\n return len(seen_grandparents)\n\n\ndef count_bags(\n rules: Dict[str, List[Tuple[str, int]]], bag_name: str, multiplier: int\n) -> int:\n \"\"\"\n Count the number of bags necessarily contained in `multipler` bags of\n type `bag_name` according to the `rules`.\n\n Note that this includes the outer bags themselves!\n \"\"\"\n return multiplier * (\n 1 + sum(count_bags(rules, name, mult) for name, mult in rules[bag_name])\n )\n\n\ndef second(filename: Union[str, Path]) -> int:\n \"\"\"\n How many bags are contained in a shiny gold bag, not including the\n shiny gold bag itself?\n \"\"\"\n rules = parse_rules(filename)\n\n # -1 because we're not counting the outer shiny gold bag itself\n return count_bags(rules, \"shiny gold\", 1) - 1\n","repo_name":"khwilson/advent2020","sub_path":"advent/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41433439126","text":"class Node:\r\n def _init_(self, data):\r\n self.data = data\r\n self.children = []\r\n\r\nclass Tree:\r\n def _init_(self, root):\r\n self.root = root\r\n\r\n def sums(self, node):\r\n total = node.data\r\n for child in node.children:\r\n total += self.sums(child)\r\n return total\r\n\r\n def sibling(self, node):\r\n if node == self.root:\r\n return 0\r\n\r\n parent = self._find_parent(node)\r\n total = 0\r\n for sibling in parent.children:\r\n total += sibling.data\r\n return total\r\n\r\n def _find_parent(self, node):\r\n # Helper function to find the parent of a node\r\n return self._find_parent_recursive(self.root, node)\r\n\r\n def _find_parent_recursive(self, current_node, node):\r\n if node in current_node.children:\r\n return current_node\r\n\r\n for child in current_node.children:\r\n parent = self._find_parent_recursive(child, node)\r\n if parent:\r\n return parent\r\n\r\n return None\r\n\r\n# Membangun struktur tree yang diberikan pada gambar\r\n# Menggunakan angka-angka yang sama seperti pada gambar\r\nval200 = Node(200)\r\nval9 = Node(9)\r\nval2 = Node(2)\r\nval7 = Node(7)\r\nval3 = Node(3)\r\nval10 = Node(10)\r\nval7_2 = Node(7)\r\nval5 = Node(5)\r\nval8 = Node(8)\r\nval33 = Node(33)\r\nval4 = Node(4)\r\nval2_2 = Node(2)\r\n\r\nval200.children = [val9, val2, val7]\r\nval9.children = [val3]\r\nval2.children = [val10]\r\nval7.children = [val7_2]\r\nval3.children = [val5, val8]\r\nval7_2.children = [val33]\r\nval8.children = [val4]\r\nval33.children = [val2_2]\r\n\r\nt = Tree(val200)\r\n\r\n# Testcase 1\r\nprint(f'Total value of node {val200.data} and all of its descendants = {t.sums(val200)}')\r\n\r\n# Testcase 2\r\nprint(f'Total value of all siblings on node {val33.data} = {t.sibling(val33)}')","repo_name":"TheaMarya/UG12_71210701","sub_path":"UG_12.py","file_name":"UG_12.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33713757828","text":"import heapq\r\ndef solution(scoville, K):\r\n answer = 0\r\n # heap정렬 만들기\r\n heapq.heapify(scoville)\r\n while 1:\r\n if len(scoville)<2 and scoville[-1]=K:\r\n break\r\n # 가장 작은 수\r\n 
one = heapq.heappop(scoville)\r\n # 두번째\r\n two = heapq.heappop(scoville)\r\n three = one + (two*2)\r\n # 섞은 음식 지수\r\n heapq.heappush(scoville,three)\r\n answer += 1\r\n\r\n return answer\r\n\r\n\r\nscoville = [1, 2,3,9,10,12]\r\nK = 7\r\nprint(solution(scoville, K))","repo_name":"aeriheo/study","sub_path":"2월 3주차/Programmers_42626.py","file_name":"Programmers_42626.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"488674750","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 25 10:32:33 2017\r\n\r\n@author: JoseMaria\r\n\"\"\"\r\n\r\nimport random\r\nimport plotly.offline as py\r\nimport plotly.graph_objs as go\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom numpy import * ## the same that above but this is bad :) \r\n\r\n##Plot a huge amount of boxplots\r\n\r\nN = 30. # Number of boxes\r\n\r\n# generate an array of rainbow colors by fixing the saturation and lightness of the HSL representation of colour \r\n# and marching around the hue. \r\n# Plotly accepts any CSS color format, see e.g. http://www.w3schools.com/cssref/css_colors_legal.asp.\r\nc = ['hsl('+str(h)+',50%'+',50%)' for h in linspace(0, 360, N)]\r\n\r\n# Each box is represented by a dict that contains the data, the type, and the colour. \r\n# Use list comprehension to describe N boxes, each with a different colour and with different randomly generated data:\r\ndata = [{\r\n 'y': 3.5*sin(pi * i/N) + i/N+(1.5+0.5*cos(pi*i/N))*random.rand(10), \r\n 'type':'box',\r\n 'marker':{'color': c[i]}\r\n } for i in range(int(N))]\r\n\r\n# format the layout\r\nlayout = {'xaxis': {'showgrid':False,'zeroline':False, 'tickangle':60,'showticklabels':False},\r\n 'yaxis': {'zeroline':False,'gridcolor':'white'},\r\n 'paper_bgcolor': 'rgb(233,233,233)',\r\n 'plot_bgcolor': 'rgb(233,233,233)',\r\n }\r\n\r\npy.plot(data)\r\n\r\n\r\n#------------------------------------------------------------------------\r\n#------------------------------------------------------------------------\r\n#------------------------------------------------------------------------\r\n\r\n##Plot a 3d surface\r\n\r\n# Read data from a csv\r\nz_data = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv')\r\n\r\ndata = [\r\n go.Surface(\r\n z=z_data.as_matrix()\r\n )\r\n]\r\nlayout = go.Layout(\r\n title='Example of 3d surface',\r\n autosize=False,\r\n width=1500,\r\n height=800,\r\n margin=dict(\r\n l=65,\r\n r=50,\r\n b=65,\r\n t=90\r\n )\r\n)\r\nfig = go.Figure(data=data, layout=layout)\r\npy.plot(fig, filename='elevations-3d-surface')\r\n\r\n#------------------------------------------------------------------------\r\n#------------------------------------------------------------------------\r\n#------------------------------------------------------------------------\r\n\r\n##Plot a beauty Histogram\r\n\r\nx0 = np.random.randn(500)\r\nx1 = np.random.randn(500)+1\r\n\r\ntrace1 = go.Histogram(\r\n x=x0,\r\n opacity=0.75\r\n)\r\ntrace2 = go.Histogram(\r\n x=x1,\r\n opacity=0.75\r\n)\r\n\r\ndata = [trace1, trace2]\r\nlayout = go.Layout(barmode='overlay')\r\nfig = go.Figure(data=data, layout=layout)\r\n\r\npy.plot(fig, filename='overlaid histogram')\r\n\r\n#------------------------------------------------------------------------\r\n#------------------------------------------------------------------------\r\n#------------------------------------------------------------------------\r\n\r\n\r\n## Scatter plot of 
random numbers just to see another example \r\n\r\n##Prepare the data\r\nn = 750\r\ncol = []\r\nfor i in range(250):\r\n for elem in ['rgb(0, 255, 0)', 'rgb(255, 0, 0)', 'rgb(0, 0, 255)']:\r\n col.append(elem)\r\n\r\n \r\ntrace0 = go.Scatter(\r\n x= np.random.rand(n).tolist(),\r\n y= np.random.rand(n).tolist(),\r\n mode='markers',\r\n marker=dict(\r\n color=col,\r\n #opacity=np.full([1,n],0.1),\r\n size= 100*np.random.rand(n),\r\n )\r\n)\r\n\r\n##Plot the results offline\r\ndata = [trace0]\r\npy.plot(data, filename='Random-Scatter')","repo_name":"jmlago/DA_python","sub_path":"practices/plotly_examples.py","file_name":"plotly_examples.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39712040919","text":"import os\nfrom core.search_space.search_space_config import SearchSpace\nfrom core.search_algorithm.graphnas_search_algorithm import Search\nfrom core.estimation import Scratch_Train_Test\n\nclass AutoModel(object):\n \"\"\"\n The top API to realize gnn architecture search and model testing automatically.\n\n Using search algorithm samples gnn architectures and evaluate\n corresponding performance,testing the top k model from the sampled\n gnn architectures based on performance.\n\n \"\"\"\n\n def __init__(self, graph_data, args):\n\n self.graph_data = graph_data\n self.args = args\n self.search_space = SearchSpace()\n\n print(\"stack gcn architecture information:\\t\", self.search_space.stack_gcn_architecture)\n\n self.search_algorithm = Search(self.graph_data,\n self.args,\n self.search_space)\n\n self.search_model()\n\n\n self.derive_target_model()\n\n\n def search_model(self):\n\n self.search_algorithm.search_operator()\n\n def derive_target_model(self):\n\n path = os.path.split(os.path.realpath(__file__))[0][:-(len('core') + 1)] + \"/logger\" + '/' + str(self.args.data_name) + \"/gnn_logger_\"+ str(self.args.data_name)\n architecture_performance_list = self.gnn_architecture_performance_load(path)\n gnn_architecture_performance_dict = {}\n gnn_architecture_list = []\n performance_list = []\n\n for line in architecture_performance_list:\n line = line.split(\":\")\n gnn_architecture = eval(line[0])\n performance = eval(line[1].replace(\"\\n\", \"\"))\n gnn_architecture_list.append(gnn_architecture)\n performance_list.append(performance)\n\n for key, value in zip(gnn_architecture_list, performance_list):\n gnn_architecture_performance_dict[str(key)] = value\n\n ranked_gnn_architecture_performance_dict = sorted(gnn_architecture_performance_dict.items(),\n key=lambda x: x[1],\n reverse=True)\n\n sorted_gnn_architecture_list = []\n sorted_performance = []\n\n top_k = int(self.args.test_gnn_num)\n i = 0\n for key, value in ranked_gnn_architecture_performance_dict:\n if i == top_k:\n break\n else:\n sorted_gnn_architecture_list.append(eval(key))\n sorted_performance.append(value)\n i += 1\n\n print(35 * \"=\" + \" the testing start \" + 35 * \"=\")\n\n model_num = [num for num in range(len(sorted_gnn_architecture_list))]\n\n for target_architecture, num in zip(sorted_gnn_architecture_list, model_num):\n self.target_architecture = target_architecture\n\n print(\"test gnn architecture {}:\\t\".format(num + 1), str(self.target_architecture))\n\n ## train from scratch\n test_repeat = 5\n for i in range(test_repeat):\n valid_acc, test_acc, test_acc_std = Scratch_Train_Test(self.target_architecture, self.graph_data, args=self.args)\n\n print(\"{}-th run in all {} runs || 
Test_acc:{}±{}\".format(i+1, test_repeat, test_acc, test_acc_std))\n\n print(35 * \"=\" + \" the testing ending \" + 35 * \"=\")\n\n def gnn_architecture_performance_load(self, path):\n\n with open(path + \".txt\", \"r\") as f:\n gnn_architecture_performance_list = f.readlines()\n return gnn_architecture_performance_list\n","repo_name":"Zhen-Peng-Wu/DAGC","sub_path":"core/auto_model.py","file_name":"auto_model.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3691942885","text":"from Libraries.RACOM.RACOM_TP import RACOM_TP\n\n\nclass I2C:\n RacomTP = None\n\n def __init__(self):\n self.RacomTP = RACOM_TP(\"UART\")\n \n def set_I2C_register(self, address, register, value):\n global RacomTP\n data = [address, register, value]\n self.RacomTP.send(0x11, data)\n \n def get_I2C_register(self, address, register):\n global RacomTP\n data = [address, register]\n self.RacomTP.send(0x10, data)\n while self.RacomTP.available() == 0:\n continue\n return self.RacomTP.read()\n\n def get_I2C_Word(self, address, register):\n global RacomTP\n data = [address, register]\n self.RacomTP.send(0x12, data)\n while self.RacomTP.available() == 0:\n continue\n return self.RacomTP.read()\n","repo_name":"pablogcbcn/Casper_Project4.0","sub_path":"ROS_Casper/src/Libraries/I2C/I2C.py","file_name":"I2C.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13398926248","text":"class Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n \n \"\"\"\n first, sort the intervals via their starting pos\n then, iterate through intervals starting points\n and check for overlaps\n \"\"\"\n\n intervals.sort(key = lambda i: i[0])\n\n # take the first interval and add to output\n output = [intervals[0]] \n\n for start, end in intervals[1:]:\n # get most recently added interval\n lastEnd = output[-1][1]\n\n # means they're overlapping\n if start <= lastEnd:\n # we take the max because of this case: [1, 5], [2, 4] => [1, 5]\n output[-1][1] = max(lastEnd, end)\n else:\n output.append([start, end])\n \n return output","repo_name":"yihui-hu/leetcode","sub_path":"0056-merge-intervals/0056-merge-intervals.py","file_name":"0056-merge-intervals.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17763373243","text":"from audioop import add\nimport os\nimport argparse\nimport time\nfrom turtle import hideturtle\nfrom unicodedata import numeric\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nimport networkx as nx\nimport datetime\nimport torchdiffeq as ode\nimport sys\nimport functools\nimport random\nfrom data_loader_hyper import NetDataset\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom prettytable import PrettyTable\nfrom torch.utils.tensorboard import SummaryWriter\nimport setproctitle\nimport datetime\nimport logging\nfrom model.resinf import ResInf\nfrom pathlib import Path\nfrom engine import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lr', type=float, default=0.001,\n help='Initial learning rate.')\nparser.add_argument('--weight_decay', type=float, default=1e-5,\n help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--dropout', type=float, 
default=0,\n help='Dropout rate (1 - keep probability).')\nparser.add_argument('--hidden', type=int, default=6,\n help='Number of hidden units.')\nparser.add_argument('--time_tick', type=int, default=100) # default=10)\n\nparser.add_argument('--gpu', type=int, default=0)\n\nparser.add_argument('--seed', type=int, default=2021, help='Random Seed')\nparser.add_argument('--T', type=float, default=200., help='Terminal Time')\nparser.add_argument('--operator', type=str,\n choices=['lap', 'norm_lap', 'kipf', 'norm_adj' ], default='norm_adj')\nparser.add_argument('--epoch', type=int, default=50)\nparser.add_argument('--train_size', type=int, default=1)\nparser.add_argument('--valid_size', type=int, default=1)\nparser.add_argument('--test_size', type=int, default=1)\nparser.add_argument('--rand_guess', type=bool, default=False)\nparser.add_argument('--layers', type=int, default=3)\nparser.add_argument('--use', type=str, default='start')\nparser.add_argument('--type', type=str, default='node')\nparser.add_argument('--causal', type=int, default=0)\nparser.add_argument('--K', type=int, default=11)\nparser.add_argument('--comment', type=str, default='normal')\nparser.add_argument('--mech', type=int, default=1)\nparser.add_argument('--asso', type=int, default=0)\nparser.add_argument('--use_model',type=str, default='transgnn')\nparser.add_argument('--decompo', type=str, default='None')\nparser.add_argument('--cross', type=int, default=0)\nparser.add_argument('--save', type=int, default=1)\nparser.add_argument('--emb_size',type=int,default=8)\nparser.add_argument('--hidden_layers_num', type=int, default=1)\nparser.add_argument('--pool_type', type=str, default='virtual')\nparser.add_argument('--pool_arch', type=str, default='global')\nparser.add_argument('--trans_layers', type=int, default=1)\nparser.add_argument('--trans_emb_size',type=int, default=8)\nparser.add_argument('--n_heads',type=int, default=4)\nparser.add_argument('--finetune',type=int, default=0)\nargs = parser.parse_args()\nif args.gpu >= 0:\n device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')\nelse:\n device = torch.device('cpu')\nif __name__ == '__main__':\n epsilon = 1e-6\n seed = 2021\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n dataset = NetDataset(mode = 'train', type=args.type, args=args)\n train_length = int(len(dataset) * 0.8)\n valid_length = int(len(dataset) * 0.1)\n test_length = len(dataset) - train_length - valid_length\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(dataset, (train_length, valid_length, test_length))\n train_data_loader = DataLoader(train_dataset, batch_size=args.train_size)\n valid_data_loader = DataLoader(valid_dataset, batch_size=args.valid_size)\n test_data_loader = DataLoader(test_dataset, batch_size=args.test_size)\n input_size = 1\n if args.use_model == 'transgnn':\n if args.mech == 1:\n args.K = 10\n args.layers = 3\n args.emb_size = 8\n args.hidden_layers_num = 1\n args.trans_layers = 1\n args.trans_emb_size = 32\n elif args.mech == 2:\n args.K = 5\n args.layers = 3\n args.emb_size = 16\n args.hidden_layers_num = 1\n args.trans_layers = 1\n args.trans_emb_size = 8\n elif args.mech == 3 or args.mech == 0:\n args.K = 11\n args.layers = 3\n args.emb_size = 8\n args.hidden_layers_num = 1\n args.trans_layers = 1\n args.trans_emb_size = 8\n model = ResInf(input_plane=args.K, seq_len = args.hidden, trans_layers=args.trans_layers, gcn_layers=args.layers, 
hidden_layers=args.hidden_layers_num, gcn_emb_size=args.emb_size, trans_emb_size=args.trans_emb_size, pool_type=args.pool_type, args=args,n_heads=args.n_heads).to(device)\n criterion = nn.BCELoss()\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n t_train = tqdm(train_data_loader, smoothing=0, mininterval=1.0)\n t_valid = tqdm(valid_data_loader,smoothing=0, mininterval=1.0)\n t_test = tqdm(test_data_loader,smoothing=0, mininterval=1.0)\n final_epoch, final_total_loss = train_valid(model, train_dataset, valid_dataset, t_train, t_valid, optimizer, criterion, args)\n test(model, t_test, criterion, args, final_epoch, final_total_loss)","repo_name":"tsinghua-fib-lab/ResInf","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38733488822","text":"import json\nimport logging\nimport os\nimport spacy\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import Dataset\n\nlog = logging.getLogger(__name__)\nnlp = spacy.load('en')\n\ndef get_tags(sentence):\n return [w.text for w in nlp(sentence.strip())]\n\n\ndef get_index_from_annotation(image_id_2_index, annotation):\n return image_id_2_index[annotation['image_id']]\n\n\ndef build_tags_json(annotation_path, tags_json_filename):\n image_tags = dict()\n image_paths = list()\n annotations = json.load(open(annotation_path))\n image_id_2_index = dict()\n\n for i, image in enumerate(annotations['images']):\n image_id_2_index[image['id']] = i\n image_paths.append(image['file_name'])\n for i, annotation in enumerate(annotations['annotations']):\n if i % 1000 == 999:\n print('Processed {:d}/{:d} annotations.'.format(i+1, len(annotations['annotations'])))\n image_index_new = get_index_from_annotation(image_id_2_index, annotation)\n if image_index_new not in image_tags:\n image_tags[image_index_new] = list()\n image_tags[image_index_new].extend(get_tags(annotation['caption']))\n\n with open(tags_json_filename, 'w', encoding='utf8') as tags_json_file:\n json.dump((image_paths, image_tags), tags_json_file)\n\n return image_paths, image_tags\n\n\ndef load_tags_json(tags_json_filename):\n with open(tags_json_filename, 'r') as tags_json_file:\n image_paths, image_tags = json.load(tags_json_file)\n return image_paths, image_tags\n\n\ndef get_tag_scores(image_tags):\n tag_scores = dict()\n for tags in image_tags.values():\n for tag in tags:\n tag = tag.lower()\n tag_scores[tag] = tag_scores.get(tag, 0) + 1\n\n return tag_scores\n\n\ndef get_filtered_tags(image_tags):\n tag_scores = get_tag_scores(image_tags)\n\n # Will keep only the tags that have at least 200 occurrences\n filtered_tags = dict()\n for image_idx, tags in image_tags.items():\n filtered_image_tags = []\n for tag in tags:\n if tag_scores.get(tag, 0) >= 200:\n filtered_image_tags.append(tag)\n\n if filtered_image_tags:\n filtered_tags[image_idx] = filtered_image_tags\n\n return filtered_tags\n\n\ndef get_image_score(image_tags, image_index):\n return len(image_tags.get(image_index, []))\n\n\ndef get_filtered_images(filtered_tags, image_indexes, min_score=3):\n return [img for img in image_indexes if get_image_score(filtered_tags, img) >= min_score]\n\n\ndef get_images_by_tag(tags):\n images_by_tag = dict()\n for image_id, tags in tags.items():\n for tag in tags:\n images_by_tag.setdefault(tag, set()).add(image_id)\n\n return images_by_tag\n\n\nclass MSCOCODataset(Dataset):\n\n def __init__(self, images_directory, 
annotation_path, transform=None):\n self.images_directory = images_directory\n self.transform = transform\n \n tags_json_filename = os.path.join(images_directory, \"tags.json\")\n try:\n self.image_paths, image_tags = load_tags_json(tags_json_filename)\n except FileNotFoundError:\n self.image_paths, image_tags = build_tags_json(annotation_path, tags_json_filename)\n\n image_indexes = list(range(len(self.image_paths)))\n\n filtered_tags = get_filtered_tags(image_tags)\n self.filtered_images = get_filtered_images(filtered_tags, image_indexes)\n\n # Because we've filtered out some of our images, we'll now map the previous image indices\n # to the new ones to avoid dealing with gaps.\n self.filtered_tags = self._build_filtered_tags_with_converted_image_indexes(filtered_tags)\n self.images_by_tag = get_images_by_tag(self.filtered_tags)\n\n def __len__(self):\n return len(self.filtered_images)\n\n def get_image_path(self, image_index):\n return os.path.join(self.images_directory, self.image_paths[image_index])\n\n def get_pil_image(self, index):\n return Image.open(self.get_image_path(self.filtered_images[index])).convert('RGB')\n\n def __getitem__(self, index):\n image = self.get_pil_image(index)\n if self.transform:\n try:\n image = self.transform(image)\n except Exception as e:\n log.warning(\"Could not transform image %s due to %s. Skipping.\", index, e)\n return torch.zeros(3, 224, 224)\n\n return image\n\n def _build_filtered_image_to_index_map(self):\n filtered_image_to_index = dict()\n filtered_image_paths = list()\n for i, image_index in enumerate(self.filtered_images):\n filtered_image_to_index[image_index] = i\n filtered_image_paths.append(self.image_paths[image_index])\n self.image_paths = filtered_image_paths\n return filtered_image_to_index\n\n def _build_filtered_tags_with_converted_image_indexes(self, filtered_tags):\n filtered_image_to_index = self._build_filtered_image_to_index_map()\n filtered_tags_with_converted_image_indexes = dict()\n for image, tags in filtered_tags.items():\n if image not in filtered_image_to_index:\n continue\n new_image_index = filtered_image_to_index[image]\n filtered_tags_with_converted_image_indexes[new_image_index] = tags\n\n return filtered_tags_with_converted_image_indexes\n","repo_name":"victorssilva/concreteness","sub_path":"mscoco.py","file_name":"mscoco.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"7072912469","text":"# Contest No.: 639\n# Problem No.: D\n# Solver: JEMINI\n# Date: 20200507\n\n##### FAST INPUT #####\nimport os\nimport sys\nfrom io import BytesIO, IOBase\n\nBUFSIZE = 8192\n\n\nclass FastIO(IOBase):\n newlines = 0\n\n def __init__(self, file):\n self._fd = file.fileno()\n self.buffer = BytesIO()\n self.writable = \"x\" in file.mode or \"r\" not in file.mode\n self.write = self.buffer.write if self.writable else None\n\n def read(self):\n while True:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n if not b:\n break\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines = 0\n return self.buffer.read()\n\n def readline(self):\n while self.newlines == 0:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n self.newlines = b.count(b\"\\n\") + (not b)\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines -= 1\n return self.buffer.readline()\n\n def flush(self):\n if 
self.writable:\n os.write(self._fd, self.buffer.getvalue())\n self.buffer.truncate(0), self.buffer.seek(0)\n\n\nclass IOWrapper(IOBase):\n def __init__(self, file):\n self.buffer = FastIO(file)\n self.flush = self.buffer.flush\n self.writable = self.buffer.writable\n self.write = lambda s: self.buffer.write(s.encode(\"ascii\"))\n self.read = lambda: self.buffer.read().decode(\"ascii\")\n self.readline = lambda: self.buffer.readline().decode(\"ascii\")\n\n\nsys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)\ninput = lambda: sys.stdin.readline().rstrip(\"\\r\\n\")\n##### END OF FAST INPUT #####\n#import sys\n\ndef main():\n n, m = map(int, input().split())\n grid = []\n for _ in range(n):\n temp = input()\n #temp2 = [_ for _ in temp]\n grid.append(temp)\n\n ansFlag = True\n wrow = 0\n wcol = 0\n # row check\n for i in range(n):\n invCnt = 0\n flag = True\n for j in range(m):\n if j > 0:\n if grid[i][j] != grid[i][j - 1]:\n invCnt += 1\n if (invCnt == 2 and grid[i][0] == \"#\") or invCnt >= 3:\n ansFlag = False\n if grid[i][j] == \"#\":\n flag = False\n if flag:\n wrow += 1 \n if not ansFlag:\n print(-1)\n return\n\n # col check\n for j in range(m):\n invCnt = 0\n flag = True\n for i in range(n):\n if i > 0:\n if grid[i][j] != grid[i - 1][j]:\n invCnt += 1\n if (invCnt == 2 and grid[0][j] == \"#\") or invCnt >= 3:\n ansFlag = False\n if grid[i][j] == \"#\":\n flag = False\n if flag:\n wcol += 1\n if not ansFlag:\n print(-1)\n return\n\n if wcol * wrow == 0 and wcol + wrow > 0:\n print(-1)\n return\n visitedSet = set()\n # count connected elements -> min of N \n ans = 0\n for i in range(n):\n for j in range(m):\n if grid[i][j] == \"#\" and (i, j) not in visitedSet:\n #BFS\n stack = [(i, j)]\n while stack:\n x, y = stack.pop()\n visitedSet.add((x, y))\n # make visited\n #grid[x][y] = \"*\"\n if x > 0 and (x - 1, y) not in visitedSet and grid[x - 1][y] == \"#\":\n stack.append((x - 1, y))\n if x < n - 1 and (x + 1, y) not in visitedSet and grid[x + 1][y] == \"#\":\n stack.append((x + 1, y))\n if y > 0 and (x, y - 1) not in visitedSet and grid[x][y - 1] == \"#\":\n stack.append((x, y - 1))\n if y < m - 1 and (x, y + 1) not in visitedSet and grid[x][y + 1] == \"#\":\n stack.append((x, y + 1))\n ans += 1\n print(ans)\n \n\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jinmin-Goh/Codeforces","sub_path":"#639_Div_2/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24783329095","text":"#!/usr/bin/env python\n\"\"\"\nVisualise Changes in Edge Weights\n=================================\n\nHere, we demonstrate how to visualise changes in edge weights over time.\nWe change both, the colour and the width of the edges depending on the weight.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.animation import FuncAnimation\nfrom netgraph import Graph\n\n# Simulate a dynamic network with\n# - 5 frames / network states,\n# - with 10 nodes at each time point,\n# - an expected edge density of 25%, and\n# - edge weights drawn from a Gaussian distribution.\ntotal_frames = 5\ntotal_nodes = 10\nadjacency_matrix = np.random.rand(total_nodes, total_nodes) < 0.25\nweight_matrix = np.random.randn(total_frames, total_nodes, total_nodes)\n\n# Normalise the weights, such that they are on the interval [0, 1].\n# They can then be passed directly to matplotlib colormaps (which expect floats on that interval).\nvmin, vmax = -2, 
2\nweight_matrix[weight_matrix<vmin] = vmin\nweight_matrix[weight_matrix>vmax] = vmax\nweight_matrix -= vmin\nweight_matrix /= vmax - vmin\n\ncmap = plt.cm.RdGy\n\nfig, ax = plt.subplots()\ng = Graph(adjacency_matrix, edge_cmap=cmap, arrows=True, ax=ax)\n\ndef update(ii):\n    artists = []\n    for jj, kk in zip(*np.where(adjacency_matrix)):\n        w = weight_matrix[ii, jj, kk]\n        artist = g.edge_artists[(jj, kk)]\n        artist.set_facecolor(cmap(w))\n        artist.update_width(0.03 * np.abs(w-0.5)) # np.abs(w-0.5) so that large negative edges are also wide\n        artists.append(artist)\n    return artists\n\nanimation = FuncAnimation(fig, update, frames=total_frames, interval=200, blit=True)\n","repo_name":"paulbrodersen/netgraph","sub_path":"docs/source/sphinx_gallery_animations/plot_02_animate_edges.py","file_name":"plot_02_animate_edges.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":586,"dataset":"github-code","pt":"53"} +{"seq_id":"22184910138","text":"from pymdc.rest import REST\nfrom pymdc.keypair import ECDSA\n\n# A list of ECDSA objects for a user account.\n# Requires HTTP basic auth.\nclass Keys():\n    def __init__(self, user):\n        self.user = user\n        self.key_pairs = []\n\n    def list(self, callback=None):\n        resource = \"/keys\"\n        ret = REST(\n            \"GET\",\n            resource,\n            callback=callback,\n            auth=self.user\n        )\n\n        return ret\n\n    # Hex ECDSA pub key.\n    def create(self, key_pair=None, callback=None):\n        key_pair = key_pair or ECDSA()\n        if key_pair not in self.key_pairs:\n            self.key_pairs.append(key_pair)\n\n        params = {\n            \"key\": key_pair.get_public_key(\"hex\")\n        }\n\n        ret = REST(\n            \"POST\",\n            \"/keys\",\n            params,\n            callback=callback,\n            auth=self.user\n        )\n\n        return ret\n\n    def delete(self, key_pair, callback=None):\n        if key_pair in self.key_pairs:\n            self.key_pairs.remove(key_pair)\n\n        resource = \"/keys/%s\" % key_pair.get_public_key(\"hex\")\n        ret = REST(\n            \"DELETE\",\n            resource,\n            callback=callback,\n            auth=self.user\n        )\n\n        return ret\n","repo_name":"StorjOld/bridge-client-python","sub_path":"pymdc/api/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29903324411","text":"import numpy as np\nimport os\nimport pathlib\nimport skimage.io as io\nimport skimage.transform as tf\nimport torch\nfrom haroun import Data, Model\nfrom haroun.augmentation import augmentation\nfrom haroun.losses import rmse\nfrom NeuralNetworks import Network\n\n# Process of loading and labelling the data\n# Load up the pictures\n\n\ndef loaderOfPics():\n    path = pathlib.Path.cwd().parent / \"FakeFaceDetection\" / \"real_and_fake_face_detection\"\n    path = path / \"real_and_fake_face\"\n    images, labels = [], []\n\n    for directory in os.listdir(path):\n        dataPath = path / directory\n\n        for im in os.listdir(dataPath)[:]:\n            image = io.imread(f\"{dataPath}/{im}\")\n            image = tf.resize(image, (64, 64))\n            images.append(image)\n            if directory == \"training_fake\":\n                labels.append(\"fake\")\n            elif directory == \"training_real\":\n                labels.append(\"real\")\n\n    images = np.array(images)\n    labels = np.array(labels)\n\n    images, labels = augmentation(images, labels, flip_y=True, flip_x=True, brightness=True)\n\n    return images, labels\n\n\n# Labelling\nclasses = {'real': 0, 'fake': 1}\ndata = Data(loader=loaderOfPics(), classes=classes)\ndata.stat()\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndata.dataset(split_size=0.05, shuffle=True, random_state=42, images_format=torch.float32, 
labels_format=torch.float32,\n permute=True, one_hot=True, device=device)\nnet = Network()\ncatfishClassifier = Model(net, \"adam\", rmse, device)\ncatfishClassifier.train(train_data=(data.train_inputs, data.train_outputs),\n val_data=(data.val_inputs, data.val_outputs),\n epochs=1, patience=20, batch_size=100, learning_rate=1.0E-3)\n\ncatfishClassifier.evaluate(test_data=(data.test_inputs, data.test_outputs))\ncatfishClassifier.plot()\n","repo_name":"TemurKhabibullaev/FakeFaceDetection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23092887363","text":"import json\n\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom freezegun import freeze_time\nfrom mock import Mock, patch\n\nfrom tsuru_dashboard.auth.views import LoginRequiredView\nfrom tsuru_dashboard.dashboard.views import DeploysView\n\n\nclass DeploysViewTest(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n\n def test_requires_login(self):\n assert issubclass(DeploysView, LoginRequiredView), \\\n \"DeploysView should inherit from LoginRequiredView\"\n\n @patch(\"requests.get\")\n @patch(\"tsuru_dashboard.auth.views.token_is_valid\")\n def test_get_no_deploys(self, token_is_valid, get):\n token_is_valid.return_value = True\n resp = Mock()\n resp.json.return_value = None\n get.return_value = resp\n request = self.factory.get(\"/dashboard/deploys\")\n request.session = {\"tsuru_token\": \"sometoken\"}\n response = DeploysView.as_view()(request)\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content)\n self.assertEqual({\"last_deploys\": 0, \"errored\": 0}, result)\n\n @freeze_time(\"2012-04-01 16:32:15\", tz_offset=0)\n @patch(\"requests.get\")\n @patch(\"tsuru_dashboard.auth.views.token_is_valid\")\n def test_get_filtering(self, token_is_valid, get):\n token_is_valid.return_value = True\n resp = Mock()\n resp.json.return_value = [\n {\"Timestamp\": \"2012-03-31T12:10:15Z\",\n \"Error\": \"something went wrong\"},\n {\"Timestamp\": \"2012-03-31T09:02:15-0300\",\n \"Error\": None},\n {\"Timestamp\": \"2012-04-01T19:02:15+0800\",\n \"Error\": None},\n {\"Timestamp\": \"2012-03-31T00:02:15-0800\",\n \"Error\": None},\n {\"Timestamp\": \"2012-04-01T10:02:15.903-0300\",\n \"Error\": \"a failure in the deploy\"},\n {\"Timestamp\": \"2012-03-31T13:32:16-0300\",\n \"Error\": None},\n {\"Timestamp\": \"2012-03-31T20:00:00.903Z\",\n \"Error\": None},\n {\"Timestamp\": \"2012-03-31T20:00:00\",\n \"Error\": \"deployment failed\"},\n ]\n get.return_value = resp\n request = self.factory.get(\"/dashboard/deploys\")\n request.session = {\"tsuru_token\": \"sometoken\"}\n response = DeploysView.as_view()(request)\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content)\n self.assertEqual({\"last_deploys\": 5, \"errored\": 2}, result)\n","repo_name":"tsuru/tsuru-dashboard","sub_path":"tsuru_dashboard/dashboard/tests/test_deploys_view.py","file_name":"test_deploys_view.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"53"} +{"seq_id":"10848427001","text":"# lab №2 ex. 
1 | Var 4 Sergey Zubrilin 10/3/18\n\n\nimport math\nfrom random import uniform\nimport numpy as np\n\n\nclass Metric:\n \"\"\"\n Класс описываюший различные метрики\n \"\"\"\n @staticmethod\n def manhattan(a, b):\n \"\"\"\n ex1,d) Определить функцию manhattan(A, B) для нахождения манхэттенского\n расстояния между точками A и B.\n :param a: point_coordinates (two_elements_{list_or_tuple_or_array})\n :param b: point_coordinates (two_elements_{list_or_tuple_or_array})\n :return: float\n \"\"\"\n return math.fabs(a[0] - b[0]) + math.fabs(a[1] - b[1])\n\n @staticmethod\n def metric_for_ex4(a, b):\n \"\"\"\n ex4) Описывает заданную в ex4 метрику\n :param a: point_coordinates (two_elements_{list_or_tuple_or_array})\n :param b: point_coordinates (two_elements_{list_or_tuple_or_array})\n :return: float\n \"\"\"\n return min(math.fabs(a[0]-b[0]), math.fabs(a[1]-b[1]))\n\n\nclass MatrixTool:\n def __init__(self):\n \"\"\"\n При инициализации, дабовляеться два пустых столбца в матрицу (для задание b, c и создает массив\n записей производный от исходной матрицы\n \"\"\"\n dtype = [('X', float), ('Y', float), ('O', float), ('coordinate quarter', int)]\n self.matrix_ex1 = np.array([self.random_point_in_square() for _ in range(4)], dtype=np.float16)\n self.matrix = np.array([tuple(el) for el in np.hstack((self.matrix_ex1,\n 0*np.ones((self.matrix_ex1.shape[0], 2)))).tolist()], dtype=dtype)\n self.r_matrix = self.random_matrix()\n\n @staticmethod\n def random_point_in_square(side=2):\n \"\"\"\n функция, генерирующая случайные координаты точек входяшии в\n квадрат. Функция возвращает list из вдвух значений float [float(), float()] в случае если условие |x| + |y| <= d\n возваращает True.\n \"\"\"\n point = [uniform(-side / 2, side / 2) for _ in range(2)]\n\n return point\n\n @staticmethod\n def random_matrix(start=0, stop=10, i=10, j=10):\n \"\"\"\n generate random matrix\n :param start: int\n :param stop: int\n :param i: int\n :param j: int\n :return: numpy.ndarray\n \"\"\"\n return np.array(np.random.randint(start, stop, size=(i, j)))\n\n def sort_by_max_range(self):\n \"\"\"\n b)\"Отсортировать точки в порядке возрастания расстояния от центра\n квадрата.\"\n подсчитывает ростояние от точки до начала координат и сотрирует по возрастанию (по этому расстоянию)\n \"\"\"\n self.matrix['O'] = (self.matrix['X']**2 + self.matrix['Y']**2)**(1/2)\n self.matrix = np.sort(self.matrix, order='O')\n\n def nearest_to_center(self):\n \"\"\"\n a) \"Найти точку, ближайшую к центру квадрата.\"\n :return: tuple.\n \"\"\"\n self.sort_by_max_range()\n return self.matrix[0]\n\n # def sort_by_coordinate_quarter(self):\n # \"\"\"\n # Определяет координатнуй четверть входных точек.\n # \"\"\"\n # for i in range(len(self.matrix)):\n # if self.matrix[i][0] > 0 and self.matrix[i][1] > 0:\n # self.matrix[i][3] = 1\n # elif self.matrix[i][0] < 0 < self.matrix[i][1]:\n # self.matrix[i][3] = 2\n # elif self.matrix[i][0] < 0 and self.matrix[i][1] < 0:\n # self.matrix[i][3] = 3\n # elif self.matrix[i][0] > 0 > self.matrix[i][1]:\n # self.matrix[i][3] = 4\n # else:\n # self.matrix[i][3] = 0\n\n def coordinate_quarter(self, quarter=1):\n \"\"\"\n с) \"Профильтровать массив, оставив только точки из первого квадранта.\n \"\"\"\n res = self.matrix[(self.matrix['X'] > 0) & (self.matrix['Y'] > 0)]\n if len(res) > 0:\n return res\n else:\n return f'Not point in {quarter} coordinate quarter'\n\n def range_matrix(self, metric=Metric.manhattan):\n \"\"\"\n Возврашает матрицу растояний в заданной метрике\n \"\"\"\n l = len(self.matrix)\n range_matrix 
= np.zeros(shape=(l, l))\n count = 0\n for i in range(count, l):\n count += 1\n for j in range(count, l):\n range_matrix[i, j] = metric(self.matrix[i], self.matrix[j])\n\n return range_matrix + range_matrix.T\n\n def nearest_or_farthest_point(self, range_matrix, mod='nearest'):\n \"\"\"\n e) В исходном массиве найти пару точек, наиболее близких в смысле\n манхэттенской метрики.\n :param metric: method of class Metric\n :param mod: 'farthest' or 'nearest'\n :return: ближайшие точки по манхэттанской метрике\n \"\"\"\n if mod == 'nearest':\n\n ind = np.unravel_index(np.argmin(range_matrix, axis=None), range_matrix.shape)\n return [self.matrix[:][ind[0]], self.matrix[:][ind[1]]]\n\n if mod == 'farthest':\n ind = np.unravel_index(np.argmax(range_matrix, axis=None), range_matrix.shape)\n return [self.matrix[:][ind[0]], self.matrix[:][ind[1]]]\n\n\ndef test_ex1():\n\n res = MatrixTool()\n print(res.matrix)\n print('a)------------------nearest_to_center_point-------------------')\n print(res.nearest_to_center())\n print('b)----X------------Y-------------O--------quarter-------------')\n res.sort_by_max_range()\n print(res.matrix)\n print('c)-----------------first_coordinate_quarter_points-------------')\n print(res.coordinate_quarter())\n print('d)-----------------manhattan_range_matrix----------------------')\n print(res.range_matrix(metric=Metric.manhattan))\n print('e)-----------------nearest_point_by_manhattan_metric-----------')\n print(res.nearest_or_farthest_point(res.range_matrix()))\n\n\ntest_ex1()\n\n\n\n\n\n\n","repo_name":"ZegsZub/Python_sql_corse_labs","sub_path":"lab_2/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35912125362","text":"import requests\n\n\n__all__ = [\n 'NESTServerClient',\n]\n\n\ndef encode(response):\n if response.ok:\n return response.json()\n elif response.status_code == 400:\n raise response.text\n\n\nclass NESTServerClient:\n\n def __init__(self, host='localhost', port=52425):\n self.url = 'http://{}:{}/'.format(host, port)\n self.headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\n def __getattr__(self, call):\n def method(*args, **kwargs):\n kwargs.update({'args': args})\n response = requests.post(self.url + 'api/' + call, json=kwargs, headers=self.headers)\n return encode(response)\n return method\n\n def exec_script(self, source, return_vars=None):\n params = {\n 'source': source,\n 'return': return_vars,\n }\n response = requests.post(self.url + 'exec', json=params, headers=self.headers)\n return encode(response)\n\n def from_file(self, filename, return_vars=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n script = ''.join(lines)\n print('Execute script code of {}'.format(filename))\n print('Return variables: {}'.format(return_vars))\n print(20 * '-')\n print(script)\n print(20 * '-')\n return self.exec_script(script, return_vars)\n","repo_name":"VRGroupRWTH/insite","sub_path":"test/nest_test_network/NESTServerClient/NESTServerClient.py","file_name":"NESTServerClient.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39292403678","text":"from plots import *\nfrom downloads import *\nfrom utils import *\nimport datetime\nfrom boosting.boosting import update_weights_boosting\nfrom boosting.weighted_loss import *\nfrom training import *\nfrom instance_based import 
instance_selection\n\n\nclass Experiment:\n def __init__(self, baseline, dataset, distortion_type, mean, std, property, property_perc, epochs_boosting=5,\n boosting_perc=None, most=True, random=False, diversity=False, alternate=False, classes=None,\n classes_to_distort=None):\n self.baseline = baseline\n self.dataset = dataset\n self.distortion_type = distortion_type\n self.mean = mean\n self.std = std\n self.property = property\n self.property_perc = property_perc\n self.most = most\n self.diversity = diversity\n self.random = random\n self.classes = classes\n self.classes_to_distort = classes_to_distort\n self.alternate = alternate\n self.epochs_boosting = epochs_boosting\n self.boosting_perc = boosting_perc\n if classes is not None and len(classes) == 2:\n self.criterion = torch.nn.BCELoss()\n else:\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def setup_experiment(self):\n try:\n self.model_clean = load_model_from_disk(self.baseline, output_dim=len(self.classes))\n except FileNotFoundError:\n print('Baseline not found. Training...', end='')\n train_baseline(self.dataset, self.baseline, classes=self.classes, verbose=False)\n time.sleep(3)\n print('done!')\n self.model_clean = load_model_from_disk(self.baseline)\n\n try:\n X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test = load_data_from_disk(self.dataset,\n self.distortion_type,\n self.mean, self.std)\n\n except FileNotFoundError:\n print('Noisy data not found. Generating...', end='')\n gen_distorted_dataset(self.dataset, self.distortion_type, self.mean, self.std,\n classes_to_distort=self.classes_to_distort)\n print('done!')\n time.sleep(3)\n X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test = load_data_from_disk(self.dataset,\n self.distortion_type,\n self.mean, self.std)\n\n\n # Filtering the selected classes\n if self.classes is not None:\n X_train, _ = select_classes(X_train, y_train, self.classes, convert_labels=True)\n X_test, _ = select_classes(X_test, y_test, self.classes, convert_labels=True)\n X_train_noisy, y_train = select_classes(X_train_noisy, y_train, self.classes, convert_labels=True)\n X_test_noisy, y_test = select_classes(X_test_noisy, y_test, self.classes, convert_labels=True)\n\n return self.model_clean, (X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test)\n\n ################################################ RUNS ##############################################################\n\n def run_jacobian(self, epochs=40, flatten=False):\n # Setup\n model, (X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test) = self.setup_experiment()\n\n # Validation splitting\n (X_train_noisy, y_train), (X_valid_noisy, y_valid) = dataset_split(X_train_noisy, y_train,\n return_data='samples')\n\n # Image pre-processing: scale pixel values\n X_train_noisy_sc, X_mean, X_std = image_preprocessing(X_train_noisy, scale_only=False)\n X_valid_noisy_sc, _, _ = image_preprocessing(X_valid_noisy, seq_mean=X_mean, seq_std=X_std, scale_only=False)\n X_test_noisy_sc, _, _ = image_preprocessing(X_test_noisy, seq_mean=X_mean, seq_std=X_std, scale_only=False)\n\n n_classes = len(np.unique(y_train))\n criterion = nn.CrossEntropyLoss()\n\n # If random, I select a subset of indices as long as the length of the jacobian-based selected indices\n if self.random:\n total_len = X_train_noisy_sc.shape[0]\n subset_len = 9839\n selected_indices = np.random.choice(total_len, subset_len, replace=False)\n writer = SummaryWriter(\n 'runs/' + self.dataset + '_random_jacobian_' + str(n_classes))\n else:\n\n selected_indices 
= instance_selection.instance_selection_no_hessian(model=model,\n X_train=X_train_noisy_sc,\n y_train=y_train,\n X_valid=X_valid_noisy_sc,\n y_valid=y_valid,\n criterion=criterion,\n flatten=flatten,\n return_influences=False,\n save_jacobian_train=False)\n\n writer = SummaryWriter(\n 'runs/' + self.dataset + '_jacobian_' + str(n_classes))\n\n print('Total instances selected: {}/{}'.format(len(selected_indices), X_train_noisy_sc.shape[0]))\n\n X_noisy_subset_sc, y_subset = X_train_noisy_sc[selected_indices], y_train[selected_indices]\n\n noisy_subset_dataloader = get_data_loader(X_noisy_subset_sc, y_subset)\n valid_noisy_dl = get_data_loader(X_valid_noisy_sc, y_valid)\n test_noisy_dl = get_data_loader(X_test_noisy_sc, y_test)\n\n # Train\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n train_losses, train_accuracies, val_accuracies, \\\n val_losses, _, model = train(model, noisy_subset_dataloader,\n valid_noisy_dl, test_noisy_dl, optimizer,\n self.criterion, device, epochs=epochs,\n early_stopping=False, writer=writer,\n save_model=False, model_path='', pbar=True,\n flatten=flatten, start_epoch=0)\n\n return train_losses, train_accuracies, val_accuracies, val_losses, model\n\n def run_boosting(self, lr=None, loss='cross'):\n # Setup\n model, (X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test) = self.setup_experiment()\n n_classes = np.unique(y_train)\n\n if loss == 'exp':\n y_train = convert_labels(y_train, [0, 1], [-1, 1])\n y_test = convert_labels(y_test, [0, 1], [-1, 1])\n\n (X_train_noisy, y_train), (X_valid_noisy, y_valid) = dataset_split(X_train_noisy, y_train,\n return_data='samples')\n\n # Image pre-processing: scale pixel values\n X_train_noisy_sc, X_mean, X_std = image_preprocessing(X_train_noisy, scale_only=False)\n X_valid_noisy_sc, _, _ = image_preprocessing(X_valid_noisy, seq_mean=X_mean, seq_std=X_std, scale_only=False)\n X_test_noisy_sc, _, _ = image_preprocessing(X_test_noisy, seq_mean=X_mean, seq_std=X_std, scale_only=False)\n\n # TODO Mapping X to y: better come up with a smarter solution\n X_to_y = dict(zip([tuple(x.flatten()) for x in X_train_noisy_sc], y_train))\n\n # Noisy Dataloaders\n valid_noisy_dl = get_data_loader(X_valid_noisy_sc, y_valid)\n test_noisy_dl = get_data_loader(X_test_noisy_sc, y_test)\n\n X_test_sc, _, _ = image_preprocessing(X_test)\n test_clean_dl = get_data_loader(X_test_sc, y_test)\n print('Sanity check | Model performance on clean test set: {}'.format(evaluate(model,\n test_loader=test_clean_dl,\n device=device)))\n print('Sanity check | Model performance on noisy test set: {}'.format(evaluate(model,\n test_loader=test_noisy_dl,\n device=device)))\n\n train_losses = []\n train_accuracies = []\n valid_accuracies = []\n valid_losses = []\n\n learning_rate_collection = [0]\n epsilon_collection = [0]\n\n _, weights_boosting = init_loss(X_train_noisy_sc, loss=loss)\n\n if self.boosting_perc:\n if lr is not None:\n writer = SummaryWriter(\n 'runs/' + self.dataset + '_boosting_' + str(self.boosting_perc) + '_lr=' + str(lr) + '_' + loss\n + str(n_classes))\n else:\n writer = SummaryWriter(\n 'runs/' + self.dataset + '_boosting_' + str(self.boosting_perc) + '_' + loss + str(n_classes))\n elif lr and not self.boosting_perc:\n writer = SummaryWriter('runs/' + self.dataset + '_boosting_lr=' + str(lr) + '_' + loss + str(n_classes))\n else:\n if self.random:\n writer = SummaryWriter('runs/' + self.dataset + '_random' + str(self.property_perc) + '_' + loss\n + str(n_classes))\n\n global_epoch = 0\n\n for epoch in 
range(self.epochs_boosting):\n\n if self.boosting_perc:\n points_weights = list(weights_boosting.items())\n if epoch == 0:\n points_weights = sorted(points_weights, key=lambda x: x[1])[::-1][\n :int(1.0 * len(points_weights))]\n else:\n points_weights = sorted(points_weights, key=lambda x: x[1])[::-1][\n :int(self.boosting_perc * len(points_weights))]\n points = [pw[0] for pw in points_weights]\n\n y_subset = np.array([X_to_y[p] for p in points])\n X_noisy_subset_sc = np.array([np.array(p).reshape((32, 32, 3)) for p in points])\n\n else:\n # In order to compare with the boosting method, the first epochs must be done with the full dataset\n if self.random and epoch == 0:\n indices = get_random_subset(X_train_noisy_sc, y_train, self.property_perc,\n return_indices=True)\n\n indices = indices[:int(1.0 * len(X_train_noisy_sc))]\n\n # Only draw the random samples at the second epoch, then use always the same\n if self.random and epoch == 1:\n indices = get_random_subset(X_train_noisy_sc, y_train, self.property_perc,\n return_indices=True)\n\n indices = indices[:int(self.property_perc * len(X_train_noisy_sc))]\n\n X_noisy_subset_sc, y_subset = X_train_noisy_sc[indices], y_train[indices]\n\n print('Sanity check | Epoch {} - Training set size: {}'.format(epoch, len(X_noisy_subset_sc)))\n\n self.criterion = WeightedLoss(X=X_train_noisy_sc, weights_boosting=weights_boosting, loss=loss)\n\n print('Sanity check | Pre-training evaluation of a single sample...')\n rnd_idx = np.random.randint(0, len(X_train_noisy_sc))\n self.evaluate_single_sample(epoch, model, X_train_noisy_sc, y_train, idx=rnd_idx,\n weights_boosting=weights_boosting,\n lr=lr, loss=loss)\n\n # Data Loader Generation\n noisy_subset_dataloader = get_data_loader(X_noisy_subset_sc, y_subset, shuffle=True)\n\n # Train\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.00001)\n\n if epoch == 0:\n sub_epochs = 3\n else:\n sub_epochs = 5\n epoch_train_losses, epoch_train_accuracies, epoch_val_accuracies, \\\n epoch_val_losses, global_epoch, model = train(model, noisy_subset_dataloader,\n valid_noisy_dl, test_noisy_dl, optimizer,\n self.criterion, device, epochs=sub_epochs,\n early_stopping=False, writer=writer,\n save_model=False, model_path='', pbar=True,\n flatten=False, start_epoch=global_epoch + 1)\n\n train_losses.append(epoch_train_losses)\n train_accuracies.append(epoch_train_accuracies)\n valid_losses.append(epoch_val_losses)\n valid_accuracies.append(epoch_val_accuracies)\n\n if lr != 0.0 and not self.random:\n weights_boosting = update_weights_boosting(model,\n weights_boosting,\n X_train_noisy_sc, y_train,\n learning_rate_collection=learning_rate_collection,\n epsilon_collection=epsilon_collection,\n device=device,\n lr=lr, loss=loss)\n\n print('Sanity check | Post-training evaluation of a single sample...')\n self.evaluate_single_sample(epoch, model, X_train_noisy_sc, y_train, idx=rnd_idx,\n weights_boosting=weights_boosting,\n lr=lr, loss=loss)\n\n # Evaluation\n test_epoch_acc = evaluate(model, test_noisy_dl, device)\n\n print('---------------------------------------------------------------------------------------------------')\n print('Boosting epoch {:3d} | Train loss: {:5f} | Train acc: {:5f} | Val acc: {:5f} | Val loss: {:5f}'\n ' | Test acc: {:5f} \\nBoosting parameters: Epsilon: {} | Lambda: {}'.format(epoch,\n epoch_train_losses[-1],\n epoch_train_accuracies[\n -1],\n epoch_val_accuracies[-1],\n epoch_val_losses[-1],\n test_epoch_acc,\n epsilon_collection[-1],\n learning_rate_collection[\n -1]))\n 
print('---------------------------------------------------------------------------------------------------')\n\n final_test_acc = evaluate(model, test_noisy_dl, device)\n print('Your final test accuracy is {}'.format(final_test_acc))\n\n # generate_boosting_plots(self.epochs_boosting, train_accuracies, train_losses, valid_accuracies, weights,\n # learning_rate_collection, epsilon_collection, lr=lr)\n\n return train_accuracies, valid_accuracies\n\n def evaluate_single_sample(self, epoch, model, X_train, y_train, idx, weights_boosting, lr, loss):\n model.eval()\n x, y_true = X_train[idx], y_train[idx]\n x = copy.deepcopy(x)\n x = np.moveaxis(x, source=-1, destination=0).astype(np.float32)\n x = np.expand_dims(x, axis=0)\n x = torch.from_numpy(x)\n x = x.to(device)\n model = model.to(device)\n\n y_hat = model(x.float()).cpu()\n\n if loss == 'exp':\n y_hat = torch.where(y_hat > 0, torch.ones(1), torch.ones(1) * (-1))\n else:\n if y_hat.shape[1] <= 1:\n y_hat = torch.where(y_hat > 0.5, torch.ones(1), torch.zeros(1))\n else:\n y_hat = torch.argmax(y_hat.data, 1)\n\n w = weights_boosting[tuple(X_train[idx].flatten())]\n\n print('Epoch: {} | Sample index: {} | Y_hat: {} | Y_true: {} | Weight: {} | Lr: {}'.format(epoch, idx,\n y_hat.item(),\n y_true,\n w, lr))\n\n def run_usps(self):\n try:\n model = load_model_from_disk(self.baseline)\n except RuntimeError:\n train_baseline(self.dataset, self.baseline, classes=self.classes, verbose=False)\n\n (X_train, y_train), (X_test, y_test) = load_USPS()\n\n (X_train, y_train), (X_valid, y_valid) = dataset_split(X_train, y_train, return_data='samples')\n train_loader = get_data_loader(X_train, y_train, shuffle=False)\n val_loader = get_data_loader(X_valid, y_valid, shuffle=False)\n test_loader = get_data_loader(X_test, y_test, shuffle=False)\n\n # Model path and Writer\n model_path, writer = self.get_model_path_and_writer()\n\n # Optimizer and criterion\n optimizer = self.get_optimizer(model)\n criterion = nn.CrossEntropyLoss()\n\n acc = evaluate(model, test_loader, device, flatten=True)\n print(\"Accuracy before training = {}\".format(acc), flush=True)\n\n train(model, train_loader, val_loader, test_loader, optimizer,\n criterion, device, writer, model_path=model_path, pbar=True, flatten=True)\n\n # Evaluation\n acc = evaluate(model, test_loader, device, flatten=True)\n print(\"Accuracy after training = {}\".format(acc))\n\n def visualize_boundary(self, total_epochs=50, recompute_epochs=1):\n model, (X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test) = self.setup_experiment()\n train_loader_noisy = get_data_loader(X_train_noisy, y_train, shuffle=False)\n test_loader_noisy = get_data_loader(X_test_noisy, y_test, shuffle=False)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n model_path, writer = self.get_model_path_and_writer()\n\n flatten = False\n epoch_features = []\n epoch_entropy_indices = []\n epoch_y = []\n unique_labels = list(np.unique(y_train))\n self.alternate = False\n\n i = 0\n while i < total_epochs:\n model.to(device)\n\n if self.random:\n indices = get_random_subset(X_train_noisy, y_train, self.property_perc, return_indices=True)\n else:\n if self.alternate and i % 2 == 0:\n indices = get_random_subset(X_train_noisy, y_train, self.property_perc, return_indices=True)\n else:\n indices = compute_samples_property(model, X_train_noisy, y_train, prop='entropy',\n unique_labels=unique_labels, indices=True)\n if self.most:\n indices = indices[::-1]\n indices = indices[:int(self.property_perc * len(indices))]\n\n 
X_train_noisy_sub, y_train_sub = X_train_noisy[indices], y_train[indices]\n\n (X_t, y_t), (X_v, y_v) = dataset_split(X_train, y_train, return_data='samples')\n\n # Gather data to plot\n epoch_y.append(y_train)\n epoch_entropy_indices.append(indices)\n\n # Create dataloaders\n train_loader_noisy_sub = get_data_loader(X_t, y_t, shuffle=False)\n val_loader_noisy_sub = get_data_loader(X_v, y_v, shuffle=False)\n\n # Train\n train(model, train_loader_noisy_sub, val_loader_noisy_sub, test_loader_noisy, optimizer,\n self.criterion, device, epochs=i + recompute_epochs, early_stopping=False,\n writer=writer, start_epoch=i, save_model=False, model_path=model_path, pbar=True,\n flatten=flatten)\n\n # Compute features\n features = compute_features(model, data_loader=train_loader_noisy, device=device)\n epoch_features.append(features)\n\n i += recompute_epochs\n\n visualize_decision_boundary(epoch_features, epoch_y, epoch_entropy_indices=epoch_entropy_indices, interval=800)\n\n def run_recompute(self, total_epochs=50, recompute_epochs=1):\n # Setup\n model, (X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test) = self.setup_experiment()\n test_loader_noisy = get_data_loader(X_test_noisy, y_test, shuffle=False)\n print('Starting accuracy: {:.6f}'.format(evaluate(model, test_loader_noisy, device)))\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n model_path, writer = self.get_model_path_and_writer()\n\n flatten = False\n use_lambda = False\n l = 0.001\n\n # # TODO SPLIT ONLY ONCE TRAIN AND VALID BEFORE SUBSAMPLING\n # (X_train_noisy, y_train), (X_valid_noisy, y_valid) = self.train_validation_split(X_train_noisy, y_train,\n # data_loader=False)\n\n if self.dataset == 'MNIST':\n flatten = True\n\n unique_labels = list(np.unique(y_train))\n old_values = np.zeros(len(X_train_noisy))\n old_indices_sub = [0]\n\n i = 0\n while i < total_epochs:\n model.to(device)\n\n # Generate specific subset\n if self.random:\n X_train_noisy_sub, y_train_sub = get_random_subset(X_train_noisy, y_train, self.property_perc)\n else:\n if use_lambda:\n values = compute_samples_property(model, X_train_noisy, y_train, unique_labels,\n self.property, indices=False, flatten=flatten)\n values = np.array(values)\n values = np.interp(values, (values.min(), values.max()), (0, 1))\n new_values = old_values - l * (old_values - values)\n n_samples = int(len(new_values) * self.property_perc)\n indices = np.argsort(new_values)\n if self.most: indices = indices[::-1]\n indices_sub = indices[:n_samples]\n print('Samples changed: {}'.format(len(np.setdiff1d(indices_sub, old_indices_sub))))\n old_values = new_values\n old_indices_sub = indices_sub\n X_train_noisy_sub, y_train_sub = X_train_noisy[indices_sub], y_train[indices_sub]\n else:\n if self.alternate and i % 2 == 0:\n X_train_noisy_sub, y_train_sub = get_random_subset(X_train_noisy, y_train, self.property_perc)\n else:\n X_train_noisy_sub, y_train_sub = get_samples_by_property(model, X_train_noisy, y_train,\n self.property_perc, self.most,\n prop=self.property, diversity=False,\n flatten=flatten)\n # Create DataLoader objects\n # TODO THIS IS NORMAL MODE\n train_loader_noisy_sub, val_loader_noisy_sub = dataset_split(X_train_noisy_sub, y_train_sub,\n return_data='data_loader')\n # TODO THIS IS USING THE SAME VALIDATION\n # train_loader_noisy_sub = get_data_loader(X_train_noisy_sub, y_train_sub)\n # val_loader_noisy_sub = get_data_loader(X_valid_noisy, y_valid)\n\n # Training\n train(model, train_loader_noisy_sub, val_loader_noisy_sub, test_loader_noisy, optimizer,\n 
self.criterion, device, epochs=recompute_epochs, early_stopping=False,\n writer=writer, start_epoch=i, save_model=False, model_path=model_path, pbar=True,\n flatten=flatten)\n\n i += recompute_epochs\n\n def run(self):\n # Setup\n model, (X_train_noisy, X_test_noisy, X_train, X_test, y_train, y_test) = self.setup_experiment()\n\n # Scale datasets\n X_train, X_mean, X_std = image_preprocessing(X_train, scale_only=False)\n X_test, _, _ = image_preprocessing(X_test, seq_mean=X_mean, seq_std=X_std, scale_only=False)\n\n X_train_noisy, X_noisy_mean, X_noisy_std = image_preprocessing(X_train_noisy, scale_only=False)\n X_test_noisy, _, _ = image_preprocessing(X_test_noisy, seq_mean=X_noisy_mean, seq_std=X_noisy_std,\n scale_only=False)\n\n if self.dataset == 'MNIST':\n flatten = True\n else:\n flatten = False\n\n # Generate specific subset\n if self.random:\n X_train_noisy_sub, y_train_sub = get_random_subset(X_train_noisy, y_train, self.property_perc)\n else:\n X_train_noisy_sub, y_train_sub = get_samples_by_property(model, X_train_noisy, y_train,\n self.property_perc, self.most,\n prop=self.property,\n flatten=flatten)\n\n # Create DataLoader objects\n train_loader_noisy_sub, val_loader_noisy_sub = dataset_split(X_train_noisy_sub, y_train_sub,\n return_data='data_loader')\n\n test_loader_noisy = get_data_loader(X_test_noisy, y_test, shuffle=False)\n test_loader = get_data_loader(X_test, y_test, shuffle=False)\n\n # Evaluation\n acc_clean = evaluate(model, test_loader, device, flatten=flatten)\n acc_noisy = evaluate(model, test_loader_noisy, device, flatten=flatten)\n print('Starting... Your accuracy on (x_test_clean) = %.3f' % acc_clean)\n print('Starting... Your accuracy on (x_test_noisy) = %.3f' % acc_noisy)\n\n # Model path and Writer\n model_path, writer = self.get_model_path_and_writer()\n\n # Optimizer and criterion\n optimizer = self.get_optimizer(model)\n criterion = nn.CrossEntropyLoss()\n\n # Training\n train(model, train_loader_noisy_sub, val_loader_noisy_sub, test_loader_noisy, optimizer,\n criterion, device, writer, model_path=model_path, pbar=True, flatten=flatten)\n\n # early_stopping(model, train_loader_noisy_sub, val_loader_noisy_sub, test_loader_noisy, optimizer, device,\n # model_path, writer)\n\n # Evaluation\n acc_noisy = evaluate(model, test_loader_noisy, device, flatten=flatten)\n acc_clean = evaluate(model, test_loader, device, flatten=flatten)\n print('Your accuracy on (x_test_clean) = %.3f' % acc_clean)\n print('Your accuracy on (x_test_noisy) = %.3f' % acc_noisy)\n\n def get_model_path_and_writer(self, other=''):\n \"\"\"\n Get the path to save the model and compose the name of the experiment, according to its features\n :param other: optional string to be appended to the end of the name\n :return:\n \"\"\"\n if self.random:\n model_path = os.path.join(RETRAINED_DIR,\n self.dataset + '_' + self.distortion_type + '-m=' + str(self.mean) +\n '-std=' + str(self.std), 'random' + str(self.property_perc))\n writer = SummaryWriter('runs/' + self.dataset + '_' + self.distortion_type + '-m=' + str(self.mean) +\n '-std=' + str(self.std) + '_random' + str(self.property_perc))\n return model_path, writer\n\n if self.most:\n model_path = os.path.join(RETRAINED_DIR,\n self.dataset + '_' + self.distortion_type + '-m=' + str(self.mean) +\n '-std=' + str(self.std), 'top' + str(self.property_perc) + other)\n writer = SummaryWriter('runs/' + self.dataset + '_' + self.distortion_type + '-m=' + str(self.mean) +\n '-std=' + str(self.std) + '_top' + str(self.property_perc) + other)\n else:\n 
model_path = os.path.join(RETRAINED_DIR,\n self.dataset + '_' + self.distortion_type + '-m=' + str(self.mean) +\n '-std=' + str(self.std), 'bottom' + str(self.property_perc) + other)\n writer = SummaryWriter('runs/' + self.dataset + '_' + self.distortion_type + '-m=' + str(self.mean) +\n '-std=' + str(self.std) + '_bottom' + str(self.property_perc) + other)\n\n if not os.path.exists(os.path.join(MODELS_DIR, model_path)):\n os.makedirs(os.path.join(MODELS_DIR, model_path))\n\n model_path = os.path.join(model_path, datetime.datetime.now().strftime('%d-%m-%Y_%H:%M:%S') + '.pt')\n\n return model_path, writer\n\n def get_optimizer(self, net, weight_decay=0):\n if self.dataset == 'CIFAR_10':\n # freeze fully connected layers\n # for name, param in net.named_parameters():\n # if 'fc' in name:\n # param.requires_grad = False\n # else:\n # param.requires_grad = True\n # # passing only those parameters that explicitly requires grad\n # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=0.000001, weight_decay=weight_decay)\n optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=weight_decay)\n elif self.dataset == 'CIFAR_100':\n # freeze all but first two layers\n for name, param in net.named_parameters():\n if name[4] in ['3', '4', '5', '6', '7', '8', '9']:\n param.requires_grad = False\n else:\n param.requires_grad = True\n # passing only those parameters that explicitly requires grad\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=0.000001,\n weight_decay=weight_decay)\n else:\n optimizer = torch.optim.Adam(net.parameters())\n return optimizer\n","repo_name":"tmscarla/improving-transfer-learning","sub_path":"image-recognition/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":31605,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"70894243047","text":"import numpy as np\nfrom numpy import linalg as LA\nimport re\nimport os\nimport inspect\n\nclass functions:\n#__init__(self,sig,c)\n#HBkernel(self,r_i,r_r)\n#PBCkernel(self,r_i,r_r)\n#computeV(mapping,currentposition)\n#overlap(matA,matB)\n#distance(r_1,r_2)\n#transfer(h,th,g)\n#positive_mean(v,th)\n#fix_parameters(V1,h1,th,a,a2)\n#dynamic(position,J,mapping)\n#attractor_distrib(self,side,J,grid,iterations,subcells)\n#random_trajectory(self,L,n,t)\n#ordered_trajectory(self,L,n,t)\n#sample_regular_trajectory(self,L,n,epochs)\n\n \n def __init__(self,sig):\n self.sig=sig\n\n def HBkernel(self,r_i,r_r):#r_i=position on the grid,r_r=rat position,sig=standard deviation\n V=0.0 \n dx=0\n dy=0\n d=0\n dx=np.abs(r_i[0]-r_r[0])\n dy=np.abs(r_i[1]-r_r[1])\n d=np.sqrt(pow(dx,2)+pow(dy,2))\n V=np.exp(-0.5*pow(d/self.sig,2))\n return V\n \n def PBCkernel(self,r_i,r_r):\n periodicity=1/2\n V=0.0 \n dx=0\n dy=0\n d=0\n \n if np.abs(r_i[0]-r_r[0])0:\n return h\n else:\n return 0\n \n def computeV(self,mapping,currentposition): #computes the activity of the network depending on the current position of the external input\n N=len(mapping) \n V=np.zeros(N)\n for i in range(N):\n V[i]=self.PBCkernel(mapping[i],currentposition)\n return V\n\n def overlap(self,matA,matB):\n m=0\n for i in range(matA.shape[0]):\n for j in range(matA.shape[0]):\n m=m+matA[i][j]*matB[i][j]\n m=m/float(LA.norm(matA)*LA.norm(matB))\n return m\n\n def distance(self,r_1,r_2):\n dx=0\n dy=0\n d=0\n dx=np.abs(r_1[0]-r_2[0])\n dy=np.abs(r_1[1]-r_2[1])\n d=np.sqrt(pow(dx,2)+pow(dy,2))\n return d\n \n \n \n\n def 
dynamic(self,f,position,J,grid): #evolves the activity with J starting from a bump in current position\n #parameters\n N=len(grid)\n maxsteps=50\n #initialization\n\n V=self.computeV(grid,position)\n Vin=self.Sparsify(V,f)\n Vin=Vin/np.mean(Vin)\n for step in range(maxsteps):\n h=np.dot(J,V)\n V=np.asarray(list(map(lambda h: self.transfer(h),h)))\n V=self.Sparsify(V,f)\n V=V/np.mean(V)\n #print(\"Dynamic step: \"+str(step)+\" done, mean: \"+str(np.mean(V))+\" sparsity: \"+str(pow(np.mean(V),2)/np.mean(pow(V,2))))\n return Vin,V\n \n \n \n def attractor_distrib(self,sparsity,J,grid,iterations,ncells):\n N=len(grid)\n spacing=1.0/(float(ncells))\n xcenters=np.linspace(spacing/2.0,1.0-spacing/2.0,ncells)\n ycenters=np.linspace(spacing/2.0,1.0-spacing/2.0,ncells)\n \n VinMat=np.zeros((ncells,ncells,iterations,N)) #matrix for storing initial activityes\n VoutMat=np.zeros((ncells,ncells,iterations,N)) #matrix for storing final activities\n for j in range(ncells):\t \t\n for i in range(ncells):\n for t in range(iterations):\n InitialLocation=np.zeros(2)\n InitialLocation[0]=np.random.uniform(xcenters[i]-spacing/2.0,xcenters[i]+spacing/2.0)\n InitialLocation[1]=np.random.uniform(ycenters[j]-spacing/2.0,ycenters[j]+spacing/2.0)\n Vin,Vfin=self.dynamic(sparsity,InitialLocation,J,grid)\n VinMat[i][j][t]=Vin\n VoutMat[i][j][t]=Vfin\n #print(\"Cell (\"+str(i)+\",\"+str(j)+\") calculated\")\n return VinMat,VoutMat\n\n \n\n\n","repo_name":"davidespalla/CAN_code","sub_path":"Continuity/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18983032797","text":"\"\"\"\n괄호\n\n6\n(())())\n(((()())()\n(()())((()))\n((()()(()))(((())))()\n()()()()(()()())()\n(()((())()(\n\nNO\nNO\nYES\nNO\nYES\nNO\n\"\"\"\nfrom sys import stdin\n\ndef solution(vps):\n f_cnt = 0\n b_cnt = 0\n idx = 0\n if vps[0] == \")\":\n return \"NO\"\n elif vps[-1] == \"(\":\n return \"NO\"\n else:\n while True:\n if len(vps) == idx:\n break\n if f_cnt < b_cnt:\n return \"NO\"\n if vps[idx] == \"(\":\n f_cnt += 1\n elif vps[idx] == \")\":\n b_cnt += 1\n idx += 1\n if f_cnt == b_cnt:\n return \"YES\"\n else:\n return \"NO\"\n\ndef init():\n t = int(stdin.readline().rstrip())\n for i in range(t):\n vps = stdin.readline().rstrip()\n print(solution(vps))\n\ninit()","repo_name":"kkojae91/algorithm_prac","sub_path":"python_algorithm/boj/9012.py","file_name":"9012.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4300165341","text":"import numpy as np\n\nfrom camera import Camera\nfrom constants import *\nfrom scene import Scene, Sphere\nimport utils\n\n\nclass Ray:\n def __init__(self, pr: np.ndarray, nr: np.ndarray):\n self.pr = pr\n self.nr = nr\n\n def intersect_sphere(self, obj: Sphere) -> float:\n diff = self.pr - obj.pos\n b = np.dot(self.nr, diff)\n c = np.dot(diff, diff) - obj.r ** 2\n discriminant = b ** 2 - c\n if b > 0 or discriminant < 0:\n return -1\n t = -b - np.sqrt(discriminant)\n return t\n\n def intersect(self, obj) -> float:\n if isinstance(obj, Sphere):\n t = self.intersect_sphere(obj)\n else:\n raise Exception(\n f\"{type(obj)} object type doesn't implement intersect function\"\n )\n return t\n\n def at(self, t: float):\n return self.pr + t * self.nr\n\n\nclass Raytracer:\n def __init__(self, height: int, width: int, scene: Scene, camera: Camera):\n self.height = height\n self.width = 
width\n self.scene = scene\n self.camera = camera\n\n def compute_color(self, j: float, i: float) -> np.ndarray:\n # create ray for pixel\n pp = self.camera.project_pixel(j, i, self.width, self.height)\n nr = utils.normalize(pp - self.camera.pos)\n ray = Ray(self.camera.pos, nr)\n # intersect ray with all objects\n min_t = float('inf')\n min_obj = None\n for obj in self.scene.objects:\n t = ray.intersect(obj)\n if 0 < t < min_t:\n min_t = t\n min_obj = obj\n # calculate color for hit point\n if isinstance(min_obj, Sphere):\n n = min_obj.normal_at(ray.at(min_t))\n l = self.scene.light.get_l()\n n_dot_l = np.clip(np.dot(n, l), 0, 1)\n ambient = 0.2\n color = np.clip((n_dot_l + ambient) * min_obj.color, 0, 1)\n else:\n color = np.zeros(COLOR_CHANNELS)\n return color\n","repo_name":"HenrYxZ/pycon-2022-2","sub_path":"raytracer.py","file_name":"raytracer.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72198716648","text":"from common_import import *\n\n\n@benchmark_registry.register(\"isfinite_nan_inf\")\nclass IsfiniteNanInfConfig(APIConfig):\n def __init__(self):\n super(IsfiniteNanInfConfig, self).__init__(\"isfinite_nan_inf\")\n self.api_name = 'isfinite'\n self.api_list = {\n 'isfinite': 'isfinite',\n 'isnan': 'isnan',\n 'isinf': 'isinf'\n }\n\n def to_tensorflow(self):\n # The change of self.api_list should be in front of the calling of parent's function.\n self.api_list = {\n 'isfinite': 'is_finite',\n 'isnan': 'is_nan',\n 'isinf': 'is_inf'\n }\n tf_config = super(IsfiniteNanInfConfig, self).to_tensorflow()\n return tf_config\n\n\n@benchmark_registry.register(\"isfinite_nan_inf\")\nclass PaddleIsfiniteNanInf(PaddleOpBenchmarkBase):\n def build_graph(self, config):\n x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)\n result = self.layers(config.api_name, x=x)\n\n self.feed_list = [x]\n self.fetch_list = [result]\n\n\n@benchmark_registry.register(\"isfinite_nan_inf\")\nclass TorchIsfiniteNanInf(PytorchOpBenchmarkBase):\n def build_graph(self, config):\n x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)\n result = self.layers(config.api_name, x=x)\n\n self.feed_list = [x]\n self.fetch_list = [result]\n\n\n@benchmark_registry.register(\"isfinite_nan_inf\")\nclass TFIsfiniteNanInf(TensorflowOpBenchmarkBase):\n def build_graph(self, config):\n x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)\n out = self.layers(config.api_name, x=x)\n\n self.feed_list = [x]\n self.fetch_list = [out]\n","repo_name":"PaddlePaddle/benchmark","sub_path":"api/tests/isfinite_nan_inf.py","file_name":"isfinite_nan_inf.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"53"} +{"seq_id":"25516884023","text":"\"\"\"\n This file is part of AMTK - the Acorn mesh toolkit, the companion\n software package to the Master's Thesis \"The value of probabilistic\n geological modeling: Application to the Acorn CO2 storage site\"\n of Marco van Veen\n\n AMTK is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n AMTK is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with AMTK. If not, see .\n\n@author: Marco van Veen\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport holoviews as hv\nfrom bokeh.models import NumeralTickFormatter\n\n\ndef plot_mesh_error_legacy(points_list, error_list, names, figsize=(16,8), ax=None):\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n else:\n ax = ax\n\n ax.scatter(points_list, error_list, label=names)\n ax.plot(points_list, error_list)\n\n ax.set_xlim(max(points_list) + 100, 0)\n ax.set_xlabel('Number of points')\n ax.set_ylabel('Divergence / Euclidean Hausdorff Distance')\n ax.grid(True)\n ax.legend()\n\n\ndef plot_mesh_error_legacy_2(evaluation, sample=0, figsize=(16,8), ax=None):\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n else:\n ax = ax\n\n # plot unsampled Hausdorff distance\n if sample == 0 or sample == 2:\n ax.plot(evaluation['size'].values, evaluation['hausdorff'].values,\n marker='o', linestyle='--', label=evaluation.loc[0, 'description'])\n\n # plot sampled Hausdorff distance\n if sample == 1 or sample == 2:\n ax.plot(evaluation['size'].values, evaluation['sample_hausdorff'].values,\n marker='o', linestyle='-', label=(evaluation.loc[0, 'description'] + ''))\n\n ax.set_xlim(evaluation['size'].max() + 100, 0)\n ax.set_xlabel('Number of vertices')\n ax.set_ylabel('Approximation error (Euclidean Hausdorff distance)')\n ax.grid(True)\n ax.legend()\n\ndef plot_mesh_error(evaluation, figsize=(16, 8), ax=None, measure='hausdorff'):\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n else:\n ax = ax\n\n # plot sampled Hausdorff distance\n ax.plot(evaluation['size'].values, evaluation[measure].values,\n marker='o', linestyle='-', label=(evaluation.loc[0, 'description'] + ''))\n\n ax.set_xlim(evaluation['size'].max() + 100, 0)\n ax.set_xlabel('Number of vertices')\n ax.set_ylabel('Approximation error (Euclidean Hausdorff distance) [m]')\n\n ax.grid(True, linestyle='dotted')\n ax.legend()\n\n\ndef plot_vertices_2d(vertices, title=None, step=1, colored=False, figsize=(16,8), ax=None, xaxis=True, yaxis=True, colorbar=False, unit='', **kwargs):\n \"\"\"Plot point set / vertices (pandas DataFrame) in 2D.\n \"\"\"\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n else:\n ax = ax\n\n if colored:\n plot = ax.scatter(vertices.values[::step, 0], vertices.values[::step, 1], c=vertices.values[::step, 2], **kwargs)\n else:\n plot = ax.scatter(vertices.values[::step, 0], vertices.values[::step, 1], **kwargs)\n\n ax.grid(True, linestyle='dotted')\n\n if title is not None:\n ax.set_title(title, size='16')\n\n if xaxis:\n ax.set_xlabel('X')\n else:\n ax.xaxis.set_ticklabels([])\n\n if yaxis:\n ax.set_ylabel('Y')\n else:\n ax.yaxis.set_ticklabels([])\n\n if colorbar:\n #fig.tight_layout()\n cbar = plt.colorbar(plot, ax=ax, pad=0.02)\n cbar.set_label(unit, rotation=270, labelpad=12)\n \n\ndef plot_vertices_3d(vertices, title=None, step=1, colored=False, colordimension=None, figsize=(16,8), ax=None, viewpoint=(30, -60), **kwargs):\n \"\"\"Plot a point set / vertices (pandas DataFrame) in 3D.\n \"\"\"\n\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n ax.elev = viewpoint[0]\n ax.azim = viewpoint[1]\n else:\n ax = ax\n\n if colored:\n ax.scatter(vertices.values[::step, 0], vertices.values[::step, 1], vertices.values[::step, 2],\n c=vertices.values[::step, 2], 
cmap='viridis', **kwargs)\n else:\n ax.scatter(vertices.values[::step, 0], vertices.values[::step, 1], vertices.values[::step, 2],\n c=colordimension, cmap=plt.cm.nipy_spectral, **kwargs)\n\n if title is not None:\n ax.set_title(title, size='16')\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n\n\ndef plot_mesh_2d_legacy(vertices, simplices, title=None, colored=False):\n \"\"\"Plot set of vertices and simplices (pandas DataFrames) interactively in 3D.\n \"\"\"\n\n hv.extension('matplotlib')\n\n if colored:\n edge_color = 'Z'\n else:\n edge_color = ''\n\n if title is None:\n title = ''\n\n # styling plot\n options = {'TriMesh': {'style': dict(cmap='viridis',\n node_marker='o'\n ),\n 'plot': dict(fig_inches=(16, 8),\n aspect=2,\n show_grid=True,\n edge_color_index=edge_color,\n filled=colored,\n fontsize={'title': 16}\n )\n }\n }\n\n trimesh = hv.TriMesh((simplices, vertices), label=title).opts(options)\n\n return trimesh\n\n\ndef plot_mesh_2d(vertices, simplices, title=None, colored=False, figsize=(16, 8), ax=None, **kwargs):\n \"\"\"Plot set of vertices and simplices (pandas DataFrames) interactively in 3D.\n \"\"\"\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n # ax = fig.add_subplot(111, projection='3d')\n else:\n ax = ax\n\n ax.triplot(vertices.values[:, 0], vertices.values[:, 1], simplices.values,\n color='k',\n marker='.',\n linestyle='-'\n )\n\n if colored:\n ax.tricontourf(vertices.values[:, 0], vertices.values[:, 1], simplices.values, vertices.values[:, 2])\n\n if title is not None:\n ax.set_title(title, size='16')\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n\n\ndef bokeh_axes_formatter(plot, element):\n # helper function for bokeh and holoview\n # format Bokeh tick labels in HoloView via finalize_hooks\n\n plot.handles['xaxis'].formatter = NumeralTickFormatter(format='7')\n plot.handles['yaxis'].formatter = NumeralTickFormatter(format='7')\n\n\ndef plot_mesh_2d_bokeh(vertices, simplices, title=None, colored=False, default_axes=False):\n \"\"\"Plot set of vertices and simplices (pandas DataFrames) interactively in 3D.\n \"\"\"\n\n hv.extension('bokeh')\n\n if colored:\n edge_color = 'Z'\n else:\n edge_color = ''\n\n if default_axes:\n hooks = []\n else:\n hooks = [bokeh_axes_formatter]\n\n if title is None:\n title = ''\n\n # styling plot\n options = {'TriMesh': {'style': dict(cmap='viridis'\n ),\n 'plot': dict(width=900,\n height=450,\n aspect='square',\n show_grid=True,\n edge_color_index=edge_color,\n filled=colored,\n finalize_hooks=hooks,\n inspection_policy='edges',\n tools=['hover'],\n fontsize = {'title': '12pt'},\n toolbar = 'above'\n )}\n }\n\n trimesh = hv.TriMesh((simplices, vertices), label=title).opts(options)\n\n return trimesh\n\n\ndef plot_mesh_3d(vertices, simplices=None, title=None, step=1, colored=False, figsize=(16, 8), ax=None, viewpoint=(30, -60), **kwargs):\n \"\"\"Plot point set (pandas DataFrame) in 3D triangular grid.\n \"\"\"\n\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n ax.elev = viewpoint[0]\n ax.azim = viewpoint[1]\n else:\n ax = ax\n\n if colored:\n colormap = 'viridis'\n else:\n colormap = None\n\n # Only accept step argument, if no simplices are specified (and therefore automatically Delaunay triangulated by matplotlib.\n if simplices is None:\n step = step\n else:\n step = 1\n\n ax.plot_trisurf(vertices.values[::step, 0], vertices.values[::step, 1], vertices.values[::step, 2],\n triangles=simplices, cmap=colormap, **kwargs)\n if title is not None:\n 
ax.set_title(title, size='16')\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')","repo_name":"cgre-aachen/MSc_theses","sub_path":"probabilistic_model_acorn_co2/modules/mesh_plot.py","file_name":"mesh_plot.py","file_ext":"py","file_size_in_byte":9348,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"69973750889","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\"\"\"\nIncludes:\n * Function to compute IoU similarity for axis-aligned, rectangular, 2D bounding boxes\n * Function to perform greedy non-maximum suppression\n * Function to decode raw SSD model output\n * Class to encode targets for SSD training\n\"\"\"\n\nimport numpy as np\n\n\ndef iou(boxes1, boxes2, coords='centroids'):\n if len(boxes1.shape) > 2: raise ValueError(\"boxes1 must have rank either 1 or 2, but has rank {}.\".format(len(boxes1.shape)))\n if len(boxes2.shape) > 2: raise ValueError(\"boxes2 must have rank either 1 or 2, but has rank {}.\".format(len(boxes2.shape)))\n\n if len(boxes1.shape) == 1: boxes1 = np.expand_dims(boxes1, axis=0)\n if len(boxes2.shape) == 1: boxes2 = np.expand_dims(boxes2, axis=0)\n\n if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError(\"It must be boxes1.shape[1] == boxes2.shape[1] == 4, but it is boxes1.shape[1] == {}, boxes2.shape[1] == {}.\".format(boxes1.shape[1], boxes2.shape[1]))\n\n if coords == 'centroids':\n # TODO: Implement a version that uses fewer computation steps (that doesn't need conversion)\n boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2minmax')\n boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2minmax')\n elif coords != 'minmax':\n raise ValueError(\"Unexpected value for `coords`. Supported values are 'minmax' and 'centroids'.\")\n\n intersection = np.maximum(0, np.minimum(boxes1[:,1], boxes2[:,1]) - np.maximum(boxes1[:,0], boxes2[:,0])) * np.maximum(0, np.minimum(boxes1[:,3], boxes2[:,3]) - np.maximum(boxes1[:,2], boxes2[:,2]))\n union = (boxes1[:,1] - boxes1[:,0]) * (boxes1[:,3] - boxes1[:,2]) + (boxes2[:,1] - boxes2[:,0]) * (boxes2[:,3] - boxes2[:,2]) - intersection\n\n return intersection / union\n\ndef convert_coordinates(tensor, start_index, conversion='minmax2centroids'):\n '''\n Convert coordinates for axis-aligned 2D boxes between two coordinate formats.\n Creates a copy of `tensor`, i.e. does not operate in place. Currently there are\n two supported coordinate formats that can be converted from and to each other:\n 1) (xmin, xmax, ymin, ymax) - the 'minmax' Others\n 2) (cx, cy, w, h) - the 'centroids' Others\n Note that converting from one of the supported formats to another and back is\n an identity operation up to possible rounding errors for integer tensors.\n Arguments:\n tensor (array): A Numpy nD array containing the four consecutive coordinates\n to be converted somewhere in the last axis.\n start_index (int): The index of the first coordinate in the last axis of `tensor`.\n conversion (str, optional): The conversion direction. Can be 'minmax2centroids'\n or 'centroids2minmax'. 
Defaults to 'minmax2centroids'.\n Returns:\n A Numpy nD array, a copy of the input tensor with the converted coordinates\n in place of the original coordinates and the unaltered elements of the original\n tensor elsewhere.\n '''\n ind = start_index\n tensor1 = np.copy(tensor).astype(np.float)\n if conversion == 'minmax2centroids':\n tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+1]) / 2.0 # Set cx\n tensor1[..., ind+1] = (tensor[..., ind+2] + tensor[..., ind+3]) / 2.0 # Set cy\n tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind] # Set w\n tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+2] # Set h\n elif conversion == 'centroids2minmax':\n tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin\n tensor1[..., ind+1] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax\n tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin\n tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax\n else:\n raise ValueError(\"Unexpected conversion value. Supported values are 'minmax2centroids' and 'centroids2minmax'.\")\n\n return tensor1\n\ndef greedy_nms(y_pred_decoded, iou_threshold=0.45, coords='minmax'):\n '''\n Perform greedy non-maximum suppression on the input boxes.\n Greedy NMS works by selecting the box with the highest score and\n removing all boxes around it that are too close to it measured by IoU-similarity.\n Out of the boxes that are left over, once again the one with the highest\n score is selected and so on, until no boxes with too much overlap are left.\n This is a basic, straight-forward NMS algorithm that is relatively efficient,\n but it has a number of downsides. One of those downsides is that the box with\n the highest score might not always be the box with the best fit to the object.\n There are more sophisticated NMS techniques like [this one](https://lirias.kuleuven.be/bitstream/123456789/506283/1/3924_postprint.pdf)\n that use a combination of nearby boxes, but in general there will probably\n always be a trade-off between speed and quality for any given NMS technique.\n Arguments:\n y_pred_decoded (list): A batch of decoded predictions. For a given batch size `n` this\n is a list of length `n` where each list element is a 2D Numpy array.\n For a batch item with `k` predicted boxes this 2D Numpy array has\n shape `(k, 6)`, where each row contains the coordinates of the respective\n box in the Others `[class_id, score, xmin, xmax, ymin, ymax]`.\n Technically, the number of columns doesn't have to be 6, it can be\n arbitrary as long as the first four elements of each row are\n `xmin`, `xmax`, `ymin`, `ymax` (in this order) and the last element\n is the score assigned to the prediction. Note that this function is\n agnostic to the scale of the score or what it represents.\n iou_threshold (float, optional): All boxes with a Jaccard similarity of\n greater than `iou_threshold` with a locally maximal box will be removed\n from the set of predictions, where 'maximal' refers to the box score.\n Defaults to 0.45 following the paper.\n coords (str, optional): The coordinate Others of `y_pred_decoded`.\n Can be one of the formats supported by `iou()`. Defaults to 'minmax'.\n Returns:\n The predictions after removing non-maxima. 
The Others is the same as the input Others.\n '''\n y_pred_decoded_nms = []\n for batch_item in y_pred_decoded: # For the labels of each batch item...\n boxes_left = np.copy(batch_item)\n maxima = [] # This is where we store the boxes that make it through the non-maximum suppression\n while boxes_left.shape[0] > 0: # While there are still boxes left to compare...\n maximum_index = np.argmax(boxes_left[:,1]) # ...get the index of the next box with the highest confidence...\n maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...\n maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it\n boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`\n if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. Otherwise...\n similarities = iou(boxes_left[:,2:], maximum_box[2:], coords=coords) # ...compare (IoU) the other left over boxes to the maximum box...\n boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box\n y_pred_decoded_nms.append(np.array(maxima))\n\n return y_pred_decoded_nms\n\ndef _greedy_nms(predictions, iou_threshold=0.45, coords='minmax'):\n '''\n The same greedy non-maximum suppression algorithm as above, but slightly modified for use as an internal\n function for per-class NMS in `decode_y()`.\n '''\n boxes_left = np.copy(predictions)\n maxima = [] # This is where we store the boxes that make it through the non-maximum suppression\n while boxes_left.shape[0] > 0: # While there are still boxes left to compare...\n maximum_index = np.argmax(boxes_left[:,0]) # ...get the index of the next box with the highest confidence...\n maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...\n maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it\n boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`\n if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. Otherwise...\n similarities = iou(boxes_left[:,1:], maximum_box[1:], coords=coords) # ...compare (IoU) the other left over boxes to the maximum box...\n boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box\n return np.array(maxima)\n\ndef _greedy_nms2(predictions, iou_threshold=0.45, coords='minmax'):\n '''\n The same greedy non-maximum suppression algorithm as above, but slightly modified for use as an internal\n function in `decode_y2()`.\n '''\n boxes_left = np.copy(predictions)\n maxima = [] # This is where we store the boxes that make it through the non-maximum suppression\n while boxes_left.shape[0] > 0: # While there are still boxes left to compare...\n maximum_index = np.argmax(boxes_left[:,1]) # ...get the index of the next box with the highest confidence...\n maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...\n maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it\n boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`\n if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. 
Otherwise...\n similarities = iou(boxes_left[:,2:], maximum_box[2:], coords=coords) # ...compare (IoU) the other left over boxes to the maximum box...\n boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box\n return np.array(maxima)\n\ndef decode_y(y_pred,\n confidence_thresh=0.01,\n iou_threshold=0.45,\n top_k=200,\n input_coords='centroids',\n normalize_coords=False,\n img_height=None,\n img_width=None):\n '''\n Convert model prediction output back to a Others that contains only the positive box predictions\n (i.e. the same Others that `enconde_y()` takes as input).\n After the decoding, two stages of prediction filtering are performed for each class individually:\n First confidence thresholding, then greedy non-maximum suppression. The filtering results for all\n classes are concatenated and the `top_k` overall highest confidence results constitute the final\n predictions for a given batch item. This procedure follows the original Caffe implementation.\n For a slightly different and more efficient alternative to decode raw model output that performs\n non-maximum suppresion globally instead of per class, see `decode_y2()` below.\n Arguments:\n y_pred (array): The prediction output of the SSD model, expected to be a Numpy array\n of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)`, where `#boxes` is the total number of\n boxes predicted by the model per image and the last axis contains\n `[one-hot vector for the classes, 4 predicted coordinate offsets, 4 anchor box coordinates, 4 variances]`.\n confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific\n positive class in order to be considered for the non-maximum suppression stage for the respective class.\n A lower value will result in a larger part of the selection process being done by the non-maximum suppression\n stage, while a larger value will result in a larger part of the selection process happening in the confidence\n thresholding stage. Defaults to 0.01, following the paper.\n iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`\n with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers\n to the box score. Defaults to 0.45 following the paper.\n top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the\n non-maximum suppression stage. Defaults to 200, following the paper.\n input_coords (str, optional): The box coordinate Others that the model outputs. Can be either 'centroids'\n for the Others `(cx, cy, w, h)` (box center coordinates, width, and height) or 'minmax'\n for the Others `(xmin, xmax, ymin, ymax)`. Defaults to 'centroids'.\n normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])\n and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs\n relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.\n Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect\n coordinates. Requires `img_height` and `img_width` if set to `True`. Defaults to `False`.\n img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.\n img_width (int, optional): The width of the input images. 
Only needed if `normalize_coords` is `True`.\n Returns:\n A python list of length `batch_size` where each list element represents the predicted boxes\n for one image and contains a Numpy array of shape `(boxes, 6)` where each row is a box prediction for\n a non-background class for the respective image in the Others `[class_id, confidence, xmin, xmax, ymin, ymax]`.\n '''\n if normalize_coords and ((img_height is None) or (img_width is None)):\n raise ValueError(\"If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`\".format(img_height, img_width))\n\n # 1: Convert the box coordinates from the predicted anchor box offsets to predicted absolute coordinates\n\n y_pred_decoded_raw = np.copy(y_pred[:,:,:-8]) # Slice out the classes and the four offsets, throw away the anchor coordinates and variances, resulting in a tensor of shape `[batch, n_boxes, n_classes + 4 coordinates]`\n\n if input_coords == 'centroids':\n y_pred_decoded_raw[:,:,[-2,-1]] = np.exp(y_pred_decoded_raw[:,:,[-2,-1]] * y_pred[:,:,[-2,-1]]) # exp(ln(w(pred)/w(anchor)) / w_variance * w_variance) == w(pred) / w(anchor), exp(ln(h(pred)/h(anchor)) / h_variance * h_variance) == h(pred) / h(anchor)\n y_pred_decoded_raw[:,:,[-2,-1]] *= y_pred[:,:,[-6,-5]] # (w(pred) / w(anchor)) * w(anchor) == w(pred), (h(pred) / h(anchor)) * h(anchor) == h(pred)\n y_pred_decoded_raw[:,:,[-4,-3]] *= y_pred[:,:,[-4,-3]] * y_pred[:,:,[-6,-5]] # (delta_cx(pred) / w(anchor) / cx_variance) * cx_variance * w(anchor) == delta_cx(pred), (delta_cy(pred) / h(anchor) / cy_variance) * cy_variance * h(anchor) == delta_cy(pred)\n y_pred_decoded_raw[:,:,[-4,-3]] += y_pred[:,:,[-8,-7]] # delta_cx(pred) + cx(anchor) == cx(pred), delta_cy(pred) + cy(anchor) == cy(pred)\n y_pred_decoded_raw = convert_coordinates(y_pred_decoded_raw, start_index=-4, conversion='centroids2minmax')\n elif input_coords == 'minmax':\n y_pred_decoded_raw[:,:,-4:] *= y_pred[:,:,-4:] # delta(pred) / size(anchor) / variance * variance == delta(pred) / size(anchor) for all four coordinates, where 'size' refers to w or h, respectively\n y_pred_decoded_raw[:,:,[-4,-3]] *= np.expand_dims(y_pred[:,:,-7] - y_pred[:,:,-8], axis=-1) # delta_xmin(pred) / w(anchor) * w(anchor) == delta_xmin(pred), delta_xmax(pred) / w(anchor) * w(anchor) == delta_xmax(pred)\n y_pred_decoded_raw[:,:,[-2,-1]] *= np.expand_dims(y_pred[:,:,-5] - y_pred[:,:,-6], axis=-1) # delta_ymin(pred) / h(anchor) * h(anchor) == delta_ymin(pred), delta_ymax(pred) / h(anchor) * h(anchor) == delta_ymax(pred)\n y_pred_decoded_raw[:,:,-4:] += y_pred[:,:,-8:-4] # delta(pred) + anchor == pred for all four coordinates\n else:\n raise ValueError(\"Unexpected value for `input_coords`. 
Supported input coordinate formats are 'minmax' and 'centroids'.\")\n\n # 2: If the model predicts normalized box coordinates and they are supposed to be converted back to absolute coordinates, do that\n\n if normalize_coords:\n y_pred_decoded_raw[:,:,-4:-2] *= img_width # Convert xmin, xmax back to absolute coordinates\n y_pred_decoded_raw[:,:,-2:] *= img_height # Convert ymin, ymax back to absolute coordinates\n\n # 3: Apply confidence thresholding and non-maximum suppression per class\n\n n_classes = y_pred_decoded_raw.shape[-1] - 4 # The number of classes is the length of the last axis minus the four box coordinates\n\n y_pred_decoded = [] # Store the final predictions in this list\n for batch_item in y_pred_decoded_raw: # `batch_item` has shape `[n_boxes, n_classes + 4 coords]`\n pred = [] # Store the final predictions for this batch item here\n for class_id in range(1, n_classes): # For each class except the background class (which has class ID 0)...\n single_class = batch_item[:,[class_id, -4, -3, -2, -1]] # ...keep only the confidences for that class, making this an array of shape `[n_boxes, 5]` and...\n threshold_met = single_class[single_class[:,0] > confidence_thresh] # ...keep only those boxes with a confidence above the set threshold.\n if threshold_met.shape[0] > 0: # If any boxes made the threshold...\n maxima = _greedy_nms(threshold_met, iou_threshold=iou_threshold, coords='minmax') # ...perform NMS on them.\n maxima_output = np.zeros((maxima.shape[0], maxima.shape[1] + 1)) # Expand the last dimension by one element to have room for the class ID. This is now an arrray of shape `[n_boxes, 6]`\n maxima_output[:,0] = class_id # Write the class ID to the first column...\n maxima_output[:,1:] = maxima # ...and write the maxima to the other columns...\n pred.append(maxima_output) # ...and append the maxima for this class to the list of maxima for this batch item.\n # Once we're through with all classes, keep only the `top_k` maxima with the highest scores\n pred = np.concatenate(pred, axis=0)\n if pred.shape[0] > top_k: # If we have more than `top_k` results left at this point, otherwise there is nothing to filter,...\n top_k_indices = np.argpartition(pred[:,1], kth=pred.shape[0]-top_k, axis=0)[pred.shape[0]-top_k:] # ...get the indices of the `top_k` highest-score maxima...\n pred = pred[top_k_indices] # ...and keep only those entries of `pred`...\n y_pred_decoded.append(pred) # ...and now that we're done, append the array of final predictions for this batch item to the output list\n\n return y_pred_decoded\n\n\ndef decode_y2(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k='all',\n input_coords='centroids',\n normalize_coords=False,\n img_height=None,\n img_width=None):\n '''\n Convert model prediction output back to a Others that contains only the positive box predictions\n (i.e. the same Others that `enconde_y()` takes as input).\n Optionally performs confidence thresholding and greedy non-maximum suppression after the decoding stage.\n Note that the decoding procedure used here is not the same as the procedure used in the original Caffe implementation.\n For each box, the procedure used here assigns the box's highest confidence as its predicted class. Then it removes\n all boxes for which the highest confidence is the background class. This results in less work for the subsequent\n non-maximum suppression, because the vast majority of the predictions will be filtered out just by the fact that\n their highest confidence is for the background class. 
It is much more efficient than the procedure of the original\n implementation, but the results may also differ.\n Arguments:\n y_pred (array): The prediction output of the SSD model, expected to be a Numpy array\n of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)`, where `#boxes` is the total number of\n boxes predicted by the model per image and the last axis contains\n `[one-hot vector for the classes, 4 predicted coordinate offsets, 4 anchor box coordinates, 4 variances]`.\n confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in any positive\n class required for a given box to be considered a positive prediction. A lower value will result\n in better recall, while a higher value will result in better precision. Do not use this parameter with the\n goal to combat the inevitably many duplicates that an SSD will produce, the subsequent non-maximum suppression\n stage will take care of those. Defaults to 0.5.\n iou_threshold (float, optional): `None` or a float in [0,1]. If `None`, no non-maximum suppression will be\n performed. If not `None`, greedy NMS will be performed after the confidence thresholding stage, meaning\n all boxes with a Jaccard similarity of greater than `iou_threshold` with a locally maximal box will be removed\n from the set of predictions, where 'maximal' refers to the box score. Defaults to 0.45.\n top_k (int, optional): 'all' or an integer with number of highest scoring predictions to be kept for each batch item\n after the non-maximum suppression stage. Defaults to 'all', in which case all predictions left after the NMS stage\n will be kept.\n input_coords (str, optional): The box coordinate Others that the model outputs. Can be either 'centroids'\n for the Others `(cx, cy, w, h)` (box center coordinates, width, and height) or 'minmax'\n for the Others `(xmin, xmax, ymin, ymax)`. Defaults to 'centroids'.\n normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])\n and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs\n relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.\n Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect\n coordinates. Requires `img_height` and `img_width` if set to `True`. Defaults to `False`.\n img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.\n img_width (int, optional): The width of the input images. 
Only needed if `normalize_coords` is `True`.\n Returns:\n A python list of length `batch_size` where each list element represents the predicted boxes\n for one image and contains a Numpy array of shape `(boxes, 6)` where each row is a box prediction for\n a non-background class for the respective image in the Others `[class_id, confidence, xmin, xmax, ymin, ymax]`.\n '''\n if normalize_coords and ((img_height is None) or (img_width is None)):\n raise ValueError(\"If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`\".format(img_height, img_width))\n\n # 1: Convert the classes from one-hot encoding to their class ID\n y_pred_converted = np.copy(y_pred[:,:,-14:-8]) # Slice out the four offset predictions plus two elements whereto we'll write the class IDs and confidences in the next step\n y_pred_converted[:,:,0] = np.argmax(y_pred[:,:,:-12], axis=-1) # The indices of the highest confidence values in the one-hot class vectors are the class ID\n y_pred_converted[:,:,1] = np.amax(y_pred[:,:,:-12], axis=-1) # Store the confidence values themselves, too\n\n # 2: Convert the box coordinates from the predicted anchor box offsets to predicted absolute coordinates\n if input_coords == 'centroids':\n y_pred_converted[:,:,[4,5]] = np.exp(y_pred_converted[:,:,[4,5]] * y_pred[:,:,[-2,-1]]) # exp(ln(w(pred)/w(anchor)) / w_variance * w_variance) == w(pred) / w(anchor), exp(ln(h(pred)/h(anchor)) / h_variance * h_variance) == h(pred) / h(anchor)\n y_pred_converted[:,:,[4,5]] *= y_pred[:,:,[-6,-5]] # (w(pred) / w(anchor)) * w(anchor) == w(pred), (h(pred) / h(anchor)) * h(anchor) == h(pred)\n y_pred_converted[:,:,[2,3]] *= y_pred[:,:,[-4,-3]] * y_pred[:,:,[-6,-5]] # (delta_cx(pred) / w(anchor) / cx_variance) * cx_variance * w(anchor) == delta_cx(pred), (delta_cy(pred) / h(anchor) / cy_variance) * cy_variance * h(anchor) == delta_cy(pred)\n y_pred_converted[:,:,[2,3]] += y_pred[:,:,[-8,-7]] # delta_cx(pred) + cx(anchor) == cx(pred), delta_cy(pred) + cy(anchor) == cy(pred)\n y_pred_converted = convert_coordinates(y_pred_converted, start_index=-4, conversion='centroids2minmax')\n elif input_coords == 'minmax':\n y_pred_converted[:,:,2:] *= y_pred[:,:,-4:] # delta(pred) / size(anchor) / variance * variance == delta(pred) / size(anchor) for all four coordinates, where 'size' refers to w or h, respectively\n y_pred_converted[:,:,[2,3]] *= np.expand_dims(y_pred[:,:,-7] - y_pred[:,:,-8], axis=-1) # delta_xmin(pred) / w(anchor) * w(anchor) == delta_xmin(pred), delta_xmax(pred) / w(anchor) * w(anchor) == delta_xmax(pred)\n y_pred_converted[:,:,[4,5]] *= np.expand_dims(y_pred[:,:,-5] - y_pred[:,:,-6], axis=-1) # delta_ymin(pred) / h(anchor) * h(anchor) == delta_ymin(pred), delta_ymax(pred) / h(anchor) * h(anchor) == delta_ymax(pred)\n y_pred_converted[:,:,2:] += y_pred[:,:,-8:-4] # delta(pred) + anchor == pred for all four coordinates\n else:\n raise ValueError(\"Unexpected value for `coords`. 
Supported values are 'minmax' and 'centroids'.\")\n\n # 3: If the model predicts normalized box coordinates and they are supposed to be converted back to absolute coordinates, do that\n if normalize_coords:\n y_pred_converted[:,:,2:4] *= img_width # Convert xmin, xmax back to absolute coordinates\n y_pred_converted[:,:,4:] *= img_height # Convert ymin, ymax back to absolute coordinates\n\n # 4: Decode our huge `(batch, #boxes, 6)` tensor into a list of length `batch` where each list entry is an array containing only the positive predictions\n y_pred_decoded = []\n for batch_item in y_pred_converted: # For each image in the batch...\n boxes = batch_item[np.nonzero(batch_item[:,0])] # ...get all boxes that don't belong to the background class,...\n boxes = boxes[boxes[:,1] >= confidence_thresh] # ...then filter out those positive boxes for which the prediction confidence is too low and after that...\n if iou_threshold: # ...if an IoU threshold is set...\n boxes = _greedy_nms2(boxes, iou_threshold=iou_threshold, coords='minmax') # ...perform NMS on the remaining boxes.\n if top_k != 'all' and boxes.shape[0] > top_k: # If we have more than `top_k` results left at this point...\n top_k_indices = np.argpartition(boxes[:,1], kth=boxes.shape[0]-top_k, axis=0)[boxes.shape[0]-top_k:] # ...get the indices of the `top_k` highest-scoring boxes...\n boxes = boxes[top_k_indices] # ...and keep only those boxes...\n y_pred_decoded.append(boxes) # ...and now that we're done, append the array of final predictions for this batch item to the output list\n\n return y_pred_decoded\n","repo_name":"liuguiyangnwpu/DL.EyeSight","sub_path":"eagle/brain/ssd/box_encode_decode_utils.py","file_name":"box_encode_decode_utils.py","file_ext":"py","file_size_in_byte":28222,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"53"} +{"seq_id":"4059856923","text":"#!/usr/bin/env python3\n\nimport rospy\nimport tf_conversions\nimport tf2_ros\nimport geometry_msgs.msg\nimport math\n\n\ndef main():\n rospy.init_node('solar_system')\n\n br = tf2_ros.TransformBroadcaster()\n t = geometry_msgs.msg.TransformStamped()\n\n # Parameters reading to use the launch file\n distance_to_parent = rospy.get_param(\"~distance_to_parent\")\n period = rospy.get_param(\"~period\")\n # rotation_period = rospy.get_param(\"~rotation_period\")\n\n frequency = 100\n rate = rospy.Rate(frequency)\n alpha = 0\n\n while not rospy.is_shutdown():\n\n alpha += 1/period/frequency # higher alpha = higher velocity (or higher frequency with low alpha)\n if alpha > 2* math.pi:\n alpha = 0\n\n t.header.stamp = rospy.Time.now()\n\n # # Sun to Mercury\n # rho = 0.387\n #\n # t.header.frame_id = 'Sun'\n # t.child_frame_id = 'Mercury'\n # t.transform.translation.x = rho * math.cos(alpha)\n # t.transform.translation.y = rho * math.sin(alpha)\n # t.transform.translation.z = 0.0\n # q = tf_conversions.transformations.quaternion_from_euler(0, 0, 10*alpha)\n # t.transform.rotation.x = q[0]\n # t.transform.rotation.y = q[1]\n # t.transform.rotation.z = q[2]\n # t.transform.rotation.w = q[3]\n\n # Sun to Mercury\n t.header.frame_id = rospy.remap_name('parent')\n t.child_frame_id = rospy.remap_name('child')\n t.transform.translation.x = distance_to_parent * math.cos(alpha)\n t.transform.translation.y = distance_to_parent * math.sin(alpha)\n t.transform.translation.z = 0.0\n q = tf_conversions.transformations.quaternion_from_euler(0, 0, 10*alpha)\n t.transform.rotation.x = q[0]\n t.transform.rotation.y = q[1]\n t.transform.rotation.z = 
q[2]\n t.transform.rotation.w = q[3]\n\n\n\n\n # Send the transformations\n br.sendTransform(t)\n\n # Sleep\n rate.sleep()\n\n rospy.spin()\n\nif __name__ == '__main__':\n main()","repo_name":"cobvinicius/PSR_repo","sub_path":"Parte_11/psr_parte11/src/Ex3/Solar_System.py","file_name":"Solar_System.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16505131161","text":"# Qt\nfrom PyQt6 import QtCore, QtGui, QtWidgets\nfrom PyQt6.QtCore import Qt\nfrom PyQt6.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QLabel, QGraphicsDropShadowEffect\n\n# Custom Imports\nfrom SettingsManager import SettingsManager, SignalManager\nfrom MultiMediaPlayer import MultiMediaPlayer\nfrom styles import *\n\n# Left Panel ================================================================================\nclass LeftPanel(QWidget):\n def __init__(self, parent: QWidget, settingsManager: SettingsManager, signalManager: SignalManager) -> None:\n super().__init__(parent)\n self.info = FileInformation(self, settingsManager, signalManager)\n self.fileUpload = FileUploader(self, settingsManager, signalManager)\n self.initComponent()\n\n\n def initComponent(self) -> None:\n self.leftPanelLayout = QVBoxLayout(self)\n #self.leftPanelLayout.addWidget(self.label, 1)\n self.leftPanelLayout.addWidget(self.info, 10)\n self.leftPanelLayout.addWidget(self.fileUpload, 1)\n\n def paintEvent(self, a0: QtGui.QPaintEvent | None) -> None:\n painter = QtGui.QPainter(self)\n\n painter.setBrush(QtGui.QColor(Color.BACKGROUND_COLOR_SECONDARY.value))\n\n borderColour = QtGui.QColor(Color.ACCENT_COLOR_PRIMARY_DARK.value)\n painter.setPen(borderColour)\n pen = QtGui.QPen(borderColour)\n pen.setWidth(BORDER_WIDTH) \n painter.setPen(pen)\n\n infoRect = self.info.geometry()\n fileUploadRect = self.fileUpload.geometry()\n\n painter.drawRoundedRect(infoRect, BORDER_RADIUS, BORDER_RADIUS)\n painter.drawRoundedRect(fileUploadRect, BORDER_RADIUS, BORDER_RADIUS)\n\n# INFO DISPLAY ==============================================================================\nclass FileInformation(QWidget):\n \n def __init__(self, parent: QWidget, settingsManager: SettingsManager, signalManager: SignalManager):\n super().__init__(parent)\n \n # Widgets\n self.player = MultiMediaPlayer(self, settingsManager, signalManager)\n self.filetext = FileTextComponent(self, settingsManager, signalManager)\n\n # Layout\n self.mainLayout = QVBoxLayout(self)\n self.mainLayout.setContentsMargins(0,0,0,0)\n self.mainLayout.addWidget(self.player, 10)\n self.mainLayout.addWidget(self.filetext, 3)\n \nclass FileTextComponent(QWidget):\n def __init__(self, parent: QWidget, settingsManager: SettingsManager, signalManager: SignalManager):\n super().__init__(parent)\n self.fileManager = settingsManager\n self.signalManager = signalManager\n self.fileInformation = settingsManager.getCurrentFileInformation()\n \n self.fileNameLabel = QLabel(\"File Name\")\n self.fileNameLabel.setFont(Font.H2_FONT_Q.value)\n self.fileNameLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileName = QLabel(self.fileInformation[\"name\"])\n self.fileName.setFont(Font.H3_FONT_Q.value)\n self.fileName.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileExtensionLabel = QLabel(\"Extension\")\n self.fileExtensionLabel.setFont(Font.H2_FONT_Q.value)\n self.fileExtensionLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileExtension = QLabel(self.fileInformation[\"extension\"])\n 
self.fileExtension.setFont(Font.H3_FONT_Q.value)\n self.fileExtension.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileSizeLabel = QLabel(\"Size\")\n self.fileSizeLabel.setFont(Font.H2_FONT_Q.value)\n self.fileSizeLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileSize = QLabel(self.fileInformation[\"size\"])\n self.fileSize.setFont(Font.H3_FONT_Q.value)\n self.fileSize.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileRateLabel = QLabel(\"Rate\")\n self.fileRateLabel.setFont(Font.H2_FONT_Q.value)\n self.fileRateLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileRate = QLabel(self.fileInformation[\"rate\"])\n self.fileRate.setFont(Font.H3_FONT_Q.value)\n self.fileRate.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileResolutionLabel = QLabel(\"Resolution\")\n self.fileResolutionLabel.setFont(Font.H2_FONT_Q.value)\n self.fileResolutionLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileResolution = QLabel(str(self.fileInformation[\"resolution\"]))\n self.fileResolution.setFont(Font.H3_FONT_Q.value)\n self.fileResolution.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileCodecsLabel = QLabel(\"Codec\")\n self.fileCodecsLabel.setFont(Font.H2_FONT_Q.value)\n self.fileCodecsLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.fileCodecs = QLabel(str(self.fileInformation[\"codecs\"]))\n self.fileCodecs.setFont(Font.H3_FONT_Q.value)\n self.fileCodecs.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.signalManager.fileDropped.connect(self.updateFileInformation)\n\n # Layout\n self.mainLayout = QtWidgets.QGridLayout(self)\n # Row 0\n self.mainLayout.addWidget(self.fileNameLabel, 0, 0)\n self.mainLayout.addWidget(self.fileExtensionLabel, 0, 1)\n self.mainLayout.addWidget(self.fileResolutionLabel, 0, 2)\n\n # Row 1\n self.mainLayout.addWidget(self.fileName, 1, 0)\n self.mainLayout.addWidget(self.fileExtension, 1, 1)\n self.mainLayout.addWidget(self.fileResolution, 1, 2)\n\n spacerItem = QtWidgets.QSpacerItem(20, 20)\n self.mainLayout.addItem(spacerItem, 2, 0, 1, 3)\n\n # Row 2\n self.mainLayout.addWidget(self.fileSizeLabel, 3, 0)\n self.mainLayout.addWidget(self.fileRateLabel, 3, 1)\n self.mainLayout.addWidget(self.fileCodecsLabel, 3, 2)\n\n # Row 3\n self.mainLayout.addWidget(self.fileSize, 4, 0)\n self.mainLayout.addWidget(self.fileRate, 4, 1)\n self.mainLayout.addWidget(self.fileCodecs, 4, 2)\n\n def updateFileInformation(self):\n self.fileInformation = self.fileManager.getCurrentFileInformation()\n \n self.fileName.setText(str(self.fileInformation[\"name\"]))\n self.fileExtension.setText(str(self.fileInformation[\"extension\"]))\n self.fileSize.setText(str(self.fileInformation[\"size\"]))\n self.fileResolution.setText(str(self.fileInformation[\"resolution\"]))\n self.fileRate.setText(str(self.fileInformation[\"rate\"]))\n self.fileCodecs.setText(str(self.fileInformation[\"codecs\"]))\n\n\nclass FileUploader(QWidget):\n def __init__(self, parent: QWidget, settingsManager: SettingsManager, signalManager: SignalManager):\n super().__init__(parent)\n\n # Drop and File Choose Components\n self.dragDrop = FileDropFrame(settingsManager, signalManager)\n self.button = QtWidgets.QPushButton(\"Choose File\")\n self.button.setObjectName(\"chooseFile\")\n self.button.setCursor(Qt.CursorShape.PointingHandCursor)\n self.button.clicked.connect(self.chooseFile)\n\n # Layouts\n self.mainLayout = QtWidgets.QGridLayout(self)\n\n # Add the dragDrop widget to the grid layout\n self.mainLayout.addWidget(self.dragDrop, 0, 0, 4, 3) # Takes 4 rows\n\n # Add 
the button to the grid layout\n self.mainLayout.addWidget(self.button, 2, 3, 2, 1, Qt.AlignmentFlag.AlignCenter) # Takes 1 row\n\n def chooseFile(self):\n file_dialog = QtWidgets.QFileDialog(self)\n file_dialog.setOptions(QtWidgets.QFileDialog.Option.DontConfirmOverwrite)\n file_paths, _ = file_dialog.getOpenFileNames(self, \"Choose File\", \"\", \"All Files (*);;Audio Files (*.mp3 *.wav);;Video Files (*.mp4 *.avi)\")\n\n if file_paths:\n chosen_file = file_paths[0]\n self.dragDrop.fileManager.setNewInputFilePath(chosen_file)\n self.dragDrop.fileManager.updateFileInfo()\n self.dragDrop.signalManager.fileDropped.emit()\n\nclass FileDropFrame(QtWidgets.QFrame):\n def __init__(self, settingsManager: SettingsManager, signalManager: SignalManager):\n super().__init__()\n self.fileManager = settingsManager\n self.signalManager = signalManager\n #self.setFrameStyle(QtWidgets.QFrame.Shape.Panel)\n self.setAcceptDrops(True)\n self.setStyleSheet(\n f\"background-color: {Color.FILE_DROP_FRAME_DEFAULT.value};\"\n f\"border-radius: {BORDER_RADIUS}px;\"\n )\n # Create a QLabel for displaying text\n self.textLabel = QLabel(\"Drop files here\", self)\n self.textLabel.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n def resizeEvent(self, event):\n # Center the text label within the frame when the frame is resized\n self.textLabel.setGeometry(\n (self.width() - self.textLabel.width()) // 2,\n (self.height() - self.textLabel.height()) // 2,\n self.textLabel.width(),\n self.textLabel.height(),\n )\n\n def dragEnterEvent(self, event: QtGui.QDragEnterEvent):\n mime_data = event.mimeData()\n self.setStyleSheet(\n f\"background-color: {Color.FILE_DROP_FRAME_DRAG.value};\"\n f\"border-radius: {BORDER_RADIUS}px;\")\n \n if isinstance(mime_data, QtCore.QMimeData) and mime_data.hasUrls():\n event.acceptProposedAction()\n\n def dragMoveEvent(self, event: QtGui.QDragMoveEvent):\n mime_data = event.mimeData()\n if isinstance(mime_data, QtCore.QMimeData) and mime_data.hasUrls():\n event.acceptProposedAction()\n\n def dropEvent(self, event: QtGui.QDropEvent):\n mime_data = event.mimeData()\n if isinstance(mime_data, QtCore.QMimeData) and mime_data.hasUrls():\n file_path = mime_data.urls()[0].toLocalFile()\n self.fileManager.setNewInputFilePath(file_path)\n self.fileManager.updateFileInfo()\n self.setStyleSheet(\n f\"background-color: {Color.FILE_DROP_FRAME_DEFAULT.value};\"\n f\"border-radius: {BORDER_RADIUS}px\"\n )\n self.signalManager.fileDropped.emit()\n\n\n\n","repo_name":"JamesBedsonUPF/AudioVideoEncoding","sub_path":"FFMPEG_Converter_Python/LeftPanel.py","file_name":"LeftPanel.py","file_ext":"py","file_size_in_byte":10085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72374702889","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 25 19:02:26 2021\n\n@author: ichioka\n\"\"\"\n\nimport streamlit as st\nimport time\n\nst.title(\"やることリスト\")\n\n \nimport datetime\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import date\n\nleft_column,right_column = st.columns(2) \n\n\ndef main():\n homework1 = st.date_input('課題1の提出期限を書いてください。',\n min_value=date.today(),\n value=date.today(),\n )\n left_column.write(homework1) \n \n \n\n kyou1 = datetime.date.today() \n if homework1 <= kyou1 + relativedelta(days=+1):\n right_column.write('急いで課題1に取り組みましょう!!!')\n elif homework1 <= kyou1 + relativedelta(weeks=+1):\n right_column.write('そろそろ課題1に取り組みましょう!!')\n else :\n right_column.write('計画的に課題1を勧めましょう!')\n \nif __name__ == '__main__':\n main() \n 
\n@st.cache(allow_output_mutation=True)\ndef cache_lst1():\n lst1 = []\n return lst1\n\nlst1 = cache_lst1()\ninput = st.text_input('課題1の内容')\n\nif input:\n lst1.append(input)\nif st.checkbox('delete1'):\n delete = st.selectbox('課題1の削除する要素を選択して下さい', options=lst1)\n if st.button('Delete1'):\n lst1.remove(delete)\n st.success(f'Delete1 : {delete}')\n\nif st.checkbox('change1'):\n change_from = st.selectbox('課題1の変更する要素を選択して下さい', options=lst1)\n change_index = lst1.index(change_from)\n change_to = st.text_input('どのように課題1を変更しますか')\n if st.button('Change1'):\n lst1.remove(change_from)\n lst1.insert(change_index, change_to)\n st.success(f'Change1 {change_from} to {change_to}')\n \nst.table(lst1)\n\n\n\nprogress1 = st.slider('課題1の進捗は?',0, 100, 50)\n'課題1進捗:',progress1\n\n\nleft_column,right_column = st.columns(2) \n\n\ndef main():\n homework2 = st.date_input('課題2の提出期限を書いてください。',\n min_value=date.today(),\n value=date.today(),\n )\n left_column.write(homework2) \n \n \n\n kyou2 = datetime.date.today() \n if homework2 <= kyou2 + relativedelta(days=+1):\n right_column.write('急いで課題2に取り組みましょう!!!')\n elif homework2 <= kyou2 + relativedelta(weeks=+1):\n right_column.write('そろそろ課題2に取り組みましょう!!')\n else :\n right_column.write('計画的に課題2を勧めましょう!')\n \nif __name__ == '__main__':\n main() \n \n@st.cache(allow_output_mutation=True)\ndef cache_lst2():\n lst2 = []\n return lst2\n\nlst2 = cache_lst2()\ninput = st.text_input('課題2の内容')\n\nif input:\n lst2.append(input)\nif st.checkbox('delete2'):\n delete = st.selectbox('課題2の削除する要素を選択して下さい', options=lst2)\n if st.button('Delete2'):\n lst2.remove(delete)\n st.success(f'Delete2 : {delete}')\n\nif st.checkbox('change2'):\n change_from = st.selectbox('課題2の変更する要素を選択して下さい', options=lst2)\n change_index = lst2.index(change_from)\n change_to = st.text_input('どのように課題2を変更しますか')\n if st.button('Change2'):\n lst2.remove(change_from)\n lst2.insert(change_index, change_to)\n st.success(f'Change2 {change_from} to {change_to}')\n \nst.table(lst2)\n\n\n\nprogress2 = st.slider('課題2の進捗は?',0, 100, 50)\n'課題2進捗:',progress2\n\nleft_column,right_column = st.columns(2) \n\n\ndef main():\n homework3 = st.date_input('課題3の提出期限を書いてください。',\n min_value=date.today(),\n value=date.today(),\n )\n left_column.write(homework3) \n \n \n\n kyou3 = datetime.date.today() \n if homework3 <= kyou3 + relativedelta(days=+1):\n right_column.write('急いで課題3に取り組みましょう!!!')\n elif homework3 <= kyou3 + relativedelta(weeks=+1):\n right_column.write('そろそろ課題3に取り組みましょう!!')\n else :\n right_column.write('計画的に課題3を勧めましょう!')\n \nif __name__ == '__main__':\n main() \n\n@st.cache(allow_output_mutation=True)\ndef cache_lst3():\n lst3 = []\n return lst3\n\nlst3 = cache_lst3()\ninput = st.text_input('課題3の内容')\n\nif input:\n lst3.append(input)\nif st.checkbox('delete3'):\n delete = st.selectbox('課題3の削除する要素を選択して下さい', options=lst3)\n if st.button('Delete3'):\n lst3.remove(delete)\n st.success(f'Delete3 : {delete}')\n\nif st.checkbox('change3'):\n change_from = st.selectbox('課題3の変更する要素を選択して下さい', options=lst3)\n change_index = lst3.index(change_from)\n change_to = st.text_input('どのように課題3を変更しますか')\n if st.button('Change3'):\n lst3.remove(change_from)\n lst3.insert(change_index, change_to)\n st.success(f'Change3 {change_from} to {change_to}')\n \nst.table(lst3)\n\n\n\nprogress3 = st.slider('課題3の進捗は?',0, 100, 
50)\n'課題3進捗:',progress3\n\n\n\n\n\n\n","repo_name":"0932aki/ichi_todo","sub_path":"ichi_todo.py","file_name":"ichi_todo.py","file_ext":"py","file_size_in_byte":5378,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18528277429","text":"from __future__ import absolute_import\nimport os\nimport shutil\n\nfrom netCDF4 import Dataset\n\nfrom lisfloodutilities.compare.nc import NetCDFComparator\n\nfrom lisflood.main import lisfloodexe\n\nfrom .test_utils import setoptions, mk_path_out\n\n\nclass TestRepMaps():\n settings_files = {'base': os.path.join(os.path.dirname(__file__), 'data/LF_ETRS89_UseCase/settings/base.xml')}\n\n def teardown_method(self):\n shutil.rmtree(os.path.join(os.path.dirname(__file__), 'data/LF_ETRS89_UseCase/out/testrep'), ignore_errors=True)\n\n def test_no_reported(self):\n path_out = mk_path_out('data/LF_ETRS89_UseCase/out/testrep')\n settings = setoptions(self.settings_files['base'], vars_to_set={'PathOut': path_out})\n lisfloodexe(settings)\n files = [os.path.join(settings.output_dir, f) for f in os.listdir(settings.output_dir) if f.endswith('.nc') or f.endswith('.tss')]\n assert not files\n\n def test_end_reported(self):\n path_out = mk_path_out('data/LF_ETRS89_UseCase/out/testrep')\n settings = setoptions(self.settings_files['base'], ['repEndMaps'], vars_to_set={'PathOut': path_out})\n lisfloodexe(settings)\n files = [os.path.join(settings.output_dir, f) for f in os.listdir(settings.output_dir) if f.endswith('.end.nc')]\n no_files = [os.path.join(settings.output_dir, f) for f in os.listdir(settings.output_dir) if f.endswith('.nc') and '.end.' not in f]\n assert files\n assert not no_files\n\n def test_state_reported(self):\n path_out = mk_path_out('data/LF_ETRS89_UseCase/out/testrep')\n settings = setoptions(self.settings_files['base'], ['repStateMaps'], vars_to_set={'PathOut': path_out})\n lisfloodexe(settings)\n no_files = [os.path.join(settings.output_dir, f) for f in os.listdir(settings.output_dir) if f.endswith('.end.nc')]\n files = [os.path.join(settings.output_dir, f) for f in os.listdir(settings.output_dir) if f.endswith('.nc') and '.end.' not in f]\n assert files\n assert not no_files\n\n def test_end_state_reported(self):\n path_out = mk_path_out('data/LF_ETRS89_UseCase/out/testrep')\n settings = setoptions(self.settings_files['base'], ['repEndMaps', 'repStateMaps', 'repDischargeMaps'], vars_to_set={'PathOut': path_out})\n lisfloodexe(settings)\n comparator = NetCDFComparator(settings.maskpath, array_equal=True)\n end_files = [os.path.join(settings.output_dir, f) for f in os.listdir(settings.output_dir) if f.endswith('.end.nc')]\n state_files = [os.path.join(settings.output_dir, f) for f in os.listdir(settings.output_dir) if f.endswith('.nc') and '.end.' 
not in f]\n assert end_files\n assert state_files\n # assert that unique timestep in end maps is equal to last timestep in state maps\n for end_file in end_files:\n basename = end_file.replace('.end.nc', '')\n state_file = '{}.nc'.format(basename)\n if not os.path.exists(state_file):\n continue\n state_nc = Dataset(state_file)\n end_nc = Dataset(end_file)\n var_name = [k for k in state_nc.variables if len(state_nc.variables[k].dimensions) == 3][0]\n vara = state_nc.variables[var_name]\n varb = end_nc.variables['{}.end'.format(var_name)]\n assert 'time' not in end_nc.variables\n\n # compare latest timestep in state map with unique timestep in end map\n comparator.compare_arrays(vara[-1][:, :], varb[:, :], varname=basename)\n assert not comparator.errors\n","repo_name":"hzeinivand/lisflood","sub_path":"tests/test_state_end_maps.py","file_name":"test_state_end_maps.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9343584470","text":"\"\"\"\nVarious functions to make it easier to build test models.\n\"\"\"\n\nimport time\nimport numpy\n\nfrom openmdao.core.group import Group\nfrom openmdao.core.parallel_group import ParallelGroup\nfrom openmdao.core.explicitcomponent import ExplicitComponent\nfrom openmdao.core.indepvarcomp import IndepVarComp\n\nfrom openmdao.utils.mpi import MPI\n\n\nclass DynComp(ExplicitComponent):\n \"\"\"\n A component with a settable number of params and outputs.\n \"\"\"\n def __init__(self, ninputs, noutputs,\n nl_sleep=0.001, ln_sleep=0.001,\n var_factory=float, vf_args=()):\n super().__init__()\n\n self.ninputs = ninputs\n self.noutputs = noutputs\n self.var_factory = var_factory\n self.vf_args = vf_args\n self.nl_sleep = nl_sleep\n self.ln_sleep = ln_sleep\n\n def setup(self):\n for i in range(self.ninputs):\n self.add_input(f'i{i}', self.var_factory(*self.vf_args))\n\n for i in range(self.noutputs):\n self.add_output(f'o{i}', self.var_factory(*self.vf_args))\n\n def compute(self, inputs, outputs):\n time.sleep(self.nl_sleep)\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Jacobian for Sellar discipline 1.\n \"\"\"\n time.sleep(self.ln_sleep)\n\n\ndef make_subtree(parent, nsubgroups, levels,\n ncomps, ninputs, noutputs, nconns, var_factory=float):\n \"\"\"Construct a system subtree under the given parent group.\"\"\"\n\n if levels <= 0:\n return\n\n if levels == 1: # add leaf nodes\n create_dyncomps(parent, ncomps, ninputs, noutputs, nconns,\n var_factory=var_factory)\n else: # add more subgroup levels\n for i in range(nsubgroups):\n g = parent.add_subsystem(\"G%d\"%i, Group())\n make_subtree(g, nsubgroups, levels-1,\n ncomps, ninputs, noutputs, nconns,\n var_factory=var_factory)\n\n\ndef create_dyncomps(parent, ncomps, ninputs, noutputs, nconns,\n var_factory=float):\n \"\"\"Create a specified number of DynComps with a specified number\n of variables (ninputs and noutputs), and add them to the given parent\n and add the number of specified connections.\n \"\"\"\n for i in range(ncomps):\n parent.add_subsystem(\"C%d\" % i, DynComp(ninputs, noutputs, var_factory=var_factory))\n\n if i > 0:\n for j in range(nconns):\n parent.connect(\"C%d.o%d\" % (i-1,j), \"C%d.i%d\" % (i, j))\n\n\nif __name__ == '__main__':\n import sys\n from openmdao.core.problem import Problem\n from openmdao.devtools.debug import config_summary\n\n vec_size = 1000\n num_comps = 50\n pts = 2\n\n\n class SubGroup(Group):\n def setup(self):\n create_dyncomps(self, num_comps, 2, 2, 
2,\n var_factory=lambda: numpy.zeros(vec_size))\n cname = \"C%d\"%(num_comps-1)\n self.add_objective(\"%s.o0\" % cname)\n self.add_constraint(\"%s.o1\" % cname, lower=0.0)\n\n\n p = Problem()\n g = p.model\n\n if 'gmres' in sys.argv:\n from openmdao.solvers.linear.scipy_iter_solver import ScipyKrylov\n g.linear_solver = ScipyKrylov()\n\n g.add_subsystem(\"P\", IndepVarComp('x', numpy.ones(vec_size)))\n\n g.add_design_var(\"P.x\")\n\n par = g.add_subsystem(\"par\", ParallelGroup())\n for pt in range(pts):\n ptname = \"G%d\"%pt\n ptg = par.add_subsystem(ptname, SubGroup())\n #create_dyncomps(ptg, num_comps, 2, 2, 2,\n #var_factory=lambda: numpy.zeros(vec_size))\n g.connect(\"P.x\", \"par.%s.C0.i0\" % ptname)\n\n #cname = ptname + '.' + \"C%d\"%(num_comps-1)\n #g.add_objective(\"par.%s.o0\" % cname)\n #g.add_constraint(\"par.%s.o1\" % cname, lower=0.0)\n\n p.setup()\n p.final_setup()\n p.run_model()\n #\n from openmdao.devtools.memory import max_mem_usage\n print(\"mem:\", max_mem_usage())\n\n config_summary(p)\n","repo_name":"OpenMDAO/OpenMDAO","sub_path":"openmdao/test_suite/build4test.py","file_name":"build4test.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"53"} +{"seq_id":"42253843506","text":"from enum import Enum\n\nimport dateutil.parser\n\nfrom maestro_cli.services.maestro_api import MaestroApiClient\n\n\nclass RunStatus(Enum):\n PENDING = \"PENDING\"\n CREATING = \"CREATING\"\n RUNNING = \"RUNNING\"\n STOPPED = \"STOPPED\"\n FINISHED = \"FINISHED\"\n ERROR = \"ERROR\"\n\n\nclass RunMetric:\n def __init__(\n self,\n latency_avg,\n latency_p99,\n latency_p95,\n latency_p90,\n latency_p50,\n success_count,\n total_count,\n min_datetime,\n max_datetime,\n ):\n self.latency_avg = latency_avg\n self.latency_p99 = latency_p99\n self.latency_p95 = latency_p95\n self.latency_p90 = latency_p90\n self.latency_p50 = latency_p50\n self.success_count = success_count\n self.total_count = total_count\n self.min_datetime = min_datetime\n self.max_datetime = max_datetime\n\n\nclass RunMetricApi:\n @staticmethod\n def run_metric_json_to_object(json):\n return RunMetric(\n latency_avg=json.get(\"latency_avg\"),\n latency_p99=json.get(\"latency_p99\"),\n latency_p95=json.get(\"latency_p95\"),\n latency_p90=json.get(\"latency_p90\"),\n latency_p50=json.get(\"latency_p50\"),\n success_count=json.get(\"success_count\"),\n total_count=json.get(\"total_count\"),\n min_datetime=dateutil.parser.parse(json.get(\"min_datetime\")),\n max_datetime=dateutil.parser.parse(json.get(\"max_datetime\")),\n )\n\n @staticmethod\n def all(run_id, time_interval=15):\n\n return MaestroApiClient.get(\n \"/api/run_metrics/%s\" % run_id,\n data={\"time_interval\": time_interval},\n mapper=RunMetricApi.run_metric_json_to_object,\n )\n","repo_name":"Farfetch/maestro","sub_path":"cli/maestro_cli/services/maestro_api/run_metric.py","file_name":"run_metric.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"36027991767","text":"import warnings\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression, LassoCV, RidgeCV\nfrom sklearn.linear_model.coordinate_descent import ConvergenceWarning\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.tree import DecisionTreeRegressor\n\n\ndef 
notEmpty(s):\n return s != ''\n\n\nmpl.rcParams['font.sans-serif'] = [u'simHei']\nmpl.rcParams['axes.unicode_minus'] = False\n# 拦截异常\nwarnings.filterwarnings(action='ignore', category=ConvergenceWarning)\n\nnames = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']\npath = \"datas/boston_housing.data\"\n# 由于数据文件格式不统一,所以读取的时候,先按照一行一个字段属性读取数据,然后再按照每行数据进行处理\nfd = pd.read_csv(path, header=None)\ndata = np.empty((len(fd), 14))\nfor i, d in enumerate(fd.values):\n d = map(float, filter(notEmpty, d[0].split(' ')))\n data[i] = list(d)\n\nx, y = np.split(data, (13,), axis=1)\ny = y.ravel()\n\nprint(\"样本数据量:%d, 特征个数:%d\" % x.shape)\nprint(\"target样本数据量:%d\" % y.shape[0])\n\n# 数据的分割,\nx_train1, x_test1, y_train1, y_test1 = train_test_split(x, y, train_size=0.8, random_state=14)\nx_train, x_test, y_train, y_test = x_train1, x_test1, y_train1, y_test1\nprint(\"训练数据集样本数目:%d, 测试数据集样本数目:%d\" % (x_train.shape[0], x_test.shape[0]))\n\n# 标准化\nss = MinMaxScaler()\n\nx_train = ss.fit_transform(x_train, y_train)\nx_test = ss.transform(x_test)\n\nprint(\"原始数据各个特征属性的调整最小值:\", ss.min_)\nprint(\"原始数据各个特征属性的缩放数据值:\", ss.scale_)\n\n# 构建模型(回归)\nmodel = DecisionTreeRegressor(criterion='mae', max_depth=7)\n# 模型训练\nmodel.fit(x_train, y_train)\n# 模型预测\ny_test_hat = model.predict(x_test)\n\n# 评估模型\nscore = model.score(x_test, y_test)\nprint(\"Score:\", score)\n\n# 构建线性回归\nlr = LinearRegression()\nlr.fit(x_train, y_train)\nlr_y_test_hat = lr.predict(x_test)\nlr_score = lr.score(x_test, y_test)\nprint(\"lr:\", lr_score)\n# 构建lasso\nlasso = LassoCV(alphas=np.logspace(-3, 1, 20))\nlasso.fit(x_train, y_train)\nlasso_y_test_hat = lasso.predict(x_test)\nlasso_score = lasso.score(x_test, y_test)\nprint(\"lasso:\", lasso_score)\n# 构建岭回归\nridge = RidgeCV(alphas=np.logspace(-3, 1, 20))\nridge.fit(x_train, y_train)\nridge_y_test_hat = ridge.predict(x_test)\nridge_score = ridge.score(x_test, y_test)\nprint(\"ridge:\", ridge_score)\n\n# 7. 
画图\nplt.figure(figsize=(12, 6), facecolor='w')\nln_x_test = range(len(x_test))\n\nplt.plot(ln_x_test, y_test, 'r-', lw=2, label=u'实际值')\nplt.plot(ln_x_test, lr_y_test_hat, 'b-', lw=2, label=u'Linear回归,$R^2$=%.3f' % lr_score)\nplt.plot(ln_x_test, lasso_y_test_hat, 'y-', lw=2, label=u'Lasso回归,$R^2$=%.3f' % lasso_score)\nplt.plot(ln_x_test, ridge_y_test_hat, 'c-', lw=2, label=u'Ridge回归,$R^2$=%.3f' % ridge_score)\nplt.plot(ln_x_test, y_test_hat, 'g-', lw=4, label=u'回归决策树预测值,$R^2$=%.3f' % score)\nplt.xlabel(u'数据编码')\nplt.ylabel(u'租赁价格')\nplt.legend(loc='lower right')\nplt.grid(True)\nplt.title(u'波士顿房屋租赁数据预测')\nplt.show()\n","repo_name":"myDemoMike/MachineLearning","sub_path":"003LinearRegression/回归算法:波士顿房屋租赁价格预测.py","file_name":"回归算法:波士顿房屋租赁价格预测.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3040650891","text":"import math\r\n#Trabalho aplicado 1 - Cecília Penha\r\n#Equação 1\r\ndef func(x):\r\n f = (x*x*x)-math.cos(x)-x+1\r\n return f\r\ndef valor(a,b,f1,f2):\r\n if(f1==0):\r\n print(\"raiz = %f\\n\" %a)\r\n return\r\n if(f2==0):\r\n print(\"raiz = %f\\n\" %b)\r\n return\r\n if(f1>0 and f2<0):\r\n print(\"a equacao tem pelo menos um solucao nesse intervalo: [%f,%f]\\n\" %(a,b))\r\n return\r\n elif(f1<0 and f2>0):\r\n print(\"a equacao tem pelo menos uma solucao nesse intervalo: [%f,%f]\\n\" %(a,b))\r\n return\r\n else:\r\n print(\"nao e possivel afirmar que existe uma solucao nesse intervalo, tente outros dois numeros\")\r\n return\r\na = float(input(\"digite a:\\n\"))\r\nb = float(input(\"digite b:\\n\"))\r\nf1 = func(a)\r\nf2 = func(b)\r\nr = valor(a,b,f1,f2)\r\nprint(r)\r\n ","repo_name":"Cecilia-Penha/Trabalho-_Calculo","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2186952494","text":"def solution(records):\n answer = []\n user = {}\n \n for i in range(len(records)):\n record = records[i].split()\n \n if record[0] != 'Leave':\n user[record[1]] = record[2]\n\n \n for i in range(len(records)):\n record = records[i].split()\n \n if record[0] == 'Enter':\n answer.append(user[record[1]] + \"님이 들어왔습니다.\")\n elif record[0] == 'Leave':\n answer.append(user[record[1]] + \"님이 나갔습니다.\")\n \n return answer","repo_name":"rloldl-c/algorithm","sub_path":"프로그래머스/lv2/42888. 
오픈채팅방/오픈채팅방.py","file_name":"오픈채팅방.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20375817889","text":"#coding=utf-8\nimport pymongo\nconn=pymongo.MongoClient(host='127.0.0.1',port=27017)\ndb=conn.test\n\ndef find_pymo():\n\tssname=input('请输入你要查询的东西:')\n\tmessage=input('请输入你想查询的内容:')\n\tdata=db.xiyou.find({ssname:message})\n\tfor i in data:\n\t\tprint(\"名字\"+i['name']+' '+\"年龄\"+str(i['age']+' '+'经验'+i['experience']))\n\ndef find_all():\n\tprint('以下是库里面的所有信息')\n\tdata=db.xiyou.find({},{'_id':0})\n\tfor i in data:\n\t\tprint(\"名字\"+i['name']+' '+\"年龄\"+str(i['age']+' '+'经验'+i['experience']))\n\ndef add_pymo():\n\tnnname=input('请输入你新增的名字:')\n\tnnage=input('请输入你新增的年龄:')\n\tnnexp=input('请输入你新增的经验:')\n\tdata=db.xiyou.insert({'name':nnname,'age':nnage,'experience':nnexp})\n\ndef modf_pymo():\n\tsstype=input('请输入你要输入的种类 (例如name/age/exp):')\n\tmessage=input('请输入你想选哪个:')\n\tinfo=input('请输入你要修改的:')\n\tcon=input('请输入新的内容:')\n\ttry:\n\t\tdb.xiyou.update({sstype:message},{'$set':{info:con}})\n\texcept:\n\t\tprint('no exist!')\n\tfinally:\n\t\tprint('sucessfully!')\n\ndef delete_pymo():\n\topt2=input('删除指定文档请按a,删除所有文档请按c:')\n\tif opt2 == 'a':\n\t\tdel_doc=input('请输入指定文档的类型:')\n\t\tdel_con=input('输入你想删的一个:')\n\t\tdb.xiyou.remove({del_doc:del_con})\n\telif opt2 == 'c':\n\t\tprint('warnning!!')\n\t\twa=input('pressing \"go\" to continue:')\n\t\tif wa == 'go':\n\t\t\tdb.xiyou.remove()\n\t\telse:\n\t\t\tprint('happy')\n\nwhile True:\n\tcmmd=input('查询请按1,新增请按2,修改请按3,删除请按4,退出请按q:')\n\tif cmmd == '1':\n\t\topt1=input('查询大概请按all,详细查询请按detial:')\n\t\tif opt1 == 'all':\n\t\t\tfind_all()\n\t\telif opt1 == 'detiak':\n\t\t\tfind_pymo()\n\t\tprint('OK lo')\n\telif cmmd == '2':\n\t\tadd_pymo()\n\t\tprint('OK lo')\n\telif cmmd == '3':\n\t\tmodf_pymo()\n\telif cmmd == '4':\n\t\tdelete_pymo()\n\t\tprint('OK lo')\n\telif cmmd == 'q':\n\t\tprint('see you tomorrow')\n\t\tbreak\n\t\n","repo_name":"aallenchen2018/login-sys","sub_path":"邓秋云.py","file_name":"邓秋云.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"45144138148","text":"def solution(n):\n answer = []\n for i in range(n+1):\n if i == 0 or i==1:\n answer.append(i)\n else:\n f = answer[i-1]+answer[i-2]\n answer.append(f%1234567)\n return answer[-1] \n\n\n\nsolution(3)\n\n\n\"\"\"\n\n걸린시간 : 10분\n\n생각한 해결방안 :\n A.\n 피보나치 0 1 1 2 3 5 ... \n F0 = 0 \n F1 = 1\n F N = F N-1 + N-2\n 1. 기준이 되어야하는 2개가 필요함 (0,1)\n 2. 0,1을 넣고 그다음부턴 i-1 + i-2가 되므로 그 수를 배열에추가\n 3. 마지막 원소\n\n틀린점 :\n 없음\n\n다른사람코드 및 리뷰 :\n def fibonacci(num):\n a, b = 0, 1\n for i in range(num):\n a, b = b, (a+b)%1234567\n return a\n\n => 간단하게 코드를 짯다.\n 해설)a,b=b,a+b 는 (a, b) = (b, a+b) 와 같습니다. \n 같은 위치에 있는 왼쪽 a에는 오른쪽 b값을, 왼쪽 b에는 오른쪽 a+b 값을 동시에 할당하라는 뜻입니다. \n 파이썬에서는 2개 이상의 복수의 변수를 동시에 상호 할당할 수 있습니다.\n \n\"\"\"","repo_name":"comjayoncloud/study-python","sub_path":"CodingTest/프로그래머스/코딩테스트 - 레벨2/피보나치 수.py","file_name":"피보나치 수.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1652497866","text":"\"\"\"\nExercise 1:\nWrite a function that returns \"num\" squared \n\"\"\"\n\ndef squared(num):\n sq = num ** 2\n return (sq)\n\nprint (\"1#\", squared(5))\n\n\n\"\"\"\nExercise 2:\nWrite a function `multiply(subject, times)`, that returns `subject` multiple by `times`. 
\nNotice what happen when Yoy use number, and what happen if you use string,\nas an argument.\n\n\"\"\"\n\n\ndef multiply(subject, times):\n result = subject * times\n return result\n\nprint (\"2#\", multiply(2, 7)) # result of multiplication\nprint (\"2#\", multiply(5, \"JackDaniels\")) # 5 times JackDaniels\n\n\n\"\"\"\nExercise 3\nWrite a 'power` function that take two arguments:\n\n* `base`: must be\n* `exponent`: optional with standard value of 2.\n\nFunction must return base value put to `exponent' power.\n\"\"\"\n\n\ndef power (base, exponent = 2):\n return (base ** exponent)\n\nprint (\"3#\", power(6, 2)) # returns 36\nprint (\"3#\", power(6)) # also returns 36\n\n\n\"\"\"\nExercise 4\nWrite a fuction \"convert_to_usd\", that takes \"zlotys\" as a parameter. Function must return given value in dollars.\n(take 3.85 PLN as a 1 USD, as a value)\n\"\"\"\n\n\ndef convert_to_usd(zlotys):\n usd = zlotys / 3.85\n return (usd)\n\nprint(\"4#\", convert_to_usd(50))\n\n\n\"\"\"\nExercise 5 \nWrite a `create_name` function, that takes: `name', `surname`, and `nickname`. Function must return\nstring with name, nickname and surname connected.\n\"\"\"\n\n\ndef create_name(name, surname, nickname):\n return name +\" \"+ nickname + \" \"+ surname\n\nprint(\"5#\", create_name(\"Marcin\", \"Plotka\", \"Martinez\"))\n\n\n\"\"\"\nExercise 6\nWrite a `calculate_net` function that takes 'gross' and 'vat' as a price of buy and tax values.\nFunction must return netto value of the price\n\"\"\"\n\n\ndef calculate_net(gross, vat):\n netto = gross / (1 + vat)\n return netto\n\n\nprint(\"6#\", calculate_net(123, 0.23))\n\n\n\"\"\"\nExercise 7\nWrite a 'square area' function that count and returns square field, taking 2 parameters as\nvalues of it sides\n\"\"\"\n\ndef square_area(x,y):\n area = x*y\n return area\n\nprint (\"7#\", square_area(5,20))\n\n\n\"\"\"\nExercise 8\nWrite a 'circle_circut' function that takes diameter of a circle, and returns it circumference\n\"\"\"\n\ndef circle_circut(r):\n circ = 2 * 3.1415 * r\n return circ\nprint (\"8#\", circle_circut(10))\n\n\n\"\"\"\nExercise 9\nWrite a function 'dogs_age' that will count dogs age. Function should takes dogs age as parameter.\nFor 2 first years dogs age is equal to 10.5 of humans age. After this each dogs year is equal to 4 human.\nfunction should return dog's age.\n egz: azor = dogs_age(1.5) == 15.75\n egz: burek = dogs_age(5) == 33\n\"\"\"\n\ndef dogs_age(age):\n if age < 2:\n return age * 10.5\n else:\n return age * 4 + 13\n\nprint (\"9#\", dogs_age(1.5))\nprint (\"9#\", dogs_age(5))\n\n\n\"\"\"\nWrite \"chessboard\" function that takes 'n' parameter, as an optional.\nStandard value of n should be 8. 
Function must white a chessboard on the console,\nmade from # and spaces.\n\"\"\"\n\ndef chessboard(n=8):\n for row in range(0, n):\n r = \"\"\n for col in range(0, n):\n\n if row % 2 == 1:\n\n if col % 2 == 0:\n r += \"#\"\n else:\n r += \" \"\n else:\n\n if col % 2 == 0:\n r += \" \"\n else:\n r += \"#\"\n print(r)\n\nprint(\"10#\"), chessboard()\n\n\n\"\"\"\nExercise 11\nWrite \"find a number\" function that take a number and checking if it is divisible by 4, and return True or False.\n\"\"\"\n\ndef find_a_number(number):\n if number % 4 == 0:\n return True\n else:\n return False\n\nprint (\"11#\", find_a_number(40))\nprint(\"11#\", find_a_number(34))\n\n\n\"\"\"\nExercise 12\nWrite a 'histogram' function, that will take a list of numbers, and returns histogram made from \"#\"\nIf values, given by user will be different than number, function should return \"None\".\n\n\"\"\"\ndef histogram(array):\n hist = \"\"\n\n for i in array:\n if type(i) == int:\n hist += (i * \"#\") + \"\\n\"\n\n else:\n return None\n\n return hist\n\n\nh = histogram([2, 6, 3, 1])\nm= histogram([1, 2, 'error!'])\n\n\nprint(\"12#\", h)\nprint(\"12#\", m)\n\n\n\"\"\"\nExercise 13\nWrite a function that will count a Fibonacci sequence. Function should return n, as a value of this number,\nin Fibonacci sequence\n\"\"\"\n\ndef fibonacci(n):\n\n if n == 0 or n == 1:\n return n\n else:\n return fibonacci(n-1)+ fibonacci(n-2)\n\nprint(\"13#\", fibonacci(9))\n ","repo_name":"PanMartinez/Bootcamp_Repeat","sub_path":"01Basics/01functions.py","file_name":"01functions.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8253131139","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nclass RecipeFetcher:\n def scrape_recipe(self, recipe_url):\n results = {}\n page_html = requests.get(recipe_url)\n page_graph = BeautifulSoup(page_html.content, features=\"lxml\")\n results['ingredients'] = [ingredient.text for ingredient in\n page_graph.find_all('span', {'itemprop': 'recipeIngredient'}) or\n page_graph.find_all('span', {'class': 'ingredients-item-name'})]\n results['directions'] = [direction.text.strip('\\n ') for direction in\n page_graph.find_all('li', {'class': 'step'})\n or page_graph.find_all('li', {'class': 'subcontainer instructions-section-item'})]\n return results\n","repo_name":"haydenudelson/337-project-2","sub_path":"recipe_fetcher.py","file_name":"recipe_fetcher.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7874532425","text":"# -*- coding: utf-8 -*-\nfrom typing import Optional\n\nfrom sqlalchemy.sql import and_\nfrom sqlalchemy import func, select\n\nfrom fastapi import APIRouter, Query\nfrom fastapi import Depends\n\nfrom commons.code import *\nfrom commons.func import md5, REGEX_MOBILE\n\nfrom settings import settings\n\nfrom models.mysql.system import db_engine, t_account\nfrom models.mysql.system.t_user import t_user\nfrom models.mysql import *\n\nfrom handlers import tool\nfrom handlers.items import ItemOutOperateSuccess, ItemOutOperateFailed\nfrom handlers.items.user import ListDataUser, ItemInAddUser, ItemInEditUser, ItemOutUserList, ItemOutUser, \\\n ItemOutUserGroup, ItemOutUserRole\nfrom handlers.exp import MyException\nfrom handlers.const import *\n\nrouter = APIRouter(tags=[TAGS_USER], dependencies=[Depends(tool.check_token)])\n\n\n@router.get(\"/user\", tags=[TAGS_USER], 
response_model=ItemOutUserList, name='获取用户')\nasync def get_users(userinfo: dict = Depends(tool.get_userinfo_from_token), page: Optional[int] = Query(settings.web_page, description='第几页'), limit: Optional[int] = Query(settings.web_page_size, description='每页条数'), name: Optional[str] = Query(None, description='用户名'), mobile: Optional[str] = Query(None, description='用户手机号', regex=REGEX_MOBILE)):\n item_out = ItemOutUserList()\n\n # 检查权限\n tool.check_operation_permission(userinfo['id'], PERMISSION_USER_VIEW)\n\n with db_engine.connect() as conn:\n # 获取当前有多少数据\n count_sql = select([func.count(t_user.c.id)]).where(t_user.c.sub_status != TABLE_SUB_STATUS_INVALID_DEL)\n\n # 获取分页后的用户列表\n user_sql = select([\n t_user.c.id,\n t_user.c.name,\n t_user.c.head_img_url,\n t_user.c.mobile,\n t_user.c.status,\n t_user.c.sub_status,\n ]).where(t_user.c.sub_status != TABLE_SUB_STATUS_INVALID_DEL).where(t_user.c.sub_status != TABLE_SUB_STATUS_INVALID_DEL)\n\n if name is not None:\n # 用户名过滤\n name = name.strip()\n count_sql = count_sql.where(t_user.c.name.like('%{}%'.format(name)))\n user_sql = user_sql.where(t_user.c.name.like('%{}%'.format(name)))\n\n if mobile is not None:\n # 用户手机号过滤\n mobile = mobile.strip()\n count_sql = count_sql.where(t_user.c.mobile.like('%{}%'.format(mobile)))\n user_sql = user_sql.where(t_user.c.mobile.like('%{}%'.format(mobile)))\n\n total = conn.execute(count_sql).scalar()\n\n user_sql = user_sql.order_by('sort', 'id')\n if page != 0:\n user_sql = user_sql.limit(limit).offset((page - 1) * limit)\n\n item_out_data = ListDataUser(\n result=[],\n total=total,\n page=page,\n limit=limit,\n )\n user_obj_list = conn.execute(user_sql).fetchall()\n for user_obj in user_obj_list:\n groups = tool.get_user_groups(user_obj.id, conn)\n roles = tool.get_user_roles(user_obj.id, conn)\n item_out_data.result.append(\n ItemOutUser(\n id=user_obj.id,\n name=user_obj.name,\n head_img_url=user_obj.head_img_url,\n mobile=user_obj.mobile,\n status=user_obj.status,\n sub_status=user_obj.sub_status,\n groups=[ItemOutUserGroup(\n id=group.id,\n name=group.name,\n code=group.code,\n intro=group.intro,\n ) for group in groups] if groups is not None else [],\n roles=[ItemOutUserRole(\n id=role.id,\n pid=role.pid,\n name=role.name,\n code=role.code,\n intro=role.intro,\n is_super=role.is_super,\n ) for role in roles] if roles is not None else []\n )\n )\n\n item_out.data = item_out_data\n return item_out\n\n\n@router.post(\"/user\", tags=[TAGS_USER], response_model=ItemOutOperateSuccess, name='添加用户')\nasync def add_user(item_in: ItemInAddUser, userinfo: dict = Depends(tool.get_userinfo_from_token)):\n \"\"\"\n 添加用户\\n\n :param item_in:\\n\n :param userinfo:\\n\n :return:\n \"\"\"\n # 鉴权\n tool.check_operation_permission(userinfo['id'], PERMISSION_USER_ADD)\n\n conn = db_engine.connect()\n trans = conn.begin()\n\n try:\n # 1.新增用户\n # 1.1 设置用户盐值\n user_salt = tool.get_rand_str(6)\n # 1.2 执行新增\n user_val = {\n 'name': item_in.name,\n 'head_img_url': item_in.head_img_url,\n 'mobile': item_in.mobile,\n 'email': item_in.email,\n 'salt': user_salt,\n 'password': md5(item_in.password, user_salt),\n 'creator': userinfo['name']\n }\n user_sql = t_user.insert().values(user_val)\n user_res = conn.execute(user_sql)\n\n # 2.新增账号\n # 添加一个用户名的登录账号\n account_sql = t_account.insert().values({\n 'user_id': user_res.lastrowid,\n 'open_code': item_in.name,\n 'category': TABLE_ACCOUNT_CATEGORY_CUSTOM,\n 'creator': userinfo['name']\n })\n conn.execute(account_sql)\n\n if item_in.email:\n # 填写了邮箱,添加一个邮箱的登录账号\n account_sql = 
t_account.insert().values({\n 'user_id': user_res.lastrowid,\n 'open_code': item_in.email,\n 'category': TABLE_ACCOUNT_CATEGORY_EMAIL,\n 'creator': userinfo['name']\n })\n conn.execute(account_sql)\n\n if item_in.mobile:\n # 填写了手机号,添加一个手机号的登录账号\n account_sql = t_account.insert().values({\n 'user_id': user_res.lastrowid,\n 'open_code': item_in.mobile,\n 'category': TABLE_ACCOUNT_CATEGORY_PHONE,\n 'creator': userinfo['name']\n })\n conn.execute(account_sql)\n\n if item_in.role_ids:\n # 3.指定了角色,绑定用户角色关系\n tool.bind_user_roles(user_res.lastrowid, item_in.role_ids, userinfo, conn)\n\n if item_in.group_ids:\n # 4.指定了组,绑定用户与组关系\n tool.bind_user_groups(user_res.lastrowid, item_in.group_ids, userinfo, conn)\n\n trans.commit()\n return ItemOutOperateSuccess()\n except MyException as mex:\n trans.rollback()\n raise mex\n except Exception as ex:\n trans.rollback()\n raise MyException(status_code=HTTP_500_INTERNAL_SERVER_ERROR, detail=ItemOutOperateFailed(code=HTTP_500_INTERNAL_SERVER_ERROR, msg=str(ex)))\n finally:\n conn.close()\n\n\n@router.put(\"/user/{user_id}\", tags=[TAGS_USER], response_model=ItemOutOperateSuccess, name=\"修改用户\")\nasync def edit_user(user_id: int, item_in: ItemInEditUser, userinfo: dict = Depends(tool.get_userinfo_from_token)):\n \"\"\"\n 修改用户\\n\n :param user_id:\\n\n :param item_in:\\n\n :param userinfo:\\n\n :return:\n \"\"\"\n # 鉴权\n tool.check_operation_permission(userinfo['id'], PERMISSION_USER_EDIT)\n\n conn = db_engine.connect()\n trans = conn.begin()\n\n try:\n # 1.查找用户\n user_sql = t_user.select().where(t_user.c.id == user_id).limit(1).with_for_update()\n user_obj = conn.execute(user_sql).fetchone()\n if not user_obj:\n raise MyException(status_code=HTTP_404_NOT_FOUND, detail={'code': HTTP_404_NOT_FOUND, 'msg': 'user not exists'})\n\n # 2.修改用户\n user_val = {\n 'editor': userinfo['name']\n }\n if item_in.name is not None:\n user_val['name'] = item_in.name\n if item_in.head_img_url is not None:\n user_val['head_img_url'] = item_in.head_img_url\n if item_in.mobile is not None:\n user_val['mobile'] = item_in.mobile\n if item_in.email is not None:\n user_val['email'] = item_in.email\n if item_in.password is not None:\n user_val['password'] = item_in.password\n\n update_user_sql = t_user.update().where(t_user.c.id == user_id).values(user_val)\n conn.execute(update_user_sql)\n\n # 3.修改账号\n # 3.1 获取账号\n account_sql = t_account.select().where(t_account.c.user_id == user_obj.id).order_by('sort', 'id').with_for_update()\n account_res = conn.execute(account_sql).fetchall()\n # 3.2 遍历账号,并修改\n for account in account_res:\n if account.category == TABLE_ACCOUNT_CATEGORY_CUSTOM and item_in.name and account.open_code != item_in.name:\n # 账号类型为自定义,并且修改了用户名\n tmp_account_update_sql = t_account.update().where(t_account.c.id == account.id).values({\n 'open_code': item_in.name,\n 'editor': userinfo['name']\n })\n conn.execute(tmp_account_update_sql)\n\n elif account.category == TABLE_ACCOUNT_CATEGORY_PHONE and item_in.mobile and account.open_code != item_in.mobile:\n # 账号类型为手机号,并且用户修改了手机号\n tmp_account_update_sql = t_account.update().where(t_account.c.id == account.id).values({\n 'open_code': item_in.mobile,\n 'editor': userinfo['name']\n })\n conn.execute(tmp_account_update_sql)\n\n elif account.category == TABLE_ACCOUNT_CATEGORY_EMAIL and item_in.email and account.open_code != item_in.email:\n # 账号类型为手机号,并且用户修改了手机号\n tmp_account_update_sql = t_account.update().where(t_account.c.id == account.id).values({\n 'open_code': item_in.mobile,\n 'editor': userinfo['name']\n })\n 
conn.execute(tmp_account_update_sql)\n\n if item_in.role_ids is not None:\n # 解绑旧的用户-角色关系\n tool.unbind_user_roles(user_id, 0, userinfo, conn)\n\n # 绑定新的用户-角色关系\n tool.bind_user_roles(user_id, item_in.role_ids, userinfo, conn)\n\n if item_in.group_ids is not None:\n # 解绑旧的用户-用户组关系\n tool.unbind_user_groups(user_id, 0, userinfo, conn)\n\n # 绑定新的用户-用户组关系\n tool.bind_user_groups(user_id, item_in.group_ids, userinfo, conn)\n\n # 提交事务\n trans.commit()\n\n return ItemOutOperateSuccess()\n except MyException as mex:\n raise mex\n except:\n trans.rollback()\n raise MyException(status_code=HTTP_500_INTERNAL_SERVER_ERROR, detail=ItemOutOperateFailed(code=HTTP_500_INTERNAL_SERVER_ERROR, msg='inter server error'))\n finally:\n conn.close()\n\n\n@router.put(\"/user/{user_id}/disable\", tags=[TAGS_USER], name=\"禁用用户\")\nasync def disable_user(user_id: int, userinfo: dict = Depends(tool.get_userinfo_from_token)):\n \"\"\"\n 禁用用户\\n\n :param user_id:\\n\n :param userinfo:\\n\n :return:\n \"\"\"\n # 鉴权\n tool.check_operation_permission(userinfo['id'], PERMISSION_USER_DISABLE)\n\n conn = db_engine.connect()\n trans = conn.begin()\n\n try:\n # 1.查找用户\n user_sql = t_user.select().where(t_user.c.id == user_id).limit(1).with_for_update()\n conn.execute(user_sql).fetchone()\n\n # 2.修改用户状态为禁用\n update_user_sql = t_user.update().where(and_(\n t_user.c.id == user_id,\n t_user.c.status == TABLE_STATUS_VALID,\n t_user.c.sub_status == TABLE_SUB_STATUS_VALID,\n )).values({\n 'status': TABLE_STATUS_INVALID,\n 'sub_status': TABLE_SUB_STATUS_INVALID_DISABLE\n })\n conn.execute(update_user_sql)\n\n # 3.提交事务\n trans.commit()\n\n return ItemOutOperateSuccess()\n except MyException as mex:\n raise mex\n except:\n trans.rollback()\n raise MyException(status_code=HTTP_500_INTERNAL_SERVER_ERROR, detail=ItemOutOperateFailed(code=HTTP_500_INTERNAL_SERVER_ERROR, msg='inter server error'))\n finally:\n conn.close()\n\n\n@router.put(\"/user/{user_id}/enable\", tags=[TAGS_USER], name='启用用户')\nasync def enable_user(user_id: int, userinfo: dict = Depends(tool.get_userinfo_from_token)):\n \"\"\"\n 启用用户\\n\n :param user_id:\\n\n :param userinfo:\\n\n :return:\n \"\"\"\n # 鉴权\n tool.check_operation_permission(userinfo['id'], PERMISSION_USER_ENABLE)\n\n conn = db_engine.connect()\n trans = conn.begin()\n\n try:\n # 1.查找用户\n user_sql = t_user.select().where(t_user.c.id == user_id).limit(1).with_for_update()\n conn.execute(user_sql).fetchone()\n\n # 2.修改用户状态为启用\n update_user_sql = t_user.update().where(and_(\n t_user.c.id == user_id,\n t_user.c.status == TABLE_STATUS_INVALID,\n t_user.c.sub_status == TABLE_SUB_STATUS_INVALID_DISABLE,\n )).values({\n 'status': TABLE_STATUS_VALID,\n 'sub_status': TABLE_SUB_STATUS_VALID\n })\n conn.execute(update_user_sql)\n\n # 3.提交事务\n trans.commit()\n\n return ItemOutOperateSuccess()\n except:\n trans.rollback()\n raise MyException(status_code=HTTP_500_INTERNAL_SERVER_ERROR, detail=ItemOutOperateFailed(code=HTTP_500_INTERNAL_SERVER_ERROR, msg='inter server error'))\n finally:\n conn.close()\n\n\n@router.delete(\"/user/{user_id}\", tags=[TAGS_USER], name='删除用户')\nasync def del_user(user_id: int, userinfo: dict = Depends(tool.get_userinfo_from_token)):\n \"\"\"\n 删除用户\\n\n :param user_id:\\n\n :param userinfo:\\n\n :return:\n \"\"\"\n # 鉴权\n tool.check_operation_permission(userinfo['id'], PERMISSION_USER_DEL)\n\n conn = db_engine.connect()\n trans = conn.begin()\n\n try:\n # 1.查找用户\n user_sql = t_user.select().where(t_user.c.id == user_id).limit(1).with_for_update()\n conn.execute(user_sql).fetchone()\n\n # 
2.修改用户状态为无效(软删除)\n update_user_sql = t_user.update().where(and_(\n t_user.c.id == user_id,\n t_user.c.sub_status != TABLE_SUB_STATUS_INVALID_DEL,\n )).values({\n 'status': TABLE_STATUS_INVALID,\n 'sub_status': TABLE_SUB_STATUS_INVALID_DEL\n })\n conn.execute(update_user_sql)\n\n # 3.提交事务\n trans.commit()\n\n return ItemOutOperateSuccess()\n except:\n trans.rollback()\n raise MyException(status_code=HTTP_500_INTERNAL_SERVER_ERROR, detail=ItemOutOperateFailed(code=HTTP_500_INTERNAL_SERVER_ERROR, msg='inter server error'))\n finally:\n conn.close()\n","repo_name":"jicao56/normalAdmin","sub_path":"handlers/routers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":15067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15113927718","text":"# A minha resolução fica alterando de contador conforme o mesmo termina\r\n\r\nfrom time import sleep\r\n\r\nwhile True:\r\n\r\n # contagem sentido crescente\r\n\r\n for v in range(1, 11):\r\n print(v)\r\n sleep(0.5)\r\n\r\n if v == 10:\r\n print()\r\n\r\n # contagem sentido decrescente\r\n\r\n for n in range(10, 0, -1):\r\n print(n)\r\n sleep(0.5)\r\n\r\n if n == 1:\r\n print()\r\n","repo_name":"Relampago14/Python-3-Udemy","sub_path":"Exercícios/exercício 7.py","file_name":"exercício 7.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7144345485","text":"####################################################################################\n# Trade with Binance API\n####################################################################################\n\nimport requests, hmac, hashlib, json\nfrom datetime import datetime\n\nAPI_KEY = \"d128ad3126390ed8b40ae6f839b24492d4ce6d5a4d4dfdf2d52d374b3c4d61bb\"\nAPI_SECRET = \"a63a26280782019d00356226af8ee783aa97a5cf43e343229939a72f2b15bc80\"\nAPI_ENDPOINT = \"https://testnet.binancefuture.com/fapi\"\n\n####################################################################################\n\ndef _getCurrentTime():\n try:\n res = requests.get(API_ENDPOINT + '/v1/time').json()\n readable = datetime.fromtimestamp(res['serverTime'] / 1e3)\n print('[Server Time]', readable)\n return res['serverTime']\n except:\n print(\"[Server Time Error]\")\n return None\n\ndef _getCurrentPrice(symbol):\n res = requests.get(API_ENDPOINT + '/v1/ticker/price?symbol=' + symbol).json()\n _printJson(res)\n return res['price']\n\ndef _getSignature(query, key):\n key = bytes(key, 'UTF-8')\n query = query.encode()\n h = hmac.new(key, query, hashlib.sha256)\n return h.hexdigest()\n\ndef _printJson(data):\n print(json.dumps(data, indent=2))\n\nif __name__ == '__main__':\n print(\"Execute commands in each files.\")","repo_name":"chase-allen-tech/binance-trade-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"7538712873","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nimport numpy as np\nfrom pickle import load\nfrom tensorflow import keras\n\nfrom .forms import SignUpForm\nscaler = load(open('Resource/scaler.pkl', 'rb'))\nmodel = keras.models.load_model('Resource/model/')\n\ndef signup_view(request):\n\tif 
request.user.is_authenticated:\n\t\treturn redirect('users:dashboard')\n\tif request.method == \"POST\":\n\t\tform = SignUpForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tpassword = form.cleaned_data.get('password1')\n\t\t\tuser = authenticate(username=username, password=password)\n\t\t\tlogin(request, user)\n\t\t\treturn redirect('users:dashboard')\n\t\telse:\n\t\t\tmessages.error(request, 'Correct the errors below')\n\telse:\n\t\tform = SignUpForm()\n\n\treturn render(request, 'app/signup.html', {'form': form})\n\n\n@login_required\ndef dashboard_view(request):\n\tresult = None\n\treasons = []\n\tmessage = {}\n\tif request.method == 'POST':\n\t\tday = int(request.POST.get(\"day\", 0))\n\t\tfdbk = int(request.POST.get(\"feedback\", 0))\n\t\tleave_record = int(request.POST.get(\"leave_record\", 0))\n\t\tshift = int(request.POST.get(\"shift\", 0))\n\t\tvehicle_condition = int(request.POST.get(\"vehicle_condition\", 0))\n\t\ttraffic_condition = int(request.POST.get(\"traffic_condition\", 0))\n\t\troad_condition = int(request.POST.get(\"road_condition\", 0))\n\t\tinput = np.array([[day,fdbk,leave_record, shift, vehicle_condition, traffic_condition, road_condition]])\n\t\tinput = scaler.transform(input.astype(np.float))\n\t\tresult = model.predict(input)[0][0]\n\t\thrs = int(abs(result))\n\t\tminutes = round((abs(result) % 1) * 60)\n\t\tif result < 0:\n\t\t\tresult = 'Your order is {} hours and {} minutes late'.format(hrs, minutes)\n\t\t\tif vehicle_condition == 16:\n\t\t\t\treasons.append('vechicle')\n\t\t\tif traffic_condition == 16:\n\t\t\t\treasons.append('traffic')\n\t\t\tif road_condition == 16:\n\t\t\t\treasons.append('road')\n\t\t\tif not reasons:\n\t\t\t\treasons.append('')\n\t\t\tmessage['value'] = 'Reason: Unfavourable ' + (', ').join(reasons) + ' condition'\n\t\t\tmessage['is_late'] = True\n\t\telif result == 0:\n\t\t\tresult = 'Your order will arrive on time'\n\t\t\tmessage['value'] = ''\n\t\telse:\n\t\t\tresult = 'Your order is {} hours and {} minutes early'.format(hrs, minutes)\n\t\t\tif vehicle_condition == 15:\n\t\t\t\treasons.append('vechicle')\n\t\t\tif traffic_condition == 15:\n\t\t\t\treasons.append('traffic')\n\t\t\tif road_condition == 15:\n\t\t\t\treasons.append('road')\n\t\t\tif not reasons:\n\t\t\t\treasons.append('')\n\t\t\tmessage['value'] = 'Reason: Favourable ' + (', ').join(reasons) + ' condition'\n\t\t\tmessage['is_late'] = False\n\n\treturn render(request, 'app/dashboard.html', {'result': result, 'reason': message})\n\n\ndef home_view(request):\n\treturn render(request, 'app/home.html')","repo_name":"Jyolsnakodoth/Transformers","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15741163156","text":"\"\"\" Train SVM classifier using features (CNN codes) extracted from CIFAR-10 dataset by transfer learning \"\"\"\n\n# Import libs\nimport numpy as np\nfrom numpy import genfromtxt\nfrom sklearn.svm import SVC\nfrom sklearn.externals import joblib\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport argparse\n\ndef main():\n \"\"\" Train SVM model, use CNN codes for CIFAR-10 dataset\"\"\"\n\n # Load CNN codes and categories from file\n X = genfromtxt(config.x)\n y = genfromtxt(config.y)\n\n print(X.shape)\n print(y.shape)\n\n # Specify tuned parameters of SVM\n C_range = 
np.logspace(-3, 10, 10)\n tuned_parameters = [{'C':C_range}, {'kernel':['linear']}, {'decision_function_shape':['ovo']}]\n\n # Use grid search to choose optimal parameters of SVM\n # (Random search would be better for more parameters)\n svm = GridSearchCV(SVC(), param_grid=tuned_parameters, n_jobs=config.jobs)\n\n # Train SVM using best value of hyperparameters\n svm.fit(X, y)\n print(\"The best parameters are %s with a score of %0.2f\"\n % (svm.best_params_, svm.best_score_))\n\n # Save SVM model to file\n joblib.dump(svm, 'svm.pkl')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # Paths for CNN codes and categories\n parser.add_argument('--x', type=str, default='x.csv')\n parser.add_argument('--y', type=str, default='y.csv')\n\n # Parameters for grid search\n parser.add_argument('--jobs', type=int, default=2)\n\n # Path for storage of SVM model\n parser.add_argument('--out', type=str, default=\"svm.pkl\")\n config = parser.parse_args()\n main()\n","repo_name":"fagan2888/image-classification-exercise","sub_path":"04-train-svm.py","file_name":"04-train-svm.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39382658448","text":"import sys\nimport os\nfrom pyspark.sql import SparkSession, functions, types\nimport sys\nfrom math import radians, cos, sin, asin, sqrt\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\nimport datetime as dt\nfrom datetime import datetime\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nfrom pykalman import KalmanFilter\nfrom xml.dom.minidom import parse, parseString\nfrom pandas import DataFrame\nimport difflib\nimport gzip\nimport json\nfrom scipy import stats\nfrom pylab import *\nimport re\nimport datetime\n\n\n'''\ndo people like the first movie better than the second one in\na series\n'''\n'''\nhow to run:\nexport PYSPARK_PYTHON=python3\n\nexport PATH=$PATH:/home/sarbjot/spark-2.4.3-bin-hadoop2.7/bin\n\nexport JAVA_HOME=/usr/lib/jvm/java-8-oracle/\n\nspark-submit part1.py wiki-copy-1 output_statsQ\n'''\nspark = SparkSession.builder.appName('weather ETL').getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\n\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\nassert spark.version >= '2.3' # make sure we have Spark 2.3+\n\nschema = types.StructType([\n types.StructField('wikidata_id', types.StringType()),\n types.StructField('label', types.StringType()),\n types.StructField('imdb_id', types.StringType()),\n types.StructField('rotten_tomatoes_id', types.StringType()),\n types.StructField('metacritic_id', types.StringType()),\n types.StructField('enwiki_title', types.StringType()),\n types.StructField('genre', types.StringType()),\n types.StructField('main_subject', types.StringType()),\n types.StructField('filming_location', types.StringType()),\n types.StructField('director', types.StringType()),\n types.StructField('cast_member', types.StringType()),\n types.StructField('series', types.StringType()),\n types.StructField('publication_date', types.StringType()),\n types.StructField('based_on', types.StringType()),\n types.StructField('country_of_origin', types.StringType()),\n types.StructField('original_language', types.StringType()),\n types.StructField('made_profit', types.StringType()),\n\n])\n\n\ndef main():\n in_directory = sys.argv[1]\n out_directory = sys.argv[2]\n 
# 1. read the input direcotry of .csv.gz files\n #weather = spark.read.csv(in_directory, schema=observation_schema)\n wiki_df = spark.read.json(in_directory, schema=schema)\n wiki_df = wiki_df.select(wiki_df['label'],wiki_df['rotten_tomatoes_id'],wiki_df['enwiki_title'],wiki_df['series'], wiki_df['publication_date'] )\n #wiki_df.show()\n\n # filter movies not in series\n\n wiki_df = wiki_df.filter(wiki_df.series. isNotNull())\n #wiki_df.show()\n\n #write to json ( only one json output file!!!!)\n wiki_df.coalesce(1).write.format('json').save('cleaned_data',mode= 'overwrite')\n\n #read from json in pandas dataframe (easier to work with since series data is smaller)\n print(os.listdir(path='cleaned_data'))\n folder_list = os.listdir(path='cleaned_data')\n\n #find the right json file in the folder\n r = re.compile(r'json$')\n newlist = list(filter(r.search, folder_list)) # Read Note\n print(newlist)\n #print(the_json_file)\n series_df = pd.read_json('cleaned_data/'+newlist[0], lines=True)\n\n\n grouped_series = series_df.groupby('series')\n #grouped_series = grouped_series.filter(lambda x: x['label'].count()<=1)\n print(grouped_series.first())\n\n #convert publication date into datetime objects\n compare_ratings_df = pd.DataFrame(columns= ['movie_name1', 'rotten_id1','rotten_rating1','movie_name2', 'rotten_id2','rotten_rating2'])\n for serie, serie_df in grouped_series:\n #print(serie)\n serie_df['publication_data'] = pd.to_datetime(serie_df['publication_date'],format='%Y-%m-%d')\n #print(series_df.select_dtypes(include=[np.datetime64]))\n serie_df = serie_df.sort_values(by='publication_date')\n #print(serie_df)\n #take each group. take first and second. insert into new dataframe\n if serie_df['label'].count() >=2 :\n # find oldest and second oldest and put ratings in dataframe\n #make np array of all the releveant data\n np_array = np.array([serie_df.iloc[0]['enwiki_title'], serie_df.iloc[0]['rotten_tomatoes_id'],0,serie_df.iloc[1]['enwiki_title'], serie_df.iloc[1]['rotten_tomatoes_id'],0])\n #convert to series\n series_instance = pd.Series(np_array, index=['movie_name1', 'rotten_id1','rotten_rating1','movie_name2', 'rotten_id2','rotten_rating2'])\n # add the series row to the dataframe\n compare_ratings_df = compare_ratings_df.append(series_instance,ignore_index=True)\n\n\n\n #find ratings assocated to each movie\n rotten_df = pd.read_json('wiki-copy-1/rotten-tomatoes.json', lines=True)\n rotten_df = rotten_df.drop(['audience_average', 'audience_ratings','critic_average','critic_percent','imdb_id'], axis=1)\n #create series for ratings1 and ratings2. keeping appending it. 
then insert into DataFrame\n ratings1_np = np.array([])\n ratings2_np = np.array([])\n\n for index,row in compare_ratings_df.iterrows():\n #for first movie\n id1 = row[1]\n rotten_row = rotten_df.loc[rotten_df['rotten_tomatoes_id'] == id1]\n a_rating1 = rotten_row['audience_percent']\n ratings1_np = np.append(ratings1_np, a_rating1, axis=0)\n\n #for second movie\n id2 = row[4]\n rotten_row2 = rotten_df.loc[rotten_df['rotten_tomatoes_id'] == id2]\n a_rating2 = rotten_row2['audience_percent']\n ratings2_np = np.append(ratings2_np, a_rating2, axis=0)\n\n\n\n compare_ratings_df['rotten_rating1'] = pd.Series(ratings1_np)\n compare_ratings_df['rotten_rating2'] = pd.Series(ratings2_np)\n\n compare_ratings_df.dropna(inplace=True, axis='rows')\n\n #print(compare_ratings_df)\n compare_ratings_df.to_csv (r'readyForAnalysis.csv', index = None, header=True) #Don't forget to add '.csv' at the end of the path\n\n '''\n # Attempt to Normalize the data\n\n print(\"normal test rotten_rating1\")\n print(stats.normaltest(compare_ratings_df['rotten_rating1']).pvalue)\n compare_ratings_df['rotten_rating1']= compare_ratings_df['rotten_rating1']**2\n fig, ax = plt.subplots()\n\n ax.hist(compare_ratings_df['rotten_rating1'], 30, density=4)\n\n plt.show()\n '''\n print(\"\\n\\n\\n\\n\\n\\n\\n\\nAnalysis:\\n\")\n print(\"\\nLevene's Test\")\n print(\"H0 of Levene's Test: two samples have equal variance. If p>0.05 proceed as if equal variance\\np-value is:\\n\")\n print(stats.levene(compare_ratings_df['rotten_rating1'], compare_ratings_df['rotten_rating2']).pvalue)\n print(\"The two data sets have close enough variance for t-test\\n\\n\\n\")\n\n\n print(\"Normal Test: if p > 0.05 then proceed as if normal\")\n print(\"p value of normal test on rotten_rating1\\np-value is:\")\n print(stats.normaltest(compare_ratings_df['rotten_rating1']).pvalue)\n print(\"p value of normal test on rotten_rating2\\np-value is:\")\n print(stats.normaltest(compare_ratings_df['rotten_rating2']).pvalue)\n\n print(\"Data not normal, therefore use Mann-Whitney U test:\\n\")\n print(\"\\nNull hypothesis = first movie in series recieved lower than or equal ratings to second movie in series, \\n alternative hypothesis: first movie recieved ratings greater than second movie in series\")\n print(\"p-value of Mann-Whitney U test:\")\n print(stats.mannwhitneyu(compare_ratings_df['rotten_rating1'], compare_ratings_df['rotten_rating2'], alternative='greater').pvalue)\n print(\"P-value is less than 0.05 level of signifigance, therefore reject the null and accetp the alternative hypothesis\")\n # VISUALIZTION\n legend = ['First Movie', 'Sequel']\n first_movie = compare_ratings_df['rotten_rating1']\n sequel = compare_ratings_df['rotten_rating2']\n plt.hist([first_movie, sequel], color=['blue', 'red'])\n plt.xlabel(\"Rating Scores\")\n plt.ylabel(\"Frequency\")\n plt.legend(legend)\n plt.xticks(np.arange(0, 110, step=10))\n plt.yticks(np.arange(0, 36, step=2))\n plt.title('Part 1:\\n Ratings of First Movie vs Second Movie in Series')\n #plt.show()\n plt.savefig('firstmovie_vs_sequel.png')\n\n\nif __name__=='__main__':\n main()\n","repo_name":"sidharthmiglani/Data-Science","sub_path":"part1/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":8250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26888291705","text":"# ==== PATHS ===================\r\n\r\nPATH_TO_DATASET = \"titanic.csv\"\r\nOUTPUT_SCALER_PATH = 'scaler.pkl'\r\nENCODER_PATH = 'encoder.pkl'\r\nOUTPUT_MODEL_PATH = 
'logistic_regression.pkl'\r\n\r\n\r\n# ======= PARAMETERS ===============\r\n\r\n# imputation parameters\r\nIMPUTATION_DICT = \\\r\n {'age': 28.0,\r\n 'fare': 14.4542,\r\n 'sex': 'Missing',\r\n 'cabin': 'Missing',\r\n 'embarked': 'Missing',\r\n 'title': 'Missing'}\r\n\r\n\r\n# encoding parameters\r\nFREQUENT_LABELS = \\\r\n {'sex': ['female', 'male'],\r\n 'cabin': ['C', 'Missing'],\r\n 'embarked': ['C', 'Q', 'S'],\r\n 'title': ['Miss', 'Mr', 'Mrs']}\r\n\r\nDUMMY_VARIABLES = \\\r\n ['sex_male', 'cabin_Missing', 'cabin_Rare',\r\n 'embarked_S', 'embarked_C', 'embarked_Q',\r\n 'title_Mr', 'title_Miss', 'title_Mrs']\r\n\r\n\r\n# ======= FEATURE GROUPS =============\r\n\r\nTARGET = 'survived'\r\n\r\nCATEGORICAL_VARS = ['sex', 'cabin', 'embarked', 'title']\r\n\r\nNUMERICAL_TO_IMPUTE = ['age', 'fare']","repo_name":"luanmisaelmoura/assignment_deploy_ml_models_course","sub_path":"Procedural_Programming_Assignment/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33286367941","text":"from collections import deque\n\n# def solution(m, n, puddles):\n# graph = [[1] * n for x in range(m)]\n \n# for puddle in puddles:\n# x = puddle[0] - 1\n# y = puddle[1] - 1\n# graph[x][y] = 0\n\n# queue = deque()\n# queue.append([0,0])\n\n# dx = [0, 1]\n# dy = [1, 0]\n# # 아래, 오른\n# answer = 0\n\n# while queue:\n# tmp = queue.popleft()\n# x = tmp[0]\n# y = tmp[1]\n\n# for i in range(2):\n# nx = dx[i] + x\n# ny = dy[i] + y\n# if nx < 0 or nx >= m or ny < 0 or ny >= n:\n# continue\n# if graph[nx][ny] == 0:\n# continue\n# queue.append([nx, ny])\n# if nx == m - 1 and ny == n - 1:\n# answer += 1\n \n# r = 1000000007\n# answer %= r\n# return answer\n\n\ndef solution(m, n, puddles):\n answer = 0\n arr = [[0] * (m+1) for _ in range(n+1)]\n arr[1][1] = 1\n \n \n for i in range(1, n+1):\n for j in range(1, m+1):\n if [j, i] in puddles:\n continue\n if [i, j] == [1,1]:\n continue\n \n arr[i][j] = arr[i-1][j] + arr[i][j-1]\n return arr[-1][-1] % 1000000007\n \n\n\nprint(solution(4, 3, [[2,2]] ) )\n","repo_name":"chulhee23/today_ps","sub_path":"programmers/dynamic_programming/등굣길.py","file_name":"등굣길.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"2909271295","text":"#!/usr/bin/python3\n'''Function that multiplies 2 matrix\nMatrix should be of integers or floats\nReturns the new matrix\n\n'''\n\n\ndef matrix_mul(m_a, m_b):\n ''' This func raise Type errors\n checks if the matrix can be multiplied\n returns new matrix\n '''\n if type(m_a) != list:\n raise TypeError('m_a must be a list')\n else:\n for i in m_a:\n if type(i) != list:\n raise TypeError('m_a must be a list of lists')\n\n if type(m_b) != list:\n raise TypeError('m_b must be a list')\n else:\n for i in m_b:\n if type(i) != list:\n raise TypeError('m_b must be a list of lists')\n if len(m_a) == 0:\n raise ValueError(\"m_a can't be empty\")\n if len(m_b) == 0:\n raise ValueError(\"m_b can't be empty\")\n\n for i in m_a:\n if len(i) == 0:\n raise ValueError(\"m_a can't be empty\")\n\n for i in m_b:\n if len(i) == 0:\n raise ValueError(\"m_b can't be empty\")\n\n for i in m_a:\n for i_1 in i:\n if type(i_1) != int and type(i_1) != float:\n raise TypeError('m_a should contain only integers or floats')\n\n for j in m_b:\n for j_1 in j:\n if type(j_1) != int and type(j_1) != float:\n raise TypeError('m_b should contain only integers or 
floats')\n\n size_m_1 = len(m_a[0])\n size_m_2 = len(m_b[0])\n\n for i in m_a:\n if len(i) != size_m_1:\n raise TypeError('each row of m_a must be of the same size')\n for j in m_b:\n if len(j) != size_m_2:\n raise TypeError('each row of m_b must be of the same size')\n if len(m_a[0]) != len(m_b):\n raise ValueError(\"m_a and m_b can't be multiplied\")\n\n rows = len(m_a)\n columns = len(m_b[0])\n new_matrix = []\n for i in range(rows):\n new_matrix.append([0] * columns)\n for i in range(len(m_a)):\n for j in range(len(m_b[0])):\n for x in range(len(m_b)):\n new_matrix[i][j] = new_matrix[i][j] + (m_a[i][x] * m_b[x][j])\n return(new_matrix)\n","repo_name":"josecaro02/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/100-matrix_mul.py","file_name":"100-matrix_mul.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10793556339","text":"\nimport fire\n\n\n\nfrom core.xt_fetch import FetchTests\n\n\nclass XT:\n \n \n def __init__(self):\n self.FetchTests = FetchTests(\"BDDTests\",\"0*.py\",\"/Users/abk/dev/python/ofc/bit/sb-automation-py-behave/BDDFramework/steps\",\"step.*\")\n \n def GetTests(self):\n self.FetchTests.fetch_test_functions()\n self.FetchTests.save_in_xl(\"TestFunctions.xlsx\")\n \nif __name__==\"__main__\":\n fire.Fire(XT)\n ","repo_name":"abkabhishek/Xtracker","sub_path":"Xtracker/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30497136529","text":"#ex4 Declarați o variabila cu șirul: “Ananas”. Afișati șirul în următoarele feluri pe ecran: -A\n#n a n a s\n#- Ana nas\n#- An:ana:s\n#- Ana_na_s\n#- nananananananana\n\na=\"Ananas\"\n'''\nfor i in a:\n print(i)\n\n'''\n\nprint(a[0],a[1],a[2],a[3],a[4],a[5], sep=\"\\n\")\n\nb=a[:len(a)//2]\nc=a[len(a)//2:]\nprint(b,c, sep=\"\\n\")\n\nd=a[:2]\ne=a[2:5]\nf=a[5:6]\ng=a[:3]\nh=a[3:5]\nprint(d,e,f, sep=\":\")\nprint(g,h,f, sep=\"_\")\nprint(h*8)\n","repo_name":"Rebecamty/Teme-Py","sub_path":"Cap 2 Tema ex4.py","file_name":"Cap 2 Tema ex4.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35126077768","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as st\nfrom scipy.stats import beta\n\nfrom scipy.integrate import quad\nimport ghalton\n\nimport time\nimport datetime\nimport argparse\nimport pickle\n\n# Information Theory Functions ------------------------------------------------------------------\n\n# KL-divergence\ndef KLdivergence(p,q):\n # Takes two probability numbers and returns a number\n \n return p*np.log(p/q) + (1-p)*np.log((1-p)/(1-q))\n''' \n# Probability distribution over max probability; treated as distribution\nclass Rhomax_dist(st.rv_continuous):\n def _pdf(self, x, a1,a2,b1,b2):\n return beta.pdf(x,a1,b1)*beta.cdf(x,a2,b2)+beta.pdf(x,a2,b2)*beta.cdf(x,a1,b1)\n''' \n \n# Probability distribution over max probability\ndef rhomax_integrand(x, a1, a2, b1, b2):\n # Takes numbers and returns a prob. 
number\n \n pdf1 = beta.pdf(x,a1,b1)\n pdf2 = beta.pdf(x,a2,b2)\n cdf1 = beta.cdf(x,a1,b1)\n cdf2 = beta.cdf(x,a2,b2)\n \n rho = pdf1 * cdf2 + pdf2 * cdf1\n \n integrand = -rho * np.log( rho )\n return integrand\n\n'''\n# Vectorized version of above (probably not useful)\ndef Rhomax_vectorized(x, beta_params):\n # Takes full beta_params and returns an array (over batch)\n \n a1 = beta_params[:,0,0] # alpha for arm 0\n b1 = beta_params[:,1,0] # beta for arm 0\n a2 = beta_params[:,0,1] # alpha for arm 1\n b2 = beta_params[:,1,1] # beta for arm 1\n \n pdf1 = beta.pdf(x,a1,b1)\n pdf2 = beta.pdf(x,a2,b2)\n cdf1 = beta.cdf(x,a1,b1)\n cdf2 = beta.cdf(x,a2,b2)\n \n rho = pdf1 * cdf2 + pdf2 * cdf1\n \n return rho\n'''\n\n# Posterior distribution over outcome given posterior over probs.\ndef Prob(x, a, b):\n # Takes x = 0 or 1 and a,b = numbers; returns a prob. number\n \n '''\n if x == 1: # Success\n integrand = lambda p: beta.pdf(p,a,b) * p\n elif x == 0:\n integrand = lambda p: beta.pdf(p,a,b) * (1-p)\n \n integral = quad(integrand, 0, 1)[0]\n return integral\n '''\n \n if x == 1: # Success\n return a/(a+b)\n elif x == 0:\n return b/(a+b)\n \n# Differential entropy over max probability\ndef Entropy_int(a1, a2, b1, b2):\n # Takes numbers and returns a number\n \n integrand = lambda p: rhomax_integrand(p, a1,a2,b1,b2) \n integral = quad(integrand, 0, 1)[0] \n return integral\n\n\n# Estimate entropy via sampling\nseq = ghalton.Halton(1)\ndef Entropy_est(a1, a2, b1, b2, samples=100):\n seq.reset()\n x = seq.get(samples)\n x = np.reshape(x, samples)\n integral = np.mean( rhomax_integrand(x,a1,a2,b1,b2) )\n return integral\n\n\n# Main function ---------------------------------------------------------------------------------\n\ndef main(args):\n # Input unpacking\n p1 = args.p1 # Success probability of the first arm\n p2 = args.p2 # Success probability of the second arm\n n = args.n # Total number of plays\n N = args.N # Batch/replicas (for averaging)\n n_rec = args.n_rec # Record every n plays\n algo = args.algo # 'Thompson', 'Infomax'\n seed = args.seed # Random seed\n verbose = args.verbose # Print messages (time)\n \n # Set random seed\n np.random.seed(seed)\n \n # Input representation\n p = np.array([p1,p2]) # Prob. 
of arms\n \n # Output initialization\n curr_cum_reward = np.zeros(N)\n curr_cum_subopt = np.zeros(N) # Number of times you pull suboptimal arm\n cum_reward = np.zeros([N,int(n/n_rec)]) # Record cumulative reward at each step\n cum_subopt = np.zeros([N,int(n/n_rec)]) # Record number of suboptimal plays\n plays = np.zeros(int(n/n_rec))\n \n # Setup\n if algo == 'Thompson':\n # Initialize beta distribution parameters\n # beta_params_(batch, {alpha,beta}, arm)\n # 0.5 = uninformative prior, 1 = uniform prior\n beta_params = 1*np.ones([N,2,2]) # [[[a1,a2],[b1,b2]], ...]\n elif algo in ['Infomax', 'Infomax_est']:\n beta_params = 1*np.ones([N,2,2])\n if algo == 'Infomax':\n Entropy = Entropy_int\n elif algo == 'Infomax_est':\n Entropy = Entropy_est\n \n # Simulation\n t_start = time.time()\n t1 = time.time()\n for t in range(n):\n \n # Choose arm\n if algo == 'Thompson':\n theta = np.random.beta(beta_params[:,0,:],beta_params[:,1,:]) # [batch, arm]\n action = np.argmax(theta,1) # Choose the arm corresponding to the optimal draw (N-array)\n elif algo in ['Infomax', 'Infomax_est']:\n action = np.zeros(N, dtype = np.int8)\n for i in range(N): # Loop through batch\n a1, b1 = beta_params[i,:,0] # Arm 0\n a2, b2 = beta_params[i,:,1] # Arm 1\n \n # Original entropy (don't need for comparing difference)\n #Entropy0 = Entropy(a1, a2, b1, b2)\n \n # Arm 0\n # Difference in entropy when 0 (failure) observed\n delH0_0 = Entropy(a1, a2, b1 + 1, b2) #- Entropy0\n # Difference in entropy when 1 (success) observed\n delH0_1 = Entropy(a1 + 1, a2, b1, b2) #- Entropy0\n # Expected decrease in entropy\n dH0 = Prob(0,a1,b1)*delH0_0 + Prob(1,a1,b1)*delH0_1\n \n # Arm 1\n # Difference in entropy when 0 (failure) observed\n delH1_0 = Entropy(a1, a2, b1, b2 + 1) #- Entropy0\n # Difference in entropy when 1 (success) observed\n delH1_1 = Entropy(a1, a2 + 1, b1, b2) #- Entropy0\n # Expected decrease in entropy\n dH1 = Prob(0,a2,b2)*delH1_0 + Prob(1,a2,b2)*delH1_1\n \n action[i] = int(dH0 > dH1) # pick action that decreases H more (i.e. 
dH more negative)\n \n # Record suboptimal plays\n curr_cum_subopt += action\n if t % n_rec == 0:\n cum_subopt[:,int(t/n_rec)] = curr_cum_subopt\n #if t > 0:\n # cum_subopt[:,t] = cum_subopt[:,t-1]\n #cum_subopt[:,t] += action\n\n # Play the arm\n chance = np.random.random(N) # Random number between 0 and 1\n r = (p[action] > chance).astype(int) # Get reward\n \n # Keep track of cumulative rewards\n curr_cum_reward += r\n # Record cumulative rewards\n if t % n_rec == 0:\n cum_reward[:,int(t/n_rec)] = curr_cum_reward\n plays[int(t/n_rec)] = t+1\n \n # Update parameters\n if algo in ['Thompson', 'Infomax', 'Infomax_est']: # Same posterior updates for both\n # Update a\n beta_params[:,0,:][np.arange(N),action] += r\n # Update b\n beta_params[:,1,:][np.arange(N),action] += 1-r\n \n # Time\n if verbose:\n if (t+1) % int(n/10) == 0:\n t2 = time.time()\n print('Runtime for plays ' + str(t-(int(n/10)-1)) + ' - ' + str(t) + ': ' + str(t2-t1) + ' s')\n t1 = t2\n \n t_end = time.time()\n \n output = {'cum_reward':cum_reward, 'cum_subopt':cum_subopt, 'beta_params':beta_params, 'plays': plays}\n\n if verbose:\n print('Total runtime: ' + str(t_end-t_start) + ' s')\n print(\"Cumulative Reward:\", cum_reward[0][-1])\n \n return output\n \n# Plotting functions ----------------------------------------------------------------------------\n \n# Plot learned beta distribution\ndef plot_beta(x, a1, a2, b1, b2, p1, p2, n, name, savefig):\n # ai, bi's = beta distribution parameters for each arm\n \n plt.figure()\n beta1 = beta.pdf(x, a1, b1)\n beta2 = beta.pdf(x, a2, b2)\n plt.plot(x, beta1, 'r', linewidth=3, label='Superior arm')\n plt.plot(x, beta2, 'b', linewidth=3, label='Inferior arm')\n plt.axvline(x=p1,color='r', linewidth=1, linestyle='--')\n plt.axvline(x=p2,color='b', linewidth=1, linestyle='--')\n plt.legend(loc='best')\n plt.xlabel(r'$\\theta$')\n plt.ylabel(r'$p(\\theta)$')\n plt.title('Learned beta distributions for each arm after ' + str(n) + ' plays')\n if savefig:\n plt.savefig('beta_distribution_' + name + '.png')\n plt.close()\n else:\n plt.show()\n \n# Plot regret\ndef plot_regret(plays, c_mean, p1, p2, N, name, savefig, savedata, algo):\n plt.figure()\n # Optimal expected number of suboptimal plays (Lai-Robbins lower bound)\n LB = np.log(plays)/KLdivergence(p1,p2)\n # Optimal regret\n regret_opt = np.maximum(LB * (p1 - p2)-20, np.zeros(LB.shape))\n # Actual regret\n regret_act = c_mean * (p1 - p2)\n plt.plot(plays, regret_opt, 'k', linewidth=3, label='Lai-Robbins bound slope')\n plt.plot(plays, regret_act, 'r', linewidth=3, label=algo)\n plt.legend(loc='best')\n plt.gca().set_xscale('log',basex=10)\n plt.ylim([0, np.ceil(np.amax(regret_act)) ]) # Adaptive max of graph\n plt.xlabel('Total number of plays')\n plt.ylabel(r'Regret $(p_1-p_2)$')\n plt.title('Regret averaged over ' + str(N) + ' replicas')\n if savedata:\n savePath = 'Save/'\n save_time = datetime.datetime.today().strftime('%Y-%m-%d_%H:%M')\n pickle_name = save_time + '.pkl'\n with open(savePath + pickle_name, 'wb') as f:\n pickle.dump(regret_act, f)\n if savefig:\n plt.savefig('regret_' + name + '.png')\n plt.close()\n else:\n plt.show()\n\nif __name__ == \"__main__\":\n # Parse inputs ------------------------------------------------------------------------------\n # Example command: python bandit.py --n 100 --algo Infomax --verbose 1\n parser = argparse.ArgumentParser(description='2-armed bandit')\n \n parser.add_argument(\"--p1\", default=0.9, type=float, help=\"Success probability of the superior arm\")\n 
parser.add_argument(\"--p2\", default=0.8, type=float, help=\"Success probability of the inferior arm\")\n parser.add_argument(\"--n\", default=1000, type=int, help=\"Total number of plays\")\n parser.add_argument(\"--N\", default=1, type=int, help=\"Replicas (For averaging)\")\n parser.add_argument(\"--n_rec\", default=1, type=int, help=\"Record every n plays\")\n parser.add_argument(\"--algo\", default='Thompson', type=str, help=\"Decision algorithm; 'Thompson', 'Infomax'\")\n parser.add_argument(\"--seed\", default=111, type=int, help=\"Random seed\")\n parser.add_argument(\"--savefig\", default=0, type=int, help=\"Save figures\")\n parser.add_argument(\"--savedata\", default=0, type=int, help=\"Save regret data\")\n parser.add_argument(\"--verbose\", default=0, type=int, help=\"Print messages\")\n \n args = parser.parse_args()\n\n # Run main function -------------------------------------------------------------------------\n output = main(args)\n \n # Save output -------------------------------------------------------------------------------\n\n name = 'p1=' + str(args.p1) + \\\n '_p2=' + str(args.p2) + \\\n '_n=' + str(args.n) + \\\n '_N=' + str(args.N) + \\\n '_n_rec=' + str(args.n_rec) + \\\n '_algo=' + str(args.algo) + \\\n '_seed=' + str(args.seed)\n \n #pickle.dump(output, open(name + '.pickle',\"wb\"))\n \n # Extract outputs ---------------------------------------------------------------------------\n a = output['cum_reward']\n b = output['beta_params']\n c = output['cum_subopt']\n plays = output['plays']\n c_mean = np.mean(c,0) # Take average over batches\n\n # Plot beta distribution --------------------------------------------------------------------\n x = np.linspace(0,1,1000) # Parameter space [0,1]\n plot_beta(x,b[0,0,0],b[0,0,1],b[0,1,0],b[0,1,1],args.p1,args.p2,args.n,name,args.savefig)\n\n # Plot regret -------------------------------------------------------------------------------\n plot_regret(plays,c_mean,args.p1,args.p2,args.N,name,args.savefig, args.savedata, args.algo)\n","repo_name":"hjjimmykim/SchwabBandit","sub_path":"bandit.py","file_name":"bandit.py","file_ext":"py","file_size_in_byte":11934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21747161021","text":"\"\"\" 1부터 100까지 임의의 수를 생성하고 생성된 임의의 수를 맞추는 게임 프로그램\n숫자를 하나 입력하면 임의로 생성된 수보다 높은지 낮은지 정답인지 알려준다.\n정답을 맞힌 경우 정답을 몇 번 만에 맞추었는지 그 결과로 게임의 승부를 알 수 있다. \"\"\"\n\nimport random\n\nrandom_num = random.randint(1,100)\n\n#print(random_num)\n\ncnt = 1\n\nwhile True:\n try:\n my_num = int(input('1부터 100 사이의 숫자를 입력하세요.:'))\n if my_num > random_num:\n print('다운')\n elif my_num < random_num:\n print('업')\n else:\n print(f'축하합니다. {cnt}회 만에 맞추었습니다.')\n break\n cnt += 1\n except:\n print('에러가 발생했습니다. 
숫자를 입력하세요')","repo_name":"shinlama/Toy_project","sub_path":"숫자 맞추기 게임 만들기/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36235541475","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 13 17:03:05 2022\r\n@author: mishrai\r\nLangmuir Probe Analysis\r\nEP490: Introduction to Plasma Physics\r\nRose-Hulman Institute of Technology\r\n\"\"\"\r\n#-----------------------references-----------------------#\r\n'''\r\nhttps://www.seas.ucla.edu/~ffchen/Publs/Chen210R.pdf\r\nhttps://davidpace.com/example-of-langmuir-probe-analysis/\r\nhttps://advlabs.aapt.org/wiki/File%3A4409\r\n'''\r\n#-----------------------initializing-----------------------#\r\nimport matplotlib.pyplot as pl\r\n\r\n#Te=3.57#eV\r\n#Te_hot=7.69#eV\r\n#Vf=-38.97#V\r\nIsat=-0.000129#A\r\n\r\nme=9.109e-31#kg\r\ne=1.602e-19#C\r\nk=1.38e-23#J/K\r\nM=6.62e-26#kg: Mass of Argon atom\r\nA=0.738e-4#m^2: Area of probe\r\n\r\n\r\n'''\r\nNotes:\r\n L2[i][0] is probe bias (V)\r\n L2[i][1] is probe current (A)\r\n'''\r\n#-----------------------functions-----------------------#\r\ndef initial_dataset_reading():\r\n with open(\"sampleIV.txt\",\"r\") as f:\r\n a=f.read()\r\n L=[]\r\n L2=[[-66.40,-0.0001290]]\r\n for i in a: \r\n L=a.split(\",\")\r\n \r\n for i in range(1,len(L)-1):\r\n l=L[i][2:len(L[i])-3]\r\n l=l.split('\\t')\r\n for j in range(len(l)): \r\n l[j]=float(l[j])\r\n if l not in L2:\r\n L2.append(l)\r\n L2=lol_sort(L2)\r\n with open(\"sampleIV_optimized.txt\",'w') as f: \r\n for i in range(len(L2)): \r\n tl=''\r\n tl+=str(L2[i][0])+'\\t'+str((L2[i][1]+Isat))+'\\n'\r\n f.write(tl)\r\n \r\n print(\"Number of data points collected by Langmuir probe: \", len(L)-1)\r\n return(L2)\r\n\r\ndef optimized_dataset_reading():\r\n with open(\"sampleIV_optimized.txt\",\"r\") as f:\r\n a=f.read()\r\n L=a.split(\"\\n\")\r\n del L[len(L)-1]\r\n for i in range(len(L)): \r\n L[i]=L[i].split('\\t')\r\n L[i][0]=float(L[i][0])\r\n L[i][1]=float(L[i][1])\r\n print(\"Number of unique data points: \",len(L))\r\n return L\r\n\r\ndef lol_sort(L):\r\n for i in range(len(L)):\r\n for j in range(len(L)-1-i):\r\n if L[j][0]>L[j+1][0]:\r\n L[j],L[j+1]=L[j+1],L[j]\r\n\r\n return(L)\r\n\r\ndef average(L,N):\r\n pass\r\n\r\ndef plotIV_lin(L):\r\n X=[]\r\n Y=[]\r\n for i in L: \r\n X.append(i[0])\r\n Y.append(i[1])\r\n pl.plot(X,Y)\r\n #pl.yscale('log')\r\n pl.xlabel('Probe bias (V)')\r\n pl.ylabel('Probe current (A)')\r\n pl.show()\r\n #pl.savefig('IV_linear')\r\n\r\ndef plotIV_log(L):\r\n X=[]\r\n Y=[]\r\n for i in L: \r\n X.append(i[0])\r\n Y.append(i[1])\r\n pl.plot(X,Y)\r\n\r\n\r\n pl.yscale('log')\r\n pl.xlabel('Probe bias (V)')\r\n pl.ylabel('Electron current (A)')\r\n pl.savefig('IV_log')\r\n pl.show()\r\n \r\ndef derivative(L): \r\n L2=[L[0],]\r\n for i in range(1,len(L)):\r\n for j in range(len(L2)):\r\n if L[i][0]==L2[j][0]:\r\n break\r\n else: \r\n L2.append(L[i])\r\n print(len(L2))\r\n \r\n L=L2\r\n dL=[]\r\n #E0=abs(phi_p-L[0])\r\n dL.append([L[0][0],(L[1][1]-L[0][1])/(L[1][0]-L[0][0])])\r\n \r\n for i in range(1,len(L)-1):\r\n dL.append([L[i][0],(L[i+1][1]-L[i-1][1])/(L[i+1][0]-L[i-1][0])])\r\n \r\n dL.append([L[len(L)-1][0],(L[len(L)-1][1]-L[len(L)-2][1])/(L[len(L)-1][0]-L[len(L)-2][0])])\r\n \r\n \r\n #Uncomment this if you want to show and ave dI/dV plot\r\n X=[]\r\n Y=[]\r\n for i in dL: \r\n X.append(i[0])\r\n Y.append(i[1])\r\n pl.plot(X,Y)\r\n\r\n\r\n #pl.yscale('log')\r\n pl.xlabel('Probe bias (V)')\r\n 
pl.ylabel('d2Ie/dV2')\r\n pl.savefig('d2IdV2')\r\n pl.show()\r\n print(dL) \r\n return(dL)\r\n\r\ndef EEDF(L):\r\n for i in range(len(L)):\r\n L[i][0]=abs(phi_p-L[i][0])\r\n L[i][1]=abs(2/(2*A*e)*(2*me*L[i][0]/e)**(1/2)*L[i][1])\r\n \r\n print(L)\r\n L=lol_sort(L)\r\n edf=[]\r\n for i in range(0,int(50/1)):\r\n edf.append([i*1,0])\r\n for i in L:\r\n sl=int(i[0]//1)\r\n edf[sl][1]+=i[1]\r\n \r\n \r\n \r\n \r\n \r\n X=[]\r\n Y=[]\r\n for i in edf: \r\n X.append(i[0])\r\n Y.append(i[1])\r\n pl.plot(X,Y)\r\n\r\n\r\n #pl.yscale('log')\r\n pl.xlabel('Electron energy (eV)')\r\n pl.ylabel('EEDF (eV^-1)')\r\n pl.savefig('EEDF')\r\n pl.show()\r\n \r\n#-----------------------main code-----------------------#\r\n\r\n#L=initial_dataset_reading() #<< execute this if running code for the first time\r\nL=optimized_dataset_reading()\r\n\r\nplotIV_log(L)\r\n\r\nphi_p=-17.19#V\r\nprint('The plasma potential is', phi_p, 'V')\r\n#phi_f=(-38.4-36.4-36-33.6-32.8-32.4)/6#V\r\nphi_f=-34.93#V\r\nprint('The floating potential is',phi_f,'V')\r\n\r\n#Te=(-17.31+23.85)/(-2.5+3.7)#eV\r\nTe=5.45\r\nprint(\"The electron temperature is\",Te,\"eV\")\r\n\r\nIsat=-0.000129#A\r\nEEDF(derivative(derivative(L)))\r\n\r\n#--------------------------------------------------------#\r\n\r\n\r\n\r\n","repo_name":"IshaanMishra997/Langmuir-Probe-Analysis","sub_path":"langmuir_probe_analysis.py","file_name":"langmuir_probe_analysis.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5970881404","text":"\n# IMPORT GENERAL FUNCTIONS\n\n# utility \nimport json\nimport time\nimport sys\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\n\n# model \nimport argparse\nimport torch\nimport optuna\n\n# output analysis \nfrom sklearn import metrics\nfrom sklearn.metrics import matthews_corrcoef, confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\n\n# explainability\n# import explainability\nimport my_explainability as explainability\n\n# IMPORT USER-DEFINED FUNCTIONS\nfrom data import DataFetch, GraphData, get_batch_and_loader\nfrom model import GNN\nfrom utils import EarlyStopping, WeightedBCELoss, get_classification_threshold_auc, get_classification_threshold_precision_recall, brier_skill_score, calculate_metrics, enable_dropout, plot_losses\n\n# DEFINE EXTRA FUNCTIONS\n\ndef get_model_output(model, data_batch, params):\n \n x = data_batch.x.to(params['device'])\n y = data_batch.y.unsqueeze(1).to(params['device'])\n edge_index = data_batch.edge_index.to(params['device'])\n edge_weight = data_batch.edge_attr.to(params['device'])\n batch = data_batch.batch.to(params['device'])\n target_index = data_batch.target_index.to(params['device'])\n\n # look in forward() in model.py for model architecture\n if params['use_edge']=='True':\n output = model(x, edge_index, edge_weight, batch, target_index)\n else:\n output = model(x, edge_index, None, batch, target_index)\n model_output = {'output':output}\n\n return model_output, y\n\n\nactivation = {}\ndef get_activation(name):\n def hook(model, input, output):\n activation[name] = output.detach()\n return hook\n\n\n\n\n# Define the objective tuning function for Optuna\ndef hyperparameter_tuning(trial, train_loader, validate_loader, params):\n\n learning_rate = 0.0001\n self_loops = trial.suggest_categorical(\"self_loops\", [True, False])\n ratio = trial.suggest_float('ratio', 0.1, 0.9, step=0.1)\n gnn_layer = trial.suggest_categorical(\"gnn_layer\", [\"gcn\", \"graphconv\" , 
\"gat\"])\n pooling_method = trial.suggest_categorical(\"pooling_method\", [\"sum\", \"mean\",\"topkpool_sum\",\"topkpool_mean\",\"sagpool_sum\",\"sagpool_mean\"])\n dropout_rate = trial.suggest_float('dropout_rate', 0.1, 0.5, step=0.1)\n hidden_dim = trial.suggest_int('hidden_dim', 32, 512, step=32)\n hidden_dim_2 = trial.suggest_int('hidden_dim_2', 32, 512, step=32)\n hidden_layers = trial.suggest_int('hidden_layers', 1, 3, step=1)\n \n model = GNN(\n num_features_static_graph = params['num_features_static'], \n hidden_dim = hidden_dim,\n hidden_dim_2 = hidden_dim_2,\n hidden_layers = hidden_layers,\n gnn_layer = gnn_layer, \n pooling_method = pooling_method, \n dropout_rate = dropout_rate, \n ratio = ratio,\n self_loops = self_loops )\n \n model.to(params['device'])\n\n if params['loss']=='bce':\n train_criterion = torch.nn.BCEWithLogitsLoss()\n valid_criterion = torch.nn.BCEWithLogitsLoss()\n elif params['loss']=='weighted_bce':\n train_criterion = WeightedBCELoss(params['num_samples_train_dataset'], params['num_samples_train_minority_class'], params['num_samples_train_majority_class'], params['device'])\n valid_criterion = WeightedBCELoss(params['num_samples_valid_dataset'], params['num_samples_valid_minority_class'], params['num_samples_valid_majority_class'], params['device'])\n elif params['loss']=='mse':\n train_criterion = torch.nn.MSELoss(reduction='sum') \n valid_criterion = torch.nn.MSELoss(reduction='sum')\n \n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=(params['learning_rate']/10) )\n \n # evaluate model on train set\n for epoch in range(5): \n model.train()\n for train_batch in train_loader:\n output, y = get_model_output(model, train_batch, params)\n loss = train_criterion(output['output'], y) \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(\"completed epoch {}, with loss: {}\".format(epoch,torch.round(loss,2)))\n \n # evaluate on validation set\n valid_output = np.array([])\n valid_y = np.array([])\n epoch_valid_loss = []\n model.eval()\n \n for validate_batch in validate_loader:\n output, y = get_model_output(model, validate_batch, params)\n valid_output = np.concatenate((valid_output, output['output'].reshape(-1).detach().cpu().numpy()))\n valid_y = np.concatenate((valid_y, y.reshape(-1).detach().cpu().numpy()))\n loss = valid_criterion(output['output'], y) \n epoch_valid_loss.append(loss.item())\n \n # final result \n val_loss = np.mean(epoch_valid_loss)\n print('learning_rate, dropout_rate, ratio, hidden_dim, hidden_dim_2, hidden_layers, gnn_layer, self_loops, pooling_method')\n print(learning_rate, dropout_rate, ratio, hidden_dim, hidden_dim_2, hidden_layers, gnn_layer, self_loops, pooling_method)\n\n return val_loss\n\n\n\ndef train_model(model, train_loader, validate_loader, params):\n\n model.to(params['device'])\n model_path = '{}/checkpoint_{}.pt'.format(params['outpath'], params['outname'])\n early_stopping = EarlyStopping(patience=params['patience'], path=model_path)\n\n if params['loss']=='bce':\n train_criterion = torch.nn.BCEWithLogitsLoss()\n valid_criterion = torch.nn.BCEWithLogitsLoss()\n elif params['loss']=='weighted_bce':\n train_criterion = WeightedBCELoss(params['num_samples_train_dataset'], params['num_samples_train_minority_class'], params['num_samples_train_majority_class'], params['device'])\n valid_criterion = WeightedBCELoss(params['num_samples_valid_dataset'], params['num_samples_valid_minority_class'], params['num_samples_valid_majority_class'], params['device'])\n elif 
params['loss']=='mse':\n train_criterion = torch.nn.MSELoss(reduction='sum') \n valid_criterion = torch.nn.MSELoss(reduction='sum')\n \n optimizer = torch.optim.Adam(model.parameters(), lr=params['learning_rate'], weight_decay=(params['learning_rate']/10) )\n\n train_losses = []\n valid_losses = []\n separate_loss_terms = {'NN_train':[], 'NN_valid':[]}\n \n # store for calculating classification threshold on last epoch\n valid_output = np.array([])\n valid_y = np.array([])\n\n #-----------------------------------------------------\n # START TRAINING\n for epoch in range(params['max_epochs']):\n\n separate_loss_terms_epoch = {'NN_train':[],'NN_valid':[]} \n\n # evaluate model on train set\n model.train()\n epoch_train_loss = [] \n \n for train_batch in train_loader:\n output, y = get_model_output(model, train_batch, params)\n loss = train_criterion(output['output'], y) \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n epoch_train_loss.append(loss.item())\n \n print(\"completed epoch {}\".format(epoch))\n \n # evaluate on validation set\n model.eval()\n epoch_valid_loss = []\n \n for validate_batch in validate_loader:\n output, y = get_model_output(model, validate_batch, params)\n valid_output = np.concatenate((valid_output, output['output'].reshape(-1).detach().cpu().numpy()))\n valid_y = np.concatenate((valid_y, y.reshape(-1).detach().cpu().numpy()))\n \n loss = valid_criterion(output['output'], y) \n epoch_valid_loss.append(loss.item())\n \n # check if early stopping \n early_stopping(np.mean(epoch_valid_loss), model)\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n \n train_losses.append(np.mean(epoch_train_loss))\n valid_losses.append(np.mean(epoch_valid_loss))\n for term_name in separate_loss_terms:\n separate_loss_terms[term_name].append(np.mean(separate_loss_terms_epoch[term_name]))\n print(\"epoch {}\\ttrain loss : {}\\tvalidate loss : {}\".format(epoch, np.mean(epoch_train_loss), np.mean(epoch_valid_loss)))\n\n # STOP TRAINING\n #-----------------------------------------------------\n\n # load the checkpoint with the best model\n model.load_state_dict(torch.load(model_path))\n \n # plot_losses\n plot_losses(train_losses, valid_losses, '{}/{}'.format(params['outpath'], params['outname']))\n\n # use last values from validation set\n if params['threshold_opt']== 'auc' and params['loss']=='bce': \n threshold = get_classification_threshold_auc(valid_output, valid_y)\n elif params['threshold_opt']== 'precision_recall' and params['loss']=='bce':\n threshold = get_classification_threshold_precision_recall(valid_output, valid_y)\n elif params['threshold_opt']== 'auc' and params['loss']=='weighted_bce': \n threshold = get_classification_threshold_auc(valid_output, valid_y)\n elif params['threshold_opt']== 'precision_recall' and params['loss']=='weighted_bce':\n threshold = get_classification_threshold_precision_recall(valid_output, valid_y) \n elif params['loss']=='mse':\n threshold = None\n \n return model, threshold\n\ndef test_model(model, test_loader, threshold, params):\n\n num_samples = 10 # number of MC samples\n test_output = [np.array([]) for _ in range(num_samples)]\n test_y = [np.array([]) for _ in range(num_samples)]\n\n representations = pd.DataFrame()\n model.eval()\n enable_dropout(model)\n\n for sample in range(num_samples):\n counter = 0\n for test_batch in test_loader:\n output, y = get_model_output(model, test_batch, params)\n test_output[sample] = np.concatenate((test_output[sample], 
output['output'].reshape(-1).detach().cpu().numpy()))\n test_y[sample] = np.concatenate((test_y[sample], y.reshape(-1).detach().cpu().numpy()))\n counter += 1\n\n # report standard error for uncertainty\n test_output_se = np.array(test_output).std(axis=0) / np.sqrt(num_samples)\n\n # take average over all samples to get expected value\n test_output = np.array(test_output).mean(axis=0)\n test_y = np.array(test_y).mean(axis=0)\n \n if (params['loss']=='bce') or (params['loss']=='weighted_bce'):\n results = pd.DataFrame({'actual':test_y, 'pred_raw':test_output, 'pred_raw_se':test_output_se})\n results['pred_binary'] = (results['pred_raw']>threshold).astype(int)\n metric_results = calculate_metrics(test_y, results['pred_binary'], test_output)\n elif params['loss']=='mse':\n results = pd.DataFrame({'actual':test_y, 'pred_raw':test_output, 'pred_raw_se':test_output_se})\n mse = metrics.mean_squared_error(test_y, test_output)\n r2 = metrics.r2_score(test_y, test_output)\n metric_results = {'MSE':mse,'R2':r2}\n\n return results, metric_results\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # the following should be updated for each experiment\n parser.add_argument('--featfile', type=str, help='filepath for featfile csv')\n parser.add_argument('--model_type', type=str, help='one of baseline, graph, graph_no_target or explainability')\n parser.add_argument('--experiment', type=str, help='a unique name for the experiment used in output file prefixes')\n parser.add_argument('--batchsize', type=int, help='batchsize for training, recommend 500 for baselines and 250 for graphs', default=500)\n \n # the following can optionally be configured for each experiment\n parser.add_argument('--outpath', type=str, help='directory for results output', default='results')\n parser.add_argument('--statfile', type=str, help='filepath for statfile csv', default='data/statfile.csv')\n parser.add_argument('--mask_target', type=str, help='mask target patient info', default=True)\n parser.add_argument('--maskfile', type=str, help='filepath for maskfile csv', default='True')\n parser.add_argument('--edgefile', type=str, help='filepath for edgefile csv', default='data/edgefile.csv')\n parser.add_argument('--gnn_layer', type=str, help='type of gnn layer to use: gcn, graphconv, gat', default='graphconv')\n parser.add_argument('--use_edge', type=str, help='use or not edges in graph', default=True)\n parser.add_argument('--add_self_loops', type=str, help='use or not self loops in graph', default=False)\n parser.add_argument('--directed', type=str, help='use or not edges in graph', default=True)\n parser.add_argument('--pooling_method', type=str, help='type of gnn pooling method to use: target, sum, mean, topkpool_sum, topkpool_mean, sagpool_sum, sagpool_mean', default='target')\n parser.add_argument('--num_workers', type=int, help='number of workers for data loaders', default=6)\n parser.add_argument('--max_epochs', type=int, help='maximum number of training epochs if early stopping criteria not met', default=100)\n parser.add_argument('--patience', type=int, help='how many epochs to wait for early stopping after last time validation loss improved', default=8)\n parser.add_argument('--learning_rate', type=float, help='learning rate for model training', default=0.001)\n parser.add_argument('--hidden_dim', type=int, help='number of hidden dimensions in (non-LSTM) neural network layers', default=20)\n parser.add_argument('--hidden_layers', type=int, help='number of hidden layers after input layer in the 
network ', default=1)\n parser.add_argument('--loss', type=str, help='which loss function to use: bce_weighted_single, bce_weighted_sum', default='bce_weighted_single')\n parser.add_argument('--gamma', type=float, help='weight parameter on the overall NN loss (required for bce_weighted_sum loss)', default=1)\n parser.add_argument('--alpha', type=float, help='weight parameter on the target term of the loss (required for bce_weighted_sum loss)', default=1)\n parser.add_argument('--beta', type=float, help='weight parameter on the family term of the loss (required for bce_weighted_sum loss)', default=1)\n parser.add_argument('--delta', type=float, help='weight parameter on the lstm term of the loss (required for bce_weighted_sum loss, longitudinal models only)', default=1)\n parser.add_argument('--dropout_rate', type=float, help='the dropout rate in the neural networks', default=0.5)\n parser.add_argument('--threshold_opt', type=str, help='what metric to optimize when determining the classification threshold (either auc or precision_recall)', default='precision_recall')\n parser.add_argument('--ratio', type=float, help='the graph pooling ratio for node reduction methods, determining portion of nodes to retain', default=0.5)\n \n # extra parameters used for experiments presented in paper - in general these can be ignored\n parser.add_argument('--num_positive_samples', type=int, help='number of case samples from test set used in explainability analysis', default=5000)\n parser.add_argument('--explainability_mode', action='store_true', help='explainability flag for running the post-training analysis')\n parser.add_argument('--tuning_mode', action='store_true', help='hyperparameter tuning flag')\n parser.add_argument('--explainer_input', type=str, help='optional explainability input file')\n parser.add_argument('--device', type=str, help='specific device to use, e.g. 
cuda:1, if not given detects gpu or cpu automatically', default='na')\n\n args = vars(parser.parse_args())\n\n filepaths = {'maskfile':args['maskfile'],\n 'featfile':args['featfile'],\n 'statfile':args['statfile'], \n 'edgefile':args['edgefile']}\n params = {'model_type':args['model_type'],\n 'mask_target':args['mask_target'],\n 'use_edge':args['use_edge'],\n 'add_self_loops':args['add_self_loops'],\n 'directed':args['directed'],\n 'gnn_layer':args['gnn_layer'],\n 'pooling_method':args['pooling_method'],\n 'outpath':args['outpath'],\n 'outname':args['experiment'],\n 'batchsize':args['batchsize'], \n 'num_workers':args['num_workers'],\n 'max_epochs':args['max_epochs'],\n 'patience':args['patience'],\n 'learning_rate':args['learning_rate'],\n 'hidden_dim':args['hidden_dim'],\n 'hidden_layers':args['hidden_layers'],\n 'loss':args['loss'], \n 'gamma':args['gamma'], \n 'alpha':args['alpha'], \n 'beta':args['beta'], \n 'delta':args['delta'],\n 'dropout_rate':args['dropout_rate'], \n 'threshold_opt':args['threshold_opt'], \n 'ratio':args['ratio'], \n 'tuning_mode':args['tuning_mode'],\n 'explainability_mode':args['explainability_mode'], \n 'explainer_input':args['explainer_input'],\n 'device_specification':args['device'],\n 'num_positive_samples':args['num_positive_samples']}\n \n if params['device_specification'] != 'na':\n params['device'] = torch.device(params['device_specification'])\n else:\n params['device'] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n print('using the following device: {}'.format(params['device']))\n print('STARTING DATA FETCH')\n fetch_data = DataFetch(\n maskfile=filepaths['maskfile'], \n featfile=filepaths['featfile'], \n statfile=filepaths['statfile'], \n edgefile=filepaths['edgefile'], \n params=params)\n \n train_patient_list = fetch_data.train_patient_list\n params['num_batches_train'] = int(np.ceil(len(train_patient_list)/params['batchsize']))\n params['num_samples_train_dataset'] = len(fetch_data.train_patient_list)\n params['num_samples_train_minority_class'] = fetch_data.num_samples_train_minority_class\n params['num_samples_train_majority_class'] = fetch_data.num_samples_train_majority_class\n\n validate_patient_list = fetch_data.validate_patient_list\n params['num_batches_validate'] = int(np.ceil(len(validate_patient_list)/params['batchsize']))\n params['num_samples_valid_dataset'] = len(fetch_data.validate_patient_list)\n params['num_samples_valid_minority_class'] = fetch_data.num_samples_valid_minority_class\n params['num_samples_valid_majority_class'] = fetch_data.num_samples_valid_majority_class\n\n test_patient_list = fetch_data.test_patient_list\n params['num_batches_test'] = int(np.ceil(len(test_patient_list)/params['batchsize']))\n\n print('STARTING BATCH PREPARATION')\n train_dataset, train_loader = get_batch_and_loader(train_patient_list, fetch_data, params, shuffle=True)\n validate_dataset, validate_loader = get_batch_and_loader(validate_patient_list, fetch_data, params, shuffle=True)\n test_dataset, test_loader = get_batch_and_loader(test_patient_list, fetch_data, params, shuffle=False)\n params['num_features_static'] = len(fetch_data.static_features)\n \n model = GNN(\n num_features_static_graph = params['num_features_static'], \n hidden_dim = params['hidden_dim'], \n hidden_dim_2 = params['hidden_dim'],\n hidden_layers = params['hidden_layers'],\n gnn_layer = params['gnn_layer'], \n pooling_method = params['pooling_method'], \n dropout_rate = params['dropout_rate'], \n ratio = params['ratio'],\n self_loops = 
params['add_self_loops'])\n\n model_path = '{}/{}_model.pth'.format(params['outpath'], params['outname'])\n results_path = '{}/{}_results.csv'.format(params['outpath'], params['outname'])\n stats_path = '{}/{}_stats.csv'.format(params['outpath'], params['outname'])\n\n print('STARTING MODEL TRAIN/TEST') \n\n if params['explainability_mode']:\n results = pd.read_csv(results_path)\n stats = pd.read_csv(stats_path)\n threshold = float(stats[stats['name']=='threshold']['value'])\n # select graphs to explain\n if params['num_positive_samples']=='all':\n exp_patient_list = test_patient_list\n else: \n samples = explainability.sampling(results, num_positive_samples=params['num_positive_samples'], uncertainty_rate=0.8)\n exp_patient_list = test_patient_list[samples]\n \n # load one graph at a time\n params['batchsize'] = 1\n exp_dataset, exp_loader = get_batch_and_loader(exp_patient_list, fetch_data, params, shuffle=False)\n\n # free up memory no longer needed\n del fetch_data \n del train_dataset\n del validate_dataset\n del test_dataset\n\n print(\"Loading model\")\n model.load_state_dict(torch.load(model_path))\n model.to(params['device'])\n torch.backends.cudnn.enabled = False\n explainability.gnn_explainer(model, exp_loader, exp_patient_list, params, threshold)\n \n elif params['tuning_mode']:\n # free up memory no longer needed\n del fetch_data \n del train_dataset\n del validate_dataset\n del test_dataset\n \n # Create an Optuna study and optimize the objective function\n study = optuna.create_study(direction='minimize')\n study.optimize(lambda trial: hyperparameter_tuning(trial, train_loader, validate_loader, params), n_trials=50)\n \n # Get the best hyperparameters\n best_params = study.best_params\n print(\"Best Hyperparameters:\", best_params)\n\n \n else:\n # normal training model\n # free up memory no longer needed\n del fetch_data \n del train_dataset\n del validate_dataset\n del test_dataset\n\n # model training\n START = time.time()\n model, threshold = train_model(model, train_loader, validate_loader, params)\n torch.save(model.state_dict(), model_path)\n END = time.time()\n params['num_parameters'] = sum(p.numel() for p in model.parameters())\n params['threshold'] = threshold\n params['training_time'] = (END-START)/60\n\n # model testing\n results, metric_results = test_model(model, test_loader, threshold, params)\n results['target_id'] = test_patient_list\n results.to_csv(results_path, index=None)\n params.update(metric_results)\n stats = pd.DataFrame({'name':list(params.keys()), 'value':list(params.values())})\n stats.to_csv(stats_path, index=None)\n\n","repo_name":"dsgelab/gnn_family_pedigree","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1765639373","text":"# Lists\n # [1, 2, 3, 4]\n # [{}, {}, {}]\nmatrix = [[0, 1], [2, 3]] # matrix is a two dimensional list\n # to make a list of 100 same items\nzeros = [0] * 100 # will give you [0, 0, 0, 0, 0, 0, 0, 0 ...]\n # to concatenate list use +\nletters = [\"a\", \"b\", \"c\", \"d\"]\nnumber_ones = [1] * 5\nconcatenate_list = letters + number_ones\n# print(concatenate_list)\n\n # use list function to create a new list\ntwenty_numbers = list(range(1, 21))\n# print(twenty_numbers)\n\n # To separate strings into single characters\nhello = \"Hello World!\"\n# print(list(hello))\n\n # To the length use len function\n# print(len(hello))\n\n# Accessing Items\n # use [] with index to find 
item in list\n# print(letters[0])\n # Return new list from the index, letters[0:3] == letters[:3], if not specified beginning starts at 0\nspecial_letters = letters[0:3]\n# print(special_letters, letters)\n\n # removing item from list to skip every other element\n# print(twenty_numbers[::3])\n # to reverse a list just add negative 1\n# print(twenty_numbers[::-1])\n\n# List Unpacking\n # assigning variables in a list \nnumbers = [1, 2, 3, 4]\nfirst, second, *rest = numbers # first and second will be the first two values in the list, *rest will be the rest\n# print(rest)\n\n# Looping Over Lists\n # to access the index use enumerate(letters)\n # will return a tuple which is a read only \n # still can use square brackets to access the index\nfor index, letter in enumerate(letters):\n print(index, \",\", letter)\n\n# Adding and Removing Items\n #Adding\n # At end of list append method\nletters.append(\"e\")\n # To add in specific spot insert method\nletters.insert(1, \"1\")\n\n # Remove\n # pop method to remove from end of list\n# print(letters.pop())\n# print(letters)\n # also use pop method with index of what you want removed\nshow = letters.pop(1)\n# print(show)\nprint(letters)\n # remove will remove the first found matching value\nletters.remove(\"b\")\n # if not in the list it will come back with error\nprint(letters)\n # del method will delete single value or a range\n# del letters[0:3]\n# print(letters)\n # clear will delete the full list\nletters.clear()","repo_name":"RobertEdmonds/LearnPython","sub_path":"data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17314474148","text":"from selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.webdriver.support.expected_conditions import presence_of_element_located\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\n\r\nfrom utils.BasePage import WebPage\r\nfrom utils.BaseElement import Element, InputElement\r\nfrom utils.locator import Locator\r\n\r\n\r\nclass MainPage(WebPage):\r\n\r\n base_url = \"https://jupiter.cloud.planittesting.com/#/\"\r\n #base_url = \"https://jupiter2.cloud.planittesting.com/#/shop\"\r\n\r\n @property\r\n def contact_page_link(self):\r\n return Element(self.drv, Locator(By.XPATH,'//*[@id=\"nav-contact\"]/a'))\r\n\r\n @property\r\n def shop_page_link(self):\r\n return Element(self.drv, Locator(By.XPATH, '//*[@id=\"nav-shop\"]/a'))\r\n #return Element(self.drv, Locator(By.ID, '\"nav-shop'))\r\n\r\n def open_mainPage(self):\r\n mainPage = MainPage(self.drv)\r\n try:\r\n mainPage.start_chrome(mainPage.url)\r\n except NoSuchElementException:\r\n print(\"Couldn't open =>\" + self.drv.current_url)\r\n return False\r\n\r\n def click_ContactPage_from_mainPage(self):\r\n\r\n try:\r\n elms = self.drv.find_elements(By.CLASS_NAME, \"nav\")\r\n nav_Contact = elms[0].find_element(By.ID, \"nav-contact\")\r\n nav_Contact.click()\r\n return True\r\n except NoSuchElementException:\r\n print(\"Couldn't open =>\" + self.drv.current_url)\r\n return False\r\n\r\n def click_ShopPage_from_mainPage(self):\r\n\r\n try:\r\n elms = self.drv.find_elements(By.CLASS_NAME, \"nav\")\r\n nav_Contact = elms[0].find_element(By.ID, \"nav-shop\")\r\n nav_Contact.click()\r\n return True\r\n except NoSuchElementException:\r\n print(\"Couldn't open =>\" + self.drv.current_url)\r\n return False\r\n\r\n def 
click_CartPage_from_mainPage(self):\r\n\r\n try:\r\n elms = self.drv.find_elements(By.CLASS_NAME, \"nav\")\r\n nav_Cart = elms[1].find_element(By.ID, \"nav-cart\")\r\n nav_Cart.click()\r\n except NoSuchElementException:\r\n print(\"Couldn't open =>\" + self.drv.current_url)\r\n return False\r\n # Wait for page to upload\r\n timeout = 30\r\n WebDriverWait(self.drv, timeout).until(presence_of_element_located((By.CLASS_NAME, \"table\")))\r\n","repo_name":"YArkobachi/Planit-Technical-Assessment-Automation","sub_path":"pages/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30783258381","text":"\nimport logging\nfrom typing import Callable\nfrom functools import partial\n\nlog = logging.getLogger(__name__)\n\nclass Registry:\n\tdef __init__(self):\n\t\tself.INITIALIZERS = {}\n\t\tself.MODULES = {}\n\n\tdef register(self, name : str, init_func : Callable, *opts_s, **opts_kw):\n\t\tif opts_s or opts_kw:\n\t\t\tinit_func = partial(init_func, *opts_s, **opts_kw)\n\n\t\tif name in self.INITIALIZERS or name in self.MODULES:\n\t\t\tlog.warn(f'Module {name} is already registered')\n\t\t\n\t\tself.INITIALIZERS[name] = init_func\n\n\tdef register_concrete(self, name : str, dset_object):\n\n\t\tif name in self.MODULES:\n\t\t\tlog.warn(f'Module {name} is already registered')\n\t\t\n\t\tself.MODULES[name] = dset_object\n\n\t\treturn dset_object\n\n\tdef list_available_dsets(self):\n\t\tnames = set(self.INITIALIZERS.keys()).union(self.MODULES.keys())\n\t\tnames = list(names)\n\t\tnames.sort()\n\t\treturn names\n\n\tdef get(self, name : str):\n\t\tobj = self.MODULES.get(name)\n\t\t\n\t\tif obj is None:\n\t\t\tinit_func = self.INITIALIZERS.get(name)\n\n\t\t\tif init_func is None:\n\t\t\t\tdslist = '\\n '.join(self.list_available_dsets())\n\t\t\t\t# KeyError can't display newlines https://stackoverflow.com/questions/46892261/new-line-on-error-message-in-keyerror-python-3-3\n\t\t\t\traise ValueError(f'No dataset called {name} in registry, avaiable datasets:\\n {dslist}')\n\n\t\t\telse:\n\t\t\t\tobj = init_func()\n\t\t\t\tself.register_concrete(name, obj)\n\n\t\treturn obj\n\n\tdef register_class(self, *args, **kwargs):\n\t\tdef decorator(class_to_register):\n\t\t\tconfigs = getattr(class_to_register, 'configs')\n\n\t\t\t# config generator function\n\t\t\tif isinstance(configs, Callable):\n\t\t\t\tconfigs = configs()\n\n\t\t\tfor cfg in configs:\n\t\t\t\tself.register(cfg['name'], partial(class_to_register, cfg))\t\n\n\t\t\treturn class_to_register\n\n\t\treturn decorator\n\n\nDatasetRegistry = Registry()\n","repo_name":"adynathos/road-anomaly-benchmark","sub_path":"road_anomaly_benchmark/datasets/dataset_registry.py","file_name":"dataset_registry.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"40157698558","text":"from datetime import datetime, date, timedelta\nfrom io import BytesIO\n# flask\nfrom wtforms import Form, DateField, SubmitField\nfrom flask import Flask, render_template, make_response, send_file\n# myplotlib\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib import pyplot as plt\n\nclass AnalyzeForm(Form):\n f = DateField('From', [])\n t = DateField('To', [])\n submit = SubmitField('表示')\n\ndef analyze(Log, request):\n fmt = '%Y-%m-%d'\n form = AnalyzeForm(request.form)\n f, t = query(request, 'f'), 
query(request, 't')\n hasQuery = f is not None and t is not None\n _t = None if t is None else (datetime.strptime(t, fmt) + timedelta(days=1)).date()\n logs = list(map(none2zero, Log.query \\\n .filter(Log.date>=f) \\\n .filter(Log.date<=_t) \\\n .all())) if hasQuery else []\n if hasQuery:\n form.f.data = datetime.strptime(f, fmt).date()\n form.t.data = datetime.strptime(t, fmt).date()\n return render_template('analyze.html'\n , form=form, logs=logs)\n\ndef none2zero(log):\n temp = 0 if log.temp is None else log.temp\n ec = 0 if log.ec is None else log.ec\n return {'temp': temp, 'ec': ec, 'date': log.date}\n\n\ndef plot(Log, request):\n f, t = query(request, 'f'), query(request, 't')\n logs = Log.query.filter(Log.date>=f).filter(Log.date<=t).all()\n if len(logs) == 0:\n return send_file('static/img/no_data.png', 'image/png')\n temps = list(map(lambda log:\n 0 if log.temp is None else log.temp, logs))\n ecs = list(map(lambda log:\n 0 if log.ec is None else log.ec, logs))\n dates = list(map(lambda log: log.date, logs))\n # plot\n font = {'fontname': 'Mona'}\n plt.figure(1)\n plt.subplot(211)\n plt.xlabel('時間', **font)\n plt.ylabel('温度', **font)\n plt.plot(dates, temps)\n plt.axis([dates[0], dates[len(dates)-1], -5, 40])\n plt.subplot(212)\n plt.xlabel('時間', **font)\n plt.ylabel('電気伝導度 アナログ入力(0-1024)', **font)\n plt.plot(dates, ecs)\n plt.axis([dates[0], dates[len(dates)-1], 500, 700])\n # adjust layout\n plt.tight_layout()\n # make response\n canvas = FigureCanvas(plt.figure(1))\n po = BytesIO()\n canvas.print_png(po)\n response = make_response(po.getvalue())\n response.headers['Content-Type'] = 'image/png'\n return response\n\ndef query(req, name):\n return req.args.get(name)\n\n","repo_name":"yoshmiru/aqua","sub_path":"handler/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35446303544","text":"kg_veg = float(input())\nkg_fruits = float(input())\ntotal_veg = int(input())\ntotal_fruits = int(input())\n\nkg_veg_euro = kg_veg / 1.94\nkg_fruits_euro = kg_fruits / 1.94\n\nveg_total = kg_veg_euro * total_veg\nfruit_total = kg_fruits_euro * total_fruits\n\ngain = format(veg_total + fruit_total, '.2f')\n\nprint(gain)\n","repo_name":"maon0002/Programming-Basics-with-Python-July-September-2022","sub_path":"first_steps_in_coding__more_exercises/04_vegetable_market.py","file_name":"04_vegetable_market.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24798180644","text":"# -- coding: utf-8 --**\nimport requests\nimport json\n\n# 设置请求头部信息\nheaders = {\"Content-Type\": \"application/json\"}\n\n# 设置机器人的IP地址和端口号\nurl = \"http://192.168.56.2/api/v2.0.0/robots/robot\"\n\n# 设置机器人的移动速度,单位是米/秒\nspeed = 0.5\n\n# 设置机器人要移动到的目的地坐标\ndestination = {\"x\": 19.300, \"y\": 5.450, \"theta\": 177.797}\n\n# 构造请求体\npayload = {\n \"positions\": [\n {\n \"position\": {\n \"x\": destination[\"x\"],\n \"y\": destination[\"y\"],\n \"theta\": destination[\"theta\"]\n },\n \"speed\": speed\n }\n ]\n}\n\n# 发送POST请求\nresponse = requests.post(url, headers=headers, data=json.dumps(payload))\n\n# 处理响应结果\nresult = json.loads(response.text)\n\nif result[\"success\"]:\n print(\"机器人移动成功!\")\nelse:\n print(\"机器人移动失败:\", 
result[\"message\"])","repo_name":"mingchen-sjtu/NeuralSymbol_AI","sub_path":"src/fmauch_universal_robot/gazebo_ros_link_attacher/scripts/mir_control.py","file_name":"mir_control.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41627920788","text":"#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\n# } Driver Code Ends\n#User function Template for python3\nclass Solution:\n def countSubstring(self, S): \n #code here\n N=len(S)\n ans=0\n for i in range(N):\n sm=0\n la=0\n for j in range(i,N):\n if ord(S[j])>=65 and ord(S[j])<=91:\n la+=1\n else:\n sm+=1\n if la==sm:\n ans+=1\n else:\n continue\n return ans\n \n\n#{ \n # Driver Code Starts.\nif __name__ == '__main__': \n t = int(input())\n for _ in range(t):\n S = input()\n ob = Solution()\n ans = ob.countSubstring(S)\n print(ans)\n\n# } Driver Code Ends","repo_name":"akashprap/Coding-Problems","sub_path":"Easy/Count the Substrings/count-the-substrings.py","file_name":"count-the-substrings.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12461711529","text":"import os\nimport argparse\nimport ffmpeg\nimport config\nimport subprocess\nimport time\nimport csv\n\nap = argparse.ArgumentParser()\n\nap.add_argument(\"-d\", \"--directory\", required=True,\n help=\"path to directory with files\")\n\n# ap.add_argument(\"-o\", \"--output\", type=str,\n# help=\"path to optional output video file\")\n# ap.add_argument(\"-dl\", \"--detection-line\", type=float, default=0.5,\n# help=\"detection line height in percentage (default: 0.5)\")\n# ap.add_argument(\"-s\", \"--skip-frames\", type=int, default=30,\n# help=\"# of skip frames between detections\")\n\n\n# def check_if_video(path):\n# metadata = get_metadata(path)\n# return metadata['codec_type'] == 'video'\n\n# def get_metadata(path):\n# print(path)\n# return ffmpeg.probe(path, select_streams = \"v\")['streams'][0]\n\ndef getMetadata(filePath):\n return ffmpeg.probe(filePath, select_streams=\"v\")['streams'][0]\n\n\ndef fileHasVideoStream(file_path):\n try:\n video_stream = getMetadata(file_path)\n if video_stream:\n return True\n return False\n except ffmpeg.Error as e:\n return False\n\n\ndef videoConvert(filePath, w, h, fps=25):\n try:\n input_vid = ffmpeg.input(filePath)\n fileName = os.path.basename(filePath)\n convFileName = os.path.join(\n 'output', \"%sx%s_%s_%s\" % (w, h, fps, fileName))\n (\n input_vid\n .filter('scale', w=w, h=h)\n .filter('fps', fps=fps, round='up')\n .output(convFileName)\n .overwrite_output()\n .run()\n )\n return convFileName\n except ffmpeg.Error as e:\n return False\n print(\"output\")\n print(e.stdout)\n print(\"err\")\n print(e.stderr)\n\n\ndef processVideo(filePath, params):\n st = time.time()\n subprocess.run([\"python\", config.scriptLocation] +\n config.scriptParamsFixed + params + [\"-i\", filePath])\n et = time.time()\n return et - st\n\n\ndef getProccessData():\n if os.path.exists(config.outputFilePath) is False or os.stat(config.outputFilePath).st_size == 0:\n return {'down': 0, 'up': 0}\n\n with open(config.outputFilePath, \"r\") as file:\n lastLine = file.readlines()[-1].replace(\"\\n\", \"\")\n lastLine = lastLine.split(\";\")\n obj = {'down': lastLine[-2:-1][0], 'up': lastLine[-1:][0]}\n return obj\n\n\ndef getParams(param):\n _params = []\n for k, v in param.items():\n _params = _params + [k] + [v]\n return _params\n\n\ndef 
getFileHeaders():\n return config.metadataProps + \\\n [\"fps\", \"c_width\", \"c_heigth\", \"filename\", \"fName\", \"up\", \"down\", \"elapsedTime\"] + \\\n [k for k in config.scriptParams[0]]\n\n\ndef writeFileHeaders():\n with open(config.runsOutputFileName, 'w', encoding='UTF8', newline='') as f:\n writer = csv.DictWriter(f, getFileHeaders())\n writer.writeheader()\n f.close()\n\n\ndef writeFileRow(row):\n with open(config.runsOutputFileName, 'a', encoding='UTF8', newline='') as f:\n writer = csv.DictWriter(f, getFileHeaders())\n writer.writerow(row)\n f.close()\n\n\ndef main():\n args = vars(ap.parse_args())\n\n writeFileHeaders()\n\n files = []\n\n # Listar os ficheiros de uma diretoria\n for entry in os.scandir(args[\"directory\"]):\n if entry.is_file():\n files.append(entry.path)\n\n # Processar os ficheiros\n for f in files:\n\n # Check if is a video file\n if fileHasVideoStream(f):\n metaData = getMetadata(f)\n fileObj = {}\n for l in config.metadataProps:\n fileObj[l] = metaData[l]\n\n # Convert options\n for fps in config.fps:\n fileObj[\"fps\"] = fps\n for width in config.scale:\n fileObj[\"c_width\"] = width[\"w\"]\n fileObj[\"c_heigth\"] = width[\"h\"]\n\n fileObj[\"filename\"] = f\n\n fName = videoConvert(f, width[\"w\"], width[\"h\"], fps)\n fileObj[\"fName\"] = f\n\n for param in config.scriptParams:\n\n elapsed_time = processVideo(fName, getParams(param))\n\n fileObj = {**fileObj, **getProccessData(), **\n param, \"elapsedTime\": elapsed_time}\n\n if os.path.exists(config.outputFilePath):\n os.remove(config.outputFilePath)\n\n writeFileRow(fileObj)\n # Delete converted file\n if os.path.exists(fName):\n os.remove(fName)\n\n\nif __name__ == \"__main__\":\n main()\n\n # subprocess.run([\"python\"] + [config.scriptLocation] + config.scriptParams)\n\n # 'codec_name': 'h264',\n # 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10',\n # 'profile': 'High',\n # 'display_aspect_ratio': '16:9',\n # 'r_frame_rate': '25/1'\n # 'avg_frame_rate': '25/1',\n # duration\n # 'duration_ts': 205290000, 'duration': '2281.000000',\n # 'bit_rate': '2091719', 'bits_per_raw_sample': '8',\n\n# https://github.com/kkroening/ffmpeg-python\n# https://github.com/kkroening/ffmpeg-python/tree/master/examples#audiovideo-pipeline\n","repo_name":"fernando996/Data-Executor","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4224993334","text":"import hashlib\n\nname = 'Хохлов Владислав Сергеевич'\n\nsystems = [\n 'debian:10.1',\n 'debian:10.2',\n 'debian:10.3',\n 'debian:10.8',\n 'ubuntu:20.04',\n 'ubuntu:20.10',\n 'ubuntu:18.04',\n 'ubuntu:18.10',\n 'fedora:34',\n 'fedora:33',\n 'fedora:32',\n 'fedora:31',\n]\n\nprint(systems[int(hashlib.md5(name.encode('utf-8')).hexdigest(), 16) % len(systems)])\n\n\n","repo_name":"Entergro/CV","sub_path":"choose_system.py","file_name":"choose_system.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5835848721","text":"from bs4 import BeautifulSoup as bs\nimport urllib\nimport os.path \nimport json\n\n\n\nclass nasa_images:\n \n def __init__(self, num_images, outfolder):\n #number of images desired\n self.num_images = num_images\n #folder to insert the images\n self.outfolder = outfolder\n\n def download(self):\n site = 'https://earthobservatory.nasa.gov/images'\n\n page = urllib.request.urlopen(site).read()\n 
soup = bs(page, features= \"html.parser\")\n soup.prettify()\n \n imgs = soup.findAll('div', {'class': 'thumbnail-image'})\n \n counter = 0\n \n for img in imgs:\n \n if counter == self.num_images:\n return True\n break\n \n page = urllib.request.urlopen(site + img.a.get('href')[7:])\n soup = bs(page, features = 'html.parser')\n soup.prettify()\n \n map_img = soup.findAll('div', {'class': 'panel-image'})[0]\n \n #handles exception to the typical format. if not true, no file is downloaded\n if map_img.a:\n filename = self.outfolder + map_img.a.get('href').split('/')[-1]\n\n if not os.path.exists(filename):\n urllib.request.urlretrieve(map_img.a.get('href'), filename)\n counter += 1\n \n #scrolling further through the webpage to get more images\n #each page contains five images\n for i in range(2, (self.num_images - counter)//5 + 1):\n site = 'https://earthobservatory.nasa.gov/topic/image-of-the-day/getRecords?page=' + str(i)\n page = urllib.request.urlopen(site)\n data = json.load(page)\n \n #looping through dictionary entry, which contains details that let me\n #load each post\n for i in data['data']:\n slug = i['slug'].split('/')[-1]\n link = 'https://earthobservatory.nasa.gov/images/' + str(i['id']) + '/' + slug\n\n page = urllib.request.urlopen(link)\n \n soup = bs(page, features = 'html.parser')\n soup.prettify()\n \n map_img = soup.findAll('div', {'class': 'panel-image'})[0]\n #handles exceptions to the normal format\n if map_img.a:\n filename = self.outfolder + map_img.a.get('href').split('/')[-1]\n \n if not os.path.exists(filename):\n urllib.request.urlretrieve(map_img.a.get('href'), filename)\n\nimages = nasa_images(20, r\"C:\\Users\\devin.simmons.ctr\\Desktop\\projects\\nasa_images\\images\\\\\")\nimages.download()","repo_name":"devinsimmons/nasa_images","sub_path":"scripts/nasa_images.py","file_name":"nasa_images.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19808892278","text":"import sqlite3\nimport os\nimport sys\nimport FLAHCAC3AModule\nimport FLAHCAC2Module\nimport FLAHCAC3AIPOPModule\nimport FLAHCAValidations\n#import dbsetupcsv\n\n\n\ndef main(argv):\n db = os.getcwd()+r'\\2019FLAHCAcsv.db'\n conn = sqlite3.connect(db)\n cursor = conn.cursor()\n print(\"This is from where the database is sourced: \",db)\n\n # This is from where I get the FL AHCA number and check its validity\n hospital_ahca_number = FLAHCAValidations.Which_Hospital(cursor)\n\n\n Medicare_Charges =\\\n FLAHCAC3AModule.MedicareCharges(hospital_ahca_number,cursor)\n Medicare_Revenues =\\\n FLAHCAC3AModule.MedicareRevenues(hospital_ahca_number,cursor)\n MedicAID_Charges = \\\n FLAHCAC3AModule.MedicAIDCharges(hospital_ahca_number,cursor)\n MedicAID_Revenues = \\\n FLAHCAC3AModule.MedicAIDRevenues(hospital_ahca_number,cursor)\n OtherGov_Charges = \\\n FLAHCAC3AModule.OtherGovCharges(hospital_ahca_number,cursor)\n OtherGov_Revenues = \\\n FLAHCAC3AModule.OtherGovRevenues(hospital_ahca_number,cursor)\n\n CharityBadDebt_Charges = \\\n FLAHCAC3AModule.CharityBadDebtCharges(hospital_ahca_number,cursor)\n CharityBadDebt_Revenues = \\\n FLAHCAC3AModule.CharityBadDebtRevenues(hospital_ahca_number,cursor)\n NonMagCareComm_Charges = \\\n FLAHCAC3AModule.NonMagCareCommCharges(hospital_ahca_number,cursor)\n NonMagCareComm_Revenues = \\\n FLAHCAC3AModule.NonMagCareCommRevenues(hospital_ahca_number,cursor)\n #CSN_Charges = FLAHCAModule.CSNCharges(hospital_ahca_number,cursor)\n #CSN_Revenues = 
FLAHCAModule.CSNRevenues(hospital_ahca_number,cursor)\n MgdCareComm_Charges = \\\n FLAHCAC3AModule.MgdCareCommCharges(hospital_ahca_number,cursor)\n MgdCareComm_Revenues = \\\n FLAHCAC3AModule.MgdCareCommRevenues(hospital_ahca_number,cursor)\n Employee_Discounts = \\\n FLAHCAC3AModule.EmployeeDiscounts(hospital_ahca_number,cursor)\n Other_Deductions = \\\n FLAHCAC3AModule.OtherDeductions(hospital_ahca_number,cursor)\n\n Total_Charges = Medicare_Charges + MedicAID_Charges + OtherGov_Charges \\\n + CharityBadDebt_Charges + NonMagCareComm_Charges + MgdCareComm_Charges\n Total_Revenue = Medicare_Revenues + MedicAID_Revenues + OtherGov_Revenues \\\n + CharityBadDebt_Revenues + NonMagCareComm_Revenues \\\n + MgdCareComm_Revenues\n\n\n print()\n print(\"ALL OF THESE BELOW ARE FROM C3A\")\n print(\"_______________________________\")\n print(\"Medicare Charges are :\", Medicare_Charges)\n print(\"Medicare Revenue is :\", Medicare_Revenues)\n print()\n print(\"Medicaid Charges are :\", MedicAID_Charges)\n print(\"Medicaid Revenues is :\", MedicAID_Revenues)\n print()\n print(\"Other Gov Charges are :\", OtherGov_Charges)\n print(\"Other Gov Revenue is :\", OtherGov_Revenues)\n print()\n print(\"Charity and Bad Charges are :\", CharityBadDebt_Charges)\n print(\"Charity and Bad Revenue is :\", CharityBadDebt_Revenues)\n print()\n print(\"Non-Managed Care Commercial Charges are :\", NonMagCareComm_Charges)\n print(\"Non-Managed Care Commercial Revenue is :\", NonMagCareComm_Revenues)\n print()\n print(\"Managed Care Commercial Charges are :\", MgdCareComm_Charges)\n print(\"Managed Care Commercial Revenue is :\", MgdCareComm_Revenues)\n print()\n print(\"Employee Discounts are :\", Employee_Discounts)\n print(\"Other Deductions are :\", Other_Deductions)\n print()\n print(\"Total Charges are :\", Total_Charges)\n print(\"Total Revenue is :\", Total_Revenue)\n# Generation of numbers from c2\n C2_Total_Charges = \\\n FLAHCAC2Module.C2TotalCharges(hospital_ahca_number, cursor)\n C2_Total_Revenue = \\\n FLAHCAC2Module.C2TotalRevenue(hospital_ahca_number, cursor)\n C2_Other_Op_Revenue = \\\n FLAHCAC2Module.C2OtherOpRevenue(hospital_ahca_number, cursor)\n C2_Total_Op_Expense = \\\n FLAHCAC2Module.C2TotalOperatingExpense(hospital_ahca_number, cursor)\n Expense_ratio = round(C2_Total_Op_Expense/C2_Total_Charges,4) #Runding to four places\n print()\n print(\"ALL OF THESE BELOW ARE FROM C2\")\n print(\"______________________________\")\n print(\"C2 Total Charges are :\", C2_Total_Charges)\n print(\"C2 Total Revenue is :\", C2_Total_Revenue)\n print(\"C2 Other Operating Revenue is :\", C2_Other_Op_Revenue)\n print(\"C2 Total Operating Expense is :\", C2_Total_Op_Expense)\n print(\"Expense as a percent of charges is:\", Expense_ratio)\n\n# Generation of IP and OP numbers for Commercial PPO and FLAHCAModule\n MgdCareComm_INPATIENT_Charges = \\\n FLAHCAC3AIPOPModule.IPCharges(hospital_ahca_number,cursor)\n MgdCareComm_INPATIENT_Revenue = \\\n FLAHCAC3AIPOPModule.IPRevenue(hospital_ahca_number,cursor)\n MgdCareComm_OUTPATIENT_Charges = \\\n FLAHCAC3AIPOPModule.OPCharges(hospital_ahca_number,cursor)\n MgdCareComm_OUTPATIENT_Revenue = \\\n FLAHCAC3AIPOPModule.OPRevenue(hospital_ahca_number,cursor)\n\n\n print()\n print(\"ALL OF THESE BELOW ARE FROM C3A\")\n print(\"_______________________________\")\n print(\"Managed Care Commercial INPATIENT Charges are :\", \\\n MgdCareComm_INPATIENT_Charges, type(MgdCareComm_INPATIENT_Charges))\n print(\"Managed Care Commercial INPATIENT Revenue is :\", \\\n 
MgdCareComm_INPATIENT_Revenue)\n print(\"Managed Care Commercial OUTPATIENT Charges are :\", \\\n MgdCareComm_OUTPATIENT_Charges)\n print(\"Managed Care Commercial OUTPATIENT Revenue is :\", \\\n MgdCareComm_OUTPATIENT_Revenue)\n\n#Does C3A totals match c2?#\n\n print()\n print(\"VALIDATION SECTION\")\n print(\"_______________________________\")\n FLAHCAValidations.Charges_Validation(Total_Charges,C2_Total_Charges)\n FLAHCAValidations.Revenue_Validation(Total_Revenue,C2_Total_Revenue,\\\n Employee_Discounts,Other_Deductions,C2_Other_Op_Revenue)\n FLAHCAValidations.IPOP_Validation(MgdCareComm_Charges, \\\n MgdCareComm_Revenues,MgdCareComm_INPATIENT_Charges, \\\n MgdCareComm_OUTPATIENT_Charges,MgdCareComm_INPATIENT_Revenue, \\\n MgdCareComm_OUTPATIENT_Revenue)\n\n#Create a table for the informaiton we source above\n print(hospital_ahca_number, type(hospital_ahca_number))\n print(Medicare_Charges, type(Medicare_Charges))\n print(Medicare_Revenues, type(Medicare_Revenues))\n\n\n cursor.execute(\"\"\"INSERT INTO results2019 (hospitalahcanumber,\n MedicareCharges,MedicareRevenues,MedicAIDCharges,\n MedicAIDRevenues,OtherGovCharges,OtherGovRevenues,CharityBadDebtCharges,\n CharityBadDebtRevenues,NonMagCareCommCharges,NonMagCareCommRevenues,\n MgdCareCommCharges,MgdCareCommRevenues,EmployeeDiscounts,OtherDeductions,\n MgdCareCommINPATIENTCharges,MgdCareCommINPATIENTRevenue,\n MgdCareCommOUTPATIENTCharges,MgdCareCommOUTPATIENTRevenue,\n TotalCharges,TotalRevenue)\n VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n (hospital_ahca_number,Medicare_Charges,Medicare_Revenues,MedicAID_Charges,\n MedicAID_Revenues,OtherGov_Charges, OtherGov_Revenues,\n CharityBadDebt_Charges, CharityBadDebt_Revenues, NonMagCareComm_Charges,\n NonMagCareComm_Revenues, MgdCareComm_Charges, MgdCareComm_Revenues,\n Employee_Discounts,Other_Deductions,MgdCareComm_INPATIENT_Charges,\n MgdCareComm_INPATIENT_Revenue, MgdCareComm_OUTPATIENT_Charges,\n MgdCareComm_OUTPATIENT_Revenue, Total_Charges,Total_Revenue))\n conn.commit()\n cursor.close()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"PeterVanLoon/FL_AHCA","sub_path":"2019SQLquery.py","file_name":"2019SQLquery.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18476392754","text":"\"\"\"\n本地插件开发,本地生成命令行命令,注册参数,autouse且不用调用直接前置自动启动命令演示\n\"\"\"\nimport pytest\n\n\ndef pytest_addoption(parser):\n # 注册host\n parser.addoption(\n \"--host\", action=\"store\", default=\"test\", help=\"change test host:default\")\n # 注册ini中username\n parser.addini(\n \"username\", type=None, default=\"test\", help=\"add ini username\")\n\n # 注册url地址\n parser.addini(\n \"base_url1\", type=None, default=\"\", help=\" base_url1\",)\n\n parser.addini(\n \"base_url2\", type=None, default=\"\", help=\"base_url2\",)\n\n\n@pytest.fixture()\n# 获得用用在命令行输入内容\ndef host(request):\n if request.config.getoption(\"--host\") == \"test\":\n return \"http://49.235.92.12:7005\"\n if request.config.getoption(\"--host\") == \"uat\":\n return \"http://49.235.92.12:8005\"\n else:\n return \"host not exists\"\n\n\n@pytest.fixture(scope=\"session\")\ndef base_url1(request):\n return request.config.getini(\"base_url1\")\n\n\n@pytest.fixture(scope=\"session\")\ndef base_url2(request):\n return request.config.getini(\"base_url2\")\n\n\n# 添加参数autouse 后自动执行,没有添加就不行执行\n@pytest.fixture(autouse=True)\ndef request_print(request):\n print(request.module)\n print(request.function)\n 
print(request.fspath)\n print(request.scope)\n\n\n# pytest.ini 读取\n@pytest.fixture(autouse=True)\ndef get_ini(request):\n ops = request.config.getini(\"addopts\")\n print(\"读取到的ops是:{}\".format(ops))\n user = request.config.getini(\"username\")\n print(\"读取到的username是:{}\".format(user))\n return ops, user\n","repo_name":"qangcheng/Api_test","sub_path":"request内置函数用法/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12773382889","text":"# claclassss - A blueprint of attributes and \n# methods which we can use to create objects.\n# object - An instance of a class.\n# attribute - Data attached to a class or \n# instance of a class.\n# method - Procedures or functions attached to a class.\n\n# import turtle\n\nclass dog:\n def __init__(self, breed, weight, energy, speak) :\n self.breed = breed\n self.weight = weight\n self.energy = energy\n self.speak = speak\n # self.interior_angles = (self.sides -2)*180\n # self.angle = self.interior_angles/self.sides\n \n # def draw(self):\n # for i in range(self.sides):\n # turtle.forward(self.size)\n # turtle.right(180-self.angle)\n # turtle.done()\n\nBilbo_Waggins = dog(\"labrador\", 80, \"low\", \"woof\") \njohn = dog(\"mandog\", 250, \"low\", \"feed me\")\n\n# pentagon = shape(5, \"Pentagon\", 100)\n# hexagon = shape(6,\"Hexagon\", 100)\n# dodecahedron = shape(12, \"Dodecahedron\", (20))\n\nprint(john.breed)\nprint(john.weight)\n\n\n\n# print(pentagon.sides)\n# print(pentagon.name)\n\n# dodecahedron.draw()","repo_name":"ainsleyd86/DFESW11py","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28482837632","text":"import altair as alt\nfrom vega_datasets import data\nfrom dash import Dash, dcc, html, Input, Output\nimport pandas as pd\n\ndata = pd.read_csv(\"vgsales.csv\")\n\ndef plot_altair(genre):\n chart = alt.Chart(data[data['Genre']==genre], title=f\"Mean Global Sales {genre}\").mark_line(color='red').encode(\n x='Year',\n y=alt.Y('mean(Global_Sales)',title=\"Mean Global Sales\"))\n return chart.to_html()\n\ncolors = {\n 'background': '#111111',\n 'text': '#7FDBFF'\n}\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = Dash(__name__, external_stylesheets=external_stylesheets)\n\nserver = app.server\n\napp.layout = html.Div([\n html.H1(children='Visualization of Video Game sales', style={'font-size': \"260%\", 'color':'#660000'}),\n html.Br(),\n html.Br(),\n html.Br(),\n\n\n html.Iframe(\n id='line',\n style={'border-width': '0', 'width': '200%', 'height': '400px'},\n srcDoc=plot_altair(genre='Sports')),\n html.Div(children='''\n Please select Genre of Video Game\n ''', style={'color':'#660000'}),\n dcc.Dropdown(\n id='genre', value='Sports',\n options=[{'label': i, 'value': i} for i in data['Genre'].unique()],\n style={'height': '30px', 'width': '200px'})])\n\n@app.callback(\n Output('line', 'srcDoc'),\n Input('genre', 'value'))\n\ndef update_output(genre):\n return plot_altair(genre)\n\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"RamiroMejia/video_game_dash_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32394641054","text":"import mlrun\nfrom mlrun.execution 
import MLClientCtx\nfrom mlrun.datastore import DataItem\nimport snowflake.connector as snow\nimport os\nimport numpy as np\nfrom dask.distributed import Client\nfrom dask.dataframe import from_delayed\nfrom dask import delayed\nfrom dask import dataframe as dd\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n \n@delayed\ndef load(batch):\n try:\n print(\"BATCHING\")\n df_ = batch.to_pandas()\n return df_\n except Exception as e:\n print(f\"Failed on {batch} for {e}\")\n pass\n\ndef load_delayed(dask_client, connection_info, query, out_dir, write_out=False, publish=False): \n context = mlrun.get_or_create_ctx('dask-cluster') \n sfAccount = context.get_secret('account')\n context.log_result('sfAccount', sfAccount)\n context.logger.info(f'sfAccount = {sfAccount}')\n # setup dask client from the MLRun dask cluster function\n if dask_client:\n client = mlrun.import_function(dask_client).client\n context.logger.info(f'Existing dask client === >>> {client}\\n')\n else:\n client = Client()\n context.logger.info(f'\\nNewly created dask client === >>> {client}\\n')\n \n query = query\n\n conn = snow.connect(**connection_info)\n cur = conn.cursor()\n cur.execute(query)\n batches = cur.get_result_batches()\n print(f'batches len === {len(batches)}\\n')\n \n dfs = [] \n for batch in batches:\n if batch.rowcount > 0:\n df = load(batch)\n dfs.append(df) \n ddf = from_delayed(dfs)\n \n # materialize the query results set for some sample compute\n \n ddf_sum = ddf.sum().compute()\n ddf_mean = ddf.mean().compute()\n ddf_describe = ddf.describe().compute()\n ddf_grpby = ddf.groupby(\"C_CUSTKEY\").count().compute()\n \n context.logger.info(f'sum === >>> {ddf_sum}\\n')\n context.logger.info(f'mean === >>> {ddf_mean}\\n')\n context.logger.info(f'ddf head === >>> {ddf.head()}\\n')\n context.logger.info(f'ddf === >>> {ddf}\\n')\n\n context.log_result('number of rows', len(ddf.index)) \n \n context.log_dataset('dask_data_frame', ddf)\n context.log_dataset(\"my_df_describe\", df=ddf_describe)\n context.log_dataset(\"my_df_grpby\", df=ddf_grpby)\n \n ddf.persist(name = 'customer')\n if publish and (not client.list_datasets()): \n client.publish_dataset(customer=ddf)\n \n if write_out:\n dd.to_parquet(df=ddf, path=out_dir)\n context.log_result('parquet', out_dir)\n","repo_name":"igz-us-sales/snowflake-dask","sub_path":"snowflake-dask.py","file_name":"snowflake-dask.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19112413657","text":"n = {'atleta':'','salto':[],'media':0}\nwhile True:\n n['salto'] = []\n atleta = input('nome do atleta: ')\n n['atleta'] = atleta\n for x in range(1,6):\n salto = float(input('{}° salto: '.format(x)))\n n['salto'].append(salto)\n n['salto'].sort()\n n['media'] = (n['salto'][1]+n['salto'][2]+n['salto'][3])/3\n print('''\nMelhor salto: {} m\nPior salto: {} m\nMédia dos demais saltos: {:.2f} m\n\nResultado final:\n{}: {:.2f} m'''.format(n['salto'][-1],n['salto'][0],n['media'],n['atleta'],n['media']))","repo_name":"dev-everaldo-cyrino/PythonBrasil","sub_path":"3-EstruturadeRepeticao/ex--26/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71344967527","text":"from stitchnet_layers.gaussian_dist import * \nfrom stitchnet_layers.x_dist import *\nfrom stitchnet_layers.y_dist import *\nfrom stitchnet_layers.state import State\nfrom shared_files.param 
import *\nl = State.layers\nimport tensorflow.keras as tfk\nimport numpy as np\nfrom focal_loss import sparse_categorical_focal_loss\n\ndef get_qhat_znt_expand_direct(zn_1_expand,n):\n \n zn_expand = l['z{}_expand_to_z{}_expand'.format(n-1,n)](zn_1_expand)\n\n return zn_expand\n\n\ndef get_qhat_zt_expand_direct(x):\n \n qhat_zt_expand_direct = {}\n \n zn_1t_expand = x\n for n in range(1,6):\n zn_1t_expand = get_qhat_znt_expand_direct(zn_1t_expand,n)\n qhat_zt_expand_direct['qhat_zt_expand_direct{}'.format(n)] = zn_1t_expand\n\n return qhat_zt_expand_direct\n\ndef get_all_qhat_z(all_expands):\n \n qhat_z = []\n\n for i in range(len(all_expands)):\n qhat_z_layer = l['z{}_expand_to_z{}'.format(5-i,5-i)](all_expands['qhat_zt_expand_direct{}'.format(5-i)])\n qhat_z.append(qhat_z_layer)\n\n return qhat_z\n\ndef get_qhat_z(x):\n \n all_expands = get_qhat_zt_expand_direct(x)\n \n qhat_z = get_all_qhat_z(all_expands) \n \n return qhat_z\n\n\ndef combine_params(q_mean,q_logvar,p_mean,p_logvar):\n \n q_var = K.exp(q_logvar)\n q_var_inv = 1/q_var\n\n p_var = K.exp(p_logvar)\n p_var_inv = 1/p_var\n\n var = 1/(p_var_inv + q_var_inv)\n logvar = K.log(var)\n\n mean_numerator = q_mean*q_var_inv + p_mean*p_var_inv\n mean_denominator = (p_var_inv + q_var_inv)\n \n mean = mean_numerator/mean_denominator\n\n return mean,logvar\n\ndef get_level_info(qhat_zn_1,zn_expanded,zn_sample,level,gen):\n \n if level == 5:\n p_zn_1 = get_unit_gaussian_dist()\n else:\n zn_1_expanded = l['z{}_expand_to_z{}_expand'.format(level+1,level)](zn_expanded)\n p_zn_1 = l['z{}_expand_to_z{}'.format(level,level)](zn_1_expanded) \n\n \n if gen:\n q_zn_1 = p_zn_1\n else:\n q_zn_1 = combine_params(qhat_zn_1[0],qhat_zn_1[1],\n p_zn_1[0],p_zn_1[1])\n \n zn_1_sample = gaussian_sample(q_zn_1[0],q_zn_1[1])\n zn_1_expanded = l['z{}_to_z{}_expand'.format(level,level)](zn_1_sample) \n \n\n return p_zn_1,q_zn_1,zn_1_expanded,zn_1_sample\n\n\ndef z_information(qhat_z,gen = False):\n \n p_z = []\n q_z = []\n z_expanded = []\n z_sample = []\n\n level_5 = get_level_info(qhat_z[0],None,None,5-0,gen)\n p_z.append(level_5[0])\n q_z.append(level_5[1])\n z_expanded.append(level_5[2])\n z_sample.append(level_5[3])\n for i in range(1,5):\n level = 5 - i\n level_n_1 = get_level_info(qhat_z[i],z_expanded[-1],z_sample[-1],level,gen)\n p_z.append(level_n_1[0])\n q_z.append(level_n_1[1])\n z_expanded.append(level_n_1[2])\n z_sample.append(level_n_1[3])\n\n out={}\n out['p_z'] = p_z\n out['q_z'] = q_z\n out['z_expanded'] = z_expanded\n out['z_sample'] = z_sample\n\n return out\n\n\ndef get_decoded_z(z_expanded):\n return l['decoder'](z_expanded[-1])\n\ndef get_visuals(decoded_z,lab):\n\n alpha_visual = l[lab+'_alpha_visual'](decoded_z)\n beta_visual = l[lab+'_beta_visual'](decoded_z)\n\n return alpha_visual,beta_visual\n\ndef get_visual(decoded_z,lab):\n\n return l[lab+'_visual'](decoded_z)\n\ndef create_output_dict(z_sample,x_reconstructed,x_blk_reconstructed,x_hel_reconstructed,x_con_reconstructed,x_ggo_reconstructed,p_y):\n \n out = {}\n out['z5_sample'] = z_sample[0]\n out['z4_sample'] = z_sample[1]\n out['z3_sample'] = z_sample[2]\n out['z2_sample'] = z_sample[3]\n out['z1_sample'] = z_sample[4]\n out['x_reconstructed'] = x_reconstructed\n out['x_ggo_reconstructed'] = x_ggo_reconstructed\n out['x_con_reconstructed'] = x_con_reconstructed\n out['x_blk_reconstructed'] = x_blk_reconstructed\n out['x_hel_reconstructed'] = x_hel_reconstructed\n out['y_reconstructed'] = p_y\n\n return out\n\ndef create_loss_dict(xent,cce,z_sample,p_z,q_z):\n \n # get losses\n 
loss_dict = {}\n\n # x recon loss\n loss_dict['XENT'] = xent \n loss_dict['CCE'] = cce\n\n # p_z loss \n for i in range(5):\n loss_dict['p_z{}'.format(5-i)] = -gaussian_ll(z_sample[i],p_z[i][0],p_z[i][1]) \n \n # q_z loss \n for i in range(5):\n loss_dict['q_z{}'.format(5-i)] = gaussian_ll(z_sample[i],q_z[i][0],q_z[i][1])\n \n loss = 0\n for x in loss_dict.values():\n loss += x\n \n loss_dict['loss'] = loss\n loss_dict['KL'] = loss - loss_dict['XENT'] - loss_dict['CCE']\n\n return loss_dict\n\ndef predict(inputs,Y_hel,Y_lab,gen):\n x = inputs \n \n qhat_z = get_qhat_z(x)\n \n z_info = z_information(qhat_z,gen = gen)\n \n all_out = {}\n all_loss = {'KL':0,'loss':0,'XENT':0,'CCE':0}\n\n z_expand = z_info['z_expanded'] \n \n all_decoded = get_decoded_z(z_expand)\n\n hel_alpha_visual,hel_beta_visual = get_visuals(all_decoded,'hel')\n con_alpha_visual,con_beta_visual = get_visuals(all_decoded,'con')\n ggo_alpha_visual,ggo_beta_visual = get_visuals(all_decoded,'ggo')\n blk_alpha_visual,blk_beta_visual = get_visuals(all_decoded,'blk')\n y_visual = get_visual(all_decoded,'y')\n\n p_x_hel = visual_to_x_dist(hel_alpha_visual,hel_beta_visual,'hel')\n p_x_con = visual_to_x_dist(con_alpha_visual,con_beta_visual,'con')\n p_x_ggo = visual_to_x_dist(ggo_alpha_visual,ggo_beta_visual,'ggo')\n p_x_blk = visual_to_x_dist(blk_alpha_visual,blk_beta_visual,'blk')\n p_y = visual_to_y_dist_param(y_visual)\n\n #hel_cce = tf.keras.losses.CategoricalCrossentropy(reduction = tf.keras.losses.Reduction.NONE)(Y_hel,p_y[:BS//3])\n #lab_cce = tf.keras.losses.CategoricalCrossentropy(reduction = tf.keras.losses.Reduction.NONE)(Y_lab,p_y[-BS//3:])\n cce = sparse_categorical_focal_loss(tf.concat([tf.math.argmax(Y_hel,axis = -1),tf.math.argmax(Y_lab,axis = -1)],axis = 0),\n tf.concat([p_y[:BS//3],p_y[2*BS//3:]],axis = 0),\n gamma = 2)\n cce = tf.reduce_mean(cce,axis = [1,2])\n #hel_cce = tf.reduce_mean(hel_cce,axis = [1,2])\n #lab_cce = tf.reduce_mean(lab_cce,axis = [1,2])\n cce = tf.concat([cce[:BS//3],cce[:BS//3]*0,cce[BS//3:]],axis = 0) * 100\n\n x_blk_reconstructed = dist_to_x(p_x_blk)[...,0]\n x_hel_reconstructed = dist_to_x(p_x_hel)[...,0]\n x_con_reconstructed = dist_to_x(p_x_con)[...,0]\n x_ggo_reconstructed = dist_to_x(p_x_ggo)[...,0]\n x_reconstructed = ((x_blk_reconstructed*p_y[...,0]) \n + (x_hel_reconstructed*p_y[...,1]) \n + (x_con_reconstructed*p_y[...,2]) \n + (x_ggo_reconstructed*p_y[...,3]))\n\n blk_xent = -x_ll(x,p_x_blk)[...,0]\n hel_xent = -x_ll(x,p_x_hel)[...,0]\n ggo_xent = -x_ll(x,p_x_ggo)[...,0]\n con_xent = -x_ll(x,p_x_con)[...,0]\n xent = ( (blk_xent*p_y[...,0]) \n + (hel_xent*p_y[...,1]) \n + (con_xent*p_y[...,2]) \n + (ggo_xent*p_y[...,3]))\n xent = tf.reduce_mean(xent,axis=[1,2])\n\n out = create_output_dict(z_info['z_sample'],\n x_reconstructed,\n x_blk_reconstructed,\n x_hel_reconstructed,\n x_con_reconstructed,\n x_ggo_reconstructed,\n p_y)\n all_out.update(out)\n \n loss_dict = create_loss_dict(xent,cce,z_info['z_sample'],\n z_info['p_z'],z_info['q_z'])\n for key in all_loss:\n all_loss[key] += loss_dict[key] \n\n return all_out,all_loss\n \nclass myModel(tfk.Model):\n def __init__(self):\n super(myModel,self).__init__()\n \n self.l = State.layers\n\n def call(self,inputs,gen = False):\n \n T_hel,Y_hel,T_ncp,T_lab,Y_lab = inputs\n X = tf.concat([T_hel,T_ncp,T_lab],axis = 0)\n out,loss_dict = predict(X,Y_hel,Y_lab,gen = gen)\n out['loss'] = loss_dict['loss'] \n out['Actual KL'] = loss_dict['KL']/l['KL'] \n out['XENT'] = loss_dict['XENT'] \n out['CCE'] = loss_dict['CCE'] \n 
self.add_loss(loss_dict['loss'])\n \n Y_lab_pred = out['y_reconstructed'][2*BS//3:]\n Y_lab_pred = tf.one_hot(tf.math.argmax(Y_lab_pred,axis = -1),4)\n Y_lab = tf.one_hot(tf.math.argmax(Y_lab,axis = -1),4)\n\n con_prec = tf.math.reduce_sum(Y_lab_pred[...,3] * Y_lab[...,3],axis = [1,2]) / (tf.math.reduce_sum(Y_lab_pred[...,3],axis = [1,2]) + 1e-7)\n con_recall =tf.math.reduce_sum(Y_lab_pred[...,3] * Y_lab[...,3],axis = [1,2]) / (tf.math.reduce_sum(Y_lab[...,3],axis = [1,2]) + 1e-7)\n con_f1 = (2*con_prec * con_recall)/(con_prec + con_recall + 1e-7)\n ggo_prec = tf.math.reduce_sum(Y_lab_pred[...,2] * Y_lab[...,2],axis = [1,2]) / (tf.math.reduce_sum(Y_lab_pred[...,2],axis = [1,2]) + 1e-7)\n ggo_recall =tf.math.reduce_sum(Y_lab_pred[...,2] * Y_lab[...,2],axis = [1,2]) / (tf.math.reduce_sum(Y_lab[...,2],axis = [1,2]) + 1e-7)\n ggo_f1 = (2*ggo_prec * ggo_recall)/(ggo_prec + ggo_recall + 1e-7)\n\n self.add_metric(ggo_f1,name = 'GGO F1',aggregation = 'mean') \n self.add_metric(con_f1,name = 'CON F1',aggregation = 'mean') \n self.add_metric(loss_dict['XENT'],name = 'XENT',aggregation = 'mean') \n self.add_metric(loss_dict['CCE'],name = 'CCE',aggregation = 'mean') \n self.add_metric(loss_dict['KL']/l['KL'],name = 'Actual KL',aggregation = 'mean') \n self.add_metric(loss_dict['KL'],name = 'Scaled KL',aggregation = 'mean') \n\n return out\n\n\n","repo_name":"JudahZammit/stitchnet","sub_path":"stitchnet_layers/filler.py","file_name":"filler.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33357308727","text":"import pyowm\n\nowm = pyowm.OWM(\"ee5b93394a79292907548fdf0b8a8813\", language=\"ru\")\n\nplace = input(\"Введите город/страну: \")\n\nobservation = owm.weather_at_place(place)\nweather = observation.get_weather()\n\ntemp = weather.get_temperature(\"celsius\")[\"temp\"]\n\nif temp <= 10:\n print(\"В городе/стране \" + place + \" сейчас температура\", temp, \"℃.\" +\n \"\\nЛучше оденься потеплее.\")\nelif temp <= 17:\n print(\"В городе/стране \" + place + \" сейчас температура\", temp, \"℃.\" +\n \"\\nТепло и не холодно, но лучше что-нибудь накинуть.\")\nelse:\n print(\"В городе/стране \" + place + \" сейчас температура\", temp, \"℃.\" +\n \"\\nНа улице жара, выходи на легке.\")\n","repo_name":"KulataevKanat/PythonData","sub_path":"programs/workWithAPI/weatherAdvisor.py","file_name":"weatherAdvisor.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8639113818","text":"class Solution: \n def primeRange(self,M,N):\n prime = [True] * (N + 1) #generating boolean array\n prime[0] = False\n prime[1] = False\n for i in range (2, int(math.sqrt(N)) + 1):\n if prime[i] == True: #if no is prime\n for p in range(i*i , N+1 , i):#(start , end , increment)\n prime[p] = False\n ans = []\n for i in range(M , len(prime)):\n if prime[i] == True:\n ans.append(i)\n return ans \n \n","repo_name":"thetanishkagupta/Problem-Solving-","sub_path":"Sieve of Eratosthenes(Prime numbers in a range).py","file_name":"Sieve of Eratosthenes(Prime numbers in a range).py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2134618542","text":"def restore(r, c, d, original, length):\n while [r, c] != original:\n pro[r][c] = 0\n length -= 1\n r -= d[0]\n c -= d[1]\n return length\n\n\ndef look(core):\n global length, min_len, count, 
max_count\n if count + (len(cores)-core) >= max_count:\n if core == len(cores):\n if count > max_count or (count == max_count and length < min_len):\n max_count = count\n min_len = length\n else:\n for d in drc:\n r, c = cores[core]\n while 0 < r < N-1 and 0 < c < N-1:\n if pro[r+d[0]][c+d[1]]:\n break\n else:\n r += d[0]\n c += d[1]\n pro[r][c] = 2\n length += 1\n if count + (len(cores)-core) == max_count and length > min_len:\n break\n else:\n count += 1\n look(core + 1)\n count -= 1\n length = restore(r, c, d, cores[core], length)\n look(core + 1)\n\n\ndrc = [[-1, 0], [1, 0], [0, -1], [0, 1]]\nfor tc in range(1, int(input())+1):\n N = int(input())\n pro = [list(map(int, input().split())) for _ in range(N)]\n cores = []\n for i in range(N):\n for j in range(N):\n if pro[i][j]:\n cores.append([i, j])\n length, min_len, count, max_count = 0, N*N, 0, 0\n look(0)\n print(f'#{tc} {min_len}')\n","repo_name":"WoosubLeee/algorithm-study","sub_path":"SWEA/etc/1767_프로세서 연결하기.py","file_name":"1767_프로세서 연결하기.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6102675267","text":"import json\nimport os\nimport re\n\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom playwright.sync_api import (\n Page,\n Error as PlaywrightError,\n TimeoutError as PlaywrightTimeoutError,\n)\n\n\ndef test_diff(page: Page):\n \"\"\"Save and track changes in LibGuides content.\"\"\"\n api_params = {\n \"site_id\": os.environ.get(\"API_SITE_ID\"),\n \"key\": os.environ.get(\"API_KEY\"),\n \"expand\": \"pages\",\n # retrieve published guides only\n \"status\": \"1\",\n }\n guides_retrieved = requests.get(\n \"/\".join(\n [\n os.environ.get(\"API_BASE_URL\").rstrip(\"/\"),\n os.environ.get(\"API_ENDPOINT\"),\n ]\n ),\n params=api_params,\n ).json()\n for guide in guides_retrieved:\n # count_hit increases overwhelm the diff\n guide.pop(\"count_hit\", None)\n # cache busting parameters bust the diff\n if \"thumbnail_url\" in guide:\n guide[\"thumbnail_url\"] = guide[\"thumbnail_url\"].rsplit(\"&cb=\")[0]\n with open(f\"content/pages.json\", \"w\") as f:\n f.write(json.dumps(guides_retrieved, indent=4, sort_keys=True))\n # click OK for cookies\n page.goto(os.environ.get(\"SITE_BASE_URL\"))\n page.click(\"#s-ui-cc-close-btn\")\n for guide in guides_retrieved:\n if guide[\"redirect_url\"]:\n continue\n for guide_page in guide[\"pages\"]:\n # skip pages that are actually redirects to elsewhere\n if guide_page[\"redirect_url\"]:\n continue\n # skip pages that are hidden\n if guide_page[\"enable_display\"] == \"0\":\n continue\n # replace all non-word characters with hyphens\n # example:\n # “Scientific Researches!—New Discoveries in PNEUMATICKS!”\n # -Scientific-Researches--New-Discoveries-in-PNEUMATICKS--\n filename_name = re.sub(\"[^a-zA-Z0-9-]\", \"-\", guide_page[\"name\"])\n # replace all consecutive hyphens with single hyphen\n # example:\n # -Scientific-Researches--New-Discoveries-in-PNEUMATICKS--\n # -Scientific-Researches-New-Discoveries-in-PNEUMATICKS-\n filename_name = re.sub(\"--+\", \"-\", filename_name)\n # remove all start and end hyphens\n # example:\n # -Scientific-Researches-New-Discoveries-in-PNEUMATICKS-\n # Scientific-Researches-New-Discoveries-in-PNEUMATICKS\n filename_name = filename_name.strip(\"-\")\n # replace all slash characters with underscores\n # example:\n # about/policies\n # about_policies\n if guide_page[\"friendly_url\"]:\n filename_friendly_url = f'{re.sub(\"/\", \"_\", 
guide_page[\"friendly_url\"].split(os.environ.get(\"SITE_BASE_URL\"))[-1])}--'\n else:\n filename_friendly_url = \"\"\n # construct filename\n filename = f'{filename_name}--{filename_friendly_url}g-{guide[\"id\"]}-p-{guide_page[\"id\"]}.html'\n # catch any timeouts and continue the loop\n try:\n # open the page in a browser\n page.goto(guide_page[\"url\"])\n page.wait_for_load_state(\"networkidle\")\n except PlaywrightTimeoutError as e:\n # write error to file\n with open(f\"content/pages/{filename}\", \"w\") as file:\n file.write(str(e))\n continue\n except PlaywrightError as error:\n # NS_ERROR_ABORT seems like a browser issue of some kind\n if error.message == \"NS_ERROR_ABORT\":\n # write error to file\n with open(f\"content/pages/{filename}\", \"w\") as file:\n file.write(str(error))\n continue\n else:\n raise error\n soup = BeautifulSoup(page.content(), \"html.parser\")\n # select page main content only\n pagemain = soup.find(id=\"s-lg-guide-main\")\n # remove dynamic content\n if pagemain.find(id=\"s-lg-page-prevnext\"):\n pagemain.find(id=\"s-lg-page-prevnext\").decompose()\n if pagemain.find(class_=\"s-lc-whw\"):\n pagemain.find(class_=\"s-lc-whw\").clear()\n if pagemain.find_all(id=re.compile(\"^libchat_\")):\n for libchat in pagemain.find_all(id=re.compile(\"^libchat_\")):\n libchat.decompose()\n if pagemain.select(\".s-la-content-faqlist\"):\n for faqlist in pagemain.select(\".s-la-content-faqlist\"):\n faqlist.clear()\n if pagemain.select(\".s-lg-system-list .s-lg-guide-info-updated\"):\n for updated in pagemain.select(\n \".s-lg-system-list .s-lg-guide-info-updated\"\n ):\n updated.clear()\n if pagemain.select(\".s-lg-system-list .s-lg-guide-info-views\"):\n for views in pagemain.select(\n \".s-lg-system-list .s-lg-guide-info-views\"\n ):\n views.clear()\n if pagemain.select(\".ep_view_timestamp strong\"):\n for timestamp in pagemain.select(\".ep_view_timestamp strong\"):\n timestamp.clear()\n if pagemain.select(\".s-lg-rss .s-lg-rss-list\"):\n for rss in pagemain.select(\".s-lg-rss .s-lg-rss-list\"):\n rss.clear()\n if pagemain.select(\".hours-today .hours-col-time\"):\n for time in pagemain.select(\".hours-today .hours-col-time\"):\n time.clear()\n if pagemain.select(\".resize-sensor\"):\n for expand in pagemain.select(\".resize-sensor\"):\n expand.decompose()\n # write html to file\n with open(f\"content/pages/{filename}\", \"w\") as file:\n file.write(pagemain.prettify())\n","repo_name":"caltechlibrary/overtime","sub_path":".github/workflows/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42174413082","text":"import requests\nfrom bs4 import BeautifulSoup\n\nURL = \"https://www.shabdkosh.com/word-of-the-day/hindi-english\"\npage = requests.get(URL)\n\nsoup = BeautifulSoup(page.content, \"html.parser\")\n\njob_elements = soup.find_all(\"p\", class_=\"pt-3\")\nprint(job_elements[0].find_all(\"a\")[1].text.strip())\n\n# for job_element in job_elements:\n# inner_as = job_element.find_all(\"a\")\n# all_a.append(inner_as[1].text.strip())\n\n# print(all_a)","repo_name":"isonlaxman/stream-video-tools","sub_path":"hindiWordDay/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70585529770","text":"#!/usr/bin/env python\n\nimport json\nimport sys\nimport urllib2\n\nfrom disconnect_mapping import 
disconnect_mapping\n\ndef write_header_file(output_file, output):\n \"\"\"Writes the header file containing the block rules in c++\n This function is awful but works for now. Maybe move to another file.\n \"\"\"\n entry_count = len(output)\n output_file.write(r\"\"\"#ifndef NET_URL_REQUEST_TPFILTER_ENTRIES_H_\n#define NET_URL_REQUEST_TPFILTER_ENTRIES_H_\n\nnamespace net {\n\n#define TPFILTER_FLAG_EXCEPTION 1\n#define TPFILTER_FLAG_MATCH_DOMAIN 2\n#define TPFILTER_FLAG_MATCH_BEGIN 4\n#define TPFILTER_FLAG_MATCH_END 8\n#define TPFILTER_FLAG_HAS_WILDCARD 16\n#define TPFILTER_FLAG_HAS_SEPARATOR 32\n#define TPFILTER_FLAG_MATCH_CASE 64\n#define TPFILTER_FLAG_THIRD_PARTY 128\n#define TPFILTER_FLAG_FIRST_PARTY 256\n\nstruct tpfilter_entry {\n const char **matches;\n int flags;\n const char **domains, **domains_skip;\n};\n\n\"\"\")\n output_file.write(\"#define TPFILTER_ENTRY_COUNT %s\\n\\n\" % entry_count)\n\n\n for i, o in enumerate(output):\n output_file.write('const char *m%s[] = { \"%s^\", NULL };\\n' % (i, o))\n\n output_file.write(\n \"\\nstruct tpfilter_entry TPFILTER_ENTRIES[TPFILTER_ENTRY_COUNT] = {\")\n\n for i in range(entry_count):\n output_file.write(\"\\n {m%s, \" % (i))\n output_file.write(\"TPFILTER_FLAG_MATCH_DOMAIN | TPFILTER_FLAG_HAS_SEPARATOR | TPFILTER_FLAG_THIRD_PARTY, NULL, NULL }\")\n if not i == (entry_count - 1):\n output_file.write(\",\")\n output_file.write(\"\\n};\\n\\n}\\n\\n#endif\")\n\ndef load_json_from_url(url):\n try:\n loaded_json = json.loads(urllib2.urlopen(url).read())\n except:\n sys.stderr.write(\"Error loading %s\\n\" % url)\n sys.exit(-1)\n return loaded_json\n\ndef find_hosts(blocklist_json, allow_list, output_file, log_file,\n list_categories):\n \"\"\"Finds hosts that we should block from the Disconnect json.\n Args:\n blocklist_json: A JSON blob containing Disconnect's list.\n allow_list: Hosts that we can't put on the blocklist.\n output_file: A file-handle to the output file.\n log_file: A filehandle to the log file.\n \"\"\"\n # Number of items published\n publishing = 0\n\n # Remember previously-processed domains so we don't print them more than once\n domain_dict = {};\n\n # Array holding hash bytes to be written to f_out. 
We need the total bytes\n # before writing anything.\n output = [];\n\n categories = blocklist_json[\"categories\"]\n\n for c in categories:\n add_category_to_list = False\n for lc in list_categories.split(\",\"):\n if c.find(lc) != -1:\n add_category_to_list = True\n if not add_category_to_list:\n continue\n if add_category_to_list:\n # Is this list a single-category list?\n if len(list_categories) == 1:\n # Reset output to only include this category's content\n output = []\n if log_file:\n log_file.write(\"Processing %s\\n\" % c)\n\n # Objects of type\n # { Automattic: { http://automattic.com: [polldaddy.com] }}\n # Domain lists may or may not contain the address of the top-level site.\n for org in categories[c]:\n for orgname in org:\n org_json = org[orgname]\n dnt_value = org_json.pop('dnt', '')\n # Exclude sites that obey Do Not Track standards\n assert dnt_value in [\"w3c\", \"eff\", \"\"]\n if dnt_value != \"\":\n continue\n for top in org_json:\n domains = org_json[top]\n for d in domains:\n d = d.encode('utf-8');\n if c == \"Disconnect\":\n try:\n if not disconnect_mapping[d] in list_categories:\n continue\n except KeyError:\n sys.stderr.write(\n \"[ERROR] %s not found in disconnect_mapping\" % d\n )\n if not d in allow_list:\n if log_file:\n log_file.write(\"[m] %s\\n\" % (d));\n publishing += 1\n domain_dict[d] = 1;\n output.append(d);\n # for o in output:\n # if output_file:\n # output_file.write(\"%s\\n\" % (o))\n write_header_file(output_file, output)\n\ndef main():\n disconnect_url = \"https://raw.githubusercontent.com/mozilla-services/shavar-prod-lists/master/disconnect-blacklist.json\"\n allowlist_url = \"https://raw.githubusercontent.com/mozilla-services/shavar-list-exceptions/master/allow_list\"\n\n # Use local files for testing\n #disconnect_filename = \"disconnect-blacklist.json\"\n #allowlist_filename = \"allow_list\"\n\n #list_categories = \"Advertising,Analytics,Social,Disconnect,Content\"\n list_categories = \"Advertising,Analytics,Social,Disconnect\"\n output_filename = \"tpfilter_entries.h\"\n log_filename = \"create_blockfile.log\"\n\n # open output and log files\n try:\n output_file = open(output_filename, \"wb\")\n log_file = open(log_filename, \"w\")\n except:\n sys.stderr.write(\"Error opening %s\\n\" % url)\n sys.exit(-1)\n\n # process disconnect\n blocklist_json = load_json_from_url(disconnect_url)\n # read local file for testing\n #with open(disconnect_filename) as f:\n # blocklist_json = json.load(f)\n\n # load our allowlist\n allowed = set()\n # read local files for testing\n #for line in open(allowlist_filename).readlines():\n for line in urllib2.urlopen(allowlist_url).readlines():\n line = line.strip()\n # don't add blank lines or comments\n if not line or line.startswith('#'):\n continue\n allowed.add(line)\n\n find_hosts(blocklist_json, allowed, output_file, log_file,\n list_categories)\n\n if output_file:\n output_file.close()\n if log_file:\n log_file.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"chlorsoft/oxidan","sub_path":"scripts/create_blockfile.py","file_name":"create_blockfile.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31045460856","text":"def fibonacci_series(n):\n a = 0\n b = 1\n if n == 1:\n print(a)\n elif n == 2:\n print(a, b)\n else:\n print(\"\\n\", a, b, end=\" \")\n for i in range(n-2):\n c = a + b\n a = b\n b = c\n print(c, end=\" \")\n\n\nnum = int(input(\"\\nPlease Enter Place till you want to get 
Fibonacci Series: \"))\nfibonacci_series(num)\n","repo_name":"arpitgupta630/Python","sub_path":"Youtube/fibonacci_series.py","file_name":"fibonacci_series.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28851636672","text":"from flask import Flask, jsonify, Blueprint\nfrom googleapiclient.discovery import build \nimport os\nfrom dotenv import load_dotenv\n\napp = Flask(__name__)\n\nload_dotenv()\n\nyt_api = os.getenv('YT_API')\n\nyoutube = build('youtube', 'v3', developerKey=yt_api) # Generate API KEY yt_api from Google Cloud Console after enabling youtube api v3 and generating apiKey from credential section\n\nget_yt_comment = Blueprint('get_yt_comment', __name__)\n\n@get_yt_comment.route('/get-youtube-data/')\ndef get_yt_comments(video_id):\n\n res = youtube.commentThreads().list(\n part='snippet', \n videoId=video_id,\n order='relevance',\n textFormat='plainText',\n maxResults=500\n ).execute()\n\n comments = []\n for item in res['items']:\n comment = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comments.append(comment)\n\n return jsonify(comments)","repo_name":"tcet-opensource/Cybode","sub_path":"api.cybode.com/routes/social/youtubedata.py","file_name":"youtubedata.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"27634295957","text":"import asyncio\nimport curses\nfrom itertools import cycle\nimport os.path\nimport random\nimport time\n\nfrom curses_tools import draw_frame, read_controls, get_frame_size\nfrom explosion import explode\nfrom game_over import show_gameover\nfrom game_scenario import get_garbage_delay_tics, PHRASES\nfrom obstacles import Obstacle\nfrom physics import update_speed\nfrom sleep import sleep\n\n\nTIC_TIMEOUT = 0.1\nMIN_TIC_OFFSET = 1\nMAX_TIC_OFFSET = 30\nANIMATION_REPEAT_RATE = 2\nBORDER_SIZE = 1\nOFFSET_TO_SPACESHIP_EDGE = 2\nSECONDS_PER_YEAR = 1.5\nYEAR_WINDOW_HEIGHT = 4\nYEAR_WINDOW_WIDTH = 40\nGUN_YEAR = 2020\n\nSPACESHIP_ANIMATION_FILE_NAMES = ['rocket_frame_1.txt', 'rocket_frame_2.txt']\nGARBAGE_ANIMATION_FILE_NAMES = [\n 'duck.txt',\n 'hubble.txt',\n 'lamp.txt',\n 'trash_large.txt',\n 'trash_small.txt',\n 'trash_xl.txt'\n]\n\ncoroutines = []\nobstacles = []\nobstacles_in_last_collisions = []\nspaceship_frame = ''\nyear = 1957\n\n\nasync def blink(canvas, row, column, offset_tics, symbol='*'):\n canvas.addstr(row, column, symbol, curses.A_DIM)\n await sleep(offset_tics)\n\n while True:\n canvas.addstr(row, column, symbol)\n await sleep(3)\n\n canvas.addstr(row, column, symbol, curses.A_BOLD)\n await sleep(5)\n\n canvas.addstr(row, column, symbol)\n await sleep(3)\n\n canvas.addstr(row, column, symbol, curses.A_DIM)\n await sleep(20)\n\n\nasync def change_year():\n global year\n while True:\n await sleep(int(SECONDS_PER_YEAR / TIC_TIMEOUT))\n year += 1\n\n\nasync def draw_year(canvas):\n global year\n shown_message = ''\n year_row = label_column = 1\n message_row = 2\n while True:\n canvas.addstr(year_row, label_column, str(year))\n year_message = PHRASES.get(year)\n if year_message:\n canvas.addstr(message_row, label_column, year_message)\n shown_message = year_message\n\n if shown_message and not year_message:\n canvas.addstr(\n message_row,\n label_column,\n ' ' * (YEAR_WINDOW_WIDTH - label_column)\n )\n shown_message = ''\n\n await sleep()\n\n\nasync def fire(canvas, start_row, start_column, rows_speed=-0.3, 
columns_speed=0):\n \"\"\"Display animation of gun shot, direction and speed can be specified.\"\"\"\n\n row, column = start_row, start_column\n\n canvas.addstr(round(row), round(column), '*')\n await sleep()\n\n canvas.addstr(round(row), round(column), 'O')\n await sleep()\n canvas.addstr(round(row), round(column), ' ')\n\n row += rows_speed\n column += columns_speed\n\n symbol = '-' if columns_speed else '|'\n\n rows, columns = canvas.getmaxyx()\n max_row, max_column = rows - 1, columns - 1\n\n curses.beep()\n\n while 0 < row < max_row and 0 < column < max_column:\n for obstacle in obstacles:\n if obstacle.has_collision(row, column):\n obstacles_in_last_collisions.append(obstacle)\n return\n\n canvas.addstr(round(row), round(column), symbol)\n await sleep()\n canvas.addstr(round(row), round(column), ' ')\n row += rows_speed\n column += columns_speed\n\n\nasync def run_spaceship(canvas, start_row, start_column):\n row_number, column_number = canvas.getmaxyx()\n max_row = row_number - BORDER_SIZE\n max_column = column_number - BORDER_SIZE\n row_speed = column_speed = 0\n\n while True:\n row_direction, column_direction, pressed_space = read_controls(canvas)\n row_speed, column_speed = update_speed(\n row_speed,\n column_speed,\n row_direction / 10,\n column_direction / 10\n )\n new_row = start_row + row_speed\n new_column = start_column + column_speed\n ship_height, ship_width = get_frame_size(spaceship_frame)\n frame_max_row = new_row + ship_height\n frame_max_column = new_column + ship_width\n new_row = min(frame_max_row, max_row) - ship_height\n new_column = min(frame_max_column, max_column) - ship_width\n new_row = max(new_row, BORDER_SIZE)\n new_column = max(new_column, BORDER_SIZE)\n\n start_row, start_column = new_row, new_column\n draw_frame(canvas, start_row, start_column, spaceship_frame)\n drawn_frame = spaceship_frame\n\n if pressed_space and year >= GUN_YEAR:\n fire_column = start_column + OFFSET_TO_SPACESHIP_EDGE\n coroutines.append(fire(canvas, start_row, fire_column))\n\n await sleep()\n draw_frame(canvas, start_row, start_column, drawn_frame, negative=True)\n\n ship_collided = False\n for obstacle in obstacles:\n ship_collided = ship_collided or obstacle.has_collision(\n start_row,\n start_column,\n obj_size_rows=ship_height,\n obj_size_columns=ship_width\n )\n\n if ship_collided:\n break\n\n await show_gameover(canvas)\n\n\nasync def animate_spaceship(animation_frames):\n for frame in cycle(animation_frames):\n global spaceship_frame\n spaceship_frame = frame\n await sleep(2)\n\n\ndef get_stars(canvas, line_number, column_number):\n window_square = line_number * column_number\n stars = []\n used_coords = []\n first_line, first_column = 1, 1\n last_active_line, last_active_column = line_number - 2, column_number - 2\n for _ in range(int(window_square / 10)):\n while True:\n star_line = random.randint(first_line, last_active_line)\n star_column = random.randint(first_column, last_active_column)\n if (star_line, star_column) not in used_coords:\n break\n star_symbol = random.choice('+*.:')\n star = blink(\n canvas,\n star_line,\n star_column,\n random.randint(MIN_TIC_OFFSET, MAX_TIC_OFFSET),\n symbol=star_symbol\n )\n used_coords.append((star_line, star_column))\n stars.append(star)\n\n return stars\n\n\ndef get_frames(file_names):\n frames = []\n for file_name in file_names:\n file_path = os.path.join('animation', file_name)\n with open(file_path, 'r') as file:\n frame = file.read()\n frames.append(frame)\n\n return frames\n\n\nasync def fill_orbit_with_garbage(canvas, 
garbage_frames):\n _, column_number = canvas.getmaxyx()\n while True:\n garbage_delay = get_garbage_delay_tics(year)\n if garbage_delay:\n garbage_frame = random.choice(garbage_frames)\n garbage_column = random.randrange(1, column_number)\n garbage_body = fly_garbage(canvas, garbage_column, garbage_frame)\n coroutines.append(garbage_body)\n await sleep(garbage_delay or 1)\n\n\nasync def fly_garbage(canvas, column, garbage_frame, speed=0.5):\n \"\"\"Animate garbage, flying from top to bottom. Сolumn position will stay same, as specified on start.\"\"\"\n rows_number, columns_number = canvas.getmaxyx()\n\n column = max(column, 0)\n column = min(column, columns_number - 1)\n obstacle_height, obstacle_width = get_frame_size(garbage_frame)\n obstacle = Obstacle(\n 0,\n column,\n rows_size=obstacle_height,\n columns_size=obstacle_width\n )\n obstacles.append(obstacle)\n\n row = 0\n\n while row < rows_number and obstacle not in obstacles_in_last_collisions:\n draw_frame(canvas, row, column, garbage_frame)\n await asyncio.sleep(0)\n draw_frame(canvas, row, column, garbage_frame, negative=True)\n row += speed\n obstacle.row = row\n\n if obstacle in obstacles_in_last_collisions:\n center_obstacle_row = row + obstacle_height / 2\n center_obstacle_column = column + obstacle_width / 2\n await explode(canvas, center_obstacle_row, center_obstacle_column)\n obstacles_in_last_collisions.remove(obstacle)\n\n obstacles.remove(obstacle)\n\n\nasync def show_obstacles(canvas):\n \"\"\"Display bounding boxes of every obstacle in a list\"\"\"\n\n while True:\n boxes = []\n\n for obstacle in obstacles:\n boxes.append(obstacle.dump_bounding_box())\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame)\n\n await asyncio.sleep(0)\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame, negative=True)\n\n\ndef draw(canvas):\n canvas.nodelay(True)\n canvas.border()\n curses.curs_set(False)\n\n row_number, column_number = canvas.getmaxyx()\n stars = get_stars(canvas, row_number, column_number)\n\n spaceship_frames = get_frames(SPACESHIP_ANIMATION_FILE_NAMES)\n spaceship = animate_spaceship(spaceship_frames)\n\n row, column = row_number / 2, column_number / 2\n spaceship_motion = run_spaceship(canvas, row, column)\n\n garbage_frames = get_frames(GARBAGE_ANIMATION_FILE_NAMES)\n new_garbage = fill_orbit_with_garbage(canvas, garbage_frames)\n\n year_canvas = canvas.derwin(\n YEAR_WINDOW_HEIGHT,\n YEAR_WINDOW_WIDTH,\n row_number - YEAR_WINDOW_HEIGHT,\n column_number - YEAR_WINDOW_WIDTH\n )\n year_title = draw_year(year_canvas)\n\n initial_coroutines = [\n *stars,\n spaceship,\n spaceship_motion,\n new_garbage,\n change_year(),\n year_title\n ]\n coroutines.extend(initial_coroutines)\n\n while True:\n for coroutine in coroutines.copy():\n try:\n coroutine.send(None)\n except StopIteration:\n coroutines.remove(coroutine)\n\n canvas.refresh()\n canvas.border()\n time.sleep(TIC_TIMEOUT)\n\n\nif __name__ == '__main__':\n curses.update_lines_cols()\n curses.wrapper(draw)\n","repo_name":"AndreyAD1/async_spaceship","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11383954956","text":"from django.urls import path\nfrom .views import UserCreateView, LoginView, CreateEventView, EventRetrieveUpdateDeleteView\n\n\nurlpatterns = [\n path('register/', UserCreateView.as_view(), name='signup'),\n path('login/', LoginView.as_view(), name='login'),\n # 
event\n path('v3/app/events', CreateEventView.as_view(),name='create_event'),\n path('v3/app/events/', EventRetrieveUpdateDeleteView.as_view(),name='get_event'),\n\n\n]\n","repo_name":"Vishalanshuman/EventManager_task1","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38171429567","text":"\"\"\"fish tickets\n\nRevision ID: 6a59b01b8943\nRevises: \nCreate Date: 2023-05-22 17:36:34.829737\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '6a59b01b8943'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('ticket_entry',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('entry_date', sa.Date(), nullable=True),\n sa.Column('tide_am_pm', sa.Boolean(), nullable=True),\n sa.Column('set_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n mysql_charset='utf8mb4'\n )\n op.create_table('ticket_ticket',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('landing_num', sa.String(length=20), nullable=True),\n sa.Column('permit_num', sa.String(length=255), nullable=True),\n sa.Column('weight', sa.Integer(), nullable=True),\n sa.Column('landing_time', sa.DateTime(), nullable=True),\n sa.Column('ticket_notes', sa.String(length=255), nullable=True),\n sa.Column('ticket_entry_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('landing_num'),\n mysql_charset='utf8mb4'\n )\n op.drop_table('conversation_messages')\n op.drop_table('conversation_dialogue')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('conversation_dialogue',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('created_at', mysql.DATETIME(), nullable=True),\n sa.Column('updated_at', mysql.DATETIME(), nullable=True),\n sa.Column('user_id', mysql.INTEGER(), autoincrement=False, nullable=False),\n sa.Column('from_user_id', mysql.INTEGER(), autoincrement=False, nullable=True),\n sa.Column('to_user_id', mysql.INTEGER(), autoincrement=False, nullable=True),\n sa.Column('shared_id', mysql.VARCHAR(length=100), nullable=False),\n sa.Column('subject', mysql.VARCHAR(length=255), nullable=True),\n sa.Column('trash', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),\n sa.Column('draft', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),\n sa.Column('unread', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('id'),\n mysql_collate='utf8mb4_0900_ai_ci',\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.create_table('conversation_messages',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('created_at', mysql.DATETIME(), nullable=True),\n sa.Column('updated_at', mysql.DATETIME(), nullable=True),\n sa.Column('conversation_id', mysql.INTEGER(), autoincrement=False, nullable=False),\n sa.Column('user_id', mysql.INTEGER(), autoincrement=False, nullable=False),\n sa.Column('message', mysql.TEXT(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n mysql_collate='utf8mb4_0900_ai_ci',\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.drop_table('ticket_ticket')\n op.drop_table('ticket_entry')\n # ### end Alembic commands ###\n","repo_name":"coglenn/a2-flask-shop","sub_path":"migrations/versions/6a59b01b8943_fish_tickets.py","file_name":"6a59b01b8943_fish_tickets.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43500413655","text":"class Solution(object):\n def quickSort(self, array):\n \"\"\"\n input: int[] array\n return: int[]\n \"\"\"\n # write your solution here\n if not array:\n return array\n if len(array) == 0:\n return array\n\n self.partition(array, 0, len(array) - 1)\n return array\n\n def partition(self, array, low, high):\n if low < high:\n pivot = array[low]\n i = low + 1\n j = high\n while i <= j:\n if array[i] < pivot:\n i += 1\n elif array[j] >= pivot: # have to have equal\n j -= 1\n else:\n array[i], array[j] = array[j], array[i]\n array[j], array[low] = array[low], array[j]\n self.partition(array, low, j - 1)\n self.partition(array, j + 1, high)\n else:\n return\n\nif __name__ == \"__main__\":\n solution = Solution()\n\n test_array = [1, 2, 3, 4, 9, 5, 6, 7, 0]\n print('array is: ' + str(test_array))\n print('sorted array: ' + str(solution.quickSort(test_array)) + '\\n')\n\n test_array = []\n print('array is: ' + str(test_array))\n print('sorted array: ' + str(solution.quickSort(test_array)) + '\\n')\n\n test_array = None\n print('array is: ' + str(test_array))\n print('sorted array: ' + str(solution.quickSort(test_array)) + '\\n')\n\n\n test_array = [1, 1, 2, 2, 1, 1, 2, 2, 1]\n print('array is: ' + str(test_array))\n print('sorted array: ' + str(solution.quickSort(test_array)) + '\\n')\n\n test_array = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n print('array is: ' + str(test_array))\n print('sorted array: ' + str(solution.quickSort(test_array)) + '\\n')\n\n test_array = [1, 1, 1, 1, 1, 1, 1, 1, 1]\n print('array is: ' + 
str(test_array))\n print('sorted array: ' + str(solution.quickSort(test_array)) + '\\n')","repo_name":"wanlipu/coding-python","sub_path":"array/classic_sorting_algorithm/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27705801537","text":"from fastapi import APIRouter, Body\nfrom fastapi.encoders import jsonable_encoder\n\nfrom server.database import (\n retrieve_message,\n retrieve_messages,\n retrieve_user_message,\n)\nfrom server.models.users import (\n ErrorResponseModel,\n ResponseModel\n)\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_description=\"Messages retrieved\")\nasync def get_all_messages():\n messages = await retrieve_messages()\n if messages:\n return ResponseModel(messages, \"messages data retrieved successfully\")\n return ResponseModel(messages, \"Empty list returned\")\n\n\n@router.get(\"/rid/{rid}\", response_description=\"Message data retrieved\")\nasync def get_room_messages(rid):\n message = await retrieve_message(rid)\n if message:\n return ResponseModel(message, \"message data retrieved successfully\")\n return ErrorResponseModel(\"An error occurred.\", 404, \"message doesn't exist.\")\n\n@router.get(\"/user/{user}\", response_description=\"Message data retrieved\")\nasync def get_user_messages(user):\n message = await retrieve_user_message(user)\n if message:\n return ResponseModel(message, \"message data retrieved successfully\")\n return ErrorResponseModel(\"An error occurred.\", 404, \"message doesn't exist.\")\n","repo_name":"Bossman8/RocketChatAudit","sub_path":"rocket_audit/app/server/routes/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74773117926","text":"from datetime import datetime\nimport random\nimport time\n\n#list of odd numbers\nodd_numbers = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,\n 33,35,37,39,41,43,45,47,49,51,53,55,57,59]\n\n#for loop suite\nfor i in range(5):\n #generate a random number\n random_number = random.randint(0,60)\n\n #get the current minute from datetime module\n this_minute = datetime.today().minute\n\n #if-else suite\n if this_minute in odd_numbers:\n print (this_minute, 'is an odd minute')\n else:\n print (this_minute,'is an even minute')\n\n #put for loop to sleep for some random seconds\n print (random_number,'is the random number')\n time.sleep(random_number)\n","repo_name":"jaikherajani/PythonCodes","sub_path":"head_first/c1p1_odd_even_minutes.py","file_name":"c1p1_odd_even_minutes.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28248965","text":"# see: https://leetcode.com/problems/backspace-string-compare/description/\n# 自分の解答\nclass Solution:\n def backspaceCompare(self, s: str, t: str) -> bool:\n s2 = []\n t2 = []\n\n for i in s:\n if i == \"#\":\n if len(s2):\n s2.pop()\n else:\n s2.append(i)\n\n for j in t:\n if j == \"#\":\n if len(t2):\n t2.pop()\n else:\n t2.append(j)\n print(t2)\n\n return \"\".join(s2) == \"\".join(t2)\n","repo_name":"mizutaninaoki/AtCoderPractice","sub_path":"leet_code/easy/backspace-string-compare.py","file_name":"backspace-string-compare.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"25587786999","text":"\r\nfrom django.shortcuts import render\r\nfrom django.http import JsonResponse\r\nfrom .forms import UserCreationForm, AuthenticationForm\r\nfrom django.views.generic.base import View\r\nfrom django.contrib.auth import login, authenticate\r\nfrom mainApp.authenticate import FaceBackendAuthenication\r\nfrom django.contrib.auth.models import User\r\n# # creating some global variables\r\n# us = ''\r\n# em = ''\r\n# pswd = ''\r\n# usL = ''\r\n# pswdL = ''\r\n\r\n# # Create your views here.\r\n# def index(request):\r\n# return render(request , 'first.html')\r\n\r\ndef welcome(request):\r\n return render(request , 'welcome.html')\r\n\r\n# def faceSignUpSystem(request):\r\n# global us , em , pswd\r\n# if request.method==\"POST\":\r\n# m=sql.connect(host='localhost' , user='root' , password='mysql' , database='srajan')\r\n# cursor = m.cursor()\r\n# d=request.POST\r\n# for key , value in d.items():\r\n# if key==\"user_name\":\r\n# us = value\r\n# if key==\"email\":\r\n# em = value\r\n# if key==\"password\":\r\n# pswd = value\r\n# c = \"insert into one Values('{}','{}','{}')\".format(us,em,pswd)\r\n# cursor.execute(c)\r\n# m.commit()\r\n# return render(request , 'signup_page.html')\r\n\r\n# def faceLoginSystem(request):\r\n# global usL , pswdL\r\n# if request.method == \"POST\":\r\n# m=sql.connect(host='localhost' , user='root' ,password='mysql' , database='srajan')\r\n# cursor = m.cursor()\r\n# d=request.POST\r\n# for key ,value in d.items():\r\n# if key==\"user_name\":\r\n# usL = value\r\n# if key==\"password\":\r\n# pswdL = value\r\n# c = \"select * from one where user_name='{}' and password='{}'\".format(usL , pswdL)\r\n# cursor.execute(c)\r\n# t=tuple(cursor.fetchall())\r\n# if t==():\r\n# return render (request , 'error.html')\r\n# else:\r\n# return render(request , 'welcome.html')\r\n# return render(request , 'login_page.html')\r\n\r\n\r\n# class UserSignUpView(View):\r\n\r\n# def post(self, request):\r\n\r\n# try:\r\n# username = request.POST.get(\"username\")\r\n# password = request.POST.get(\"password\")\r\n# email = request.POST.get(\"email\")\r\n# newUser = User(email=email, username=username)\r\n\r\n# newUser.set_password(password)\r\n# newUser.save()\r\n# user = authenticate(username=username,password=password)\r\n# login(user)\r\n# # return render(request , 'welcome.html')\r\n# return JsonResponse({\r\n# \"message\": \"Created successfully\",\r\n# \"user\": newUser.to_json()\r\n# })\r\n# except Exception as e:\r\n# print(e)\r\n# return render(request, 'error.html')\r\n\r\nclass UserRegistration(View):\r\n# this takes the username, password, email and a image for user sign up\r\n def get(self, request):\r\n return render(request, 'registration/signup.html')\r\n\r\n def post(self, request):\r\n form = UserCreationForm(request.POST, request.FILES)\r\n \r\n\r\n \r\n if form.is_valid():\r\n form.save()\r\n username = form.cleaned_data['username']\r\n password = form.cleaned_data['password2']\r\n # user = authenticate(username=username, password=password)\r\n # login( request,user)\r\n return JsonResponse({\r\n \"sucess\":\"true\",\r\n 'data': \"successfull login\"\r\n })\r\n else: \r\n print(form.errors)\r\n return JsonResponse({\r\n \"sucess\"\r\n 'data': form.errors\r\n })\r\n \r\n\r\n\r\nclass UserFaceLogin(View):\r\n def post(self,request):\r\n \r\n \r\n # print(request.FILES)\r\n \r\n\r\n try:\r\n form = request.POST\r\n oldUser = FaceBackendAuthenication().faceAuthenicateByImageBase(form)\r\n login(request, oldUser)\r\n\r\n return JsonResponse({\r\n 
\"sucess\" :\"true\",\r\n 'data': \"successfull login\"\r\n })\r\n except Exception as e:\r\n return JsonResponse({\r\n \"sucess\":\"false\",\r\n 'data': str(e)\r\n })\r\n\r\n # return JsonResponse({\r\n\r\n # })\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef user_login(request):\r\n if request.method == 'GET':\r\n return render(request, 'registration/login.html')\r\n else :\r\n return JsonResponse({\r\n \"message\": \"post login\"\r\n })\r\n\r\ndef verify_username(request):\r\n user = User.objects.filter(username=request.POST['user_name'])\r\n if user:\r\n return JsonResponse({\r\n 'isValid': True\r\n })\r\n else:\r\n return JsonResponse({\r\n 'isValid': False\r\n })","repo_name":"SrajanAgrawal/FAuth","sub_path":"MyProject/mainApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16814456947","text":"import torch\nimport torch.nn as nn\n\n\ndef train_fn(model, optimizer, criterion, dataloader, device, sum_loss=True):\n model.train()\n final_loss = 0\n for data in dataloader:\n optimizer.zero_grad()\n inputs, targets = data['x'].to(device), data['y'].to(device)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n if sum_loss:\n loss += nn.L1Loss()(torch.sum(targets, 1), torch.sum(outputs, 1)) * 8e-7\n loss.backward()\n optimizer.step()\n final_loss += loss.item()\n final_loss /= len(dataloader)\n return final_loss\n\n\ndef valid_fn(model, criterion, inputs, targets, device):\n model.eval()\n inputs = torch.tensor(inputs.values, dtype=torch.float).to(device)\n targets = torch.tensor(targets.values, dtype=torch.float).to(device)\n targets = torch.reshape(targets, (-1, 1))\n with torch.no_grad():\n outputs = model(inputs)\n loss = criterion(outputs, targets).item()\n return loss\n\n\ndef inference_fn(model, inputs, device):\n model.eval()\n inputs = torch.tensor(inputs.values, dtype=torch.float).to(device)\n with torch.no_grad():\n outputs = model(inputs)\n return outputs.sigmoid().detach().cpu().numpy()\n\n","repo_name":"bokutotu/MoA","sub_path":"nn/src/model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10411008747","text":"from turtle import update\r\nfrom django.conf.urls import url\r\nfrom . 
import views\r\n\r\napp_name='customers'\r\n\r\nurlpatterns = [\r\n url(r'^$', views.customers_home),\r\n url(r'^create/$', views.create_customer, name=\"create\"),\r\n url(r'^getall/$', views.get_all_customer),\r\n url(r'^getbycode/$', views.get_cust_by_code),\r\n url(r'^update/$', views.update_customer),\r\n url(r'^delete/$', views.delete_customer)\r\n]","repo_name":"adhilabu/supermarket","sub_path":"customers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9702901217","text":"\"\"\"\nExample of the moment block Lanczos recursion for moments of the\nself-energy (MBLSE) solver.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyscf import gto, scf, agf2, lib\nfrom dyson import MBLSE, util\n\nniter = 1\ngrid = np.linspace(-40, 20, 1024)\n\n# Define a self-energy using PySCF\nmol = gto.M(atom=\"O 0 0 0; O 0 0 1\", basis=\"6-31g\", verbose=0)\nmf = scf.RHF(mol).run()\nse_static = np.diag(mf.mo_energy)\nse = agf2.AGF2(mf, nmom=(None, None)).build_se()\nse_moms = se.moment(range(2*niter+2))\n\n# Use the solver to get the spectral function\nsolver = MBLSE(se_static, se_moms)\nsolver.kernel()\ne, v = solver.get_dyson_orbitals()\nsf = util.build_spectral_function(e, v, grid, eta=1.0)\n\n# Get a reference spectral function for comparison\ngf = se.get_greens_function(se_static)\nsf_ref = util.build_spectral_function(gf.energy, gf.coupling, grid, eta=1.0)\n\n# Plot the results\nplt.plot(grid, sf_ref, \"C0-\", label=\"Reference\")\nplt.plot(grid, sf, \"C1-\", label=\"MBLSE\")\nplt.legend()\nplt.xlabel(\"Frequency (Ha)\")\nplt.ylabel(\"Spectral function\")\nplt.show()\n","repo_name":"BoothGroup/dyson","sub_path":"examples/00-mblse.py","file_name":"00-mblse.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"2778618055","text":"from BillWorker import BillWorker\n\ndef do1(start_date:str,end_date:str=\"\"):\n \"\"\"\n 生产领料单处理\n \"\"\"\n #T_PRD_PICKMTRLDATA_C T_PRD_PICKMTRLDATA_CE 核算表不引入\n tb_h=\"T_PRD_PICKMTRL\" #单据头表\n tb_L=\"T_PRD_PICKMTRL_L\"\n tb_e=\"T_PRD_PICKMTRLDATA\" #主单据体表\n tb_e1=\"T_PRD_PICKMTRLDATA_A\"\n tb_eeL=\"T_PRD_PICKMTRLDATA_L\"\n tb_LK=\"T_PRD_PICKMTRLDATA_LK\"\n b=BillWorker()\n b.pHead(tb_h,start_date,end_date)\n b.pEntry(tb_h,tb_L,entryId=\"FPKID\")\n b.pEntry(tb_h,tb_e)\n #b.pEntry(tb_h,tb_e1)\n b.pEntryX(tb_e,tb_e1,tb_h)\n b.pEntry(tb_e,tb_eeL,headId=\"FENTRYID\",entryId=\"FPKID\")\n b.pEntry(tb_e,tb_LK,headId=\"FENTRYID\",entryId=\"FLINKID\") #源单各id映射待后续更正\n b.to_sql()\n\ndef do2(start_date:str,end_date:str=\"\"):\n \"\"\"\n 生产退料单处理\n \"\"\"\n #T_PRD_RETURNMTRLENTRY_C T_PRD_RETURNMTRLENTRY_CE 核算表不引入\n tb_h=\"T_PRD_RETURNMTRL\" #单据头表\n tb_L=\"T_PRD_RETURNMTRL_L\"\n tb_e=\"T_PRD_RETURNMTRLENTRY\" #主单据体表\n tb_e1=\"T_PRD_RETURNMTRLENTRY_A\"\n tb_eeL=\"T_PRD_RETURNMTRLENTRY_L\"\n tb_LK=\"T_PRD_RETURNMTRLENTRY_LK\"\n b=BillWorker()\n b.pHead(tb_h,start_date,end_date)\n b.pEntry(tb_h,tb_L,entryId=\"FPKID\")\n b.pEntry(tb_h,tb_e)\n #b.pEntry(tb_h,tb_e1)\n b.pEntryX(tb_e,tb_e1,tb_h)\n b.pEntry(tb_e,tb_eeL,headId=\"FENTRYID\",entryId=\"FPKID\")\n b.pEntry(tb_e,tb_LK,headId=\"FENTRYID\",entryId=\"FLINKID\") #源单各id映射待后续更正\n b.to_sql()\n\n\ndef do3(start_date:str,end_date:str=\"\"):\n \"\"\"\n 生产退库单处理\n \"\"\"\n #T_PRD_RESTOCKENTRY_C T_PRD_RESTOCKENTRY_CE T_PRD_RESTOCKENTRY_CF 核算表不引入\n tb_h=\"T_PRD_RESTOCK\" #单据头表\n 
tb_L=\"T_PRD_RESTOCK_L\"\n tb_e=\"T_PRD_RESTOCKENTRY\" #主单据体表\n tb_e1=\"T_PRD_RESTOCKENTRY_A\"\n tb_eeL=\"T_PRD_RESTOCKENTRY_L\"\n tb_LK=\"T_PRD_RESTOCKENTRY_LK\"\n b=BillWorker()\n b.pHead(tb_h,start_date,end_date)\n b.pEntry(tb_h,tb_L,entryId=\"FPKID\")\n b.pEntry(tb_h,tb_e)\n #b.pEntry(tb_h,tb_e1)\n b.pEntryX(tb_e,tb_e1,tb_h)\n b.pEntry(tb_e,tb_eeL,headId=\"FENTRYID\",entryId=\"FPKID\")\n b.pEntry(tb_e,tb_LK,headId=\"FENTRYID\",entryId=\"FLINKID\") #源单各id映射待后续更正\n b.to_sql()\n\n\ndef do4(start_date:str,end_date:str=\"\"):\n \"\"\"\n 生产入库单处理\n \"\"\"\n #T_PRD_INSTOCKENTRY_C T_PRD_INSTOCKENTRY_CE T_PRD_INSTOCKENTRY_CF 核算表不引入\n tb_h=\"T_PRD_INSTOCK\" #单据头表\n tb_L=\"T_PRD_INSTOCK_L\"\n tb_e=\"T_PRD_INSTOCKENTRY\" #主单据体表\n tb_e1=\"T_PRD_INSTOCKENTRY_A\"\n tb_eeL=\"T_PRD_INSTOCKENTRY_L\"\n tb_LK=\"T_PRD_INSTOCKENTRY_LK\"\n b=BillWorker()\n b.pHead(tb_h,start_date,end_date)\n b.pEntry(tb_h,tb_L,entryId=\"FPKID\")\n b.pEntry(tb_h,tb_e)\n b.pEntryX(tb_e,tb_e1,tb_h)\n b.pEntry(tb_e,tb_eeL,headId=\"FENTRYID\",entryId=\"FPKID\")\n b.pEntry(tb_e,tb_LK,headId=\"FENTRYID\",entryId=\"FLINKID\") #源单各id映射待后续更正\n b.to_sql()\n\n\ndef do5(start_date:str,end_date:str=\"\"):\n \"\"\"\n 生产补料单处理\n \"\"\"\n #T_PRD_FEEDMTRLDATA_C T_PRD_FEEDMTRLDATA_CE 核算表不引入\n tb_h=\"T_PRD_FEEDMTRL\" #单据头表\n tb_L=\"T_PRD_FEEDMTRL_L\"\n tb_e=\"T_PRD_FEEDMTRLDATA\" #主单据体表\n tb_e1=\"T_PRD_FEEDMTRLDATA_Q\"\n tb_eeL=\"T_PRD_FEEDMTRLDATA_L\"\n tb_LK=\"T_PRD_FEEDMTRLDATA_LK\"\n b=BillWorker()\n b.pHead(tb_h,start_date,end_date)\n b.pEntry(tb_h,tb_L,entryId=\"FPKID\")\n b.pEntry(tb_h,tb_e)\n #b.pEntry(tb_h,tb_e1)\n b.pEntryX(tb_e,tb_e1,tb_h)\n b.pEntry(tb_e,tb_eeL,headId=\"FENTRYID\",entryId=\"FPKID\")\n b.pEntry(tb_e,tb_LK,headId=\"FENTRYID\",entryId=\"FLINKID\") #源单各id映射待后续更正\n b.to_sql()\n","repo_name":"szzend/KDCloud","sub_path":"prd_pick.py","file_name":"prd_pick.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24185239938","text":"import matplotlib.pyplot as plt\n\ndef load_data(lines_type, eff, res):\n for i in range(1, len(lines_type)):\n line_values = lines_type[i].split(',')\n eff.append(line_values[1])\n res.append(sum([float(i) for i in line_values[2:]]) / len(line_values[2:]))\n\ndef main():\n efforts = [[], [], [], [], []]\n results_types = [[], [], [], [], []]\n\n for i, file_name in enumerate(['rsel.csv', 'cel-rs.csv','2cel-rs.csv', 'cel.csv', '2cel.csv']):\n with open(file_name, 'r') as file:\n load_data(file.readlines(), efforts[i], results_types[i])\n\n plt.figure(figsize=(6, 6))\n plt_params = [\n ['b', '1-Evol-RS'],\n ['g', '1-Coev-RS'],\n ['r', '2-Coev-RS'],\n ['k', '1-Coev'],\n ['m', '2-Coev']\n ]\n for i, param in enumerate(plt_params):\n plt.plot(efforts[i], results_types[i], color=param[0], label=param[1])\n\n plt.legend(loc=4)\n plt.xlim(xmax=500000)\n plt.xlabel('Rozegranych gier')\n plt.ylabel('Odsetek wygranych gier')\n plt.savefig('myplot.pdf')\n plt.show()\n plt.close()\n\nif __name__ == '__main__':\n main()","repo_name":"dbachorz/KCK","sub_path":"visualisation/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24410505805","text":"import torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport os\nimport sys\nimport random\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, 
DataLoader\nfrom pytorch_pretrained_vit import ViT\nfrom p1_dataset import p1_data\n\n# Set random seed for reproducibility\nmanualSeed = 0\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\n\ntest_path = sys.argv[1]\noutput_path = sys.argv[2]\nmodel_path = './p1_model.pth'\n\nbatch_size = 8\nworkers = 2\nnum_classes = 37\n\ntest_tfm = transforms.Compose([\n transforms.Resize((384, 384)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntest_dataset = p1_data(test_path, mode='test', transform=test_tfm)\n\n# Create the dataloader\ntest_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n# Decide which device we want to run on\ndevice = torch.device(\"cuda\" if (torch.cuda.is_available()) else \"cpu\")\n\nmodel = ViT('B_16_imagenet1k', pretrained=True, num_classes=num_classes)\nmodel = model.to(device)\nmodel.load_state_dict(torch.load(model_path))\nmodel.eval()\n\npredictions = []\nfilenames = []\n\nfor i, (test_data, filename) in enumerate(test_loader):\n test_data = test_data.to(device) \n with torch.no_grad():\n class_logits = model(test_data)\n predictions.extend(class_logits.argmax(dim=-1).cpu().numpy().tolist())\n filenames.extend(filename)\n\nwith open(output_path, \"w\") as f:\n f.write(\"filename,label\\n\")\n # For the rest of the rows, each image name corresponds to a predicted class.\n for fn, pred in zip(filenames, predictions):\n f.write(f\"{fn},{pred}\\n\")\n","repo_name":"yiwei32/NTU_courses","sub_path":"2021_Fall/DLCV/hw3/p1_test.py","file_name":"p1_test.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40241247375","text":"from telethon.errors import (ChannelInvalidError, ChannelPrivateError,\n ChannelPublicGroupNaError)\nfrom telethon.tl import functions\nfrom telethon.tl.functions.channels import (GetFullChannelRequest,\n InviteToChannelRequest)\nfrom telethon.tl.functions.messages import GetFullChatRequest\nfrom TelethonHell.plugins import *\n\n\nasync def get_chatinfo(event):\n chat = event.pattern_match.group(1)\n chat_info = None\n if chat:\n try:\n chat = int(chat)\n except ValueError:\n pass\n if not chat:\n if event.reply_to_msg_id:\n replied_msg = await event.get_reply_message()\n if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:\n chat = replied_msg.fwd_from.channel_id\n else:\n chat = event.chat_id\n try:\n chat_info = await event.client(GetFullChatRequest(chat))\n except:\n try:\n chat_info = await event.client(GetFullChannelRequest(chat))\n except ChannelInvalidError:\n await parse_error(event, \"Invalid channel/group\")\n return None\n except ChannelPrivateError:\n await parse_error(event, \"Unaccessable channel.\")\n return None\n except ChannelPublicGroupNaError:\n await parse_error(event, \"Channel or supergroup doesn't exist\")\n return None\n except (TypeError, ValueError):\n await parse_error(event, \"Invalid channel/group\")\n return None\n return chat_info\n\n\ndef user_full_name(user):\n names = [user.first_name, user.last_name]\n names = [i for i in list(names) if i]\n full_name = \" \".join(names)\n return full_name\n\n\n@hell_cmd(pattern=\"inviteall(?:\\s|$)([\\s\\S]*)\")\nasync def get_users(event):\n hel_ = event.text[11:]\n hell = await eor(event, f\"__Inviting members from__ {hel_}\")\n kraken = await get_chatinfo(event)\n chat = await event.get_chat()\n if event.is_private:\n return await eod(hell, \"Nice try 
you fool!\")\n s = 0\n f = 0\n error = \"None\"\n await hell.edit(\"**INVITING USERS !!**\")\n async for user in event.client.iter_participants(kraken.full_chat.id):\n try:\n await event.client(InviteToChannelRequest(channel=chat, users=[user.id]))\n s += 1\n await hell.edit(\n f\"**INVITING USERS.. **\\n\\n**Invited :** `{s}` users \\n**Failed to Invite :** `{f}` users.\\n\\n**×Error :** `{error}`\"\n )\n except Exception as e:\n error = str(e)\n f += 1\n return await hell.edit(\n f\"**INVITING FINISHED** \\n\\n**Invited :** `{s}` users \\n**Failed :** `{f}` users.\"\n )\n\n\n@hell_cmd(pattern=\"add(?:\\s|$)([\\s\\S]*)\")\nasync def _(event):\n if \"addsudo\" in event.raw_text.lower() or \"addblacklist\" in event.raw_text.lower():\n return\n to_add_users = event.pattern_match.group(1)\n if event.is_private:\n await eod(event, f\"Use `{hl}add` users to a chat, not to a Private Message\")\n else:\n LOGS.info(to_add_users)\n if not event.is_channel and event.is_group:\n for user_id in to_add_users.split(\" \"):\n try:\n await event.client(\n functions.messages.AddChatUserRequest(\n chat_id=event.chat_id, user_id=user_id, fwd_limit=1000000\n )\n )\n except Exception as e:\n await event.reply(str(e))\n else:\n for user_id in to_add_users.split(\" \"):\n try:\n await event.client(\n functions.channels.InviteToChannelRequest(\n channel=event.chat_id, users=[user_id]\n )\n )\n except Exception as e:\n return await parse_error(event, e)\n await eod(event, \"Added user to the chat..\")\n\n\nCmdHelp(\"invite\").add_command(\n \"add\", \"\", \"Adds the given user to the group\"\n).add_command(\n \"inviteall\", \"\", \"Scraps user from the targeted group to your group. Basically Kidnapps user from one chat to another\"\n).add_info(\n \"Invite them.\"\n).add_warning(\n \"✅ Harmless Module.\"\n).add()\n","repo_name":"The-HellBot/Plugins-T","sub_path":"TelethonHell/plugins/invite.py","file_name":"invite.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"} +{"seq_id":"13299269859","text":"import os\nimport pandas as pd\nimport networkx as nx\nimport networkx.algorithms.community as nx_com\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nimport numpy as np\nfrom statistics import mean\nfrom statistics import median\n\nprint(\"Generating network plots...\")\ndata_dir = os.getcwd() + \"/data/\"\ndata_output_dir = os.getcwd() + \"analysis/data/network_analysis\"\n# Get a list of all folders in data directory\ndataset_folders = [f for f in os.listdir(data_dir) if os.path.isdir(data_dir + f)]\n\nfor partition in \"test\", \"train\", \"valid\", \"merged\":\n community_output_dir = \"{}/communities/{}\".format(data_output_dir, partition)\n components_output_dir = \"{}/components/{}\".format(data_output_dir, partition)\n degree_output_dir = \"{}/degree/{}\".format(data_output_dir, partition)\n pagerank_output_dir = \"{}/pagerank/{}\".format(data_output_dir, partition)\n graphics_output_dir = os.getcwd() + \"analysis/output/network_analysis/{}/\".format(partition)\n for dataset in dataset_folders:\n try:\n df = pd.read_csv(data_dir + \"/{}/CSVFiles/{}.csv\".format(dataset, partition))\n except:\n print(\"Couldn't load data for {}. Make sure to follow all instructions in the README \\\n to perform analysis on this dataset. 
For example, \\\n Wikidata5M needs to be downloaded first.\".format(dataset))\n continue\n directedGraph = nx.from_pandas_edgelist(df, 'head', 'tail', edge_attr=\"predicate\", create_using=nx.DiGraph)\n \n undirectedGraph = nx.from_pandas_edgelist(df, 'head', 'tail')\n components = nx.connected_components(undirectedGraph)\n component_sizes = sorted([len(component) for component in components])\n communities = nx_com.louvain_communities(undirectedGraph)\n pageranks = nx.pagerank(directedGraph)\n # We save pageranks in 10^-3 \n pageranks = [pagerank * 1000 for pagerank in pageranks.values()]\n degrees = [entry[1] for entry in nx.degree(directedGraph)]\n community_sizes = sorted([len(community) for community in communities])\n \n pagerank_distribution = Counter([round(pr, 2) for pr in pageranks])\n degree_distribution = Counter(degrees)\n\n output_dirs = [community_output_dir, components_output_dir, degree_output_dir, pagerank_output_dir, graphics_output_dir]\n for directory in output_dirs:\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n \n for folder in \"communities\", \"components\", \"degree\", \"pagerank\":\n if not os.path.exists(graphics_output_dir + folder):\n os.makedirs(graphics_output_dir + folder)\n \n # Write communities and their size to a CSV file\n with open(community_output_dir + \"/{}.csv\".format(dataset), \"w\") as writer:\n writer.write(\"CommunityID,Size\\n\")\n for i, community in enumerate(communities):\n writer.write(\"{},{}\\n\".format(i, len(community)))\n\n # Write components and their size to a CSV file\n with open(components_output_dir + \"/{}.csv\".format(dataset), \"w\") as writer:\n writer.write(\"ComponentID,Size\\n\")\n for i, size in enumerate(component_sizes):\n writer.write(\"{},{}\\n\".format(i, size))\n \n # Write degree distribution to a CSV file\n with open(degree_output_dir + \"/{}.csv\".format(dataset), \"w\") as writer:\n writer.write(\"Degree,Count\\n\")\n for degree, count in degree_distribution.items():\n writer.write(\"{},{}\\n\".format(degree, count))\n\n # Write pagerank distribution to a CSV file\n with open(pagerank_output_dir + \"/{}.csv\".format(dataset), \"w\") as writer:\n writer.write(\"PageRank,Count\\n\")\n for pr, count in pagerank_distribution.items():\n writer.write(\"{},{}\\n\".format(pr, count))\n \n plt.rcParams['axes.facecolor'] = '#e5e5e5'\n plt.rcParams['axes.grid'] = True\n plt.rcParams['grid.alpha'] = 1\n plt.rcParams['grid.linewidth'] = 1\n plt.rcParams['grid.color'] = \"#ffffff\"\n plt.rcParams['axes.axisbelow'] = True\n label_color = '#333333'\n primaryColor = '#20639b'\n plt.rcParams['text.color'] = label_color\n plt.rcParams['axes.labelcolor'] = label_color\n plt.rcParams['xtick.color'] = label_color\n plt.rcParams['ytick.color'] = label_color\n\n # Create community sizes diagram\n fig, ax = plt.subplots()\n plt.yscale('log')\n ax.bar(range(len(community_sizes)), community_sizes, color=primaryColor)\n fig.suptitle(\"{} Communities in {}\".format(len(community_sizes),dataset), fontsize=14, fontweight=\"bold\",)\n ax.set_title(\"with avg size {} and median size {}\".format(round(mean(community_sizes), 2), round(median(community_sizes))), fontsize=10)\n ax.set_xlabel('Community ID', fontsize=10)\n ax.set_ylabel('Size of community', fontsize=10)\n fig.savefig(graphics_output_dir + 'communities/{}.png'.format(dataset), dpi=300)\n\n # Create component sizes diagram\n fig, ax = plt.subplots()\n plt.yscale('log')\n ax.bar(range(len(component_sizes)), component_sizes)\n fig.suptitle(\"{} Components in 
{}\".format(len(component_sizes),dataset),fontsize=14, fontweight=\"bold\",)\n ax.set_title(\"with avg size {} and median size {}\".format(round(mean(component_sizes), 2), round(median(component_sizes), 2)), fontsize=10)\n ax.set_xlabel('Component ID', fontsize=10)\n ax.set_ylabel('Size of component', fontsize=10)\n fig.savefig(graphics_output_dir + 'components/{}.png'.format(dataset), dpi=500)\n\n # Create pagerank diagram\n pagerank_x = pagerank_distribution.keys()\n pagerank_y = pagerank_distribution.values()\n fig, ax = plt.subplots()\n plt.yscale('log')\n plt.xscale('log')\n ax.scatter(pagerank_x, pagerank_y, c=primaryColor, s=10)\n fig.suptitle(\"Pagerank distribution for {} \".format(dataset),fontsize=14, fontweight=\"bold\",)\n ax.set_title(\"with avg pagerank {} and median pagerank {}, in 10^-3\".format(round(mean(pageranks), 3), round(median(pageranks), 3)), fontsize=10)\n ax.set_xlabel('Pagerank in 10^-3', fontsize=10)\n ax.set_ylabel('Number of nodes', fontsize=10)\n fig.savefig(graphics_output_dir + 'pagerank/{}.png'.format(dataset), dpi=300)\n\n # Create degree diagram\n degree_x = degree_distribution.keys()\n degree_y = degree_distribution.values()\n fig, ax = plt.subplots()\n plt.yscale('log')\n plt.xscale('log')\n ax.scatter(degree_x, degree_y, c=primaryColor, s=10)\n fig.suptitle(\"Degree distribution for {} \".format(dataset),fontsize=14, fontweight=\"bold\",)\n ax.set_title(\"with avg degree {} and median degree {}\".format(round(mean(degrees), 2), round(median(degrees), 2)), fontsize=10)\n ax.set_xlabel('Degree', fontsize=10)\n ax.set_ylabel('Number of nodes', fontsize=10)\n fig.savefig(graphics_output_dir + 'degree/{}.png'.format(dataset), dpi=300)\n\n\n \n \n \n\n\n\n\n","repo_name":"SDM-TIB/LinkPredBias","sub_path":"analysis/generate_network_plots.py","file_name":"generate_network_plots.py","file_ext":"py","file_size_in_byte":7172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20646550211","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 14 11:07:33 2020\n\n@author: Ryan\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport uuid\nimport boto3\nimport logging\nimport re\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ns3_bucket_name = os.environ['s3bucketname']\n\n\ndef move_old_file_to_history():\n s3 = boto3.resource('s3') \n s3_client = boto3.client('s3')\n my_bucket = s3.Bucket(s3_bucket_name) \n for obj in my_bucket.objects.all():\n if obj.key.startswith('text/'):\n copy_source = '/' + s3_bucket_name + '/' + str(obj.key)\n logger.info(f\"Historical file found, moving file {copy_source}\")\n s3_client.copy_object(Bucket=s3_bucket_name,\n CopySource=copy_source,\n Key='history/' + str(obj.key))\n s3_client.delete_objects(Bucket=s3_bucket_name,\n Delete={\n 'Objects':[\n {\n 'Key': str(obj.key)\n }]})\n\n\ndef lambda_handler(event, context):\n s3 = boto3.resource('s3') \n \n# =============================================================================\n \n# URL = 'https://www.unifyconsulting.com/'\n# page = requests.get(URL)\n# \n# soup = BeautifulSoup(page.content, 'html.parser') \n# whitelist = ['p'] \n# text_elements = [t for t in soup.find_all(text=True) if t.parent.name in whitelist]\n# text_elements = \" \".join(text_elements)\n# text_elements = re.sub('\\n', '', text_elements)\n# text_elements = \" \".join(text_elements.split())\n \n \n# =============================================================================\n\n URL= 
'https://docs.python.org/3/glossary.html'\n page = requests.get(URL)\n \n soup = BeautifulSoup(page.content, 'html.parser')\n whitelist = ['p' ] \n text_elements = [t for t in soup.find_all(text=True) if t.parent.name in whitelist]\n text_elements = ' '.join(text_elements)\n text_elements = re.sub('\\n', '', text_elements)\n \n# =============================================================================\n \n logger.info(f\"Lambda to scrape website {URL}\")\n logger.info(f\"Uploading to {s3_bucket_name}\")\n move_old_file_to_history () \n\n file_uuid = str(uuid.uuid4().hex)\n text_filename = '/tmp/text_' + file_uuid + '.txt'\n s3_text_filename = 'text/text_' + file_uuid + '.txt'\n \n with open(text_filename, \"w\") as text_file:\n text_file.write(text_elements)\n \n s3.Bucket(s3_bucket_name).upload_file(text_filename, s3_text_filename) \n\n output_dict = {'url' : URL, 'uuid' : file_uuid}\n \n \n return output_dict\n","repo_name":"rlenaha2/unify_aws_training","sub_path":"advanced_training/code/lambda_scraper.py","file_name":"lambda_scraper.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7723712246","text":"import os\nimport numpy as np\nimport cv2\n\nfrom z_models import runmodel, loadmodel\nfrom z_util import mosaic, util, ffmpeg, filt, data\nfrom z_util import image_processing as impro\n\n'''\n---------------------Video Init---------------------\n'''\n\n\ndef video_init(opt, path):\n util.clean_tempfiles()\n fps, endtime, height, width = ffmpeg.get_video_infos(path)\n if opt.fps != 0:\n fps = opt.fps\n ffmpeg.video2voice(path, './tmp/voice_tmp.mp3')\n ffmpeg.video2image(path, './tmp/video2image/output_%05d.' + opt.tempimage_type, fps)\n imagepaths = os.listdir('./tmp/video2image')\n imagepaths.sort()\n return fps, imagepaths, height, width\n\n\n'''\n---------------------Add Mosaic---------------------\n'''\n\n\ndef addmosaic_img(opt, netS):\n path = opt.media_path\n print('Add Mosaic:', path)\n img = impro.imread(path)\n mask = runmodel.get_ROI_position(img, netS, opt)[0]\n img = mosaic.addmosaic(img, mask, opt)\n impro.imwrite(os.path.join(opt.result_dir, os.path.splitext(os.path.basename(path))[0] + '_add.jpg'), img)\n\n\ndef addmosaic_video(opt, netS):\n path = opt.media_path\n fps, imagepaths = video_init(opt, path)[:2]\n # get position\n positions = []\n for i, imagepath in enumerate(imagepaths, 1):\n img = impro.imread(os.path.join('./tmp/video2image', imagepath))\n mask, x, y, size, area = runmodel.get_ROI_position(img, netS, opt)\n positions.append([x, y, area])\n cv2.imwrite(os.path.join('./tmp/ROI_mask', imagepath), mask)\n print('\\r', 'Find ROI location:' + str(i) + '/' + str(len(imagepaths)),\n util.get_bar(100 * i / len(imagepaths), num=35), end='')\n print('\\nOptimize ROI locations...')\n mask_index = filt.position_medfilt(np.array(positions), 7)\n\n # add mosaic\n for i in range(len(imagepaths)):\n mask = impro.imread(os.path.join('./tmp/ROI_mask', imagepaths[mask_index[i]]), 'gray')\n img = impro.imread(os.path.join('./tmp/video2image', imagepaths[i]))\n if impro.mask_area(mask) > 100:\n img = mosaic.addmosaic(img, mask, opt)\n cv2.imwrite(os.path.join('./tmp/addmosaic_image', imagepaths[i]), img)\n print('\\r', 'Add Mosaic:' + str(i + 1) + '/' + str(len(imagepaths)),\n util.get_bar(100 * i / len(imagepaths), num=35), end='')\n print()\n ffmpeg.image2video(fps,\n './tmp/addmosaic_image/output_%05d.' 
+ opt.tempimage_type,\n './tmp/voice_tmp.mp3',\n os.path.join(opt.result_dir, os.path.splitext(os.path.basename(path))[0] + '_add.mp4'))\n\n\n'''\n---------------------Style Transfer---------------------\n'''\n\n\ndef styletransfer_img(opt, netG):\n print('Style Transfer_img:', opt.media_path)\n img = impro.imread(opt.media_path)\n img = runmodel.run_styletransfer(opt, netG, img)\n suffix = os.path.basename(opt.model_path).replace('.pth', '').replace('style_', '')\n impro.imwrite(\n os.path.join(opt.result_dir, os.path.splitext(os.path.basename(opt.media_path))[0] + '_' + suffix + '.jpg'),\n img)\n\n\ndef styletransfer_video(opt, netG):\n path = opt.media_path\n positions = []\n fps, imagepaths = video_init(opt, path)[:2]\n\n for i, imagepath in enumerate(imagepaths, 1):\n img = impro.imread(os.path.join('./tmp/video2image', imagepath))\n img = runmodel.run_styletransfer(opt, netG, img)\n cv2.imwrite(os.path.join('./tmp/style_transfer', imagepath), img)\n print('\\r', 'Transfer:' + str(i) + '/' + str(len(imagepaths)), util.get_bar(100 * i / len(imagepaths), num=35),\n end='')\n print()\n suffix = os.path.basename(opt.model_path).replace('.pth', '').replace('style_', '')\n ffmpeg.image2video(fps,\n './tmp/style_transfer/output_%05d.' + opt.tempimage_type,\n './tmp/voice_tmp.mp3',\n os.path.join(opt.result_dir,\n os.path.splitext(os.path.basename(path))[0] + '_' + suffix + '.mp4'))\n\n\n'''\n---------------------Clean Mosaic---------------------\n'''\n\n\ndef get_mosaic_positions(opt, netM, imagepaths, savemask=True):\n # get mosaic position\n positions = []\n for i, imagepath in enumerate(imagepaths, 1):\n img_origin = impro.imread(os.path.join('./tmp/video2image', imagepath))\n x, y, size, mask = runmodel.get_mosaic_position(img_origin, netM, opt)\n if savemask:\n cv2.imwrite(os.path.join('./tmp/mosaic_mask', imagepath), mask)\n positions.append([x, y, size])\n print('\\r', 'Find mosaic location:' + str(i) + '/' + str(len(imagepaths)),\n util.get_bar(100 * i / len(imagepaths), num=35), end='')\n print('\\nOptimize mosaic locations...')\n positions = np.array(positions)\n for i in range(3):\n positions[:, i] = filt.medfilt(positions[:, i], opt.medfilt_num)\n return positions\n\n\ndef cleanmosaic_img(opt, netG, netM):\n path = opt.media_path\n print('Clean Mosaic:', path)\n img_origin = impro.imread(path)\n x, y, size, mask = runmodel.get_mosaic_position(img_origin, netM, opt)\n cv2.imwrite('./mask/' + os.path.basename(path), mask)\n img_result = img_origin.copy()\n if size != 0:\n img_mosaic = img_origin[y - size:y + size, x - size:x + size]\n if opt.traditional:\n img_fake = runmodel.traditional_cleaner(img_mosaic, opt)\n else:\n img_fake = runmodel.run_pix2pix(img_mosaic, netG, opt)\n img_result = impro.replace_mosaic(img_origin, img_fake, mask, x, y, size, opt.no_feather)\n else:\n print('Do not find mosaic')\n impro.imwrite(os.path.join(opt.result_dir, os.path.splitext(os.path.basename(path))[0] + '_clean.jpg'), img_result)\n\n\ndef cleanmosaic_video_byframe(opt, netG, netM):\n path = opt.media_path\n fps, imagepaths = video_init(opt, path)[:2]\n positions = get_mosaic_positions(opt, netM, imagepaths, savemask=True)\n # clean mosaic\n for i, imagepath in enumerate(imagepaths, 0):\n x, y, size = positions[i][0], positions[i][1], positions[i][2]\n img_origin = impro.imread(os.path.join('./tmp/video2image', imagepath))\n img_result = img_origin.copy()\n if size != 0:\n img_mosaic = img_origin[y - size:y + size, x - size:x + size]\n if opt.traditional:\n img_fake = 
runmodel.traditional_cleaner(img_mosaic, opt)\n else:\n img_fake = runmodel.run_pix2pix(img_mosaic, netG, opt)\n mask = cv2.imread(os.path.join('./tmp/mosaic_mask', imagepath), 0)\n img_result = impro.replace_mosaic(img_origin, img_fake, mask, x, y, size, opt.no_feather)\n cv2.imwrite(os.path.join('./tmp/replace_mosaic', imagepath), img_result)\n print('\\r', 'Clean Mosaic:' + str(i + 1) + '/' + str(len(imagepaths)),\n util.get_bar(100 * i / len(imagepaths), num=35), end='')\n print()\n ffmpeg.image2video(fps,\n './tmp/replace_mosaic/output_%05d.' + opt.tempimage_type,\n './tmp/voice_tmp.mp3',\n os.path.join(opt.results_dir, os.path.splitext(os.path.basename(path))[0] + '_clean.mp4'))\n\n\ndef cleanmosaic_video_fusion(opt, netG, netM):\n path = opt.media_path\n N = opt.N\n if 'HD' in os.path.basename(opt.model_path):\n INPUT_SIZE = 256\n else:\n INPUT_SIZE = 128\n fps, imagepaths, height, width = video_init(opt, path)\n positions = get_mosaic_positions(opt, netM, imagepaths, savemask=True)\n\n # clean mosaic\n img_pool = np.zeros((height, width, 3 * N), dtype='uint8')\n for i, imagepath in enumerate(imagepaths, 0):\n x, y, size = positions[i][0], positions[i][1], positions[i][2]\n\n # image read stream\n mask = cv2.imread(os.path.join('./tmp/mosaic_mask', imagepath), 0)\n if i == 0:\n for j in range(0, N):\n img_pool[:, :, j * 3:(j + 1) * 3] = impro.imread(\n os.path.join('./tmp/video2image', imagepaths[np.clip(i + j - 12, 0, len(imagepaths) - 1)]))\n else:\n img_pool[:, :, 0:(N - 1) * 3] = img_pool[:, :, 3:N * 3]\n img_pool[:, :, (N - 1) * 3:] = impro.imread(\n os.path.join('./tmp/video2image', imagepaths[np.clip(i + 12, 0, len(imagepaths) - 1)]))\n img_origin = img_pool[:, :, int((N - 1) / 2) * 3:(int((N - 1) / 2) + 1) * 3]\n\n if size == 0: # can not find mosaic,\n cv2.imwrite(os.path.join('./tmp/replace_mosaic', imagepath), img_origin)\n else:\n\n mosaic_input = np.zeros((INPUT_SIZE, INPUT_SIZE, 3 * N + 1), dtype='uint8')\n mosaic_input[:, :, 0:N * 3] = impro.resize(img_pool[y - size:y + size, x - size:x + size, :], INPUT_SIZE)\n mask_input = impro.resize(mask, np.min(img_origin.shape[:2]))[y - size:y + size, x - size:x + size]\n mosaic_input[:, :, -1] = impro.resize(mask_input, INPUT_SIZE)\n\n mosaic_input = data.im2tensor(mosaic_input, bgr2rgb=False, use_gpu=opt.use_gpu, use_transform=False,\n is0_1=False)\n unmosaic_pred = netG(mosaic_input)\n img_fake = data.tensor2im(unmosaic_pred, rgb2bgr=False, is0_1=False)\n img_result = impro.replace_mosaic(img_origin, img_fake, mask, x, y, size, opt.no_feather)\n cv2.imwrite(os.path.join('./tmp/replace_mosaic', imagepath), img_result)\n print('\\r', 'Clean Mosaic:' + str(i + 1) + '/' + str(len(imagepaths)),\n util.get_bar(100 * i / len(imagepaths), num=35), end='')\n print()\n ffmpeg.image2video(fps,\n './tmp/replace_mosaic/output_%05d.' 
+ opt.tempimage_type,\n './tmp/voice_tmp.mp3',\n os.path.join(opt.result_dir, os.path.splitext(os.path.basename(path))[0] + '_clean.mp4'))\n","repo_name":"Z863058/demosaic","sub_path":"z_cores/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":9799,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"15819264512","text":"# This module contains all requirements for \"module Inner_funcs\".\r\n\r\nimport time\r\nfrom os import name, system\r\nfrom random import randint\r\n\r\n\r\ndef clearBoard():\r\n \"\"\"This function clears the pervious board to make place forthe new one.\r\n It also has a seccond countdown :) \"\"\"\r\n time.sleep(1)\r\n if name == \"nt\":\r\n system(\"cls\")\r\n else:\r\n system(\"clear\")\r\n\r\n\r\ndef randomMove(board):\r\n \"\"\"A function which helps computer to choose a random move in game.\"\"\"\r\n while True:\r\n selection = randint(1, len(board))\r\n if isinstance(board[selection-1], int):\r\n return selection\r\n\r\n\r\ndef possibleMove(board, move):\r\n \"\"\"This function helps the programm to check wheather the chosen move\r\n can be used or not .\"\"\"\r\n if move in range(1, 10) and isinstance(board[move-1], int):\r\n return True\r\n return False\r\n\r\n\r\ndef x_or_o():\r\n \"\"\"A starter for our programm which also gives users the opportunity to\r\n choose between 'X' and 'O' .\"\"\"\r\n clearBoard()\r\n print(\"Welcome to TicTakToe\\nChoose wheater you want to be X or O :\")\r\n while True:\r\n player = input()\r\n if player.lower() == 'o':\r\n player = 'O'\r\n computer = 'X'\r\n break\r\n elif player.lower() == 'x':\r\n player = 'X'\r\n computer = 'O'\r\n break\r\n else:\r\n print(\"Error!!\\nchoose between 'x' and 'o' please ...\")\r\n print(f\"You are : {player}\\nComputer is : {computer}\")\r\n return player, computer\r\n","repo_name":"sinbadBahri/Tic_Tac_Toe","sub_path":"Game_packages/data/requirements/required_funcs.py","file_name":"required_funcs.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33857847721","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nimport Freenove_DHT as DHT\nimport dehumidifier as dm\nDHTPin = 11 # define the pin of DHT11\n\ndef loop():\n dht = DHT.DHT(DHTPin) # create a DHT class object\n dhm = dm.DeHumidifier()\n \n while True:\n chk = dht.readDHT11()\n if chk is dht.DHTLIB_OK:\n temp = convert(dht.temperature)\n dhm.check_status(dht)\n print(f'Humidity : {dht.humidity:,.2f}, \\t Temperature : {temp:,.2f}')\n else:\n pass\n \n sleep(2)\n \ndef convert(temp):\n ''' Convert a celcius temperature reading to fahrenheit '''\n try:\n fh = (temp * 9/5) + 32\n return fh\n except ValueError:\n return 0\n \nif __name__ == '__main__':\n print('Program is starting...')\n try:\n loop()\n except KeyboardInterrupt:\n GPIO.cleanup()\n exit()\n","repo_name":"israel-dryer/Aqua-Pi","sub_path":"Testing/dehumidifer_test.py","file_name":"dehumidifer_test.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"7621605176","text":"import numpy as np\nfrom DataCompression.msc.vae_agarap import AE\nfrom DataCompression.src.decoder_mse import decode\n\n# load the data\ndata = np.load('digits389.npy', allow_pickle=True).item()\nx_test = data['Xtest']\nx_train = data['Xtrain']\ny_test = data['Ytest']\ny_train = data['Ytrain']\n\n# train the original vae to get 
the low dim spaces (this is slow because used single batches)\nprint(\"Original VAE loss:\")\nae = AE()\nae.train_ae(x_train, epochs=2)\n# get the lower dimensional space it produced\nlatent = ae.get_latent(x_train)\n\n# reformat the data to fit the decoder method\nlatent = np.asarray(latent)\n\n# train the decoder network\nprint(\"Our decoder network loss:\")\ndecode(x_train, latent, epochs=20)\n\n# after 2 epochs for compression network and 20 epochs for decompression network\n# achieves loss of 0.128\n","repo_name":"jabader97/DataCompression","sub_path":"exp/playground/pg002_decoder_mse_mnist.py","file_name":"pg002_decoder_mse_mnist.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7462421780","text":"# -*- coding: utf8 -*-\nimport os\nimport sys\nimport glob\nimport multiprocessing\n\nSetOption(\"num_jobs\", multiprocessing.cpu_count()) # build with all available cpu cores/threads\n\nenv = Environment()\nenv.Decider(\"MD5\")\nconf = Configure(env)\n\ntry:\n # default case for ubuntu 18.04\n env.ParseConfig('pkg-config --cflags --libs opencv')\nexcept OSError as e:\n print(\"use opencv4\")\n # case for ubuntu 20.04, 21.04\n env.ParseConfig('pkg-config --cflags --libs opencv4')\n\n\nenv.Append(CXXFLAGS=['-std=c++11', \"-O3\", \"-g\"])\n\nsources = set(glob.glob(\"*.cpp\") + glob.glob(\"*.c\"))\n\nenv.Program(\"siti\", list(sources))\n\n","repo_name":"Telecommunication-Telemedia-Assessment/SITI","sub_path":"src/SITI/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"} +{"seq_id":"39577414446","text":"from hypothesis import given\n\nfrom ims.ext.trial import TestCase\n\nfrom ..._address import Address, RodGarettAddress, TextOnlyAddress\nfrom ...strategies import rodGarettAddresses, textOnlyAddresses\nfrom .._json import jsonDeserialize, jsonSerialize\nfrom .json import jsonFromRodGarettAddress, jsonFromTextOnlyAddress\n\n\n__all__ = ()\n\n\nclass UnknownAddress(Address):\n \"\"\"\n Unknown Address subclass.\n \"\"\"\n\n\nclass AddressSerializationTests(TestCase):\n \"\"\"\n Tests for serialization of :class:`TextOnlyAddress`\n \"\"\"\n\n def test_serialize_unknown(self) -> None:\n \"\"\"\n :func:`jsonSerialize` raises TypeError for unknown address types.\n \"\"\"\n address = UnknownAddress()\n e = self.assertRaises(TypeError, jsonSerialize, address)\n self.assertEqual(\n str(e), \"Unknown address type: UnknownAddress(description=None)\"\n )\n\n\nclass TextOnlyAddressSerializationTests(TestCase):\n \"\"\"\n Tests for serialization of :class:`TextOnlyAddress`\n \"\"\"\n\n @given(textOnlyAddresses())\n def test_serialize(self, address: TextOnlyAddress) -> None:\n \"\"\"\n :func:`jsonSerialize` serializes the given address.\n \"\"\"\n self.assertEqual(\n jsonSerialize(address), jsonFromTextOnlyAddress(address)\n )\n\n\nclass RodGarettAddressSerializationTests(TestCase):\n \"\"\"\n Tests for serialization of :class:`RodGarettAddress`\n \"\"\"\n\n @given(rodGarettAddresses())\n def test_serialize(self, address: RodGarettAddress) -> None:\n \"\"\"\n :func:`jsonSerialize` serializes the given address.\n \"\"\"\n self.assertEqual(\n jsonSerialize(address), jsonFromRodGarettAddress(address)\n )\n\n\nclass TextOnlyAddressDeserializationTests(TestCase):\n \"\"\"\n Tests for deserialization of :class:`TextOnlyAddress`\n \"\"\"\n\n @given(textOnlyAddresses())\n def 
test_deserialize(self, address: TextOnlyAddress) -> None:\n \"\"\"\n :func:`jsonDeserialize` returns a address with the correct data.\n \"\"\"\n self.assertEqual(\n jsonDeserialize(jsonFromTextOnlyAddress(address), TextOnlyAddress),\n address,\n )\n\n\nclass RodGarettAddressDeserializationTests(TestCase):\n \"\"\"\n Tests for deserialization of :class:`RodGarettAddress`\n \"\"\"\n\n @given(rodGarettAddresses())\n def test_deserialize(self, address: RodGarettAddress) -> None:\n \"\"\"\n :func:`jsonDeserialize` returns a address with the correct data.\n \"\"\"\n self.assertEqual(\n jsonDeserialize(\n jsonFromRodGarettAddress(address), RodGarettAddress\n ),\n address,\n )\n","repo_name":"burningmantech/ranger-ims-server","sub_path":"src/ims/model/json/test/test_address.py","file_name":"test_address.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"26377651697","text":"import asyncio\nfrom multiprocessing import Process\n\nimport asyncpg\nfrom redis import asyncio as aioredis\nimport tornado.ioloop\nimport tornado.netutil\nimport tornado.process\nimport tornado.httpserver\nimport tornado.web\nimport tornado.log\nimport tornado.options\n\nimport config\nimport url as ur\nfrom services.service import services_init\nfrom services.judge import JudgeServerClusterService\n\n\nasync def materialized_view_task():\n db = await asyncpg.connect(database=config.DBNAME_OJ, user=config.DBUSER_OJ, password=config.DBPW_OJ, host='localhost')\n rs = await aioredis.Redis(host='localhost', port=6379, db=1)\n p = rs.pubsub()\n await p.subscribe('materialized_view_req')\n\n async def _update():\n ret = await rs.incr('materialized_view_counter') - 1\n await db.execute('REFRESH MATERIALIZED VIEW challenge_state;')\n return ret\n\n counter = await _update()\n async for msg in p.listen():\n if msg['type'] != 'message':\n continue\n\n ind = int(msg['data'])\n if ind <= counter:\n continue\n\n counter = await _update()\n\nif __name__ == \"__main__\":\n httpsock = tornado.netutil.bind_sockets(5500)\n def run_materialized_view_task():\n try:\n loop = asyncio.new_event_loop()\n loop.run_until_complete(materialized_view_task())\n loop.run_forever()\n\n finally:\n loop.stop()\n loop.close()\n\n view_task_process = Process(target=run_materialized_view_task)\n view_task_process.start()\n\n # tornado.process.fork_processes(4)\n db = asyncio.get_event_loop().run_until_complete(asyncpg.create_pool(database=config.DBNAME_OJ, user=config.DBUSER_OJ, password=config.DBPW_OJ, host='localhost'))\n rs = aioredis.Redis(host='localhost', port=6379, db=1)\n\n services_init(db, rs)\n app = tornado.web.Application(ur.get_url(db, rs), autoescape='xhtml_escape', cookie_secret=config.COOKIE_SEC)\n # NOTE: for dev\n # app = tornado.web.Application(ur.get_url(db, rs), autoescape='xhtml_escape', cookie_secret=config.COOKIE_SEC, debug=True, autoreload=True)\n\n tornado.log.enable_pretty_logging()\n\n tornado.options.parse_command_line()\n\n httpsrv = tornado.httpserver.HTTPServer(app, xheaders=True)\n httpsrv.add_sockets(httpsock)\n\n tornado.ioloop.IOLoop.current().run_sync(JudgeServerClusterService.inst.start)\n\n try:\n tornado.ioloop.IOLoop.current().start()\n except:\n pass\n\n finally:\n view_task_process.kill()\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(db.close())\n loop.run_until_complete(rs.close())\n loop.run_until_complete(JudgeServerClusterService.inst.disconnect_all_server())\n 
tornado.ioloop.IOLoop.current().stop()\n tornado.ioloop.IOLoop.current().close()\n","repo_name":"TFcis/NTOJ","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"9849108099","text":"'''\r\nSuppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\r\n\r\n(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).\r\n\r\nYou are given a target value to search. If found in the array return its index, otherwise return -1.\r\n\r\nYou may assume no duplicate exists in the array.\r\n\r\nYour algorithm's runtime complexity must be in the order of O(log n).\r\n\r\nExample 1:\r\n\r\nInput: nums = [4,5,6,7,0,1,2], target = 0\r\nOutput: 4\r\nExample 2:\r\n\r\nInput: nums = [4,5,6,7,0,1,2], target = 3\r\nOutput: -1\r\n'''\r\ndef sol(nums):\r\n reverse = False\r\n if not len(nums):\r\n return -1\r\n if target > nums[-1]:\r\n nums.reverse()\r\n reverse = True\r\n length = len(nums)\r\n for i in range(1,length+1):\r\n if target == nums[length - i]:\r\n return i-1 if reverse else length-i\r\n return -1\r\n","repo_name":"oknashar/interview-preparation","sub_path":"top75LeetCode/Arrays/33.searchInSortedRotatedArr.py","file_name":"33.searchInSortedRotatedArr.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34606506956","text":"import base64, boto3, botocore, logging, os, sys, json\nfrom flask import Flask\nfrom flask_cors import CORS\n\nos.putenv('LANG', 'en_US.UTF-8')\nos.putenv('LC_ALL', 'en_US.UTF-8')\n\nlogging.basicConfig(\n handlers=[\n logging.FileHandler(\"gradeServer.log\"),\n logging.StreamHandler()\n ],\n level=logging.INFO)\n\nCODE_DIR = \"./submission\"\nRESULT_PATH=\"ta/grading/{project}/{netId}.json\"\nTEST_DIR = \"/tmp/test/{netId}\"\nSUBMISSIONS = 'submissions'\nBUCKET = 'caraza-harter-cs301'\nsession = boto3.Session(profile_name='cs301ta')\ns3 = session.client('s3')\n\napp = Flask(__name__)\nCORS(app)\n\n\n# General template to fetch from s3\ndef s3Fetcher(path, name, raiseError = True):\n try:\n response = s3.get_object(Bucket=BUCKET, Key=path)\n return response['Body'].read().decode('utf-8')\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"NoSuchKey\":\n logging.info(\n \"key {} doesn't exist when look up for {}.\".format(path, name))\n else:\n logging.warning(\n \"Unexpected error {} when look up for {}.\".format(e.response['Error']['Code'], name))\n if raiseError:\n raise e\n\ndef lookupNetId(googleId):\n path = 'users/google_to_net_id/%s.txt' % googleId\n return s3Fetcher(path, \"netId\", False)\n\ndef lookupGrade(netId, project):\n resultPath = RESULT_PATH.format(project=project, netId=netId)\n response = s3Fetcher(resultPath, \"Grade Result\", False)\n return response\n\ndef htmlGrade(resultStr):\n result = json.loads(resultStr)\n grade = result.get(\"score\", 0)\n tests = result.get(\"tests\", None)\n template = \"
  • Grade: {grade}{tests}
\"\n testsHtml = \"\"\n if tests:\n for test in tests:\n tTest = test.get(\"test\", \"\")\n tResult = test.get(\"result\", \"\")\n testsHtml += \"
  • test: {} result: {}\".format(tTest, tResult)\n headerInfo = template.format(grade=grade, tests=testsHtml)\n if grade == 100:\n comments = \"Good work!\"\n else:\n comments = \"There are some errors in your script. Please make sure you run test.py before submission!\"\n return json.dumps({\"detail\" : headerInfo, \"comments\" : comments})\n\n@app.route('/')\ndef index():\n return \"index\"\n\n@app.route('//')\ndef gradingJson(project, googleId):\n netId = lookupNetId(googleId)\n result = lookupGrade(netId, project)\n if result:\n return htmlGrade(result)\n else:\n return json.dumps({\"detail\" : \"\", \"comments\" : \"\"})\n","repo_name":"shenghaozou/showGrade","sub_path":"server/gradeServer.py","file_name":"gradeServer.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36625551020","text":"import cv2\r\nimport easyocr\r\nfrom collections import Counter\r\nimport requests\r\n\r\n\r\ndef verify_text(plate_number):\r\n # API endpoint URL\r\n url = 'http://localhost:5000/api/verify_text'\r\n\r\n # Number plate text to verify\r\n # number_plate = 'AAE 011' # this one is dummy\r\n number_plate = plate_number # this one is for real\r\n\r\n # Create the payload\r\n payload = {'text': number_plate}\r\n\r\n # Send the POST request to the API endpoint\r\n response = requests.post(url, json=payload)\r\n\r\n # Check the response\r\n if response.status_code == 200:\r\n data = response.json()\r\n exists = data.get('exists')\r\n print(exists)\r\n\r\n if exists:\r\n print(f\"The number plate '{number_plate}' exists in the database.\")\r\n else:\r\n print(\r\n f\"The number plate '{number_plate}' does not exist in the database.\")\r\n else:\r\n print(\"Error: Failed to receive a valid response from the API.\")\r\n\r\n\r\n# Path to the trained Haar cascade XML file for license plate detection\r\nharcascade = \"model/haarcascade_russian_plate_number.xml\"\r\n\r\n# Initialize video capture from default camera\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Set the width and height of the video capture\r\ncap.set(3, 640) # width\r\ncap.set(4, 480) # height\r\n\r\n# Minimum area threshold for license plate detection\r\nmin_area = 500\r\n\r\n# Initialize a counter to track the number of detected plates\r\ncount = 0\r\nrecognized_texts = []\r\n\r\n# Initialize EasyOCR reader with the desired language(s)\r\nreader = easyocr.Reader(['en'])\r\n\r\nwhile True:\r\n # Read a frame from the video capture\r\n success, img = cap.read()\r\n\r\n # Load the license plate Haar cascade classifier\r\n plate_cascade = cv2.CascadeClassifier(harcascade)\r\n\r\n # Convert the frame to grayscale for license plate detection\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Detect license plates in the grayscale image\r\n plates = plate_cascade.detectMultiScale(img_gray, 1.1, 4)\r\n\r\n # Iterate over the detected license plates\r\n for (x, y, w, h) in plates:\r\n # Calculate the area of the license plate\r\n area = w * h\r\n\r\n # Check if the area exceeds the minimum area threshold\r\n if area > min_area:\r\n # Draw a rectangle around the license plate\r\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n\r\n # Extract the region of interest (ROI) containing the license plate\r\n img_roi = img[y: y+h, x:x+w]\r\n\r\n # Perform OCR on the ROI using EasyOCR\r\n results = reader.readtext(img_roi)\r\n\r\n # Extract the recognized text from the results\r\n text = [result[1] for result in results]\r\n verify_text(' 
'.join(text))\r\n\r\n # Print the recognized characters in the terminal\r\n print(\"License Plate:\", ' '.join(text))\r\n\r\n # Display the resulting image with license plate detection\r\n cv2.imshow(\"Result\", img)\r\n\r\n # Check if 's' key is pressed to save the detected license plate\r\n if cv2.waitKey(1) & 0xFF == ord('s'):\r\n # Save the image containing the license plate\r\n cv2.imwrite(\"plates/scaned_img_\" + str(count) + \".jpg\", img_roi)\r\n\r\n # Display a confirmation message on the main image\r\n cv2.rectangle(img, (0, 200), (640, 300), (0, 255, 0), cv2.FILLED)\r\n cv2.putText(img, \"Plate Saved\", (150, 265),\r\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 0, 255), 2)\r\n cv2.imshow(\"Results\", img)\r\n\r\n # Wait for 500ms to display the confirmation message\r\n cv2.waitKey(500)\r\n\r\n # Increment the counter for the next detected license plate\r\n count += 1\r\n\r\n\r\n# take help of the gate to count vehicle number\r\n","repo_name":"ImranKhanPrince/Web-Interface-Based-Automation-of-Toll-Collection-System-Using-Computer-Vision","sub_path":"Computer Vision Detector/number-plate.py","file_name":"number-plate.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33866251241","text":"#!/bin/env/python\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\nfrom funcs import *\n\n__VERSION__ = \"0.0.1\"\n__AUTHOR__ = u\"Pekka Järvinen\"\n__YEAR__ = 2017\n__DESCRIPTION__ = u\"Arch Installer - Partition generator. Version {0}.\".format(__VERSION__)\n__EPILOG__ = u\"%(prog)s v{0} (c) {1} {2}-\".format(__VERSION__, __AUTHOR__, __YEAR__)\n\n__EXAMPLES__ = [\n u'',\n u'-' * 60,\n u'%(prog)s',\n u'-' * 60,\n]\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=__DESCRIPTION__,\n epilog=__EPILOG__,\n usage=os.linesep.join(__EXAMPLES__),\n )\n\n optional = parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n\n optional.add_argument('--verbose', '-v', action='count', required=False, default=0, dest='verbose',\n help=\"Be verbose. -vvv.. 
Be more verbose.\")\n\n required.add_argument('--device ', '-d', action=FullPaths, type=is_block_device, dest='device',\n required=True,\n help='Target device (for example /dev/sda).')\n\n parser._action_groups.append(optional)\n\n args = parser.parse_args()\n\n if int(args.verbose) > 0:\n logging.getLogger().setLevel(logging.DEBUG)\n log.info(\"Being verbose\")\n\n unmount_all(args.device)\n\n block_device = get_block_device(args.device)\n\n if 'children' not in block_device:\n log.error(\"No partitions found??\")\n sys.exit(1)\n\n os.makedirs(os.path.join(INSTALL_DIR_PREFIX), exist_ok=True)\n\n if not os.path.isdir(INSTALL_DIR_PREFIX):\n log.error(\"Not a dir: {}\".format(INSTALL_DIR_PREFIX))\n sys.exit(1)\n\n log.info(\"Mounting partitions..\")\n\n # mount last partition as /\n # must exist before /boot\n for p in reversed(block_device['children']):\n dev = \"/dev/{}\".format(p['name'])\n\n if p['parttype'] == UUID_SWAP:\n continue\n elif p['parttype'] == UUID_BIOS:\n continue\n elif p['parttype'] == UUID_OTHER:\n mount(dev, INSTALL_DIR_PREFIX)\n break\n\n # Mount first as \"/boot\"\n for p in block_device['children']:\n dev = \"/dev/{}\".format(p['name'])\n\n if p['parttype'] == UUID_SWAP:\n continue\n elif p['parttype'] == UUID_BIOS:\n continue\n elif p['parttype'] == UUID_OTHER:\n BOOTDIR = os.path.join(INSTALL_DIR_PREFIX, \"boot\")\n os.makedirs(BOOTDIR, exist_ok=True)\n mount(dev, BOOTDIR)\n break\n\n for p in get_block_device(args.device)['children']:\n print(p['mountpoint'])\n","repo_name":"raspi/pyarchinstall","sub_path":"2mount.py","file_name":"2mount.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27099645009","text":"#!/usr/bin/env python3\n\nimport rospy\nimport logging\n\nfrom deneigus.srv import acknowledge\nfrom logging_utils import setup_logger, get_logger\nfrom std_msgs.msg import Int32\n\n\nclass SoufflanteNode:\n def __init__(self):\n self.logger = get_logger(\"SoufflanteNode\")\n self.logger.debug(\"Started SoufflanteNode init\")\n\n self.goal = 1\n self.goal_reach = True\n\n self.soufflante_cmd_pub = rospy.Publisher('/soufflante_cmd_auto', Int32, queue_size=10)\n self.soufflante_new_goal_sub = rospy.Subscriber('/soufflante_new_goal', Int32, self.soufflante_goal_callback)\n self.soufflante_height_sub = rospy.Subscriber('/soufflante_height', Int32, self.soufflante_height_callback)\n\n rospy.wait_for_service('acknowledge')\n path_func = rospy.ServiceProxy('acknowledge', acknowledge)\n path_func('Soufflante', 1)\n\n def soufflante_goal_callback(self, msg):\n if msg.data==0:\n rospy.wait_for_service('acknowledge')\n path_func = rospy.ServiceProxy('acknowledge', acknowledge)\n path_func('Soufflante', 1)\n self.goal_reach = True\n else:\n self.soufflante_cmd_pub.publish(msg.data)\n self.goal_reach = False\n self.goal = msg.data\n\n def soufflante_height_callback(self, msg):\n if self.goal_reach==False and msg.data==self.goal:\n rospy.wait_for_service('acknowledge')\n path_func = rospy.ServiceProxy('acknowledge', acknowledge)\n path_func('Soufflante', 1)\n self.goal_reach = True\n\n\nif __name__ == '__main__':\n rospy.init_node('SoufflanteNode', anonymous=False)\n\n setup_logger(__file__, print_level=logging.INFO)\n logger = get_logger(\"SoufflanteNode\")\n logger.info(\"SoufflanteNode main Started\")\n\n SoufflanteNode()\n rospy.spin()\n\n logger.info(\"SoufflanteNode main 
Stopped\")","repo_name":"lefake/DeneigUS","sub_path":"src/soufflante_node.py","file_name":"soufflante_node.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42002454875","text":"a,b,n=map(int, input().split())\nframes=input().split()\nframes.append(0)\nframes.append(0)\n# framesを数値に変換\nfor idx,score in enumerate(frames):\n # 数値に変換\n if score == 'G': score = 0\n frames[idx] = int(score)\n# 各フレームのスコア\nframe_score = []\n# スペアかストライクか\nstk_flg = []\n# フレーム毎投擲数\nt_cnt = 0\n# スコア表を埋める\nfor idx,score in enumerate(frames):\n # 未投擲ならば\n if t_cnt == 0:\n frame_score.append(score)\n # ストライクなら追加スコアの加算\n if score == b:\n frame_score[-1] += frames[idx + 1]\n frame_score[-1] += frames[idx + 2]\n else:\n # 次の投擲へ\n t_cnt += 1\n continue\n # 二回目の投擲なら(t_cntが0以外)\n else:\n # スコア加算\n frame_score[-1] += score\n # スペアだったら追加スコア加算\n if frame_score[-1] == b:\n frame_score[-1] += frames[idx + 1]\n # カウント初期化\n t_cnt = 0\n continue\n\nprint(sum(frame_score))","repo_name":"tndd/history-pzr","sub_path":"A005.py","file_name":"A005.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5681808518","text":"import numpy as np\nimport scipy.integrate\n\nfrom kooplearn.datasets.misc import DataGenerator\n\n\nclass DuffingOscillator(DataGenerator):\n \"\"\"\n A class for simulating the Duffing Oscillator.\n\n The Duffing Oscillator is a mathematical model used to describe a damped\n driven harmonic oscillator with nonlinear effects. It is commonly used\n in physics and engineering to study chaotic behavior.\n\n Args:\n alpha (float, optional): The stiffness coefficient (default is 0.5).\n beta (float, optional): The nonlinear coefficient (default is 0.0625).\n gamma (float, optional): The damping coefficient (default is 0.1).\n delta (float, optional): Another damping coefficient (default is 2.5).\n omega (float, optional): The angular frequency of the driving force (default is 2.0).\n dt (float, optional): The time step size for the numerical integration (default is 0.01).\n\n Attributes:\n alpha (float): The stiffness coefficient.\n beta (float): The nonlinear coefficient.\n gamma (float): The damping coefficient.\n delta (float): Another damping coefficient.\n omega (float): The angular frequency of the driving force.\n dt (float): The time step size for numerical integration.\n\n Examples:\n\n .. 
code-block:: python\n\n duffing = DuffingOscillator(alpha=0.5, beta=0.0625, gamma=0.1, delta=2.5, omega=2.0, dt=0.01)\n initial_conditions = np.array([0.0, 0.0])\n trajectory = duffing.sample(initial_conditions, T=100)\n\n \"\"\"\n\n def __init__(\n self, alpha=0.5, beta=0.0625, gamma=0.1, delta=2.5, omega=2.0, dt=0.01\n ):\n \"\"\"\n Initializes a DuffingOscillator object.\n\n Args:\n alpha (float, optional): The stiffness coefficient (default is 0.5).\n beta (float, optional): The nonlinear coefficient (default is 0.0625).\n gamma (float, optional): The damping coefficient (default is 0.1).\n delta (float, optional): Another damping coefficient (default is 2.5).\n omega (float, optional): The angular frequency of the driving force (default is 2.0).\n dt (float, optional): The time step size for the numerical integration (default is 0.01).\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.delta = delta\n self.omega = omega\n self.dt = dt\n\n def D(self, t, x):\n \"\"\"\n The derivative function representing the Duffing oscillator's equations of motion.\n\n Args:\n t (float): The current time.\n x (np.ndarray): An array representing the current state [position, velocity].\n\n Returns:\n np.ndarray: An array representing the derivatives of the state [velocity, acceleration].\n \"\"\"\n dx = np.array(\n [\n x[1],\n -self.delta * x[1]\n - self.alpha * x[0]\n - self.beta * x[0] ** 3\n + self.gamma * np.cos(self.omega * t),\n ]\n )\n return dx\n\n def sample(self, X0: np.ndarray, T: int = 1):\n \"\"\"\n Generate the trajectory of the Duffing oscillator.\n\n Args:\n X0 (np.ndarray): The initial conditions as an array [initial_position, initial_velocity].\n T (int, optional): The number of time steps (default is 1).\n\n Returns:\n np.ndarray: An array containing the trajectory of the oscillator with shape (T+1, 2),\n where each row represents [position, velocity] at a given time step.\n \"\"\"\n sim_time = self.dt * (T + 1)\n t_eval = np.linspace(0, sim_time, T + 1, endpoint=True)\n t_span = (0, t_eval[-1])\n sol = scipy.integrate.solve_ivp(\n self.D, t_span, X0, t_eval=t_eval, method=\"RK45\"\n )\n return sol.y.T\n\n\nclass Lorenz63(DataGenerator):\n \"\"\"\n A class for simulating the Lorenz-63 chaotic dynamical system.\n\n The Lorenz-63 system is a simplified mathematical model of atmospheric\n convection that exhibits chaotic behavior.\n\n Args:\n sigma (float, optional): The :math:`\\\\sigma` parameter (default is 10).\n mu (float, optional): The :math:`\\\\mu` parameter (default is 28).\n beta (float, optional): The :math:`\\\\beta` parameter (default is 8/3).\n dt (float, optional): The time step size for numerical integration (default is 0.01).\n\n Attributes:\n sigma (float): The :math:`\\\\sigma` parameter.\n mu (float): The :math:`\\\\mu` parameter.\n beta (float): The :math:`\\\\beta` parameter.\n dt (float): The time step size for numerical integration.\n M_lin (np.ndarray): The linearized matrix of the Lorenz-63 system.\n\n Examples:\n\n .. 
code-block:: python\n\n lorenz = Lorenz63(sigma=10, mu=28, beta=8/3, dt=0.01)\n initial_conditions = np.array([1.0, 0.0, 0.0])\n trajectory = lorenz.sample(initial_conditions, T=100)\n\n \"\"\"\n\n def __init__(self, sigma=10, mu=28, beta=8 / 3, dt=0.01):\n \"\"\"\n Initializes a Lorenz63 object.\n\n Args:\n sigma (float, optional): The :math:`\\\\sigma` parameter (default is 10).\n mu (float, optional): The :math:`\\\\mu` parameter (default is 28).\n beta (float, optional): The :math:`\\\\beta` parameter (default is 8/3).\n dt (float, optional): The time step size for numerical integration (default is 0.01).\n \"\"\"\n self.sigma = sigma\n self.mu = mu\n self.beta = beta\n self.dt = dt\n self.M_lin = np.array(\n [[-self.sigma, self.sigma, 0], [self.mu, 0, 0], [0, 0, -self.beta]]\n )\n\n def sample(self, X0: np.ndarray, T: int = 1):\n \"\"\"\n Generate the trajectory of the Lorenz-63 system.\n\n Args:\n X0 (np.ndarray): The initial conditions as an array [x, y, z].\n T (int, optional): The number of time steps (default is 1).\n\n Returns:\n np.ndarray: An array containing the trajectory of the system with shape (T+1, 3),\n where each row represents [x, y, z] at a given time step.\n \"\"\"\n sim_time = self.dt * (T + 1)\n t_eval = np.linspace(0, sim_time, T + 1, endpoint=True)\n t_span = (0, t_eval[-1])\n sol = scipy.integrate.solve_ivp(\n self.D, t_span, X0, t_eval=t_eval, method=\"RK45\"\n )\n return sol.y.T\n\n def D(self, t, x):\n \"\"\"\n The derivative function representing the Lorenz-63 equations.\n\n Args:\n t (float): The current time.\n x (np.ndarray): An array representing the current state [x, y, z].\n\n Returns:\n np.ndarray: An array representing the derivatives of the state [dx/dt, dy/dt, dz/dt].\n \"\"\"\n dx = self.M_lin @ x\n dx[1] -= x[2] * x[0]\n dx[2] += x[0] * x[1]\n return dx\n","repo_name":"CSML-IIT-UCL/kooplearn","sub_path":"kooplearn/datasets/deterministic.py","file_name":"deterministic.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"5621781214","text":"def Mideng(li):\n if(type(li)!=list):\n return\n if(len(li)==1):\n return [li]\n result=[]\n for i in range(0,len(li[:])):\n bak=li[:]\n head=bak.pop(i) #head of the recursive-produced value\n for j in Mideng(bak):\n j.insert(0,head)\n result.append(j)\n return result\ndef MM(n):\n if(type(n)!=int or n<2):\n return\n return Mideng(list(range(1,n)))\n","repo_name":"opnsesame/Data-Structures-and-Algorithms-Exercises","sub_path":"test/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9985359292","text":"import os\nfrom glob import glob\nfrom setuptools import setup\n\npackage_name = 'tf_examples'\n\nsetup(\n name=package_name,\n version='1.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n (os.path.join('share', package_name, 'launch'),\n glob('launch/*.launch.py')),\n (os.path.join('share', package_name, 'launch'),\n glob('launch/*.rviz')),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='MASUTANI Yasuhiro',\n maintainer_email='ai-robot-book@googlegroups.com',\n description='TF examples for AI Robot Book',\n license='Apache License 2.0',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 
'satellite_broadcaster = tf_examples.satellite_broadcaster:main',\n 'planet_broadcaster = tf_examples.planet_broadcaster:main',\n 'satellite_listener = tf_examples.satellite_listener:main',\n 'dummy_sensor_publisher = tf_examples.dummy_sensor_publisher:main',\n 'dummy_sensor_subscriber = tf_examples.dummy_sensor_subscriber:main',\n ],\n },\n)\n","repo_name":"AI-Robot-Book/appendixE","sub_path":"tf_examples/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43345945848","text":"#! /usr/bin/python\n\nimport sys\nprint (\"Please Enter the Input of the temperature Unit:\")\n\nprint (\"\")\nFROM = input(\"Type 'C'(celsius)', 'F'(fahrenheit), 'K'(kelvin), 'R'(rankine) :\").upper()\nprint(FROM)\nif FROM == \"C\" or FROM == \"F\" or FROM == \"K\" or FROM == \"R\":\n print (\"\")\n VALUE = float(input(\" Please Input the temperature in that unit :\"))\n print(\"\")\n \n if FROM == \"C\":\n kelvin = (VALUE + 273)\n elif FROM == \"F\":\n kelvin = ((5/9) * (VALUE - 32) + 273)\n elif FROM == \"K\":\n kelvin = VALUE\n elif FROM == \"R\":\n kelvin = (VALUE * (5/9))\n\nelse:\n print(\"\")\n print (\"Invalid temperature unit entered\")\n sys.exit(1)\nprint (\"\")\n\nTO = input(\"Please enter the desired temperature unit:\\n Type 'C'(celsius)', 'F'(fahrenheit), 'K'(kelvin), 'R'(rankine) :\").upper()\nprint(\"\")\nif TO == \"C\" or TO == \"F\" or TO == \"K\" or TO == \"R\":\n print (\"\")\n if TO == \"C\":\n END = (kelvin - 273)\n elif TO == \"F\":\n END = (1.8 * (kelvin - 273) + 32)\n elif TO == \"K\":\n END = kelvin\n elif TO == \"R\":\n END = (kelvin * (9/5))\nelse:\n print (\"\")\n print (\"Invalid temperature unit entered\")\n sys.exit(1)\n\nexpected = float(input(\"Please enter the input of student :\"))\nif (expected == END):\n print(\"Correct\")\nelse:\n print(\"Incorrect\")\n\nprint (\"Your\" , FROM , \"value of\" , VALUE , \", is\" , END , \"in\" , TO )\n","repo_name":"potter-02/temperature_conversion","sub_path":"temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21133284228","text":"import numpy as np\nimport torch\n\nfrom pysgg.utils.miscellaneous import intersect_2d\n\n\ndef boxlist_iou(boxlist1, boxlist2, to_cuda=True):\n \"\"\"Compute the intersection over union of two set of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Arguments:\n box1: (BoxList) bounding boxes, sized [N,4].\n box2: (BoxList) bounding boxes, sized [M,4].\n\n Returns:\n (tensor) iou, sized [N,M].\n\n Reference:\n https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py\n \"\"\"\n if boxlist1.size != boxlist2.size:\n raise RuntimeError(\n \"boxlists should have same image size, got {}, {}\".format(boxlist1, boxlist2))\n\n N = len(boxlist1)\n M = len(boxlist2)\n\n if to_cuda:\n if boxlist1.bbox.device.type != 'cuda':\n boxlist1.bbox = boxlist1.bbox.cuda()\n if boxlist2.bbox.device.type != 'cuda':\n boxlist2.bbox = boxlist2.bbox.cuda()\n\n box1 = boxlist1.bbox\n box2 = boxlist2.bbox\n\n area1 = boxlist1.area()\n area2 = boxlist2.area()\n\n lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]\n rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]\n\n TO_REMOVE = 1\n\n wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n iou = inter / (area1[:, None] + area2 - 
inter)\n return iou\n\n\ndef intersect_2d_torch_tensor(x1, x2):\n return torch.from_numpy(intersect_2d(x1.numpy(), x2.numpy()))\n\n\ndef dump_hit_indx_dict_to_tensor(pred_pair_mat, gt_box_hit_idx_dict):\n \"\"\"\n for compare the prediction and gt easily, we need to expand the N to M box match results to\n array.\n here, give relationship prediction pair matrix, expand the gt_box_hit_idx_dit to the array.\n We do the full connection of hit gt box idx of each prediction pairs\n :param pred_pair_mat:\n :param gt_box_hit_idx_dict: the hit gt idx of each prediction box\n :return:\n to_cmp_pair_mat: expanded relationship pair result (N, 2), store the gt box indexs.\n N is large than initial prediction pair matrix\n initial_pred_idx_seg: marking the seg for each pred pairs. If it hit multiple detection gt,\n it could have more than one prediction pairs, we need to mark that they are indicated to\n same initial predations\n \"\"\"\n to_cmp_pair_mat = []\n initial_pred_idx_seg = []\n # write result into the pair mat\n for pred_idx, pred_pair in enumerate(pred_pair_mat):\n sub_pred_hit_idx_set = gt_box_hit_idx_dict[pred_pair[0].item()]\n obj_pred_hit_idx_set = gt_box_hit_idx_dict[pred_pair[1].item()]\n # expand the prediction index by full combination\n for each_sub_hit_idx in sub_pred_hit_idx_set:\n for each_obj_hit_idx in obj_pred_hit_idx_set:\n to_cmp_pair_mat.append([each_sub_hit_idx, each_obj_hit_idx])\n initial_pred_idx_seg.append(pred_idx) #\n if len(to_cmp_pair_mat) == 0:\n to_cmp_pair_mat = torch.zeros((0, 2), dtype=torch.int64)\n else:\n to_cmp_pair_mat = torch.from_numpy(np.array(to_cmp_pair_mat, dtype=np.int64))\n\n initial_pred_idx_seg = torch.from_numpy(np.array(initial_pred_idx_seg, dtype=np.int64))\n return to_cmp_pair_mat, initial_pred_idx_seg\n\n\nLONGTAIL_CATE_IDS_DICT = {\n 'head': [31, 20, 22, 30, 48],\n 'body': [29, 50, 1, 21, 8, 43, 40, 49, 41, 23, 7, 6, 19, 33, 16, 38],\n 'tail': [11, 14, 46, 37, 13, 24, 4, 47, 5, 10, 9, 34, 3, 25, 17, 35, 42, 27, 12, 28,\n 39, 36, 2, 15, 44, 32, 26, 18, 45]\n}\n\nLONGTAIL_CATE_IDS_QUERY = {}\nfor long_name, cate_id in LONGTAIL_CATE_IDS_DICT.items():\n for each_cate_id in cate_id:\n LONGTAIL_CATE_IDS_QUERY[each_cate_id] = long_name\n\nPREDICATE_CLUSTER = [[50, 20, 9], [22, 48, 49], [31], [31, 41, 1], [31, 30]]\nENTITY_CLUSTER = [[91, 149, 53, 78, 20, 79, 90, 56, 68]]\n\n\ndef get_cluster_id(cluster, cate_id):\n for idx, each in enumerate(cluster):\n if cate_id in each:\n return each[0]\n return -1\n\n\ndef transform_cateid_into_cluster_id(cate_list, cluster):\n for idx in range(len(cate_list)):\n cluster_id = get_cluster_id(cluster, cate_list[idx].item())\n\n if cluster_id != -1:\n cate_list[idx] = cluster_id\n return cate_list\n\n\ndef trans_cluster_label(pred_pred_cate_list, gt_pred_cate_list, cluster):\n \"\"\"\n transform the categories labels to cluster label for label overlapping avoiding\n :param pred_pair_mat: (subj_id, obj-id, cate-lable)\n :param gt_pair_mat:\n :return:\n \"\"\"\n cluster_ref_pred_cate = transform_cateid_into_cluster_id(pred_pred_cate_list, cluster)\n cluster_ref_gt_cate = transform_cateid_into_cluster_id(gt_pred_cate_list, cluster)\n\n return cluster_ref_pred_cate, cluster_ref_gt_cate\n","repo_name":"SHTUPLUS/PySGG","sub_path":"pysgg/data/datasets/evaluation/vg/vg_stage_eval_utils.py","file_name":"vg_stage_eval_utils.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"20806659006","text":"import networkx as 
nx\nimport pandas as pd\nfrom typing import List\nfrom sklearn.base import BaseEstimator\n\nACTUAL_CLASS = 'actual_class'\nPREDICTED = 'predicted_class'\nTRUE_CLASS = 'Y_true'\nBETWEENNESS = 'betweenness'\nNUM_CLASSES = 3\n\nclass IDA_classfier(BaseEstimator):\n def get_params(self,deep):\n dict = {\n \"G\":self._g,\n \"uncover_rate\":self._uncover_rate,\n \"attribute\":self._nodes_attribute\n }\n return dict\n def set_params(self, **parameters):\n ##TODO\n return self\n\n def __init__(self, G: nx.Graph, uncover_rate: float, attribute: str):\n self._g = G\n self._uncover_rate = uncover_rate\n self._nodes_attribute = attribute\n self._bootstrapped_data = self.bootstrap()\n\n def bootstrap(self):\n nodes_count = len(self._g.nodes)\n uncover_nodes_count = int(self._uncover_rate * nodes_count) # how many nodes do we uncover for algorith\n\n uncover_nodes_list = self.nodes_uncover(self._nodes_attribute, uncover_nodes_count, self._g)\n self.uncover(self._g, uncover_nodes_list)\n\n self.set_default_class_score(self._g)\n train_set, test_set, y_train, y_test = self.prepare_sets(self._g, uncover_nodes_list)\n\n bootstrapped_data = [self._g, train_set, test_set, y_train, y_test]\n\n return bootstrapped_data\n\n def fit(self, classifier, stop_iterations):\n g, train, test, y_train, y_test = self._bootstrapped_data\n\n classifier.fit(X=train, y=y_train)\n predicted_nodes = classifier.predict(test)\n nodes = test.index\n for node, pred_y in zip(nodes, predicted_nodes):\n nx.set_node_attributes(g, {node: {PREDICTED: int(pred_y)}})\n\n changes_iterator = 0\n MAX_CHANGES = 3\n it = 0\n while changes_iterator != MAX_CHANGES and it < stop_iterations:\n change = False\n sorted_nodes = sorted(nodes, key=lambda item: test.loc[node, self._nodes_attribute], reverse=True)\n for node in sorted_nodes:\n for src, dest in g.edges(node):\n if src == node:\n self.update_score(g, node, dest)\n elif dest == node:\n self.update_score(g, node, src)\n for i in range(NUM_CLASSES):\n key = f'class_{i}_score'\n test.loc[node, key] = g.nodes[node][key]\n pred = int(classifier.predict([test.loc[node]])[0])\n if pred != g.nodes[node][PREDICTED]:\n change = True\n\n nx.set_node_attributes(g, {node:\n {PREDICTED: pred}\n })\n if change:\n changes_iterator = 0\n else:\n changes_iterator += 1\n it += 1\n\n return g, test\n\n def predict(self, x):\n y = []\n for node in x:\n node_from_graph = self._g.nodes[node]\n if PREDICTED in node_from_graph:\n node_class = node_from_graph[PREDICTED]\n y.append(node_class)\n else:\n node_class = node_from_graph[TRUE_CLASS]\n y.append(node_class)\n return y\n\n def prepare_sets(self ,G: nx.Graph, uncover_nodes_list: List[int]):\n train_set = pd.DataFrame() # contains known nodes\n test_set = pd.DataFrame() # contains unknown nodes\n for node in G.nodes:\n if node in uncover_nodes_list:\n train_set = train_set.append(G.nodes[node], ignore_index=True)\n else:\n test_set = test_set.append(G.nodes[node], ignore_index=True)\n\n train_set.set_index('node', drop=True, inplace=True)\n test_set.set_index('node', drop=True, inplace=True)\n\n y_train = train_set[TRUE_CLASS]\n y_test = test_set[TRUE_CLASS]\n to_drop = [TRUE_CLASS, ACTUAL_CLASS]\n\n train_set.drop(columns=to_drop, inplace=True, axis=1)\n test_set.drop(columns=[TRUE_CLASS], inplace=True, axis=1)\n\n return train_set, test_set, y_train, y_test\n\n def update_class(self, G, node, neighbour, classname):\n # If destination node (neighbour) has class:value update source node score of this class.\n if classname in G.nodes[neighbour]:\n neighbour_class 
= int(G.nodes[neighbour][classname])\n key = f'class_{neighbour_class}_score'\n if key in G.nodes[node]: # if already has this key\n update = G.nodes[node][key]\n else: # if it hasnt got this class score (just to be sure)\n update = 1\n nx.set_node_attributes(G, {node: {\n key: update\n }})\n\n def update_score(self,G: nx.Graph, source_node: int, dest_node: int):\n self.update_class(G, source_node, dest_node, ACTUAL_CLASS)\n self.update_class(G, source_node, dest_node, PREDICTED)\n\n def nodes_uncover(self,attribute: str, nodes_count: int, G: nx.Graph):\n nodes_sorted = sorted(G.nodes, key=lambda item: G.nodes[item][attribute], reverse=True)\n node_uncover_list = [node for node in nodes_sorted][:nodes_count]\n return node_uncover_list\n\n def uncover(self,G: nx.Graph, nodes_to_uncover: List[int]):\n for node in nodes_to_uncover:\n G.nodes[node][ACTUAL_CLASS] = G.nodes[node]['Y_true']\n\n def set_default_class_score(self,G):\n # at the beginning all scores (of 3 classes) are 0\n for node in G.nodes:\n template = {\n node: {\n \"class_0_score\": 0,\n \"class_1_score\": 0,\n \"class_2_score\": 0\n }\n }\n nx.set_node_attributes(G, template)\n\n for node in G.nodes:\n for src, dest in G.edges(node):\n # for each source node update class score\n if node == src:\n self.update_score(G, node, dest)","repo_name":"BlonskiP/PWR-Datascience-projects-exercises","sub_path":"Complex data analysis/kt-assignment-1-BlonskiP/src/ICA.py","file_name":"ICA.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20052418084","text":"'''You are given an array nums of non-negative integers. nums is considered special if there exists a number x such that there are exactly x numbers in nums that are greater than or equal to x.\n\nNotice that x does not have to be an element in nums.\n\nReturn x if the array is special, otherwise, return -1. 
It can be proven that if nums is special, the value for x is unique.\n\n \n\nExample 1:\n\nInput: nums = [3,5]\nOutput: 2\nExplanation: There are 2 values (3 and 5) that are greater than or equal to 2.\nExample 2:\n\nInput: nums = [0,0]\nOutput: -1\nExplanation: No numbers fit the criteria for x.\nIf x = 0, there should be 0 numbers >= x, but there are 2.\nIf x = 1, there should be 1 number >= x, but there are 0.\nIf x = 2, there should be 2 numbers >= x, but there are 0.\nx cannot be greater since there are only 2 numbers in nums.\nExample 3:\n\nInput: nums = [0,4,3,0,4]\nOutput: 3\nExplanation: There are 3 values that are greater than or equal to 3.'''\n\nfrom ast import List\n\n# tc = O(nlogn) and sc = O(n)\nclass Solution:\n def countElements(self, nums: List[int]) -> int:\n count = 0\n n = len(nums)\n s = sorted(nums)\n for i in range(len(nums)):\n if s[i] > s[0] and s[i] < s[n-1]:\n count += 1\n return count\n\n# tc = O(n) and sc = O(1)\nclass Solution:\n def countElements(self, nums: List[int]) -> int:\n count = 0\n n = len(nums)\n m, x = max(nums), min(nums)\n for i in range(n):\n if nums[i] < m and nums[i] > x:\n count += 1\n return count","repo_name":"DEVHrishi/DSA--PYTHON--SQL","sub_path":"Sorting/Easy/Special Array With X Elements Greater Than or Equal X.py","file_name":"Special Array With X Elements Greater Than or Equal X.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17172678872","text":"from __future__ import division\nimport multiprocessing as mp\nimport numpy as np\nimport random\nfrom qiskit import *\nfrom qiskit.quantum_info import random_statevector\nimport copt\n\n\nrandom.seed(1) # setting random seed to 1 for reproducibility\nrandom_seed = 1 # setting the random seed inside the statevector\n\n\ndef generate_random_psi(num_qbits=2, debug=False):\n \"\"\"\n Initialize our target state |psi>\n :param num_qbits: int, number of qbits\n :param debug: bool, will print |psi>\n :return: qiskit.quantum_info Statevector object\n \"\"\"\n\n dim = 2**num_qbits\n psi = random_statevector(dim, seed=random_seed)\n if debug:\n print(psi)\n\n return psi\n\n\ndef initialize_theta(circ_depth=10, num_qbits=2):\n \"\"\"\n Initialize the theta parameter vector\n :param circ_depth: int, number of parameterized layers in circuit\n :param num_qbits: int, number of qbits\n :return: np.array, values of theta\n \"\"\"\n\n theta = np.zeros((circ_depth, num_qbits))\n return theta\n\n\ndef multi_processing_attempt(file_name):\n \"\"\"\n file contains a list of quantum states (psi). This is a method for learning\n the parameterization vectors (theta) for an array of quantum states (psi)\n using multi-processing. The results (theta) are written to\n a new text file named (file_name + '_newTheta.txt'). An associated file\n named (file_name + '_newPsi.txt') which contains the states (psi) so\n that the 1st parameterization theta corresponds to the first state psi.\n\n :param file_name: str, name of file containing list of states psi\n :return: none\n \"\"\"\n\n file = open(file_name, 'r')\n psi_lst = file.readlines()\n file.close()\n\n new_psi = file_name.split('.')[0] + '_newPsi.txt'\n new_theta = file_name.split('.')[0] + '_newTheta.txt'\n with open(new_psi, 'w') as P, open(new_theta, 'w') as T:\n p = mp.Pool(mp.cpu_count())\n print(f\"Multiprocessing Pool created! 
Running with all {mp.cpu_count()} of your cores\")\n\n for i, entry in enumerate(p.map(pool_function, psi_lst), 1):\n print('\\rdone {0:%}'.format(i / len(psi_lst)))\n line, row = entry\n P.write(line)\n T.write(row)\n\n return\n\n\ndef pool_function(line):\n \"\"\"\n Reads in the quantum state (psi) and runs the QML method to\n determine the associated parameterization (theta). returns\n a list containing the [psi, theta] written as strings.\n\n :param line: str, the quantum state psi, written as a str\n :return: list of len 2, containing the str psi (quantum state) and str theta (parameterization)\n \"\"\"\n # :param circ_depth: int, representing depth of VQC ## need to fix this\n # :param num_qbits: int, num qbits for state psi\n circ_depth = 8\n num_qbits = 3\n\n psi_vect = [complex(v) for v in line.split(',')]\n psi = qiskit.quantum_info.Statevector(psi_vect)\n\n initial_theta = initialize_theta(circ_depth=circ_depth, num_qbits=num_qbits)\n results, optimizer_data = copt.optimize_theta_scp(initial_theta, psi) # Learn theta using VQCs\n optimized_theta = optimizer_data[-1] # Final result\n\n theta_str_lst = [str(i) for i in optimized_theta]\n row = \",\".join(theta_str_lst) + \"\\n\"\n\n return [line, row]\n\n\ndef main():\n # multi_processing_attempt(\"3Qbit_psi_1k.txt\")\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"iisharankov/QuantumStateTomography","sub_path":"qml_approach/qml_main.py","file_name":"qml_main.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"70500393769","text":"from parsers.common import scrape_word_data\nfrom utils.general.common import write_todo\nfrom utils.sanhedrin_finalise_lobjs_and_create_nexus.tools import is_it_the_same_meaning, q, add_signalwords, \\\n get_signalword, test_signalword\nfrom utils.postprocessing.common import finalise_lemma_objects\nfrom utils.scraping.common import check_rescraped_against_existing\nfrom utils.universal import color as c, get_curried_save, load_tempsave_if_exists\n\nif __name__ == '__main__':\n\n # # # # # #\n wordtype = \"nco\"\n batch = \"01\"\n # # # # # #\n\n input_filename = f\"{wordtype}_batch_{batch}_TGT\"\n stem = \"./../../output_saved/batches/\"\n input_path = f\"{stem}{input_filename}\"\n tempsave_path = input_path + \"_S06_tempsave\"\n _save = get_curried_save(input_path, tempsave_path)\n\n c.print_teal(\"input_path = \" + c.teal(input_path))\n c.print_teal(\"tempsave_path = \" + c.teal(tempsave_path))\n c.print_teal(\"Output path will be the same as input.\")\n\n\n def save(tgt_lobjs, temp: bool = False):\n print(f\"Got {len(tgt_lobjs)} members.\")\n print(\"Reordering so siblings are next to each other...\")\n res = []\n done_ids = []\n for l in tgt_lobjs:\n if l[\"id\"] not in done_ids:\n is_sibling = False\n for sib_set in siblings:\n if sib_set[0][\"id\"] == l[\"id\"]:\n if is_sibling:\n raise Exception(f'Why is there more than one sibling set for \"{l[\"id\"]}\"?')\n is_sibling = True\n res.extend(sib_set)\n done_ids.extend([lo[\"id\"] for lo in sib_set])\n\n if l[\"id\"] in all_sibling_ids: # Catching the ones that got deleted, don't add to res.\n is_sibling = True\n done_ids.append(l[\"id\"])\n\n if not is_sibling:\n res.append(l)\n done_ids.append(l[\"id\"])\n print(f\"Got {len(res)} members.\")\n\n _save(res, temp)\n\n\n tgt_lobjs = load_tempsave_if_exists(tempsave_path, input_path)\n siblings = []\n sibling_headers = []\n\n print(\"Loaded\", len(tgt_lobjs), \"target 
lobjs.\")\n\n for index_1, tgt_lobj_1 in enumerate(tgt_lobjs):\n if tgt_lobj_1[\"lemma\"] not in sibling_headers:\n sibling_set = [tgt_lobj_1]\n for index_2, tgt_lobj_2 in enumerate(tgt_lobjs):\n if index_1 != index_2 and tgt_lobj_1[\"lemma\"] == tgt_lobj_2[\"lemma\"]:\n sibling_set.append(tgt_lobj_2)\n if len(sibling_set) > 1:\n siblings.append(sibling_set)\n sibling_headers.append(tgt_lobj_1[\"lemma\"])\n\n all_sibling_ids = [] # Some may get deleted but their IDs kept here so don't put them back into res when saving.\n print(f\"There are {len(siblings)} sibling sets.\")\n for sib_set in siblings:\n print(sib_set)\n for sibli in sib_set:\n all_sibling_ids.append(sibli[\"id\"])\n\n print(f\"There are {len(siblings)} sibling sets.\")\n for sib_set_index, sib_set in enumerate(siblings):\n signalwords = [get_signalword(l[\"id\"]) for l in sib_set]\n failed = False\n for signalword in signalwords:\n if not test_signalword(signalword):\n failed = True\n\n print(\"\")\n print(f\"{sib_set_index + 1}/{len(siblings)}\")\n\n if not failed:\n c.print_green(\"ALREADY LOOKS DONE\")\n for sibl in sib_set:\n print(c.green(get_signalword(sibl[\"id\"])), sibl)\n continue\n\n if sib_set_index % 5 == 0:\n save(tgt_lobjs, True)\n\n add_signalwords(sib_set)\n\n save(tgt_lobjs)\n\n print(\"\")\n print(\"Completely done.\")\n","repo_name":"chicorycolumn/WiktionaryScraper","sub_path":"utils/sanhedrin_finalise_lobjs_and_create_nexus/S06_distinguish_sibling_tgt_lobjs.py","file_name":"S06_distinguish_sibling_tgt_lobjs.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6844582644","text":"from time import sleep\nimport subprocess\n\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\ndef set_pin_high(pin):\n pin = int(pin)\n\n GPIO.setup(pin, GPIO.OUT)\n\n sleep(0.2)\n GPIO.output(pin, GPIO.HIGH)\n sleep(0.2)\n\ndef set_pin_low(pin):\n pin = int(pin)\n\n GPIO.setup(pin, GPIO.OUT)\n\n sleep(0.2)\n GPIO.output(pin, GPIO.LOW)\n sleep(0.2)\n\ndef get_pin_status(pin):\n status = subprocess.check_output(['gpio', '-g', 'read', pin])\n\n return str(status)\n","repo_name":"adesilvey/pipincontrol","sub_path":"pipincontrol.py","file_name":"pipincontrol.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14109887913","text":"import subprocess as sp\nimport os\nimport silence_tensorflow.auto # pylint: disable=unused-import\nimport tensorflow as tf\n\ndef mask_unused_gpus(leave_unmasked=1):\n\n\tACCEPTABLE_AVAILABLE_MEMORY = 1024\n\tCOMMAND = \"nvidia-smi --query-gpu=memory.free --format=csv\"\n\n\ttry:\n\t\t_output_to_list = lambda x: x.decode('ascii').split('\\n')[:-1]\n\t\tmemory_free_info = _output_to_list(sp.check_output(COMMAND.split()))[1:]\n\t\tmemory_free_values = [int(x.split()[0]) for i, x in enumerate(memory_free_info)]\n\t\tavailable_gpus = [i for i, x in enumerate(memory_free_values) if x > ACCEPTABLE_AVAILABLE_MEMORY]\n\n\t\tif len(available_gpus) < leave_unmasked: raise ValueError('Found only %d usable GPUs in the system' % len(available_gpus))\n\t\tgpus = tf.config.experimental.list_physical_devices(device_type='GPU')\n\t\ttf.config.experimental.set_visible_devices(gpus[available_gpus[0]], 'GPU')\n\n\texcept Exception as e:\n\t\tprint('\"nvidia-smi\" is probably not installed. 
GPUs are not masked', e)\n\nmask_unused_gpus()\n\nimport absl.logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport shutil\nfrom sklearn.ensemble import IsolationForest\nfrom tqdm import tqdm\n\nabsl.logging.set_verbosity(absl.logging.ERROR)\n\n\nos.chdir('/home/ricky/RNNAE/conv_npy')\n\nlc = np.array(np.load('lc.npy', allow_pickle=True))\nlc_meta = np.array(np.load('lc_meta.npy', allow_pickle=True))\n\ndef import_data(x):\n\n x = np.load(f'{x}.npy', allow_pickle=True)\n x = np.asarray(x).astype('float32')\n\n return x\n\ndef create_clean_directory(d):\n\n isExist = os.path.exists(d)\n if isExist:\n shutil.rmtree(d)\n os.makedirs(d)\n else:\n os.makedirs(d)\n\n return\n\ndef cnnae_test(autoencoder, input_tmp, input_meta_tmp):\n\n pred = autoencoder.predict(x=[input_tmp, input_meta_tmp], verbose=1)\n pred_loss = autoencoder.evaluate(x=[input_tmp, input_meta_tmp], y=input_tmp, verbose=1)\n\n return pred, pred_loss\n\ndef latent_space_concatenation(latent_space, meta_data: list, split):\n latent_space_add = []\n for i in range(latent_space.shape[0]):\n latent_space_add.append([lc_meta[i+split][j] for j in meta_data])\n latent_space_add = np.array(latent_space_add)\n \n return np.concatenate((latent_space, latent_space_add), axis=-1)\n\ndef isolation_forest(latent_space, n_tree, split):\n\n clf = IsolationForest(n_estimators=n_tree, warm_start=True)\n clf.fit(latent_space)\n anomaly = clf.score_samples(latent_space)\n anomaly_id = np.argsort(anomaly)\n\n d = '/home/ricky/RNNAE/CNN_product/CNN_anomaly_graph'\n\n create_clean_directory(d)\n\n for i, ano in enumerate(anomaly_id):\n name = lc_meta[ano+split]['SN_name']\n peak_mag = \"{:.2f}\".format(lc_meta[ano+split]['peak_mag'])\n delta_m = \"{:.2f}\".format(lc_meta[ano+split]['delta_m'])\n #t_normalised_noise = \"{:.2f}\".format(lc_meta[ano+split]['t_normalised_noise'])\n #no_near_peak = \"{:.2f}\".format(lc_meta[ano+split]['no_near_peak'])\n\n # naming problem?\n try:\n shutil.copy(f'/home/ricky/RNNAE/SDSS_GP_graph/{name}.pdf', f'/home/ricky/RNNAE/CNN_product/CNN_anomaly_graph/{i+1}_{name}_{peak_mag}_{delta_m}.pdf')\n except OSError:\n try:\n shutil.copy(f'/home/ricky/RNNAE/SDSS_prime_GP_graph/{name}.pdf', f'/home/ricky/RNNAE/CNN_product/CNN_anomaly_graph/{i+1}_{name}_{peak_mag}_{delta_m}.pdf')\n except OSError:\n name = name.replace('_prime','')\n shutil.copy(f'/home/ricky/RNNAE/SDSS_prime_GP_graph/{name}.pdf', f'/home/ricky/RNNAE/CNN_product/CNN_anomaly_graph/{i+1}_{name}_{peak_mag}_{delta_m}.pdf')\n \n return anomaly_id\n\ndef cdf(anomaly_id, split=0):\n\n os.chdir('/home/ricky/RNNAE/CNN_product')\n\n rank_normalIa = []\n rank_peculiarIa = []\n\n for i, ano in enumerate(anomaly_id):\n if lc_meta[ano+split]['type'] == 'Ia':\n rank_normalIa.append(i)\n else:\n rank_peculiarIa.append(i)\n\n count, bins_count = np.histogram(rank_normalIa, bins=len(anomaly_id))\n pdf = count/sum(count)\n cdf_n = np.cumsum(pdf)\n\n count, bins_count = np.histogram(rank_peculiarIa, bins=len(anomaly_id))\n pdf = count/sum(count)\n cdf_p = np.cumsum(pdf)\n\n fig = plt.figure(figsize=(8, 6))\n plt.plot(bins_count[1:], cdf_n, label=\"Normal SNeIa\")\n plt.plot(bins_count[1:], cdf_p, label=\"Subtype SNeIa\")\n\n plt.xlabel('Anomaly Ranking', fontsize=15)\n plt.ylabel('CDF', fontsize=15)\n plt.title('CDF of anomaly ranking')\n\n plt.grid()\n plt.legend()\n\n plt.savefig('cdf.pdf', bbox_inches='tight')\n plt.close()\n\n return\n\ndef latent_space_graph(latent_space, anomaly_id, split):\n\n d = '/home/ricky/RNNAE/CNN_product/CNN_latent_space_graph'\n 
create_clean_directory(d)\n os.chdir(d)\n\n color = [0 for i in range(latent_space.shape[0])]\n for i, ano in enumerate(anomaly_id):\n color[ano] = i\n\n print('plotting latent space graphs...')\n\n for i in tqdm(range(latent_space.shape[1] - 1)):\n for j in range(latent_space.shape[1] - 1 - i):\n fig = plt.figure(figsize=(8, 6))\n plt.grid() \n plt.scatter(latent_space[:,i], latent_space[:,i+j+1], c=color, cmap='viridis', s=6)\n \n if i == latent_space.shape[1]-2:\n plt.xlabel('\\u0394 M_15', fontsize=12)\n else:\n plt.xlabel(f'id {i+1}', fontsize=12) # addition + 1 for readability\n \n if i+j+1 == latent_space.shape[1]-2:\n plt.ylabel('\\u0394 M_15', fontsize=12)\n elif i+j+1 == latent_space.shape[1]-1:\n plt.gca().invert_yaxis()\n plt.ylabel('g band peak magnitude', fontsize=12)\n else:\n plt.ylabel(f'id {i+j+1+1}', fontsize=12) # addition + 1 for readability\n \n plt.title(f'latent space id {i+1} vs id {i+j+1+1}') # addition + 1 for readability\n plt.colorbar(label='anomaly ranking')\n \n plt.savefig(f'id_{i+1}_vs_id_{i+j+1+1}.pdf', bbox_inches='tight') # addition + 1 for readability\n plt.close()\n\n return\n\ndef reconstruction_graph(input_tmp, pred, split, filters=['g', 'r', 'i']):\n\n color1 = ['mediumturquoise', 'crimson', 'maroon']\n color2 = ['lightseagreen', 'firebrick', 'darkred']\n\n d = '/home/ricky/RNNAE/CNN_product/CNN_reconstruction_graph'\n create_clean_directory(d)\n\n for i in tqdm(range(input_tmp.shape[0])):\n\n os.chdir(d)\n\n isExist = os.path.exists(f'./{lc_meta[i+split][\"SN_name\"]}')\n\n if not isExist:\n os.makedirs(f'./{lc_meta[i+split][\"SN_name\"]}')\n os.chdir(f'./{lc_meta[i+split][\"SN_name\"]}')\n\n fig, axs = plt.subplots(3, figsize=(8, 16))\n\n fig.suptitle('Images of CNN')\n axs[0].set_title('Input Test Image')\n axs[1].set_title('Reconstructed Test Image')\n axs[2].set_title('Difference between Images')\n\n a = []\n a.append(axs[0].imshow(input_tmp[i].reshape(96,96).T, interpolation='nearest', aspect='auto', cmap='BrBG'))\n a.append(axs[1].imshow(pred[i].reshape(96,96).T, interpolation='nearest', aspect='auto', cmap='BrBG'))\n a.append(axs[2].imshow((input_tmp[i] - pred[i]).reshape(96,96).T, interpolation='nearest', aspect='auto', cmap='BrBG'))\n\n for j in range(3):\n plt.colorbar(a[j], ax=axs[j]).set_label('Normalized Absolute Magnitude')\n axs[j].set_xlabel('Timestep')\n\n fig.savefig(f'./{lc_meta[i+split][\"SN_name\"]}.pdf', bbox_inches='tight')\n plt.close()\n\n fig, axs = plt.subplots(1, 3, figsize=(18, 5))\n fig.suptitle(f'{lc_meta[i+split][\"SN_name\"]}, type {lc_meta[i+split][\"type\"]}', fontsize=15)\n\n for j, filter in enumerate(filters):\n\n axs[j].set_xlabel('Timestep', fontsize=12)\n axs[j].set_ylabel('Normalized Absolute Magnitude', fontsize=12)\n\n axs[j].invert_yaxis()\n\n axs[j].set_title(f'{filter} band', fontsize=12)\n\n axs[j].scatter(np.linspace(0, 96, 96), input_tmp[i][:,j,:], s=4, marker='o', color=color1[j], label=f'test data'.format('o'))\n axs[j].scatter(np.linspace(0, 96, 96), pred[i][:,j,:], s=16, marker='X', color=color2[j], label=f'reconstruction'.format('X'))\n \n axs[j].grid()\n axs[j].legend()\n\n plt.savefig(f'./{lc_meta[i+split][\"SN_name\"]}_lc.pdf', bbox_inches='tight')\n plt.close()\n\n return\n\ndef cnn_predict(autoencoder, input_tmp, input_meta_tmp, **kwargs):\n\n split = int(0.8*(lc.shape[0]))\n\n if kwargs['training_data']:\n split = 0\n\n pred, pred_loss = cnnae_test(autoencoder, input_tmp, input_meta_tmp)\n\n if kwargs['reconstruct_graph']:\n print('Plotting reconstruction graphs...')\n 
reconstruction_graph(input_tmp, pred, split)\n\n return pred, pred_loss\n\ndef main():\n \n print('Loading in autoencoder model...')\n autoencoder = tf.keras.models.load_model('/home/ricky/RNNAE/CNN_product/CNN_autoencoder_model')\n print('Autoencoder finished loading')\n\n print('Loading in encoder model...')\n encoder = tf.keras.models.load_model('/home/ricky/RNNAE/CNN_product/CNN_encoder_model')\n print('Encoder finished loading')\n\n os.chdir('/home/ricky/RNNAE/CNN_product/CNN_npy')\n input = import_data('input')\n input_train = import_data('input_train')\n input_test = import_data('input_test')\n input_meta = import_data('input_meta')\n input_meta_train = import_data('input_meta_train')\n input_meta_test = import_data('input_meta_test')\n type_train = import_data('type_train')\n type_test = import_data('type_test')\n\n cnn_predict(autoencoder, input_test[0], input_meta_test[0], reconstruct_graph=False, training_data=False)\n\n latent_space = encoder.predict([input[0], input_meta[0]], verbose=1)\n print(latent_space.shape)\n\n anomaly_id = isolation_forest(latent_space, 5000, 0)\n cdf(anomaly_id, 0)\n \n latent_space_graph(latent_space, anomaly_id, split=0)\n\n '''latent_space_concatentate = latent_space_concatenation(latent_space, ['t_normalised_noise', 'no_near_peak'], split=0)\n\n anomaly_id = isolation_forest(latent_space_concatentate, 1000, 0)\n latent_space_graph(latent_space_concatentate, anomaly_id, split=0)'''\n\n print('End of CNN_predict.py')\n\nif __name__ == '__main__':\n main()","repo_name":"rickysa007/CNNAE","sub_path":"python_files/CNN_predict.py","file_name":"CNN_predict.py","file_ext":"py","file_size_in_byte":10302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35702743827","text":"from pgmpy.models import BayesianModel\nfrom pgmpy.factors.discrete import TabularCPD\nfrom pgmpy.inference import VariableElimination\n\nmodel = BayesianModel()\nmodel.add_nodes_from(['family_out','bowel_problem', 'light_on','dog_out','hear_bark'])\nmodel.add_edge('family_out', 'light_on')\nmodel.add_edge('family_out', 'dog_out')\nmodel.add_edge('bowel_problem', 'dog_out')\nmodel.add_edge('dog_out', 'hear_bark')\n\ncpd_fo = TabularCPD(variable='family_out', variable_card=2, values=[[0.15], [0.85]])\ncpd_bp = TabularCPD(variable='bowel_problem', variable_card=2, values=[[0.01], [0.99]])\ncpd_do = TabularCPD(variable='dog_out', variable_card=2, \n values=[[0.99, 0.9, 0.97, 0.3],[0.01, 0.1, 0.03, 0.7]],\n evidence=['family_out', 'bowel_problem'], evidence_card=[2, 2])\ncpd_lo = TabularCPD(variable='light_on', variable_card=2,\n values=[[0.6, 0.05],[0.4, 0.95]], evidence=['family_out'], evidence_card=[2])\ncpd_hb = TabularCPD(variable='hear_bark', variable_card=2,\n values=[[0.7, 0.01],[0.3, 0.99]], evidence=['dog_out'], evidence_card=[2])\n\nmodel.add_cpds(cpd_fo, cpd_bp, cpd_do, cpd_lo, cpd_hb)\n#model justification\nmodel.check_model()\n\ninfer = VariableElimination(model)\nprint(infer.query(['family_out'], evidence={'light_on': 0, 'hear_bark': 1}) ['family_out'])\n","repo_name":"tywang89/mlin40","sub_path":"29 code.py","file_name":"29 code.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"3205897591","text":"\"\"\"Classes for handling the detectors and pixels in the simulation\"\"\"\nimport numpy\n\n\nclass DetectorPlate:\n r\"\"\"A detector plate to be installed in the detector.\n\n Parameters\n ----------\n bounds : 
dict\n Plate edges along the x- and y-axis. Assumes detector plane spans\n the so-defined rectangle in the x and y plane.\n Example: ``{'x': (-10., 10.), 'y': (-2.0, 7.5)}``\n Npixs : int\n Total number of pixels on this detector plate.\n z : float\n Z-coordinate of the detector plate.\n phi : float (optional)\n Angle (in degrees) by which the detector plate is rotated along\n the z-axis.\n \"\"\"\n\n def __init__(self, bounds, Npixs, z, phi=0):\n self._Npixs = None\n self._bnds = None\n self._z = None\n self._phi = None\n # Store the arguments\n self.bnds = bounds\n self.Npixs = Npixs\n self.z = z\n self.phi = phi\n # Calculate the rotation matrix\n phi = numpy.deg2rad(self.phi)\n sphi = numpy.sin(phi)\n cphi = numpy.cos(phi)\n self._rotmat = numpy.array([[cphi, -sphi],\n [sphi, cphi]])\n\n @property\n def Npixs(self):\n \"\"\"Returns the number of pixels in this detector.\"\"\"\n return self._Npixs\n\n @Npixs.setter\n def Npixs(self, Npixs):\n \"\"\"Sets the number of pixels.\"\"\"\n if not isinstance(Npixs, int):\n raise ValueError(\"``Npixs`` must be an integer.\")\n self._Npixs = Npixs\n\n @property\n def bnds(self):\n \"\"\"Returns the detector boundaries in the ``x`` and ``y`` plane.\"\"\"\n return self._bnds\n\n @bnds.setter\n def bnds(self, boundaries):\n \"\"\"Sets ``bnds``. Checks that both ``x`` and ``y`` bounds\n are present.\"\"\"\n boundaries = boundaries.copy()\n if not isinstance(boundaries, dict):\n raise ValueError(\"``boundaries`` must be a dictionary.\")\n xbnd = boundaries.pop('x', None)\n ybnd = boundaries.pop('y', None)\n if boundaries:\n raise ValueError(\"Unknown keys: {}\".format(boundaries.keys()))\n self._bnds = {}\n for par, bnd in zip(['x', 'y'], [xbnd, ybnd]):\n if bnd is None:\n raise ValueError(\"Boundaries must include {}\".format(par))\n if len(bnd) != 2:\n raise ValueError(\"Specific boundaries must have length 2.\")\n self._bnds[par] = sorted(bnd)\n\n @property\n def z(self):\n \"\"\"Returns the detector z coordinate.\"\"\"\n return self._z\n\n @z.setter\n def z(self, z):\n \"\"\"Sets ``z``.\"\"\"\n if not z > 0:\n raise ValueError(\"``z`` must be positive.\")\n self._z = z\n\n @property\n def phi(self):\n \"\"\"Returns the angle by which the detector plate is rotated.\"\"\"\n return self._phi\n\n @phi.setter\n def phi(self, phi):\n \"\"\"Sets ``phi``.\"\"\"\n if not isinstance(phi, (float, int)):\n raise ValueError(\"``phi`` must be a float.\")\n self._phi = phi\n\n def pixelID2coordinates(self, IDs):\n \"\"\"\n Returns the Cartesian coordinates ``i``-th horizontal and ``j``-th\n vertical pixel.\n \"\"\"\n i, j = IDs['xpixel'], IDs['ypixel']\n if not (0 <= i < self.Npixs and 0 <= j < self.Npixs):\n raise ValueError(\"Invalid pixel ID.\")\n # Remember that i, j run from 0, 1, ..., Npixs - 1.\n xbnd = self.bnds['x']\n ybnd = self.bnds['y']\n return {'x': (xbnd[1] - xbnd[0]) / self.Npixs * (i + 0.5) + xbnd[0],\n 'y': (ybnd[1] - ybnd[0]) / self.Npixs * (j + 0.5) + ybnd[0],\n 'z': self.z}\n\n def coordinates2pixelID(self, coords):\n \"\"\"\n Returns the ID of a pixel that corresponds to the given x and y\n coordinates. 
Checks that both are within the detector area.\n \"\"\"\n IDs = {}\n for par, bnd in self.bnds.items():\n if not bnd[0] < coords[par] < bnd[1]:\n raise ValueError(\"Invalid position.\")\n # Later move these somewhere so they don't get regenerated\n # every time\n width = bnd[1] - bnd[0]\n bins = numpy.arange(bnd[0], bnd[1], width / self.Npixs)\n IDs['{}pixel'.format(par)] = numpy.digitize(coords[par], bins) - 1\n return IDs\n\n def evaluate_collision(self, event):\n \"\"\"\n Evaluates the collision with a simulated event. Returns the ID of\n a pixel that is hit and the time.\n \"\"\"\n dt = (self.z - event['z0']) / event['vz']\n # Intersection point between the particle path and detector plane\n x0 = numpy.array([event['x0'] + event['vx'] * dt,\n event['y0'] + event['vy'] * dt]).reshape(-1, 1)\n # Rotate the intersection so that detector eges || axes\n # The first rotation is with the inverse matrix because instead of\n # rotating the axes we wish to rotate the point\n x0_rot = numpy.matmul(self._rotmat.T, x0)\n # Get the pixel IDs\n pixels = self.coordinates2pixelID({p: x0_rot[i]\n for i, p in enumerate(['x', 'y'])})\n # Get the pixel centres Cartesian coordinates\n _xf = self.pixelID2coordinates(pixels)\n xf = numpy.array([_xf[p] for p in ('x', 'y')]).reshape(-1, 1)\n xf_rot = numpy.matmul(self._rotmat, xf)\n\n out = {p: float(xf_rot[i]) for i, p in enumerate(['x', 'y'])}\n out.update({'z': self.z, 't': event['t'] + dt})\n return out\n\n\nclass Detector:\n r\"\"\"\n A simple particle detector consisting of several plates.\n\n Parameters\n ----------\n plates : list of dicts\n A list containing the individual detector planes' parameters.\n For more information see :py:class:`DetectorPlate`\n\n \"\"\"\n def __init__(self, plates):\n self._plates = None\n self.plates = [DetectorPlate(**plate) for plate in plates]\n\n def evaluate_events(self, events):\n \"\"\"\n Evaluates the events. 
Returns a list of list: ``out[i, j]``\n where the ``i`` refers to the event and ``j`` refers to the detector\n plate.\n \"\"\"\n out = [None] * len(events)\n for i, event in enumerate(events):\n data = [None] * len(self.plates)\n for j, plate in enumerate(self.plates):\n data[j] = plate.evaluate_collision(event)\n out[i] = data\n return out\n","repo_name":"Richard-Sti/Particle-Detector","sub_path":"simulator/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43605173073","text":"from __future__ import unicode_literals\nimport threading\nimport time\nimport uuid\n# remove unused usings\n# from six import iteritems, string_types\n# Remove python 2 support\n# from six.moves import queue\nimport queue as queue\nimport os\nimport octoprint_octolapse.utility as utility\nfrom octoprint_octolapse.stabilization_gcode import SnapshotGcodeGenerator, SnapshotGcode\nfrom octoprint_octolapse.gcode_commands import Commands, Response\nfrom octoprint_octolapse.gcode_processor import ParsedCommand\nfrom octoprint_octolapse.position import Position\nfrom octoprint_octolapse.settings import PrinterProfile, OctolapseSettings\nfrom octoprint_octolapse.snapshot import CaptureSnapshot, SnapshotJobInfo, SnapshotError\nfrom octoprint_octolapse.trigger import Triggers\nimport octoprint_octolapse.error_messages as error_messages\nimport octoprint_octolapse.stabilization_preprocessing as preprocessing\nfrom octoprint_octolapse.gcode_processor import GcodeProcessor\n# create the module level logger\nfrom octoprint_octolapse.log import LoggingConfigurator\n\nlogging_configurator = LoggingConfigurator()\nlogger = logging_configurator.get_logger(__name__)\n\n\nclass TimelapseStartException(Exception):\n def __init__(self, message, type):\n super(TimelapseStartException, self).__init__()\n self.message = message\n self.type = type\n\n\nclass Timelapse(object):\n\n def __init__(\n self, get_current_octolapse_settings, octoprint_printer, data_folder, default_settings_folder,\n octoprint_settings,\n on_print_started=None, on_print_start_failed=None,\n on_snapshot_start=None, on_snapshot_end=None, on_new_thumbnail_available=None,\n on_post_processing_error_callback=None, on_timelapse_stopping=None,\n on_timelapse_stopped=None, on_state_changed=None, on_timelapse_end=None, on_snapshot_position_error=None,\n on_rendering_start=None\n ):\n # config variables - These don't change even after a reset\n self.state_update_period_seconds = 1\n self._data_folder = data_folder\n self._temporary_folder = get_current_octolapse_settings().main_settings.get_temporary_directory(self._data_folder)\n self.get_current_octolapse_settings = get_current_octolapse_settings\n self._settings = self.get_current_octolapse_settings() # type: OctolapseSettings\n self._octoprint_settings = octoprint_settings\n self._octoprint_printer = octoprint_printer\n self._default_settings_folder = default_settings_folder\n self._print_start_callback = on_print_started\n self._print_start_failed_callback = on_print_start_failed\n self._snapshot_start_callback = on_snapshot_start\n self._snapshot_complete_callback = on_snapshot_end\n self._new_thumbnail_available_callback = on_new_thumbnail_available\n self._on_post_processing_error_callback = on_post_processing_error_callback\n self._timelapse_stopping_callback = on_timelapse_stopping\n self._timelapse_stopped_callback = on_timelapse_stopped\n self._state_changed_callback = 
on_state_changed\n self._timelapse_end_callback = on_timelapse_end\n self._snapshot_position_error_callback = on_snapshot_position_error\n self._on_rendering_start_callback = on_rendering_start\n self._commands = Commands() # used to parse and generate gcode\n self._triggers = None\n self._print_end_status = \"Unknown\"\n self._last_state_changed_message_time = 0\n # Settings that may be different after StartTimelapse is called\n\n self._octoprint_printer_profile = None\n self._current_job_info = None\n self._stabilization = None\n self._trigger = None\n self._trigger_profile = None\n self._gcode = None\n self._printer = None\n self._capture_snapshot = None\n self._position = None\n self._state = TimelapseState.Idle\n self._test_mode_enabled = False\n # State Tracking that should only be reset when starting a timelapse\n self._has_been_stopped = False\n self._timelapse_stop_requested = False\n # State tracking variables\n self.job_on_hold = False\n self.RequiresLocationDetectionAfterHome = False\n self._position_request_sent = False\n # fetch position private variables\n self._position_payload = None\n self._position_timeout_long = 600.0\n self._position_timeout_short = 60.0\n self._position_timeout_very_short = 5.0\n self._position_signal = threading.Event()\n self._position_signal.set()\n\n # get snapshot async private variables\n self._snapshot_success = False\n # It shouldn't take more than 5 seconds to take a snapshot!\n self._snapshot_timeout = 5.0\n self._most_recent_snapshot_payload = None\n\n self._stabilization_signal = threading.Event()\n self._stabilization_signal.set()\n\n self._current_profiles = {}\n self._current_file_line = 0\n\n self.snapshot_plans = None # type: [preprocessing.SnapshotPlan]\n self.current_snapshot_plan_index = 0\n self.current_snapshot_plan = None # type: preprocessing.SnapshotPlan\n self.is_realtime = True\n self.was_started = False\n # snapshot thread queue\n self._snapshot_task_queue = queue.Queue(maxsize=1)\n\n self._reset()\n\n def validate_snapshot_command(self, command_string):\n # there needs to be at least one non-comment non-whitespace character for the gcode command to work.\n parsed_command = GcodeProcessor.parse(command_string)\n return len(parsed_command.gcode)>0\n\n def get_snapshot_count(self):\n if self._capture_snapshot is None:\n return 0, 0\n return self._capture_snapshot.SnapshotsTotal, self._capture_snapshot.ErrorsTotal\n\n def get_current_profiles(self):\n return self._current_profiles\n\n def get_current_settings(self):\n return self._settings\n\n def get_current_state(self):\n return self._state\n\n def start_timelapse(\n self, settings, overridable_printer_profile_settings,\n gcode_file_path, snapshot_plans=None\n ):\n logger.debug(\n \"Starting the timelapse with the current configuration.\"\n )\n # we must supply the settings first! Else reset won't work properly.\n self._reset()\n # in case the settings have been destroyed and recreated\n self._settings = settings\n # ToDo: all cloning should be removed after this point. 
We already have a settings object copy.\n # Also, we no longer need the original settings since we can use the global OctolapseSettings.Logger now\n self._printer = self._settings.profiles.current_printer()\n self._temporary_folder = self._settings.main_settings.get_temporary_directory(self._data_folder)\n self._stabilization = self._settings.profiles.current_stabilization()\n self._trigger_profile = self._settings.profiles.current_trigger()\n self.snapshot_plans = snapshot_plans\n self.current_snapshot_plan = None\n self.current_snapshot_plan_index = 0\n # set the current snapshot plan if we have any\n if self.snapshot_plans is not None and len(self.snapshot_plans) > 0:\n self.current_snapshot_plan = self.snapshot_plans[self.current_snapshot_plan_index]\n # if we have at least one snapshot plan, we must have preprocessed, so set is_realtime to false.\n self.is_realtime = self.snapshot_plans is None\n assert (isinstance(self._printer, PrinterProfile))\n\n self.RequiresLocationDetectionAfterHome = False\n self.overridable_printer_profile_settings = overridable_printer_profile_settings\n\n self._current_job_info = utility.TimelapseJobInfo(\n job_guid=uuid.uuid4(),\n print_start_time=time.time(),\n print_file_name=utility.get_filename_from_full_path(gcode_file_path),\n print_file_extension=utility.get_extension_from_full_path(gcode_file_path),\n )\n # save the timelapse job info\n self._current_job_info.save(self._temporary_folder)\n # save the rendering settings for use by the RenderingProcessor for this timelapse\n self._settings.save_rendering_settings(self._temporary_folder, self._current_job_info.JobGuid)\n self._gcode = SnapshotGcodeGenerator(self._settings, self.overridable_printer_profile_settings)\n\n self._capture_snapshot = CaptureSnapshot(\n self._settings,\n self._data_folder,\n self._settings.profiles.active_cameras(),\n self._current_job_info,\n self.send_gcode_for_camera,\n self._new_thumbnail_available_callback,\n self._on_post_processing_error_callback,\n\n )\n\n self._position = Position(\n self._settings.profiles.current_printer(),\n self._settings.profiles.current_trigger(), self.overridable_printer_profile_settings\n )\n\n self._test_mode_enabled = self._settings.main_settings.test_mode_enabled\n self._triggers = Triggers(self._settings)\n self._triggers.create()\n\n # take a snapshot of the current settings for use in the Octolapse Tab\n self._current_profiles = self._settings.profiles.get_profiles_dict()\n # test the position request\n if not self._test_position_request():\n message = \"Your printer does not support M114, and is incompatible with Octolapse.\"\n if not self._settings.main_settings.cancel_print_on_startup_error:\n message += \" Continue on failure is enabled so your print will continue, but the timelapse has been \" \\\n \"aborted.\"\n raise TimelapseStartException(message, 'm114_not_supported')\n self._state = TimelapseState.WaitingForTrigger\n self.was_started = True\n logger.debug(\n \"The timelapse configuration is set, waiting to stop the job-on-hold lock.\"\n )\n\n _stabilization_gcode_tags = {\n 'snapshot-init',\n 'snapshot-start',\n 'snapshot-gcode',\n 'snapshot-return',\n 'snapshot-end'\n }\n\n def get_current_temporary_folder(self):\n return self._temporary_folder\n\n class StabilizationGcodeStateException(Exception):\n pass\n\n def send_snapshot_gcode_array(self, gcode_array, tags):\n self._octoprint_printer.commands(gcode_array, tags=tags)\n\n def send_gcode_for_camera(self, gcode_array, timeout, wait_for_completion=True, tags=None):\n if 
tags is None:\n tags = {'camera-gcode'}\n if wait_for_completion:\n self.get_position_async(\n start_gcode=gcode_array, timeout=timeout, tags=tags\n )\n else:\n self.send_snapshot_gcode_array(gcode_array, tags=tags)\n\n def _test_position_request(self):\n logger.info(\"Testing M114 Support.\")\n if self.get_position_async(timeout=self._position_timeout_short, no_wait=True):\n return True\n return False\n\n # requests a position from the printer (m400-m114), and can send optional gcode before the position request.\n # this ensures any gcode sent in the start_gcode parameter will be executed before the function returns.\n _position_acquisition_array_wait = [\"M400\", \"M114\"]\n _position_acquisition_no_wait = [\"M400\", \"M114\"]\n\n def get_position_async(self, start_gcode=None, timeout=None, tags=None, no_wait=False):\n self._position_payload = None\n if timeout is None:\n timeout = self._position_timeout_long\n\n logger.info(\"Octolapse is requesting a position.\")\n\n # Warning, we can only request one position at a time!\n if self._position_signal.is_set():\n self._position_signal.clear()\n if tags is None:\n tags = set()\n\n # send any code that is to be run before the position request\n if start_gcode is not None and len(start_gcode) > 0:\n self.send_snapshot_gcode_array(start_gcode, tags)\n tags = set(['wait-for-position'])\n\n if no_wait:\n self.send_snapshot_gcode_array([\"M114\"], tags)\n else:\n self.send_snapshot_gcode_array([\"M400\", \"M114\"], tags)\n event_is_set = self._position_signal.wait(timeout)\n if not event_is_set:\n # we ran into a timeout while waiting for a fresh position\n logger.warning(\"Warning: A timeout occurred while requesting the current position.\")\n return None\n return self._position_payload\n\n def on_position_received(self, payload):\n # added new position request sent flag so that we can prevent position requests NOT from Octolapse from\n # triggering a snapshot.\n if self._position_request_sent:\n self._position_request_sent = False\n logger.info(\"Octolapse has received a position request response.\")\n # set flag to false so that it can be triggered again after the next M114 sent by Octolapse\n self._position_payload = payload\n self._position_signal.set()\n else:\n logger.info(\"Octolapse has received an position response but did not request one. 
Ignoring.\")\n\n def _take_snapshots(self, metadata):\n snapshot_payload = {\n \"success\": False,\n \"error\": \"Waiting on thread to signal, aborting\"\n }\n # start the snapshot\n logger.info(\"Taking a snapshot.\")\n self._snapshot_task_queue.join()\n self._snapshot_task_queue.put(\"snapshot_job\")\n try:\n results = self._capture_snapshot.take_snapshots(metadata, no_wait=not self._stabilization.wait_for_moves_to_finish)\n finally:\n self._snapshot_task_queue.get()\n self._snapshot_task_queue.task_done()\n # todo - notify client here\n # todo - maintain snapshot number separately for each camera!\n succeeded = len(results) > 0\n errors = {}\n error_count = 0\n for result in results:\n assert(isinstance(result, SnapshotJobInfo))\n if not result.success:\n succeeded = False\n error_count += 1\n if isinstance(result.error, SnapshotError):\n error_message = result.error.message\n # remove python 2 support\n # elif isinstance(result.error, string_types):\n elif isinstance(result.error, str):\n error_message = result.error\n\n if result.job_type not in errors:\n errors[result.job_type] = {\"error\": error_message, 'count': 1}\n else:\n previous_error = errors[result.job_type]\n previous_error[\"error\"] += error_message\n previous_error[\"count\"] += 1\n\n snapshot_payload[\"success\"] = succeeded\n\n error_message = \"\"\n if error_count == 1:\n # remove python 2 support\n # for key, value in iteritems(errors):\n for key, value in errors.items():\n error = value[\"error\"]\n if key == 'before-snapshot':\n error_message = \"Before Snapshot Script Error: {0}\"\n elif key == 'after-snapshot':\n error_message = \"After Snapshot Script Error: {0}\"\n else:\n error_message = \"{0}\"\n\n error_message = error_message.format(error)\n\n elif error_count > 1:\n before_snapshot_error_count = False\n after_snapshot_error_count = False\n snapshot_error_count = False\n # remove python 2 support\n # for key, value in iteritems(errors):\n for key, value in errors.items():\n if key == 'before-snapshot':\n before_snapshot_error_count = value[\"count\"]\n elif key == 'after-snapshot':\n after_snapshot_error_count = value[\"count\"]\n else:\n snapshot_error_count = value[\"count\"]\n\n error_message = \"Multiple errors occurred:\"\n if before_snapshot_error_count > 0:\n error_message += \"{0}{1} Before Snapshot Error{2}\".format(\n os.linesep, before_snapshot_error_count, \"s\" if before_snapshot_error_count > 1 else \"\"\n )\n if snapshot_error_count > 0:\n error_message += \"{0}{1} Snapshot Error{2}\".format(\n os.linesep, snapshot_error_count, \"s\" if snapshot_error_count > 1 else \"\"\n )\n if after_snapshot_error_count > 0:\n error_message += \"{0}{1} After Snapshot Error{2}\".format(\n os.linesep, after_snapshot_error_count, \"s\" if after_snapshot_error_count > 1 else \"\"\n )\n error_message += \"{0}See plugin_octolapse.log for details.\".format(os.linesep)\n\n snapshot_payload[\"error\"] = error_message\n\n if len(error_message) > 0:\n self._snapshot_success = False\n\n return snapshot_payload\n\n def _take_timelapse_snapshot_precalculated(self):\n timelapse_snapshot_payload = {\n \"snapshot_position\": None,\n \"return_position\": None,\n \"snapshot_gcode\": None,\n \"snapshot_payload\": None,\n \"success\": False,\n \"error\": \"\"\n }\n try:\n has_error = False\n # create the GCode for the timelapse and store it\n snapshot_gcode = self._gcode.create_gcode_for_snapshot_plan(\n self.current_snapshot_plan, self._position.g90_influences_extruder,\n self._trigger_profile.get_snapshot_plan_options()\n 
)\n # save the gcode fo the payload\n timelapse_snapshot_payload[\"snapshot_gcode\"] = snapshot_gcode\n\n if snapshot_gcode is None:\n logger.warning(\"No snapshot gcode was generated.\")\n return timelapse_snapshot_payload\n\n assert (isinstance(snapshot_gcode, SnapshotGcode))\n\n # If we have any initialization gcodes, send them before waiting for moves to finish\n if len(snapshot_gcode.InitializationGcode) > 0:\n logger.info(\"Queuing %d initialization commands.\", len(snapshot_gcode.InitializationGcode))\n self.send_snapshot_gcode_array(snapshot_gcode.InitializationGcode, {'snapshot-init'})\n\n # If we have any start gcodes (lift/retract), send them before waiting for moves to finish\n if len(snapshot_gcode.StartGcode) > 0:\n logger.info(\"Queuing %d start commands.\", len(snapshot_gcode.StartGcode))\n self.send_snapshot_gcode_array(snapshot_gcode.StartGcode, {'snapshot-start'})\n\n ## Send the snapshot gcodes, making sure to send an M400+M114 before taking any snapshots\n gcodes_to_send = []\n # loop through the snapshot commands and build up gocdes_to_send array, only sending the current commands\n # once we hit the snapshot command.\n for gcode in snapshot_gcode.snapshot_commands:\n if gcode == \"{0} {1}\".format(PrinterProfile.OCTOLAPSE_COMMAND, PrinterProfile.DEFAULT_OCTOLAPSE_SNAPSHOT_COMMAND):\n if self._stabilization.wait_for_moves_to_finish:\n logger.debug(\n \"Queuing %d snapshot commands, an M400 and an M114 command. Note that the actual snapshot command is never sent.\",\n len(gcodes_to_send)\n )\n snapshot_position = self.get_position_async(start_gcode=gcodes_to_send, tags={'snapshot-gcode'})\n if snapshot_position is None:\n has_error = True\n logger.error(\n \"The snapshot position is None. Either the print has cancelled or a timeout has been \"\n \"reached. \"\n )\n else:\n logger.debug(\n \"Queuing %d snapshot commands. 
Not waiting for moves to finish.\",\n len(gcodes_to_send)\n )\n snapshot_position = None\n self.send_snapshot_gcode_array(gcodes_to_send, {'snapshot-gcode'})\n gcodes_to_send = []\n\n # TODO: ALLOW MULTIPLE PAYLOADS\n timelapse_snapshot_payload[\"snapshot_position\"] = snapshot_position\n # take a snapshot\n timelapse_snapshot_payload[\"snapshot_payload\"] = self._take_snapshots(self.current_snapshot_plan.get_snapshot_metadata())\n else:\n gcodes_to_send.append(gcode)\n\n if len(gcodes_to_send) > 0:\n logger.info(\"Queuing remaining %d snapshot commands.\", len(snapshot_gcode.StartGcode))\n self.send_snapshot_gcode_array(gcodes_to_send, {\"snapshot-gcode\"})\n\n # return the printhead to the starting position by sending the return commands\n if len(snapshot_gcode.ReturnCommands) > 0:\n logger.info(\"Queuing %d return commands.\", len(snapshot_gcode.ReturnCommands))\n self.send_snapshot_gcode_array(snapshot_gcode.ReturnCommands, {\"snapshot-return\"})\n\n # send any end gcodes, including deretract, delift, axis mode corrections, etc\n if len(snapshot_gcode.EndGcode) > 0:\n logger.info(\"Queuing %d end commands.\", len(snapshot_gcode.EndGcode))\n self.send_snapshot_gcode_array(snapshot_gcode.EndGcode, {\"snapshot-end\"})\n\n if self._state != TimelapseState.TakingSnapshot:\n logger.warning(\n \"The timelapse state was expected to TakingSnapshots, but was equal to {0}\".format(self._state)\n )\n # we've completed the procedure, set success\n timelapse_snapshot_payload[\"success\"] = not has_error\n\n except Timelapse.StabilizationGcodeStateException as e:\n logger.exception(\"The timelapse was in the wrong state to take a snapshot.\")\n timelapse_snapshot_payload[\"success\"] = False\n timelapse_snapshot_payload[\"error\"] = \"The timelapse was stopped in the middle of a snapshot. Skipping.\"\n except Exception as e:\n logger.exception(\"Failed to take a snapshot for the provided snapshot plan.\")\n timelapse_snapshot_payload[\"error\"] = \"An unexpected error was encountered while running the timelapse \" \\\n \"snapshot procedure. 
\"\n\n return timelapse_snapshot_payload\n\n # public functions\n def to_state_dict(self, include_timelapse_start_data=False):\n try:\n position_dict = None\n printer_state_dict = None\n extruder_dict = None\n trigger_state = None\n snapshot_plan = None\n\n if self._settings is not None:\n if self.is_realtime:\n if self._position is not None:\n position_dict = self._position.to_position_dict()\n printer_state_dict = self._position.to_state_dict()\n extruder_dict = self._position.current_pos.to_extruder_state_dict()\n if self._triggers is not None:\n trigger_state = {\n \"name\": self._triggers.name,\n \"triggers\": self._triggers.state_to_list()\n }\n else:\n snapshot_plans = None\n total_travel_distance = 0.0\n total_saved_travel_distance = 0.0\n if include_timelapse_start_data:\n if self.snapshot_plans is not None:\n snapshot_plans = []\n for plan in self.snapshot_plans:\n snapshot_plans.append(plan.to_dict())\n total_travel_distance += plan.travel_distance\n total_saved_travel_distance += plan.saved_travel_distance\n printer_volume = self.overridable_printer_profile_settings[\"volume\"]\n snapshot_plan = {\n \"printer_volume\": printer_volume,\n \"snapshot_plans\": snapshot_plans,\n \"total_travel_distance\": total_travel_distance,\n \"total_saved_travel_distance\": total_saved_travel_distance,\n \"current_plan_index\": self.current_snapshot_plan_index,\n \"current_file_line\": self._current_file_line,\n }\n\n state_dict = {\n \"extruder\": extruder_dict,\n \"position\": position_dict,\n \"printer_state\": printer_state_dict,\n \"trigger_state\": trigger_state,\n \"trigger_type\": \"real-time\" if self.is_realtime else \"pre-calculated\",\n \"snapshot_plan\": snapshot_plan,\n\n }\n return state_dict\n except Exception as e:\n logger.exception(\"Failed to create a timelapse state dict.\")\n raise e\n\n # if we're here, we've reached and logged an error.\n return {\n \"extruder\": None,\n \"position\": None,\n \"printer_state\": None,\n \"trigger_state\": None\n }\n\n def stop_snapshots(self, message=None, error=False):\n self._state = TimelapseState.WaitingToRender\n if self._timelapse_stopped_callback is not None:\n timelapse_stopped_callback_thread = threading.Thread(\n target=self._timelapse_stopped_callback, args=[message, error]\n )\n timelapse_stopped_callback_thread.daemon = True\n timelapse_stopped_callback_thread.start()\n return True\n\n def release_job_on_hold_lock(self, force=False, reset=False, parsed_command=None):\n if parsed_command is not None:\n self._octoprint_printer.commands([parsed_command.gcode], tags={\"before_release_job_lock\"})\n\n if self.job_on_hold:\n if force or (self._stabilization_signal.is_set() and self._position_signal.is_set()):\n logger.debug(\"Releasing job-on-hold lock.\")\n if self._octoprint_printer.is_operational():\n try:\n self._octoprint_printer.set_job_on_hold(False)\n except RuntimeError as e:\n logger.exception(\"Unable to release job lock. 
It's likely that the printer was disconnected.\")\n self.job_on_hold = False\n if reset:\n self._reset()\n\n def on_print_failed(self):\n if self._state != TimelapseState.Idle:\n self.end_timelapse(\"FAILED\")\n\n def on_print_disconnecting(self):\n if self._state != TimelapseState.Idle:\n self.end_timelapse(\"DISCONNECTING\")\n\n def on_print_disconnected(self):\n if self._state != TimelapseState.Idle:\n self.end_timelapse(\"DISCONNECTED\")\n\n def on_print_cancelling(self):\n self._state = TimelapseState.Cancelling\n\n def on_print_canceled(self):\n if self._state != TimelapseState.Idle:\n self.end_timelapse(\"CANCELED\")\n\n def on_print_completed(self):\n if self._state != TimelapseState.Idle:\n self.end_timelapse(\"COMPLETED\")\n\n def on_print_ended(self):\n self.snapshot_plans = []\n\n def end_timelapse(self, print_status):\n self._print_end_status = print_status\n try:\n if self._state != TimelapseState.Idle:\n # See if there are enough snapshots to start renderings\n snapshot_count, error_count = self.get_snapshot_count()\n if snapshot_count > 1:\n self._render_timelapse(self._print_end_status)\n self._reset()\n except Exception as e:\n logger.exception(\"Failed to end the timelapse\")\n\n if self._timelapse_end_callback is not None:\n self._timelapse_end_callback()\n\n def on_print_paused(self):\n try:\n if self._state == TimelapseState.Idle:\n return\n elif self._state < TimelapseState.WaitingToRender:\n logger.info(\"Print Paused.\")\n self._triggers.pause()\n except Exception as e:\n logger.exception(\"Failed to pause the print.\")\n\n def on_print_resumed(self):\n try:\n if self._state == TimelapseState.Idle:\n return\n elif self._state < TimelapseState.WaitingToRender:\n self._triggers.resume()\n except Exception as e:\n logger.exception(\"Failed to resume the print\")\n\n def is_timelapse_active(self):\n if (\n self._settings is None\n or self._state in [TimelapseState.Idle, TimelapseState.Initializing, TimelapseState.WaitingToRender]\n or self._octoprint_printer.get_state_id() == \"CANCELLING\"\n or (self.is_realtime and (self._triggers is None or self._triggers.count() < 1))\n ):\n return False\n return True\n\n def get_is_test_mode_active(self):\n return self._test_mode_enabled\n\n def get_is_taking_snapshot(self):\n return self._snapshot_task_queue.qsize() > 0\n\n def on_print_start(self, parsed_command):\n self._print_start_callback(parsed_command)\n\n def on_print_start_failed(self, message):\n self._print_start_failed_callback(message)\n\n def on_gcode_queuing(self, command_string, cmd_type, gcode, tags):\n if self.detect_timelapse_start(command_string, tags) == (None,):\n # suppress command if the timelapse start detection routine tells us to\n # this is because preprocessing happens on a thread, and will send any detected commands after completion.\n return None,\n\n if not self.is_timelapse_active():\n current_printer = self._settings.profiles.current_printer()\n if (\n current_printer is not None and\n current_printer.suppress_snapshot_command_always and\n current_printer.is_snapshot_command(command_string)\n ):\n logger.info(\n \"Snapshot command %s detected while octolapse was disabled.\"\n \" Suppressing command.\".format(command_string)\n )\n return None,\n else:\n # if the timelapse is not active, exit without changing any gcode\n return None\n\n self.check_current_line_number(tags)\n\n if not (\n tags is not None and\n \"plugin:octolapse\" in tags and\n self.log_octolapse_gcode(logger.debug, \"queuing\", command_string, tags)\n ):\n 
logger.verbose(\"Queuing: %s\", command_string)\n\n if self.is_realtime:\n return_value = self.process_realtime_gcode(command_string, tags)\n parsed_command = self._position.current_pos.parsed_command\n else:\n parsed_command = GcodeProcessor.parse(command_string)\n return_value = self.process_pre_calculated_gcode(parsed_command, tags)\n\n # notify any callbacks\n self._send_state_changed_message()\n\n if (\n return_value == (None,) or (\n self._printer.is_snapshot_command(command_string)\n )\n ):\n return None,\n\n if parsed_command is not None and parsed_command.cmd is not None:\n # see if the current command is G92 with a dummy parameter (O)\n # note that this must be done BEFORE stripping commands for test mode\n if (\n parsed_command.cmd == \"G92\"\n and (\"O\" in parsed_command.parameters)\n ):\n parsed_command.parameters.pop(\"O\")\n if len(parsed_command.parameters) == 0:\n # suppress command, the g92 ONLY contained an O (fake home) parameter\n return None,\n return Commands.to_string(parsed_command)\n\n # look for test mode\n if self._test_mode_enabled and self._state >= TimelapseState.WaitingForTrigger:\n return self._commands.alter_for_test_mode(parsed_command)\n\n # Send the original unaltered command\n return None\n\n def set_next_snapshot_plan(self):\n self.current_snapshot_plan = None\n self.current_snapshot_plan_index += 1\n if len(self.snapshot_plans) > self.current_snapshot_plan_index:\n self.current_snapshot_plan = self.snapshot_plans[self.current_snapshot_plan_index]\n\n def process_pre_calculated_gcode(self, parsed_command, tags):\n if not {'plugin:octolapse', 'snapshot_gcode'}.issubset(tags) and 'source:file' in tags:\n if self.current_snapshot_plan is None:\n return None\n current_file_line = self.get_current_file_line(tags)\n # skip plans if we need to in case any were missed.\n if self.current_snapshot_plan.file_gcode_number < current_file_line:\n while (\n self.current_snapshot_plan.file_gcode_number < current_file_line and\n len(self.snapshot_plans) > self.current_snapshot_plan_index\n ):\n self.set_next_snapshot_plan()\n\n if (\n self._state == TimelapseState.WaitingForTrigger\n and self._octoprint_printer.is_printing()\n and self.current_snapshot_plan.file_gcode_number == current_file_line\n ):\n # time to take a snapshot!\n if self.current_snapshot_plan.triggering_command.gcode != parsed_command.gcode:\n logger.error(\n \"The snapshot plan position (gcode number: %s, gcode:%s, line number: %s) does not match the actual position (gcode number: %s, gcode: %s)! \"\n \"Aborting Snapshot, moving to next plan.\",\n self.current_snapshot_plan.file_gcode_number,\n self.current_snapshot_plan.triggering_command.gcode,\n self.current_snapshot_plan.file_line_number,\n current_file_line,\n parsed_command.gcode\n )\n self.set_next_snapshot_plan()\n return None\n\n if self._octoprint_printer.set_job_on_hold(True):\n logger.debug(\"Setting job-on-hold lock.\")\n # this was set to 'False' earlier. 
Why?\n self.job_on_hold = True\n # We are triggering, take a snapshot\n self._state = TimelapseState.TakingSnapshot\n\n # take the snapshot on a new thread, making sure to set a signal so we know when it is finished\n if not self._stabilization_signal.is_set():\n self._stabilization_signal.clear()\n thread = threading.Thread(\n target=self.acquire_snapshot_precalculated, args=[parsed_command]\n )\n thread.daemon = True\n thread.start()\n # suppress the current command, we'll send it later\n return None,\n return None\n\n def process_realtime_gcode(self, gcode, tags):\n # a flag indicating that we should suppress the command (prevent it from being sent to the printer)\n suppress_command = False\n\n # update the position tracker so that we know where all of the axis are.\n # We will need this later when generating snapshot gcode so that we can return to the previous\n # position\n try:\n # get the position state in case it has changed\n # if there has been a position or extruder state change, inform any listener\n file_line_number = self.get_current_file_line(tags)\n self._position.update(gcode, file_line_number=file_line_number)\n parsed_command = self._position.current_pos.parsed_command\n\n # if this code is snapshot gcode, simply return it to the printer.\n if not {'plugin:octolapse', 'snapshot_gcode'}.issubset(tags):\n if not self.check_for_non_metric_errors():\n\n if (\n self._state == TimelapseState.WaitingForTrigger\n and self._position.previous_pos.parsed_command is not None\n and (\n self._position.command_requires_location_detection(\n self._position.previous_pos.parsed_command.cmd\n )\n and self._octoprint_printer.is_printing()\n )\n ):\n # there is no longer a need to detect Octoprint start/end script, so\n # we can put the job on hold without fear!\n self._state = TimelapseState.AcquiringLocation\n\n if self._octoprint_printer.set_job_on_hold(True):\n logger.debug(\"Setting job-on-hold lock.\")\n self.job_on_hold = True\n thread = threading.Thread(target=self.acquire_position, args=[parsed_command])\n thread.daemon = True\n thread.start()\n return None,\n elif (self._state == TimelapseState.WaitingForTrigger\n and self._octoprint_printer.is_printing()):\n # update the triggers with the current position\n self._triggers.update(self._position)\n\n # see if at least one trigger is triggering\n _first_triggering = self.get_first_triggering()\n\n if _first_triggering:\n # get the job lock\n if self._octoprint_printer.set_job_on_hold(True):\n logger.debug(\"Setting job-on-hold lock.\")\n self.job_on_hold = True\n # We are triggering, take a snapshot\n self._state = TimelapseState.TakingSnapshot\n # pause any timer triggers that are enabled\n self._triggers.pause()\n\n # create the snapshot plan\n self.current_snapshot_plan = self._gcode.create_snapshot_plan(\n self._position, _first_triggering)\n\n # take the snapshot on a new thread, making sure to set a signal so we know when it\n # is finished\n if not self._stabilization_signal.is_set():\n self._stabilization_signal.clear()\n thread = threading.Thread(\n target=self.acquire_snapshot_precalculated, args=[parsed_command]\n )\n thread.daemon = True\n thread.start()\n\n # undo the position update since we'll be suppressing this command\n #self._position.undo_update()\n\n # suppress the current command, we'll send it later\n return None,\n\n elif self._state == TimelapseState.TakingSnapshot:\n # Don't do anything further to any commands unless we are\n # taking a timelapse , or if octolapse paused the print.\n # suppress any commands 
we don't, under any circumstances,\n # to execute while we're taking a snapshot\n\n if parsed_command.cmd in self._commands.SuppressedSnapshotGcodeCommands:\n suppress_command = True # suppress the command\n\n except Exception as e:\n logger.exception(\"Realtime gcode processing failed.\")\n raise\n\n # do any post processing for test mode\n if suppress_command:\n return None,\n\n def detect_timelapse_start(self, command_string, tags):\n # detect print start, including any start gcode script\n if (\n self._state == TimelapseState.Idle and\n self.get_current_octolapse_settings().main_settings.is_octolapse_enabled and\n (\n (\n 'trigger:comm.start_print' in tags and\n (\n 'trigger:comm.reset_line_numbers' in tags or\n command_string.startswith(\"M23\")\n )\n ) or {'script:beforePrintStarted', 'trigger:comm.send_gcode_script'} <= tags\n ) and self._octoprint_printer.is_printing()\n ):\n if command_string.startswith(\"M23\"):\n # SD print, can't do anything about it. Send a warning\n error = error_messages.get_error([\"init\", \"cant_print_from_sd\"])\n logger.info(error[\"description\"])\n self.on_print_start_failed([error])\n # continue with the print since we can't stop it\n return None\n\n if self._octoprint_printer.set_job_on_hold(True):\n logger.debug(\"Setting job-on-hold lock.\")\n self.job_on_hold = True\n self._state = TimelapseState.Initializing\n\n logger.info(\n \"Print Start Detected. Command: %s, Tags:%s\",\n command_string,\n tags\n )\n # parse the command string\n try:\n parsed_command = GcodeProcessor.parse(command_string)\n except ValueError as e:\n self._state = TimelapseState.Idle\n logger.exception(\"Unable to parse the command string.\")\n # if we don't return NONE here, we will have problems with the print!\n return None\n except Exception as e:\n self._state = TimelapseState.Idle\n logger.exception(\"An unexpected exception occurred while trying to parse the command string.\")\n # TODO: REMOVE THIS BECAUSE IT'S TOO BROAD!\n raise e\n\n # start a thread to start the timelapse\n\n def run_on_print_start_callback(parsed_command):\n self.on_print_start(parsed_command)\n\n thread = threading.Thread(\n target=run_on_print_start_callback, args=[parsed_command]\n )\n thread.daemon = True\n thread.start()\n return None,\n else:\n self.on_print_start_failed(\n error_messages['timelapse']['cannot_aquire_job_lock']\n )\n return None\n\n def preprocessing_finished(self, parsed_command):\n if parsed_command is not None:\n self.send_snapshot_gcode_array([parsed_command.gcode], {'pre-processing-end'})\n self.release_job_on_hold_lock()\n logger.debug(\"Releasing job-on-hold lock.\")\n self.job_on_hold = False\n\n @staticmethod\n def get_current_file_line(tags):\n # check the current line number\n if 'source:file' in tags:\n for tag in tags:\n if len(tag) > 9 and tag.startswith(\"fileline:\"):\n actual_file_line = tag[9:]\n return int(actual_file_line)\n return None\n\n @staticmethod\n def get_current_file_position(tags):\n # check the current line number\n if 'source:file' in tags:\n for tag in tags:\n if len(tag) > 9 and tag.startswith(\"filepos:\"):\n actual_file_position = tag[8:]\n return int(actual_file_position)\n return None\n\n def check_current_line_number(self, tags):\n # check the current line number\n if 'source:file' in tags:\n # this line is from the file, advance!\n self._current_file_line += 1\n if \"fileline:{0}\".format(self._current_file_line) not in tags:\n actual_file_line = \"unknown\"\n for tag in tags:\n if len(tag) > 9 and tag.startswith(\"fileline:\"):\n 
actual_file_line = tag[9:]\n message = \"File line number {0} was expected, but {1} was received!\".format(\n self._current_file_line + 1,\n actual_file_line\n )\n logger.error(message)\n self.stop_snapshots(message, True)\n\n def check_for_non_metric_errors(self):\n # make sure we're not using inches\n is_metric = self._position.current_pos.is_metric\n has_error = False\n error_message = \"\"\n if is_metric is None:\n has_error = True\n error_message = \"The printer profile requires an explicit G21 command before any position \" \\\n \"altering/setting commands, including any home commands. Stopping timelapse, \" \\\n \"but continuing the print. \"\n\n elif not is_metric:\n has_error = True\n if self._printer.units_default == \"inches\":\n error_message = \"The printer profile uses 'inches' as the default unit of measurement. In order to\" \\\n \" use Octolapse, a G21 command must come before any position altering/setting commands, including\" \\\n \" any home commands. Stopping timelapse, but continuing the print. \"\n else:\n error_message = \"The gcode file contains a G20 command (set units to inches), which Octolapse \" \\\n \"does not support. Stopping timelapse, but continuing the print.\"\n\n if has_error:\n logger.error(error_message)\n self.stop_snapshots(error_message, has_error)\n\n return has_error\n\n def get_first_triggering(self):\n try:\n # make sure we're in a state that could want to check for triggers\n if not self._state == TimelapseState.WaitingForTrigger:\n return False\n # see if the PREVIOUS command triggered (that means current gcode gets sent if the trigger[0]\n # is triggering\n first_trigger = self._triggers.get_first_triggering(0, Triggers.TRIGGER_TYPE_IN_PATH)\n\n if first_trigger:\n logger.info(\"An in-path snapshot is triggering\")\n return first_trigger\n\n first_trigger = self._triggers.get_first_triggering(0, Triggers.TRIGGER_TYPE_DEFAULT)\n if first_trigger: # We're triggering\n logger.info(\"A snapshot is triggering\")\n return first_trigger\n except Exception as e:\n logger.exception(\"Failed checking snapshot trigger state.\")\n # no need to re-raise here, the trigger just won't happen\n return False\n\n def acquire_position(self, parsed_command):\n try:\n assert (isinstance(parsed_command, ParsedCommand))\n logger.info(\n \"A position altering command has been detected. Fetching and updating position. 
\"\n \"Position Command: %s\",\n parsed_command.gcode\n )\n # Undo the last position update, we will be resending the command\n self._position.undo_update()\n current_position = self.get_position_async(tags={'acquire-position'})\n\n if current_position is None:\n self._print_end_status = \"POSITION_TIMEOUT\"\n self._state = TimelapseState.WaitingToEndTimelapse\n logger.info(\"Unable to acquire a position.\")\n else:\n # update position\n self._position.update_position(\n current_position[\"x\"],\n current_position[\"y\"],\n current_position[\"z\"],\n current_position[\"e\"],\n None)\n\n # adjust the triggering command\n gcode = parsed_command.gcode\n\n if gcode != \"\":\n if self._state == TimelapseState.AcquiringLocation:\n logger.info(\"Sending triggering command for position acquisition - %s\", gcode)\n # send the triggering command\n self.send_snapshot_gcode_array([gcode], {'location-detection-command'})\n else:\n logger.warning(\n \"Unable to send triggering command for position acquisition - incorrect state:%s.\",\n self._state\n )\n # set the state\n if self._state == TimelapseState.AcquiringLocation:\n self._state = TimelapseState.WaitingForTrigger\n\n logger.info(\"Position Acquired\")\n\n finally:\n self._octoprint_printer.set_job_on_hold(False)\n logger.debug(\"Releasing job-on-hold lock.\")\n self.job_on_hold = False\n\n def acquire_snapshot_precalculated(self, parsed_command):\n try:\n logger.info(\"About to take a snapshot. Triggering Command: %s\", parsed_command.gcode)\n if self._snapshot_start_callback is not None:\n snapshot_callback_thread = threading.Thread(target=self._snapshot_start_callback)\n snapshot_callback_thread.daemon = True\n snapshot_callback_thread.start()\n\n # take the snapshot\n self._most_recent_snapshot_payload = self._take_timelapse_snapshot_precalculated()\n\n if self._most_recent_snapshot_payload is None:\n logger.error(\"acquire_snapshot received a null payload.\")\n else:\n logger.info(\"The snapshot has completed\")\n\n finally:\n\n # set the state\n if self._state == TimelapseState.TakingSnapshot:\n self._state = TimelapseState.WaitingForTrigger\n\n # notify that we're finished, but only if we haven't just stopped the timelapse.\n if self._most_recent_snapshot_payload is not None:\n logger.info(\"Sending on_snapshot_complete payload.\")\n # send a copy of the dict in case it gets changed by threads.\n new_payload = self._most_recent_snapshot_payload.copy()\n self._on_trigger_snapshot_complete(new_payload)\n self._most_recent_snapshot_payload = None\n\n # set the next snapshot plan\n if not self.is_realtime:\n self.set_next_snapshot_plan()\n self._octoprint_printer.set_job_on_hold(False)\n logger.debug(\"Releasing job-on-hold lock.\")\n self.job_on_hold = False\n self._stabilization_signal.set()\n\n def on_gcode_sending(self, cmd, tags):\n if cmd == \"M114\" and 'plugin:octolapse' in tags:\n logger.debug(\"The position request is being sent\")\n self._position_request_sent = True\n elif self._state == TimelapseState.Idle:\n return\n elif not (\n tags is not None\n and \"plugin:octolapse\" in tags\n and self.log_octolapse_gcode(logger.verbose, \"sending\", cmd, tags)\n ):\n logger.verbose(\"Sending: %s\", cmd)\n\n def on_gcode_sent(self, cmd, cmd_type, gcode, tags={}):\n if self._state == TimelapseState.Idle:\n return\n if not (\n tags is not None\n and \"plugin:octolapse\" in tags\n and self.log_octolapse_gcode(logger.debug, \"sent\", cmd, tags)\n ):\n logger.debug(\"Sent: %s\", cmd)\n\n def on_gcode_received(self, line):\n if 
self._position_request_sent:\n payload = Response.check_for_position_request(line)\n if payload:\n self.on_position_received(payload)\n elif self._state != TimelapseState.Idle:\n logger.verbose(\"Received: %s\", line)\n return line\n\n def log_octolapse_gcode(self, logf, msg, cmd, tags):\n if \"acquire-position\" in tags:\n logf(\"Acquire snapshot position gcode - %s: %s\", msg, cmd)\n elif \"snapshot-init\" in tags:\n logf(\"Snapshot gcode INIT - %s: %s\", msg, cmd)\n elif \"snapshot-start\" in tags:\n logf(\"Snapshot gcode START - %s: %s\", msg, cmd)\n elif \"snapshot-gcode\" in tags:\n logf(\"Snapshot gcode SNAPSHOT - %s: %s\", msg, cmd)\n elif \"snapshot-return\" in tags:\n logf(\"Snapshot gcode RETURN - %s: %s\", msg, cmd)\n elif \"snapshot-end\" in tags:\n logf(\"Snapshot gcode END - %s: %s\", msg, cmd)\n elif \"wait-for-position\" in tags:\n logf(\"Waiting for moves to complete before continuing - %s: %s\", msg, cmd)\n elif \"pre-processing-end\" in tags:\n logf(\"Pre processing finished gcode - %s: %s\", msg, cmd)\n elif \"current-position\" in tags:\n logf(\"Current position gcode - %s: %s\", msg, cmd)\n elif \"before-snapshot-gcode\" in tags:\n logf(\"Before snapshot gcode - %s: %s\", msg, cmd)\n elif \"after-snapshot-gcode\" in tags:\n logf(\"After snapshot gcode - %s: %s\", msg, cmd)\n elif \"camera-gcode\" in tags:\n logf(\"Camera gcode - %s: %s\", msg, cmd)\n elif \"force_xyz_axis\" in tags:\n logf(\"Force XYZ axis mode gcode - %s: %s\", msg, cmd)\n elif \"force_e_axis\" in tags:\n logf(\"Force E axis mode gcode - %s: %s\", msg, cmd)\n elif \"preview-stabilization\" in tags:\n logf(\"Preview stabilization gcode - %s: %s\", msg, cmd)\n else:\n return False\n return True\n\n\n # internal functions\n ####################\n def _send_state_changed_message(self):\n \"\"\"Notifies any callbacks about any changes contained in the dictionaries.\n If you send a dict here the client will get a message, so check the\n settings to see if they are subscribed to notifications before populating the dictinaries!\"\"\"\n try:\n\n if self._last_state_changed_message_time + 1 > time.time():\n return\n # Notify any callbacks\n if self._state_changed_callback is not None:\n\n def send_real_time_change_message():\n trigger_change_list = None\n position_change_dict = None\n printer_state_change_dict = None\n extruder_change_dict = None\n trigger_changes_dict = None\n\n # Get the changes\n if self.get_current_octolapse_settings().main_settings.show_trigger_state_changes:\n trigger_change_list = self._triggers.state_to_list()\n if self.get_current_octolapse_settings().main_settings.show_position_changes:\n position_change_dict = self._position.to_position_dict()\n\n update_printer_state = (\n self.get_current_octolapse_settings().main_settings.show_printer_state_changes\n )\n\n if update_printer_state:\n printer_state_change_dict = self._position.to_state_dict()\n if self.get_current_octolapse_settings().main_settings.show_extruder_state_changes:\n extruder_change_dict = self._position.current_pos.to_extruder_state_dict()\n\n # if there are any state changes, send them\n if (\n position_change_dict is not None\n or printer_state_change_dict is not None\n or extruder_change_dict is not None\n or trigger_change_list is not None\n ):\n if trigger_change_list is not None and len(trigger_change_list) > 0:\n trigger_changes_dict = {\n \"name\": self._triggers.name,\n \"triggers\": trigger_change_list\n }\n change_dict = {\n\n \"trigger_type\": \"real-time\",\n \"extruder\": extruder_change_dict,\n 
\"position\": position_change_dict,\n \"printer_state\": printer_state_change_dict,\n \"trigger_state\": trigger_changes_dict\n }\n\n if (\n change_dict[\"extruder\"] is not None\n or change_dict[\"position\"] is not None\n or change_dict[\"printer_state\"] is not None\n or change_dict[\"trigger_state\"] is not None\n ):\n self._state_changed_callback(change_dict)\n\n def send_pre_calculated_change_message():\n if not self.get_current_octolapse_settings().main_settings.show_snapshot_plan_information:\n return\n # if there are any state changes, send them\n change_dict = {\n\n \"trigger_type\": \"pre-calculated\",\n \"snapshot_plan\":\n {\n \"printer_volume\": self.overridable_printer_profile_settings[\"volume\"],\n \"current_plan_index\": self.current_snapshot_plan_index,\n \"current_file_line\": self._current_file_line,\n }\n }\n self._state_changed_callback(change_dict)\n\n if self.is_realtime:\n send_real_time_change_message()\n else:\n send_pre_calculated_change_message()\n\n self._last_state_changed_message_time = time.time()\n\n except Exception as e:\n # no need to re-raise, callbacks won't be notified, however.\n logger.exception(\"Failed to send state change message.\")\n\n def _is_trigger_waiting(self):\n # make sure we're in a state that could want to check for triggers\n if not self._state == TimelapseState.WaitingForTrigger:\n return None\n # Loop through all of the active currentTriggers\n waiting_trigger = self._triggers.get_first_waiting()\n if waiting_trigger is not None:\n return True\n return False\n\n def _on_trigger_snapshot_complete(self, snapshot_payload):\n if self._snapshot_complete_callback is not None:\n payload = {\n \"success\": snapshot_payload[\"success\"],\n \"error\": snapshot_payload[\"error\"],\n \"snapshot_count\": self._capture_snapshot.SnapshotsTotal,\n \"snapshot_failed_count\": self._capture_snapshot.ErrorsTotal,\n \"snapshot_payload\": snapshot_payload[\"snapshot_payload\"],\n }\n\n snapshot_complete_callback_thread = threading.Thread(\n target=self._snapshot_complete_callback, args=[payload]\n )\n snapshot_complete_callback_thread.daemon = True\n snapshot_complete_callback_thread.start()\n\n def _render_timelapse(self, print_end_state):\n if self.was_started:\n # If we are still taking snapshots, wait for them all to finish\n if self.get_is_taking_snapshot():\n logger.info(\"Snapshot jobs are running, waiting for them to finish before rendering.\")\n self._snapshot_task_queue.join()\n logger.info(\"Snapshot jobs queue has completed, starting to render.\")\n # todo: update print job info\n self._current_job_info.PrintEndTime = time.time()\n self._current_job_info.PrintEndState = print_end_state\n self._current_job_info.save(self._temporary_folder)\n for camera in self._settings.profiles.active_cameras():\n self._on_rendering_start_callback(self._current_job_info.JobGuid, camera.guid, self._temporary_folder)\n\n def _reset(self):\n self._state = TimelapseState.Idle\n self._current_file_line = 0\n if self._triggers is not None:\n self._triggers.reset()\n self.CommandIndex = -1\n\n self._last_state_changed_message_time = 0\n self._current_job_info = None\n self._snapshotGcodes = None\n self._positionRequestAttempts = 0\n self._test_mode_enabled = False\n self._position_request_sent = False\n\n # A list of callbacks who want to be informed when a timelapse ends\n self._timelapse_stop_requested = False\n self._snapshot_success = False\n self._snapshotError = \"\"\n self._has_been_stopped = False\n self._current_profiles = {\n \"printer\": \"\",\n 
\"stabilization\": \"\",\n \"trigger\": \"\",\n \"snapshot\": \"\",\n \"rendering\": \"\",\n \"camera\": \"\",\n \"logging_profile\": \"\"\n }\n # fetch position private variables\n self._position_payload = None\n self._position_signal.set()\n self._current_job_info = None\n self.was_started = False\n\n def _reset_snapshot(self):\n self._state = TimelapseState.WaitingForTrigger\n self.CommandIndex = -1\n self._snapshotGcodes = None\n self._positionRequestAttempts = 0\n self._snapshot_success = False\n self._snapshotError = \"\"\n\n\nclass TimelapseState(object):\n Idle = 1\n Initializing = 2\n WaitingForTrigger = 3\n AcquiringLocation = 4\n TakingSnapshot = 5\n WaitingToRender = 6\n WaitingToEndTimelapse = 7\n Cancelling = 8\n","repo_name":"FormerLurker/Octolapse","sub_path":"octoprint_octolapse/timelapse.py","file_name":"timelapse.py","file_ext":"py","file_size_in_byte":62498,"program_lang":"python","lang":"en","doc_type":"code","stars":605,"dataset":"github-code","pt":"53"} +{"seq_id":"1495996467","text":"from twilio.rest import Client\n\naccountSID = 'AC98e1847a66e3bb97170a435e3018d01f'\n\nauthToken = 'c924efa1f9b16fc3c5ea7460a05f29c6'\n\nclient = Client(accountSID,authToken)\n\nTwilioNumber = \"+12058902866\"\n\nmycellphone = \"+17138165159\"\n\ntextmessage = client.messages.create(to=mycellphone,from_=TwilioNumber,body=\"Hello World!\")\n\nprint(textmessage.status)\n\n#make a phone call\n\ncall = client.calls.create(url=\"http://demo.twilio.com/docs/voice.xml\",to=mycellphone,from_=TwilioNumber)\n","repo_name":"AydinHalimi1/Twilio","sub_path":"texting1.py","file_name":"texting1.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27782504199","text":"from collections import deque\n\nclass Node:\n def __init__(self, val):\n self.left = None\n self.right = None\n self.val = val\n\nroot = Node(44)\n\nroot.left = Node(17)\nroot.left.left = Node(8)\nroot.left.right = Node(32)\nroot.left.right.left = Node(28)\nroot.left.right.left.right = Node(29)\n\nroot.right = Node(88)\nroot.right.left = Node(65)\nroot.right.left.left = Node(54)\nroot.right.left.right = Node(82)\nroot.right.left.right.left = Node(76)\nroot.right.left.right.left.left = Node(68)\nroot.right.left.right.left.right = Node(80)\nroot.right.right = Node(97)\nroot.right.right.left = Node(93)\n\n#return as 2d array\n\ndef bfs(root):\n\n result = []\n\n if root is None: result\n\n #make queue\n queue = deque()\n\n #add root to queue\n queue.append(root)\n flip = False\n\n while queue:\n count = len(queue)\n temp = []\n\n # needed for 2D array\n for _ in range(count):\n\n node = queue.popleft()\n\n temp.append(node.val)\n\n if node.left:\n queue.append(node.left)\n\n if node.right:\n queue.append(node.right)\n\n if flip:\n temp.reverse()\n result.append(temp)\n\n flip = not flip\n\n return result\n\n\nprint(bfs(root))","repo_name":"n-gibs/dsa","sub_path":"trees/class/zigzag_level_order.py","file_name":"zigzag_level_order.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34269670536","text":"import unittest\nimport numpy as np\nfrom lwpc.model import PerspectiveCameraModel\n\n\nclass TestPerspectiveCameraModel(unittest.TestCase):\n def create_world_points(self):\n return np.array(\n [\n [-0.1, -0.1, 1.0],\n [-0.1, 0.0, 1.0],\n [-0.1, 0.1, 1.0],\n [0.0, -0.1, 1.0],\n [0.0, 0.0, 1.0],\n [0.0, 0.1, 1.0],\n [0.1, -0.1, 1.0],\n [0.1, 
0.0, 1.0],\n [0.1, 0.1, 1.0],\n ]\n )\n\n def create_image_points(self):\n return np.array(\n [\n [490.0, 362.0],\n [490.0, 512.0],\n [490.0, 662.0],\n [640.0, 362.0],\n [640.0, 512.0],\n [640.0, 662.0],\n [790.0, 362.0],\n [790.0, 512.0],\n [790.0, 662.0],\n ]\n )\n\n def create_intrinsics(self):\n focal_len = 0.015\n pixel_len = 10e-6\n fpr = focal_len / pixel_len\n return np.array([[fpr, 0.0, 640.0], [0.0, fpr, 512.0], [0.0, 0.0, 1.0]])\n\n def test_project_world_to_image(self):\n intrinsics = self.create_intrinsics()\n model = PerspectiveCameraModel(intrinsics)\n\n points = self.create_world_points()\n camera_pose = np.eye(4)\n\n x = model.project_to_image(camera_pose=camera_pose, points=points)\n\n expected = self.create_image_points()\n self.assertTrue(np.allclose(x, expected))\n\n def test_project_image_to_world(self):\n intrinsics = self.create_intrinsics()\n model = PerspectiveCameraModel(intrinsics)\n\n points = self.create_image_points()\n camera_pose = np.eye(4)\n\n distances = np.linalg.norm(self.create_world_points(), axis=1)\n x = model.project_to_world(\n camera_pose=camera_pose, points=points, distances=distances\n )\n\n expected = self.create_world_points()\n self.assertTrue(np.allclose(x, expected, atol=0.01))\n","repo_name":"troiwill/lwpc","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"894256915","text":"# ! pip install spanish_sentiment_analysis\n\nfrom classifier import * \n\nimport pandas as pd\nimport streamlit as st\nimport time\n\nnlp = SentimentClassifier()\n\ndef main():\n\n st.title('Coke.ai')\n st.title('Análisis de sentimiento spanish-sentiment-analysis')\n\n # text = st.text_input(\"Expresión:\")\n write_here = \"Texto aqui...\"\n text = st.text_area(\"Incluya un texto ..\", write_here)\n if st.button(\"Analizar\"):\n if text != write_here:\n label, score = sentimiento(text)\n st.success('Sentimiento de ['+ text + ']')\n st.success(label)\n st.success('%.2f' % score) \n else:\n st.error(\"Ingresa un texto y presiona el boton Analizar ..\")\n else:\n st.info(\n \"Ingresa un texto y presiona el boton Analizar ..\"\n )\n\n uploaded_file = st.file_uploader(\"O bien puede seleccionar un archivo CSV para procesar (se procesará columna 'text')\",type=['csv'])\n if uploaded_file is not None:\n data = read_df(uploaded_file)\n data[data['text'].str.strip().astype(bool)]\n data['text'] = data['text'].astype(str)\n total_reg = len(data)\n total_reg_toproc = st.slider('indique cuantos registros quiere procesar (Tipicamente se pueden procesar 3500-4000 registros sin problema)', 1, total_reg, total_reg, 100)\n data.drop(data.tail(total_reg-total_reg_toproc).index,inplace = True)\n if st.button(\"Procesar Archivo CSV\"): \n #pd.read_parquet(\"penguin-dataset.parquet\")\n #data.to_parquet(\"penguin-dataset.parquet\")\n st.success(\"Procesando CSV ..\")\n t0 = time.time()\n msg = f\"Espere por favor, esto puede tomar algun tiempo .. procesando {total_reg:.0f} elementos\" if total_reg>1000 else f\"Espere .. 
procesando {total_reg:.0f} registros\"\n with st.spinner(msg):\n g = lambda x: pd.Series(sentimiento(x.text))\n data[['label', 'score']] = data.apply(g, axis=1) \n csv = convert_df(data)\n st.success(f'{total_reg:.0f} registros procesados con éxito en {time.time() - t0:.0f} seg')\n if st.download_button(label=\"Presione para descargar archivo procesado\", data=csv, file_name='_cokeai_results.csv', mime='text/csv'):\n st.success(\"Descargado con éxito ..\")\n st.stop()\n uploaded_file = None\n else:\n st.error(\"Aun no se ha procesado el archivo..\")\n else:\n st.info(\"Aun no se ha procesado el archivo ..\")\n\n@st.cache\ndef sentimiento(text):\n #try:\n conditions = {\n 1: 'Muy Malo',\n 2: 'Malo',\n 3: 'Neutro',\n 4: 'Bueno',\n 5: 'Muy bueno'\n }\n result = nlp.predict(text)\n label = conditions[CheckForLess([0.1, 0.2, 0.5, 0.8, 1],result)]\n return label, round(result,4)\n #except:\n # return \"_Error\", -1\n\n@st.cache\ndef CheckForLess(list1, val): \n \n # traverse in the list\n i=1\n for x in list1: \n if val <= x: \n return i\n else:\n i=i+1\n return False\n\n@st.cache\ndef convert_df(df):\n # Cache the conversion to prevent computation on every rerun\n return df.to_csv().encode('utf-8')\n\n@st.cache(allow_output_mutation=True)\ndef read_df(csv_file):\n # Cache the conversion to prevent computation on every rerun\n return pd.read_csv(csv_file,usecols=[\"text\"])\n\n\nif __name__ == '__main__':\n main() ","repo_name":"rdpulgar/stProject3","sub_path":"spanish_sentiment_analysys.py","file_name":"spanish_sentiment_analysys.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33285937541","text":"# n개의 정수로 이루어진 임의의 수열이 주어진다. \n# 우리는 이 중 연속된 몇 개의 수를 선택해서 구할 수 있는 합 중 가장 큰 합을 구하려고 한다. \n# 단, 수는 한 개 이상 선택해야 한다.\n\n# 예를 들어서 10, -4, 3, 1, 5, 6, -35, 12, 21, -1 이라는 수열이 주어졌다고 하자. 
\n# 여기서 정답은 12+21인 33이 정답이 된다.\n\n# 가장 큰 연속 수열\n# 음수 섞여도 괜찮은 경우 있음.\n\n# 10 -1 2 4\n# 전부 합하는게 이득\n\n\n\nn = int(input())\n\narr = list(map(int, input().split()))\n\n\ns = [arr[0]]\n\nfor i in range(len(arr) - 1):\n s.append(max(s[i] + arr[i+1], arr[i+1]))\nprint(max(s))\n\n\n[[0] * n for _ in range(m)]\n","repo_name":"chulhee23/today_ps","sub_path":"BOJ/01000-04999/1912.py","file_name":"1912.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20160184862","text":"import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1' # -1:cpu, 0:first gpu\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport datetime\nimport random\nimport gym\nimport pylab\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorboardX import SummaryWriter\n#tf.config.experimental_run_functions_eagerly(True) # used for debuging and development\ntf.compat.v1.disable_eager_execution() # usually using this for fastest performance\nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.optimizers import Adam, RMSprop, Adagrad, Adadelta\nfrom tensorflow.keras import backend as K\nimport copy\n\nfrom agents import ContinuousActorModel, DiscreteActorModel, CriticModel\n\nfrom threading import Thread, Lock\nfrom multiprocessing import Process, Pipe\nimport time\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif len(gpus) > 0:\n print(f'GPUs {gpus}')\n try: tf.config.experimental.set_memory_growth(gpus[0], True)\n except RuntimeError: pass\n\n\nclass RLxEvolution:\n #An algorithm combining evolutionary methods with RL\n def __init__(self, env_name, population, model_name=\"\"):\n # Initialization\n # Environment and PPO parameters\n self.env_name = env_name \n self.env = gym.make(env_name)\n # Parameters for evolution\n self.generation = 0\n self.GENERATION = 200 #max generations\n self.population = population\n self.holdout = max(1, int(0.1 * self.population))\n\n print(hasattr(self.env.action_space, 'n'))\n if hasattr(self.env.action_space, 'n'):\n self.discrete = True\n self.action_size = self.env.action_space.n\n else:\n self.discrete = False\n self.action_size = self.env.action_space.shape[0]\n self.state_size = self.env.observation_space.shape\n self.EPISODES = 100 # total episodes to train through all environments\n self.episode = 0 # used to track the episodes total count of episodes played through all thread environments\n self.max_average = 0 # when average score is above 0 model will be saved\n self.lr = 0.0003\n self.epochs = 20 # training epochs\n self.shuffle = True\n self.Training_batch = 100\n #self.optimizer = RMSprop\n self.optimizer = Adam\n\n self.replay_count = 0\n self.writer = SummaryWriter(comment=\"_\"+self.env_name+\"_\"+self.optimizer.__name__+\"_\"+str(self.lr))\n \n # Instantiate plot memory\n self.scores_, self.episodes_, self.average_ = [], [], [] # used in matplotlib plots\n # Create initial population \n self.actors = self.create_initial_population(self.population)\n # Just the one critic for now\n self.Critic = CriticModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\n \n self.Actor_name = f\"{self.env_name}_PPO_Actor.h5\"\n self.Critic_name = f\"{self.env_name}_PPO_Critic.h5\"\n #self.load() # uncomment to continue training from old weights\n\n # do not change bellow\n self.log_std = -0.5 * np.ones(self.action_size, dtype=np.float32)\n 
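# --- Illustrative aside, not part of the original record ---
# The fixed log_std initialized above gives the continuous actor a state-independent
# diagonal Gaussian policy. A minimal standalone sketch (hypothetical names mu/log_std,
# assuming only numpy) of sampling an action and evaluating its log-likelihood under
# such a policy, mirroring the gaussian_likelihood helper defined later in this class:
import numpy as np

def sample_and_logp(mu, log_std, rng=np.random.default_rng(0)):
    # mu: (action_dim,) mean from the actor network; log_std: (action_dim,) fixed log std-dev
    std = np.exp(log_std)
    action = mu + std * rng.standard_normal(mu.shape)  # draw one action
    # per-dimension Gaussian log-density, summed over the action dimensions
    logp = -0.5 * (((action - mu) / (std + 1e-8)) ** 2 + 2 * log_std + np.log(2 * np.pi))
    return action, logp.sum()

# usage: a 4-dimensional action space with log_std = -0.5, as in the constructor above
action, logp = sample_and_logp(np.zeros(4), -0.5 * np.ones(4))
# --- end aside ---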
self.std = np.exp(self.log_std)\n \n\n def create_initial_population(self, population=10):\n actors = []\n for i in range(0, population):\n if self.discrete:\n Actor = DiscreteActorModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\n else:\n Actor = ContinuousActorModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\n actors.append(Actor)\n return actors\n\n\n def gaussian_likelihood(self, action, pred, log_std):\n # https://github.com/hill-a/stable-baselines/blob/master/stable_baselines/sac/policies.py\n pre_sum = -0.5 * (((action-pred)/(np.exp(log_std)+1e-8))**2 + 2*log_std + np.log(2*np.pi)) \n return np.sum(pre_sum, axis=1)\n\n def discount_rewards(self, reward):#gaes is better\n # Compute the gamma-discounted rewards over an episode\n # We apply the discount and normalize it to avoid big variability of rewards\n gamma = 0.99 # discount rate\n running_add = 0\n discounted_r = np.zeros_like(reward)\n for i in reversed(range(0,len(reward))):\n running_add = running_add * gamma + reward[i]\n discounted_r[i] = running_add\n\n discounted_r -= np.mean(discounted_r) # normalizing the result\n discounted_r /= (np.std(discounted_r) + 1e-8) # divide by standard deviation\n return discounted_r\n\n def get_gaes(self, rewards, dones, values, next_values, gamma = 0.98, lamda = 0.95, normalize=True):\n deltas = [r + gamma * (1 - d) * nv - v for r, d, nv, v in zip(rewards, dones, next_values, values)]\n deltas = np.stack(deltas)\n gaes = copy.deepcopy(deltas)\n for t in reversed(range(len(deltas) - 1)):\n gaes[t] = gaes[t] + (1 - dones[t]) * gamma * lamda * gaes[t + 1]\n\n target = gaes + values\n if normalize:\n gaes = (gaes - gaes.mean()) / (gaes.std() + 1e-8)\n return np.vstack(gaes), np.vstack(target)\n\n def replay(self, states, actor, actions, rewards, dones, next_states, logp_ts):\n # reshape memory to appropriate shape for training\n states = np.vstack(states)\n next_states = np.vstack(next_states)\n actions = np.vstack(actions)\n logp_ts = np.vstack(logp_ts)\n \n # Get Critic network predictions \n values = self.Critic.predict(states)\n next_values = self.Critic.predict(next_states)\n\n # Compute discounted rewards and advantages\n #discounted_r = self.discount_rewards(rewards)\n #advantages = np.vstack(discounted_r - values)\n advantages, target = self.get_gaes(rewards, dones, np.squeeze(values), np.squeeze(next_values))\n\n # stack everything to numpy array\n # pack all advantages, predictions and actions to y_true and when they are received\n # in custom loss function we unpack it\n y_true = np.hstack([advantages, actions, logp_ts])\n #print(y_true)\n # training Actor and Critic networks\n a_loss = actor.Actor.fit(states, y_true, epochs=self.epochs, verbose=0, shuffle=self.shuffle)\n c_loss = self.Critic.Critic.fit([states, values], target, epochs=self.epochs, verbose=0, shuffle=self.shuffle)\n\n # calculate loss parameters (should be done in loss, but couldn't find working way how to do that with disabled eager execution)\n if not self.discrete:\n pred = actor.Actor.predict(states)\n log_std = -0.5 * np.ones(self.action_size, dtype=np.float32)\n logp = self.gaussian_likelihood(actions, pred, log_std)\n approx_kl = np.mean(logp_ts - logp)\n approx_ent = np.mean(-logp)\n\n self.writer.add_scalar('Data/actor_loss_per_replay', np.sum(a_loss.history['loss']), self.replay_count)\n self.writer.add_scalar('Data/critic_loss_per_replay', np.sum(c_loss.history['loss']), 
self.replay_count)\n if not self.discrete:\n self.writer.add_scalar('Data/approx_kl_per_replay', approx_kl, self.replay_count)\n self.writer.add_scalar('Data/approx_ent_per_replay', approx_ent, self.replay_count)\n self.replay_count += 1\n \n\n def create_new_weights(self, Model1, Model2):\n \"\"\" if self.discrete:\n child = DiscreteActorModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\n else:\n print('continuous')\n child = ContinuousActorModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\"\"\"\n new_weights = []\n for i in range(len(Model1.Actor.layers)):\n if len(Model1.Actor.layers[i].get_weights()) > 0:\n\n weights1 = Model1.Actor.layers[i].get_weights()[0]\n weights2 = Model2.Actor.layers[i].get_weights()[0]\n bias1 = Model1.Actor.layers[i].get_weights()[1]\n bias2 = Model2.Actor.layers[i].get_weights()[1] \n #weights_pass = np.random.rand(1, weights1.shape[1]) < 0.5 \n weights_pass = np.random.randint(2, size=weights1.shape)\n bias_pass = np.random.randint(2, size=bias1.shape)\n #new_weights.append(weights_pass * weights1 + ~weights_pass * weights2)\n new_weights.append(weights1)\n new_weights.append(bias1)\n\n # new_bias = bias_pass * bias1 + ~bias_pass * bias2\n # child.Actor.layers[i].set_weights([new_weights, bias1]) \n #child.mutate()\n return new_weights\n \n\n def generate_child(self, new_weights, mutate):\n if self.discrete:\n child = DiscreteActorModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\n else:\n print('continuous')\n child = ContinuousActorModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\n child.Actor.set_weights(new_weights)\n if mutate > 1:\n child.mutate()\n return child\n \n def load(self):\n self.Actor.Actor.load_weights(self.Actor_name)\n self.Critic.Critic.load_weights(self.Critic_name)\n\n def save(self):\n self.Actor.Actor.save_weights(self.Actor_name)\n self.Critic.Critic.save_weights(self.Critic_name)\n\n pylab.figure(figsize=(18, 9))\n pylab.subplots_adjust(left=0.05, right=0.98, top=0.96, bottom=0.06)\n\n \n def PlotModel(self, score, episode, save=True):\n self.scores_.append(score)\n self.episodes_.append(episode)\n self.average_.append(sum(self.scores_[-50:]) / len(self.scores_[-50:]))\n if str(episode)[-2:] == \"00\":# much faster than episode % 100\n pylab.plot(self.episodes_, self.scores_, 'b')\n pylab.plot(self.episodes_, self.average_, 'r')\n pylab.ylabel('Score', fontsize=18)\n pylab.xlabel('Steps', fontsize=18)\n try:\n pylab.grid(True)\n pylab.savefig(self.env_name+\".png\")\n except OSError:\n pass\n # saving best models\n if self.average_[-1] >= self.max_average and save:\n self.max_average = self.average_[-1]\n self.save()\n SAVING = \"SAVING\"\n # decreaate learning rate every saved model\n #self.lr *= 0.99\n #K.set_value(self.Actor.Actor.optimizer.learning_rate, self.lr)\n #K.set_value(self.Critic.Critic.optimizer.learning_rate, self.lr)\n else:\n SAVING = \"\"\n\n return self.average_[-1], SAVING\n \n\n def run_batch(self, actor): \n self.episode = 0\n state = self.env.reset()\n state = np.reshape(state, [1, self.state_size[0]])\n done, score, SAVING = False, 0, ''\n while True:\n # Instantiate or reset games memory\n states, next_states, actions, rewards, predictions, dones = [], [], [], [], [], []\n for t in range(self.Training_batch):\n #self.env.render()\n # Actor picks an action\n if self.discrete:\n 
action, action_onehot, prediction = actor.act(state)\n next_state, reward, done, _ = self.env.step(action)\n else:\n action, prediction = actor.act(state)\n next_state, reward, done, _ = self.env.step(action[0])\n\n # Memorize (state, action, reward) for training\n states.append(state)\n next_states.append(np.reshape(next_state, [1, self.state_size[0]]))\n \n if self.discrete: \n actions.append(action_onehot)\n else:\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n predictions.append(prediction)\n # Update current state\n state = np.reshape(next_state, [1, self.state_size[0]])\n score += reward\n if done:\n self.episode += 1\n #average, SAVING = self.PlotModel(score, self.episode)\n if self.episode%10==0:\n #print(\"episode: {}/{}, score: {}, average: {:.2f} {}\".format(self.episode, self.EPISODES, score, average, SAVING))\n print(f\"episode: {self.episode}/{self.EPISODES}, score={score}\")\n self.writer.add_scalar(f'Workers:{1}/score_per_episode', score, self.episode)\n self.writer.add_scalar(f'Workers:{1}/learning_rate', self.lr, self.episode)\n\n state, done, score, SAVING = self.env.reset(), False, 0, ''\n state = np.reshape(state, [1, self.state_size[0]])\n self.replay(states, actor, actions, rewards, dones, next_states, predictions)\n if self.episode >= self.EPISODES:\n #self.replay(states, actor, actions, rewards, dones, next_states, predictions)\n break\n \n #self.env.close()\n return score\n\n \n\n def run_evolution(self):\n print(len(self.actors))\n scores_df = pd.DataFrame(columns=['generation','best_score', 'best_actor'])\n while self.generation < self.GENERATION:\n scores = []\n print(f\"generation: {self.generation}\")\n for actor in self.actors: \n self.run_batch(actor)\n score = self.evaluate(actor, test_episodes=3)\n scores.append(score)\n print('testing')\n print(scores)\n print(np.argsort(scores)[::-1])\n print([x for x in np.argsort(scores)[::-1]])\n self.actors = [self.actors[x] for x in np.argsort(scores)[::-1]]\n \n # best_organism = self.actors[0]\n best_score = max(scores)\n best_actor_idx = np.argsort(scores)[::-1][0]\n scores_df.loc[self.generation] = [self.generation, best_score, best_actor_idx] \n scores_df.to_csv('results.csv')\n print('best score: ', best_score)\n new_population = []\n\n #new_population.append(self.actors[0])\n #tf.keras.backend.clear_session()\n # new_population.append(self.actors[1])\n new_weights_list = []\n new_weights_list.append(self.actors[0].Actor.get_weights())\n for i in range(self.population-1):\n parent_1_idx = 0\n #parent_2_idx = min(self.population - 1, int(np.random.exponential(self.holdout)))\n parent_2_idx = 1\n print(f'parents: {parent_1_idx}, {parent_2_idx}')\n new_weights = self.create_new_weights(self.actors[parent_1_idx], self.actors[parent_2_idx])\n new_weights_list.append(new_weights)\n #new_population.append(offspring)\n \n critic_weights = self.Critic.Critic.get_weights()\n\n tf.keras.backend.clear_session()\n del self.Critic\n self.Critic = CriticModel(input_shape=self.state_size, action_space = self.action_size, lr=self.lr, optimizer = self.optimizer)\n self.Critic.Critic.set_weights(critic_weights)\n\n # Create new population and set weights\n for i in range(0, len(new_weights_list)):\n print('child: ', i)\n child = self.generate_child(new_weights_list[i], i)\n new_population.append(child)\n self.generation +=1\n self.actors = new_population\n \n\n\n def evaluate(self, actor, test_episodes = 100):#evaluate\n score = 0\n for e in range(test_episodes):\n state = self.env.reset()\n state = 
np.reshape(state, [1, self.state_size[0]])\n done = False\n \n while not done:\n self.env.render()\n # Actor picks an action\n action = actor.act(state)[0]\n # Actor picks an action\n if self.discrete:\n action = actor.act(state)[0]\n state, reward, done, _ = self.env.step(action)\n else:\n action = actor.act(state)[0]\n state, reward, done, _ = self.env.step(action[0])\n\n state = np.reshape(state, [1, self.state_size[0]])\n score += reward\n if done:\n #average, SAVING = self.PlotModel(score, e, save=False)\n print(\"evaluation episode: {}/{}, score: {}\".format(e, test_episodes, score))\n break\n #self.env.close()\n print('total score: ', score)\n return score\n \n\nif __name__ == \"__main__\":\n # newest gym fixed bugs in 'BipedalWalker-v2' and now it's called 'BipedalWalker-v3'\n print('yay')\n env_name = 'BipedalWalker-v3'\n #env_name = \"CartPole-v1\"\n #env_name = 'LunarLanderContinuous-v2'\n agent = RLxEvolution(env_name, 10)\n agent.run_evolution() # train as PPO\n #agent.run_multiprocesses(num_worker = 16) # train PPO multiprocessed (fastest)\n #agent.evaluate(agent.actors[0], 10)","repo_name":"rhys-gardener/RLxEvolution","sub_path":"Archive/rlxevolution.py","file_name":"rlxevolution.py","file_ext":"py","file_size_in_byte":17557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"354091465","text":"\"\"\" This file contains figures related to how far the states need to be,\nwhich is shown by Wasserestein distance. \"\"\"\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.metrics import rand_score\n\nfrom .common import (\n getSetup,\n subplotLabel,\n commonAnalyze,\n pi,\n T,\n E2,\n max_desired_num_cells,\n num_data_points,\n)\nfrom ..LineageTree import LineageTree\nfrom ..states.StateDistributionGaPhs import StateDistribution\n\n\ndef makeFigure():\n \"\"\"\n Makes fig 6.\n \"\"\"\n\n # Get list of axis objects\n ax, f = getSetup((12, 5), (1, 3))\n figureMaker5(ax, *accuracy())\n\n subplotLabel(ax)\n\n return f\n\n\ndef accuracy():\n \"\"\"A Helper function to create more random copies of a population.\"\"\"\n # Creating a list of populations to analyze over\n list_of_Es = [\n [\n StateDistribution(\n E2[1].params[0],\n E2[1].params[1],\n E2[1].params[2],\n a,\n E2[1].params[4],\n E2[1].params[5],\n ),\n E2[1],\n ]\n for a in np.linspace(4.0, 20.0, num_data_points)\n ]\n list_of_populations = [\n [LineageTree.rand_init(pi, T, E, max_desired_num_cells)] for E in list_of_Es\n ]\n # for the violin plots\n list_of_Es2 = [\n [\n StateDistribution(\n E2[1].params[0],\n E2[1].params[1],\n E2[1].params[2],\n a,\n E2[1].params[4],\n E2[1].params[5],\n ),\n E2[1],\n ]\n for a in np.linspace(4.0, 20.0, num_data_points)\n ]\n list_of_populations2 = [\n [LineageTree.rand_init(pi, T, E, 3 * max_desired_num_cells)]\n for E in list_of_Es2\n ]\n\n balanced_score = np.empty(len(list_of_populations))\n\n for ii, pop in enumerate(list_of_populations):\n ravel_true_states = np.array(\n [cell.state for lineage in pop for cell in lineage.output_lineage]\n )\n all_cells = np.array(\n [cell.obs[2] for lineage in pop for cell in lineage.output_lineage]\n )\n\n shape, scale1, scale2 = (\n list_of_Es[ii][0].params[2],\n list_of_Es[ii][0].params[3],\n list_of_Es[ii][1].params[3],\n )\n thresh = classification_threshold(shape, scale1, scale2)\n pred_st = np.zeros(all_cells.shape)\n for j, obs in enumerate(all_cells):\n if obs <= thresh:\n pred_st[j] = 1\n balanced_score[ii] = 100 * rand_score(ravel_true_states, 
pred_st)\n\n # replace x with 1-x if the accuracy is less than 50%\n balanced_score[balanced_score < 50.0] = (\n 100.0 - balanced_score[balanced_score < 50.0]\n )\n\n wass, _, dict_out, _ = commonAnalyze(\n list_of_populations,\n 2,\n xtype=\"wass\",\n list_of_fpi=[pi] * num_data_points,\n list_of_fT=[T] * num_data_points,\n parallel=True,\n )\n accuracy = dict_out[\"state_similarity\"]\n distribution_df = pd.DataFrame(\n columns=[\"Distribution Similarity\", \"G1 lifetime\", \"State\"]\n )\n lineages = [\n list_of_populations2[int(num_data_points * i / 4.0)][0] for i in range(4)\n ]\n len_lineages = [len(lineage) for lineage in lineages]\n distribution_df[\"G1 lifetime\"] = [\n (cell.obs[1] + cell.obs[2])\n for lineage in lineages\n for cell in lineage.output_lineage\n ]\n distribution_df[\"State\"] = [\n \"State 1\" if cell.state == 0 else \"State 2\"\n for lineage in lineages\n for cell in lineage.output_lineage\n ]\n distribution_df[\"Distribution Similarity\"] = (\n len_lineages[0] * [\"Same\\n\" + str(0) + \"-\" + str(wass[-1] / 4)]\n + len_lineages[1] * [\"Similar\\n\" + str(wass[-1] / 4) + \"-\" + str(wass[-1] / 2)]\n + len_lineages[2]\n * [\"Different\\n\" + str(wass[-1] / 2) + \"-\" + str(wass[-1] * 0.75)]\n + len_lineages[3] * [\"Distinct\\n>\" + str(wass[-1] * 0.75)]\n )\n\n # for the violin plot (distributions)\n wasser_df = pd.DataFrame(\n columns=[\"Wasserstein distance\", \"Adjusted Rand Index Accuracy\"]\n )\n wasser_df[\"Wasserstein distance\"] = wass\n wasser_df[\"Adjusted Rand Index Accuracy\"] = accuracy\n wasser_df[\"Baseline Accuracy\"] = balanced_score\n return distribution_df, wasser_df\n\n\ndef figureMaker5(ax, distribution_df, wasser_df):\n \"\"\"\n This makes figure 5.\n \"\"\"\n # cartoon to show different shapes --> similar shapes\n i = 0\n ax[i].axis(\"off\")\n ax[i].set_title(\"state difference\")\n\n i += 1\n\n sns.violinplot(\n x=\"G1 lifetime\",\n y=\"Distribution Similarity\",\n hue=\"State\",\n palette={\"State 1\": \"b\", \"State 2\": \"g\"},\n split=True,\n data=distribution_df,\n ax=ax[i],\n )\n\n i += 1\n # state accuracy\n sns.regplot(\n x=\"Wasserstein distance\",\n y=\"Adjusted Rand Index Accuracy\",\n data=wasser_df,\n label=\"tHMM\",\n ax=ax[i],\n lowess=True,\n marker=\"+\",\n )\n sns.regplot(\n x=\"Wasserstein distance\",\n y=\"Baseline Accuracy\",\n data=wasser_df,\n ax=ax[i],\n label=\"Best threshold\",\n lowess=True,\n marker=\"+\",\n )\n ax[i].set_title(\"State Assignment Accuracy\")\n ax[i].set_ylabel(\"Adjusted Rand Index Accuracy [%]\")\n ax[i].set_ylim(bottom=10.0, top=101)\n ax[i].legend()\n\n\ndef classification_threshold(shape, scale1, scale2):\n \"\"\"Given the parameters of the gamma distribution, it provides an analytical threshold for classification.\n This function is specific to this figure, as the shape parameter is shared and only the scale varies.\n \"\"\"\n if scale1 == scale2:\n return shape * scale1\n else:\n numer = shape * np.log(scale2 / scale1)\n denom = (1 / scale1) - (1 / scale2)\n return numer / denom\n","repo_name":"meyer-lab/tHMM","sub_path":"lineage/figures/figure6.py","file_name":"figure6.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"8884059945","text":"import setuptools\nimport versioneer\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"airkupofrod\", # Replace with your own username\n version=versioneer.get_version(),\n author=\"Reuben 
Thomas-Davis\",\n author_email=\"reuben@rekon.uk\",\n description=\"Takes a deployment in your kubernetes cluster and turns its pod template into a KubernetesPodOperator \"\n \"object.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/rekon-oss/airkupofrod\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"helm-charts\", \"airflow-image\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\n 'apache-airflow[kubernetes]>=1.10',\n ],\n cmdclass=versioneer.get_cmdclass()\n)\n","repo_name":"Rested/airkupofrod","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71552180007","text":"def merge(arr1,arr2):\r\n index1=0\r\n index2=0\r\n index=0\r\n brr=[0]*(len(arr1)+len(arr2))\r\n while index1arr2[index2]:\r\n brr[index]=arr2[index2]\r\n index+=1\r\n index2+=1\r\n elif arr1[index1] 12:\n mon -= 12\n yrs += 1\n if mon < 10:\n mon = \"0\" + str(mon)\n return [str(yrs), tick, str(mon)+\"E\"]\n\n\ndef get_next_year(index):\n \"\"\"\n Get next years index key\n \"\"\"\n return [str(int(index[0])+1), index[1], str(index[2]).replace(\"E\", \"\")+\"E\"]\n\n\ndef get_ticker_info(ticks, table, idx=None):\n \"\"\"\n Grabbing info for a list of given tickers from the db\n \"\"\"\n # t0 = time.time()\n with DBHelper() as dbh:\n dbh.connect()\n lis = ''\n for tick in ticks:\n lis += \"'\" + tick + \"', \"\n df_ret = dbh.select(table, where='tick in (' + lis[:-2] + ')')\n\n # t1 = time.time()\n # print(\"Done Retrieving data, took {0} seconds\".format(t1-t0))\n if idx:\n return df_ret.set_index(idx)\n return df_ret\n\n\ndef data_cum_columns(data):\n \"\"\"\n cumulative columns for intrayear calcs\n \"\"\"\n tick = data.reset_index()['tick'][0]\n try:\n data = data.drop([('TTM', '', tick)])\n except KeyError:\n # no TTM included\n pass\n h1_dat = data.iloc[-4] + data.iloc[-3]\n m9_dat = h1_dat + data.iloc[-2]\n y1_dat = m9_dat + data.iloc[-1]\n data = data.reset_index()\n data.loc[len(data)] = ['H1', tick, ''] + list(h1_dat.values)\n data.loc[len(data)] = ['M9', tick, ''] + list(m9_dat.values)\n data.loc[len(data)] = ['Y1', tick, ''] + list(y1_dat.values)\n data = data.reindex([0, 1, 2, 5, 3, 6, 4, 7])\n data = data.set_index(IDX)\n return data\n\n\ndef remove_empty_cols(data):\n \"\"\"\n remove empty columns from dataframe\n \"\"\"\n for key in data.keys():\n data[key] = data[key].dropna(axis='columns', how='all')\n return data\n\n\ndef replace_needed_cols(data):\n \"\"\"\n replace columns we need with 0s\n \"\"\"\n check_replace = []\n check_replace.append(['bs', 'accounts_payable'])\n check_replace.append(['bs', 'inv'])\n check_replace.append(['cf', 'chg_working_cap'])\n check_replace.append(['cf', 'divs_paid'])\n for ind_check in check_replace:\n if ind_check[1] not in data[ind_check[0]].columns:\n data[ind_check[0]][ind_check[1]] = 0\n return data\n\n\ndef period_chg(data_df):\n \"\"\"\n Calculate the change fom one year to the next\n \"\"\"\n data_df = data_df['is']\n df_y = data_df.reset_index()\n df_y = df_y[df_y.year != 'TTM']\n years = list(df_y['year'] + df_y['month'])\n df_chg = pd.DataFrame(columns=IDX + list(data_df.columns))\n for yrs in years:\n if yrs == min(years):\n last_y = yrs\n continue\n year_df = df_y[(df_y.year == 
yrs[:4]) &\n (df_y.month == yrs[4:])].drop(IDX, axis=1).values\n last_y_df = df_y[(df_y.year == last_y[:4]) &\n (df_y.month == last_y[4:])].drop(IDX, axis=1).values\n yoy = (year_df / last_y_df - 1) * 100\n yoy[abs(yoy) == np.inf] = 0\n where_are_nans = np.isnan(yoy)\n yoy[where_are_nans] = 0\n data = list(df_y[(df_y.year == yrs[:4]) &\n (df_y.month == yrs[4:])].iloc[0][IDX]) + list(yoy[0])\n df_chg.loc[len(df_chg)] = data\n last_y = yrs\n\n # need this to add year over year for single year model\n yoy = (df_y.drop(IDX, axis=1).loc[len(df_y)-1].values\n / df_y.drop(IDX, axis=1).loc[0].values - 1) * 100\n yoy[abs(yoy) == np.inf] = 0\n where_are_nans = np.isnan(yoy)\n yoy[where_are_nans] = 0\n data = ['YoY', data_df.reset_index().tick[0], ''] + list(yoy)\n df_chg.loc[len(df_chg)] = data\n df_chg = df_chg.set_index(IDX)\n return df_chg\n\n\ndef setup_comp_cols(indices):\n \"\"\"\n Sets up the comparison analysis columns\n \"\"\"\n cols = ['ticker', 'cat'] + [i[0] for i in indices]\n cols.insert(7, 'avg_5y')\n return cols\n\n\ndef setup_pdv_cols(per, years_fwd):\n \"\"\"\n Assigns the column values for the peer derived value calc\n \"\"\"\n cols = ['ticker', 'cat', '5y_avg', 'hist_avg_v_weight_avg']\n for yrf in range(1, years_fwd + 1):\n year = int(per[0]) + yrf\n cols += ['fwd_mult_{}'.format(year), 'fwd_mult_v_weight_avg_{}'.format(year),\n 'prem_disc_{}'.format(year), 'pdv_price_{}'.format(year)]\n return cols\n\n\ndef match_px(data, eod_px, tick):\n \"\"\"\n Apply a price to each statement date\n \"\"\"\n dates = data['bs'].reset_index()[['year', 'month']]\n eod_px = eod_px.loc[tick]\n data['ols']['date_px'] = np.nan\n data['ols']['hi_52wk'] = np.nan\n data['ols']['lo_52wk'] = np.nan\n data['ols']['avg_52wk'] = np.nan\n\n for _, vals in dates.iterrows():\n # get the closest price to the data date\n data_date = dt.datetime(int(vals['year']), int(vals['month']), 1)\n yr1_ago = dt.datetime(int(vals['year'])-1, int(vals['month']), 1)\n day = 1\n while True:\n try:\n date = dt.datetime(int(vals['year']), int(vals['month']), day)\n px_val = eod_px.loc[date]['px']\n data['ols'].at[(vals['year'], tick, vals['month']), 'date_px'] = px_val\n break\n except KeyError:\n # holiday or weekend probably\n day += 1\n if day > 10:\n break\n # 52 week high, low, and avg\n date_range = eod_px.loc[yr1_ago: data_date]\n data['ols'].at[(vals['year'], tick, vals['month']), 'hi_52wk'] = date_range['px'].max()\n data['ols'].at[(vals['year'], tick, vals['month']), 'lo_52wk'] = date_range['px'].min()\n data['ols'].at[(vals['year'], tick, vals['month']), 'avg_52wk'] = date_range['px'].mean()\n return data\n\n\ndef get_beta(data, eod_px, ticker, mkt, ind):\n \"\"\"\n Calculate the beta for a given security\n \"\"\"\n window = 52\n # This will get the week end price and do a pct change\n tick = eod_px.loc[ticker].rename(columns={'px': ticker}).groupby(pd.TimeGrouper('W')).nth(0).pct_change()\n # ind = eod_px.loc[ind].rename(columns={'px': ind}).groupby(pd.TimeGrouper('W')).nth(0).pct_change()\n mkt = eod_px.loc[mkt].rename(columns={'px': mkt}).groupby(pd.TimeGrouper('W')).nth(0).pct_change()\n cov_df = pd.merge(tick, mkt, left_index=True, right_index=True).rolling(window, min_periods=1).cov()\n cov_df = cov_df[[cov_df.columns[1]]]\n covariance = cov_df[np.in1d(cov_df.index.get_level_values(1), [ticker])]\n variance_mkt = cov_df[np.in1d(cov_df.index.get_level_values(1), [mkt.columns[0]])]\n beta = (covariance.reset_index().set_index('date')[[mkt.columns[0]]]\n / 
variance_mkt.reset_index().set_index('date')[[mkt.columns[0]]])\n rep_dates = get_report_dates(data)\n data['ols']['beta'] = None\n for ind_dt in rep_dates:\n try:\n val = beta[beta.index < ind_dt].iloc[-1].values[0]\n except IndexError:\n print(\"May not have any dates, assume a beta of 1\")\n val = 1\n data['ols'].at[(str(ind_dt.year), ticker, ind_dt.strftime(\"%m\")), 'beta'] = val\n # data['ols'].at[(str(ind_dt.year), ticker, \"0\" + str(ind_dt.month)), 'beta'] = val\n return data\n\n\ndef get_report_dates(data):\n \"\"\"\n Gets the report dates from the financial statements\n \"\"\"\n dates = []\n for ind, _ in data['bs'].iterrows():\n dates.append(dt.datetime(int(ind[0]), int(ind[2]), 1))\n return dates\n","repo_name":"mccarvik/python_for_finance","sub_path":"research/equity_analysis/res_utils.py","file_name":"res_utils.py","file_ext":"py","file_size_in_byte":25680,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"26676639814","text":"COLOURS = {\n \"blue\": \"#0052CC\",\n \"dark_blue\": \"#172B4D\",\n \"light_blue\": \"#00B8D9\",\n \"white\": \"#FFFFFF\",\n \"grey\": \"#C1C7D0\",\n \"red\": \"#FF5630\",\n \"yellow\": \"#FFAB00\",\n \"green\": \"#36B37E\",\n \"purple\": \"#6554C0\",\n \"black\": \"#091E42\",\n \"light_grey\": \"#EBECF0\",\n}\n\nCORE_COLOURS = [\"light_blue\", \"green\", \"purple\", \"red\", \"yellow\"]\nCORE_COLOURS = [COLOURS[i] for i in CORE_COLOURS]\n\n\ndef format_absolute(num):\n num = float(\"{:.3g}\".format(num))\n magnitude = 0\n while abs(num) >= 1000:\n magnitude += 1\n num /= 1000.0\n return \"{}{}\".format(\n \"{:f}\".format(num).rstrip(\"0\").rstrip(\".\"),\n [\"\", \"K\", \"M\", \"B\", \"T\"][magnitude],\n )\n\n\ndef format_percentage(num):\n return \"{:.1%}\".format(num)\n\n\ndef clean_text(text):\n return text.replace(\"_\", \" \").title()\n\n\ndef ifnone(a, b):\n \"`b` if `a` is None else `a`\"\n return b if a is None else a\n\n\ndef get_month_before(row, df, col):\n try:\n return df.query(f\"\"\"month == {row['month']} and year == {row['year'] - 1}\"\"\")[\n col\n ].item()\n except ValueError as e:\n raise ValueError(\n f\"Monthly data not available for {row['month']: .0f}-{row['year'] - 1: .0f}\"\n )\n\n\ndef get_week_before(row, df, col):\n try:\n return df.query(f\"\"\"week == {row['week']} and year == {row['year'] - 1}\"\"\")[\n col\n ].item()\n except ValueError as e:\n raise ValueError(\n f\"Weekly data not available for {row['week']: .0f}-{row['year'] - 1: .0f}\"\n )\n\n\ndef check_list_type(x):\n return isinstance(x, (list, tuple))\n\n\ndef convert_list_if_not(x):\n return x if check_list_type(x) else [x]\n","repo_name":"felixzhu17/MetricVis","sub_path":"MetricVis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4655377149","text":"import csv\nimport os\nfrom dataclasses import dataclass\nfrom tempfile import mkstemp\n\nfrom quatradis.comparison.essentiality import GeneEssentiality\n\n\n@dataclass\nclass EssentialityInput:\n control_files: list\n condition_files: list\n only_ess_control_files: list\n only_ess_condition_files: list\n\ndef gene_names_from_essentiality_file(filename, verbose=False):\n with open(filename, 'r') as fileh:\n reader = csv.reader(fileh, delimiter=',', quotechar='\"')\n gene_names = [r[1] for r in reader if r[1] != 'gene_name']\n print(\"Number of all genes:\" + str(len(gene_names)))\n\n return gene_names\n\n\ndef 
get_all_gene_names(control_files, condition_files):\n all_gene_names = set()\n\n for filename in condition_files:\n with open(filename, 'r') as fileh:\n reader = csv.reader(fileh, delimiter='\\t', quotechar='\"')\n gene_names1 = [r[1] for r in reader if len(r) > 1 and r[1] != 'gene_name']\n all_gene_names = all_gene_names.union(set(gene_names1))\n\n for f in control_files:\n with open(filename, 'r') as fileh:\n reader = csv.reader(fileh, delimiter='\\t', quotechar='\"')\n gene_names2 = [r[1] for r in reader if len(r) > 1 and r[1] != 'gene_name']\n all_gene_names = all_gene_names.union(set(gene_names2))\n\n return list(all_gene_names)\n\ndef all_gene_essentiality(input: EssentialityInput, analysis_type, verbose=False):\n\n all_gene_names = get_all_gene_names(input.control_files, input.condition_files)\n if verbose:\n print(\"# all_gene_names: \" + str(len(all_gene_names)))\n\n genes_ess = {g: GeneEssentiality() for g in all_gene_names}\n if analysis_type == \"original\":\n for f in input.only_ess_condition_files:\n ess_gene_names = gene_names_from_essentiality_file(f, verbose)\n if verbose:\n print(\"ess_gene_names condition: \" + str(len(ess_gene_names)))\n print(\"genes_ess: \" + str(len(genes_ess)))\n for e in genes_ess:\n if e in ess_gene_names:\n genes_ess[e].condition += 1\n genes_ess[e].number_of_reps = len(input.only_ess_condition_files)\n for f in input.only_ess_control_files:\n ess_gene_names = gene_names_from_essentiality_file(f, verbose)\n if verbose:\n print(\"ess_gene_names control: \" + str(len(ess_gene_names)))\n print(\"genes_ess: \" + str(len(genes_ess)))\n for e in genes_ess:\n if e in ess_gene_names:\n genes_ess[e].control += 1\n genes_ess[e].number_of_reps = len(input.only_ess_control_files)\n else:\n for e in genes_ess:\n genes_ess[e].control = 0\n genes_ess[e].condition = 0\n genes_ess[e].number_of_reps = len(input.only_ess_control_files)\n\n return genes_ess\n\ndef add_gene_essentiality_to_file(input_filename, output_filename, genes_ess, analysis_type):\n \"\"\"\n We can add information on gene essentiality to the comparison output,\n but this will not reflect full set of essential genes as the output does not contain all genes\n In order to prevent confusion this is \"switched\" off, but could be used if uncommented\n \"\"\"\n with open(input_filename, 'r') as inputfh:\n output_content = []\n\n reader = csv.reader(inputfh, delimiter=',', quotechar='\"')\n input_content = [r for r in reader]\n # if analysis_type == \"original\":\n # print(\"Number of cells: \" + len(input_content))\n # for i, cells in enumerate(input_content):\n # if i == 0:\n # cells.append(\"Essentiality\")\n # elif cells[1] in genes_ess and not (\"3prime\" in cells[1] or \"5prime\" in cells[1]):\n # cells.append(genes_ess[cells[1]].status())\n # else:\n # cells.append('N/A')\n # output_content.append(cells)\n # else:\n # output_content = input_content\n\n output_content = input_content\n\n with open(output_filename, 'w') as outputfh:\n for line in output_content:\n outputfh.write(\",\".join(line) + \"\\n\")\n\n\ndef essentiality_analysis(input: EssentialityInput, output_dir, analysis_type, output_filename=\"\"):\n\n #out_csv = mkstemp()\n genes_ess = all_gene_essentiality(input, analysis_type)\n #add_gene_essentiality_to_file(out_csv, output_filename, genes_ess, analysis_type)\n #os.remove(out_csv)\n\n ess = open(os.path.join(output_dir, \"essentiality.csv\"), \"w+\")\n ess.write(\"Gene,Essentiality,Control,Condition,Replicates\\n\")\n for e in genes_ess:\n ess.write(\",\".join([e, 
str(genes_ess[e].status()), str(genes_ess[e].control),\n str(genes_ess[e].condition), str(genes_ess[e].number_of_reps)]) + \"\\n\")\n ess.close()\n","repo_name":"quadram-institute-bioscience/QuaTradis","sub_path":"quatradis/comparison/essentiality_analysis.py","file_name":"essentiality_analysis.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9651325959","text":"#!/usr/bin/python3\n\n# minio_feature_store.py\n# Date: 21.07.2021\n# Author: Stratulat Ștefan\n# Email: stefan.stratulat1997@gmail.com\n\nimport io\nfrom pathlib import Path\nfrom abc import ABC, abstractmethod\nfrom SPARQLWrapper import SPARQLWrapper, CSV, JSON\n\nimport pandas as pd\n\nfrom string import Template\n\nDEFAULT_ENCODING = 'utf-8'\nDEFAULT_ENDPOINT_URL = \"https://publications.europa.eu/webapi/rdf/sparql\"\n\n\nclass SubstitutionTemplate(Template):\n delimiter = '~'\n\n\nclass SPARQLClientPool(object):\n \"\"\"\n A singleton connection pool, that hosts a dictionary of endpoint_urls and\n a corresponding SPARQLWrapper object connecting to it.\n The rationale of this connection pool is to reuse connection objects and save time.\n \"\"\"\n connection_pool = {}\n\n @staticmethod\n def create_or_reuse_connection(endpoint_url: str):\n if endpoint_url not in SPARQLClientPool.connection_pool:\n SPARQLClientPool.connection_pool[endpoint_url] = SPARQLWrapper(endpoint_url)\n return SPARQLClientPool.connection_pool[endpoint_url]\n\n\nclass TripleStoreABC(ABC):\n \"\"\"\n This class provides an abstraction for a TripleStore.\n \"\"\"\n\n @abstractmethod\n def with_query(self, sparql_query: str, substitution_variables: dict = None,\n sparql_prefixes: str = \"\") -> 'TripleStoreABC':\n \"\"\"\n This method will take a query in a string format\n :param sparql_query:\n :param substitution_variables:\n :param sparql_prefixes:\n :return:\n \"\"\"\n\n @abstractmethod\n def with_query_from_file(self, sparql_query_file_path: str, substitution_variables: dict = None,\n prefixes: str = \"\") -> 'TripleStoreABC':\n \"\"\"\n This method will read a query from a file\n :param sparql_query_file_path:\n :param substitution_variables:\n :param prefixes:\n :return:\n \"\"\"\n\n @abstractmethod\n def fetch_tabular(self) -> pd.DataFrame:\n \"\"\"\n This method will return the result of the SPARQL query in a tabular format (dataframe)\n :return:\n \"\"\"\n\n @abstractmethod\n def fetch_tree(self) -> dict:\n \"\"\"\n This method will return the result of the SPARQL query in a dict format (json)\n :return:\n \"\"\"\n\n\nclass SPARQLTripleStore(TripleStoreABC):\n\n def __init__(self, endpoint_url: str = DEFAULT_ENDPOINT_URL):\n self.endpoint = SPARQLClientPool.create_or_reuse_connection(endpoint_url)\n\n def with_query(self, sparql_query: str, substitution_variables: dict = None,\n sparql_prefixes: str = \"\") -> TripleStoreABC:\n \"\"\"\n Set the query text and return the reference to self for chaining.\n :return:\n \"\"\"\n if substitution_variables:\n template_query = SubstitutionTemplate(sparql_query)\n sparql_query = template_query.safe_substitute(substitution_variables)\n\n new_query = (sparql_prefixes + \" \" + sparql_query).strip()\n\n self.endpoint.setQuery(new_query)\n return self\n\n def with_query_from_file(self, sparql_query_file_path: str, substitution_variables: dict = None,\n prefixes: str = \"\") -> TripleStoreABC:\n \"\"\"\n Set the query text and return the reference to self for chaining.\n :return:\n \"\"\"\n\n with 
open(Path(sparql_query_file_path).resolve(), 'r') as file:\n query_from_file = file.read()\n\n if substitution_variables:\n template_query = SubstitutionTemplate(query_from_file)\n query_from_file = template_query.safe_substitute(substitution_variables)\n\n new_query = (prefixes + \" \" + query_from_file).strip()\n\n self.endpoint.setQuery(new_query)\n return self\n\n def fetch_tabular(self) -> pd.DataFrame:\n \"\"\"\n Get query results in a tabular format\n :return:\n \"\"\"\n if not self.endpoint.queryString or self.endpoint.queryString.isspace():\n raise Exception(\"The query is empty.\")\n\n self.endpoint.setReturnFormat(CSV)\n query_result = self.endpoint.queryAndConvert()\n return pd.read_csv(io.StringIO(str(query_result, encoding=DEFAULT_ENCODING)))\n\n def fetch_tree(self):\n \"\"\"\n Get query results in a dict format\n :return:\n \"\"\"\n if not self.endpoint.queryString or self.endpoint.queryString.isspace():\n raise Exception(\"The query is empty.\")\n\n self.endpoint.setReturnFormat(JSON)\n return self.endpoint.queryAndConvert()\n\n def __str__(self):\n return f\"from <...{str(self.endpoint.endpoint)[-30:]}> {str(self.endpoint.queryString)[:60]} ...\"\n","repo_name":"valipopa/ted-sws","sub_path":"ted_sws/core/adapters/sparql_triple_store.py","file_name":"sparql_triple_store.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"36569900406","text":"import nltk\nfrom sklearn.cluster import KMeans\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.decomposition import PCA\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport seaborn as sns\nsns.set(style=\"white\", color_codes=True)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\npage_url = 'https://en.wikipedia.org/wiki/Natural_language_processing'\nhtml = requests.get(page_url)\npage = BeautifulSoup(html.content, \"html.parser\")\n\n# For Bonus Points\nwith open('input.txt', 'w', encoding='utf-8') as file:\n file.write(page.get_text())\n\n# Reduce amount of text we are processing\nworking_text = page.get_text()[2000:6000]\n\nimport nltk\nnltk.download('punkt')\nnltk.download('universal_tagset')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\nnltk.download('maxent_ne_chunker')\nnltk.download('words')\n\n# Tokenizing each word\nw_tokens = nltk.word_tokenize(working_text)\nprint(w_tokens)\n\n# Tagging each word\ntagged = nltk.pos_tag(w_tokens)\nprint(tagged)\n\n# Stemming each word\nstemmer = nltk.stem.LancasterStemmer()\nstemmed = [stemmer.stem(token) for token in w_tokens]\nprint(stemmed)\n\n# Lemmatizing each word\nlemmatizer = nltk.stem.WordNetLemmatizer()\nlemmatized = [lemmatizer.lemmatize(token) for token in w_tokens]\nprint(lemmatized)\n\n# Using Named Entity Recognition on each word\nnamed_entity_recognition = nltk.ne_chunk(nltk.pos_tag(nltk.wordpunct_tokenize(working_text)))\nprint(named_entity_recognition)\n\n# Trigramming the word_list\ntrigrammed = nltk.trigrams(w_tokens)\nprint(*trigrammed)\n\n# Finding word counts\nword_counts = [w_tokens.count(word) for word in w_tokens]\nwords_and_counts = set(zip(w_tokens, word_counts))\nprint(words_and_counts)\n\n# Plotting the count of each word (first 20)\nimport matplotlib.pyplot as plt\nplt.bar(w_tokens[:20], word_counts[:20])\nplt.title('Word vs Count')\nplt.xlabel('Word')\nplt.ylabel('Count')\nplt.show()\n\n# Plotting the frequency 
of each word (first 20)\nword_freq = [count / len(w_tokens) for count in word_counts]\nplt.bar(w_tokens[:20], word_freq[:20])\nplt.title('Word vs Frequency')\nplt.xlabel('Word')\nplt.ylabel('Frequency')\nplt.show()\n","repo_name":"tmartinweb/DeepLearningClass","sub_path":"Solution-4/Part-1/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19726879556","text":"#!/usr/bin/python\nimport os, sys\n\nfile_name = {\"zsh_config\":\".zshrc .oh-my-zsh\"}\n\n\ndef plus(item, direct):\n\tif(direct == '1'):\n\t\tsou_dir = ' '.join(map(lambda foo:\"./\"+item+\"/\"+foo, file_name[item].split()))\n\t\tdes_dir = \"$HOME/\"\n\telse:\n\t\tsou_dir = ' '.join(map(lambda foo:\"$HOME/\"+foo, file_name[item].split()))\n\t\tdes_dir = item+\"/\"\n\treturn (sou_dir, des_dir)\n\ndef copy(dirs):\n\ttry:\n\t\tif(os.path.exists(dirs[1])):\n\t\t\tprint(\"delete the original %s...\" %dirs[1])\n\t\t\tos.system(\"sudo rm -rf \" + dirs[1])\n\t\tif(not os.path.exists(dirs[1])):\n\t\t\tos.system(\"mkdir \" + dirs[1])\n\t\tprint(\"copy %s to %s...\" %(dirs[0], dirs[1]))\n\t\tos.system(\"sudo cp -r %s %s\" %(dirs[0], dirs[1]))\n\texcept:\n\t\tprint(\"no such directory!\")\n\tprint(\"Done!\")\n\nclass Parser(object):\n\tdef __init__(self):\n\t\tself.argv = sys.argv\n\tdef solve(self):\n\t\tif(self.argv[1][1] not in ('i', 'b')):\n\t\t\ttry:\n\t\t\t\traise Argv_error(0)\n\t\t\texcept Argv_error as e:\n\t\t\t\tprint(\"unrecognized arguments!\")\n\t\t\t\texit(0)\n\t\telif(self.argv[1][1] == 'i'):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 2\n\tdef par(self):\n\t\tif(len(self.argv) > 2):\n\t\t\ttry:\n\t\t\t\traise Argv_error(len(self.argv))\n\t\t\texcept Argv_error as e:\n\t\t\t\tprint(\"%i arguments are commited, 1 are required!\" %(e.value-1))\n\t\t\t\texit(0)\n\t\telif(len(self.argv) == 1):\n\t\t\tprint(\"welcome to auto_config tools\\n\\n\")\n\t\t\tprint(\"Do you want to install or backup?\\n\")\n\t\t\tprint(\"# 1.install\")\n\t\t\tprint(\"# 2.backup\")\n\t\t\tdirect = input(\"choice:\")\n\t\t\tif(direct in ('1', '2')):\n\t\t\t\treturn direct\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\traise Argv_error(0)\n\t\t\t\texcept Argv_error as e:\n\t\t\t\t\tprint(\"unrecognized arguments!\")\n\t\t\t\t\texit(0)\n\t\telse:\n\t\t\treturn self.solve()\n\nclass Argv_error(Exception):\n\tdef __init__(self, value):\n\t\tself.value = value\n\tdef __str__(self):\n\t\treturn repr(self.value)\n\n\n\nclass Ask_to_do(object):\n\tdef __init__(self, item):\n\t\tself.item = item\n\tdef do_or_not(self):\n\t\tif(input(\"Do you want to deal with %s(y/n):\" %self.item) == \"y\"):\n\t\t\treturn True\n\tdef deal_with(self, do_or_not, direct):\n\t\tif do_or_not:\n\t\t\tprint(\"\\nfetching infomation about %s...\" %self.item)\n\t\t\tcopy(plus(self.item, direct))\n\n\n\ndef main():\n\tparser = Parser()\n\tdirect = parser.par()\n\tfor item in file_name:\n\t\tAsk_to_do(item).deal_with(Ask_to_do(item).do_or_not(), direct)\n\tprint(\"All done! 
enjoy!\")\n\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"cycoe/auto_config","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30879498506","text":"\"\"\"Module created to chart the infection and immunization curves of a simulation.\n\nHypothesis: If infected is only 0.2% of the population in a dense population, \nmore than half of the population is going to be infected before becoming immune.\n\nUnder a dense population of 500 people, only one person was selected to be infected.\nEven though this was only 0.2% of the population, the infected cell count reached, a strickingly high, \ngreater than 90% of the population. I was amazed how quickly the infection spread throught the people.\nAnother interesting thing was that immune cells only started emerging after reaching peak infection.\n\"\"\"\n\n\nimport argparse\nfrom projects.pj02.model import Model\nimport matplotlib.pyplot as plt\nfrom typing import List\n\n\ndef main() -> None:\n \"\"\"Entry point to create a chart.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"cell_count\")\n parser.add_argument(\"base_infected\")\n parser.add_argument(\"base_immune\")\n args = parser.parse_args()\n model = Model(int(args.cell_count), 5.0, int(args.base_infected), int(args.base_immune))\n ticks: List[int] = [0]\n infected: List[int] = [int(args.base_infected)]\n immune: List[int] = [int(args.base_immune)]\n\n counter: int = 0\n while(not model.is_complete()):\n total_infected: int = 0\n total_immune: int = 0\n model.tick()\n counter += 1\n ticks.append(counter)\n for cell in model.population:\n if(cell.is_infected()):\n total_infected += 1\n if(cell.is_immune()):\n total_immune += 1\n infected.append(total_infected)\n immune.append(total_immune)\n \n plt.title(\"Immunity and Infection Over Time\")\n plt.plot(ticks, infected, color = \"crimson\", label = \"Infected Cells\")\n plt.plot(ticks, immune, color = \"CornflowerBlue\", label = \"Immune Cells\")\n plt.xlabel(\"Time Ticks\")\n plt.ylabel(\"Number of Cells\")\n plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"kailash-unc/pj02_contagion_simulation","sub_path":"pj02/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12791198804","text":"goods = []\nfeatures = {'Наименование продукта': '', 'Цена': '', 'Количество': '', 'ед.': ''}\nanalytics = {'Наименование продукта': [], 'Цена': [], 'Количество': [], 'ед.': []}\nnum = 0\nfeature_ = None\ncontrol = None\nwhile True:\n control = input(\"Для выхода наберите 'Q', для продолжения нажмите 'Enter', для получения аналитики наберите 'A'\").upper()\n if control == 'Q':\n break\n num += 1\n if control == 'A':\n print(f'\\n Current analytics \\n {\"-\" * 30}')\n for key, value in analytics.items():\n print(f'{key[:25]:>30}: {value}')\n print(\"-\" * 30)\n for f in features.keys():\n feature_ = input(f'Введите \"{f}\"')\n features[f] = int(feature_) if (f == 'price' or f == 'quantity') else feature_\n analytics[f].append(features[f])\n goods.append((num, features))","repo_name":"maksim-chekhonadskii/homework-python","sub_path":"Task_2/task_2_6.py","file_name":"task_2_6.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"26629492192","text":"import numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn import model_selection, preprocessing\nfrom sklearn.feature_selection import SelectKBest\nimport tensorflow as tf\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras.layers import Dense\nfrom tensorflow.python.keras.layers import Dropout\n\ndef preprocess(X_text, X_val_text,X_test, y_train):\n vectorizer = TfidfVectorizer(ngram_range = (1,3),\n decode_error = 'replace',\n strip_accents= 'unicode',\n analyzer = 'word',\n stop_words = 'english',\n min_df = 2)\n X = vectorizer.fit_transform(X_text)\n X_test = vectorizer.transform(X_test)\n X_val = vectorizer.transform(X_val_text)\n\n selector = SelectKBest(k=min(25000, X.shape[1]))\n selector.fit(X, y_train)\n X = selector.transform(X)\n X_val = selector.transform(X_val)\n X_test = selector.transform(X_test)\n\n print(X.shape, X_val.shape, X_test.shape)\n return X, X_val, X_test\n\n# Source: https://developers.google.com/machine-learning/guides/text-classification/step-4\ndef create_model(hidden_layers, units, dropout_rate, input_shape, num_classes):\n model = models.Sequential()\n model.add(Dropout(rate=dropout_rate, input_shape=input_shape))\n\n for _ in range(hidden_layers):\n model.add(Dense(units=units, activation='relu'))\n model.add(Dropout(rate=dropout_rate))\n\n model.add(Dense(units=num_classes, activation='softmax'))\n return model\n\n\ndef trained_model(data,\n learning_rate=1e-3,\n epochs=100,\n batch_size=128,\n hidden_layers=1,\n units=32,\n dropout_rate=0.0):\n\n (x_train, train_labels), (x_val, val_labels) = data\n\n num_classes = max(train_labels) + 1\n\n model = create_model(hidden_layers=hidden_layers,\n units=units,\n dropout_rate=dropout_rate,\n input_shape=x_train.shape[1:],\n num_classes=num_classes)\n\n optimizer = tf.keras.optimizers.Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])\n\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=3, restore_best_weights=True)]\n\n history = model.fit(\n x_train,\n train_labels,\n epochs=epochs,\n callbacks=callbacks,\n validation_data=(x_val, val_labels),\n verbose=2,\n batch_size=batch_size)\n\n history = history.history\n print('Best validation accuracy:', max(history['val_acc']), ' loss:', history['val_loss'][np.argmax(history['val_acc'])])\n return model\n\ndef majority_predictions(dataset, stacked_models):\n # method recycled from bagging\n # majority predictions of models\n votes = np.zeros((dataset.shape[0], 20)) #(nb samples, nb classes) where votes[i,j] is nb of vote that sample i is from class j\n for sub_model in stacked_models:\n class_preds = np.argmax(sub_model.predict(dataset), axis=1) # for each samples, int representing the class with highest prob\n votes = votes + np.eye(20)[class_preds] #transform class_preds into onehot were the one is the vote for the predicted class\n return np.argmax(votes, axis=1) #for each samples, return int representing the class with most votes\n\n\ndef create_submission(prediction):\n index = np.arange(0,len(prediction))\n result = np.column_stack((index, prediction))\n header = np.array(['Id','Category'])\n result = np.vstack((header,result))\n result = result.astype(str)\n print(result)\n np.savetxt(\"submission_nn.csv\", result, delimiter=\",\", fmt='%s')\n\n\n\nif __name__== \"__main__\":\n\n\n X, y = np.load(\"reddit-comments/data_train.pkl\", allow_pickle=True)\n\n x_train, x_val, y_train, 
y_valid = model_selection.train_test_split(X, y)\n\n X_test = np.load(\"reddit-comments/data_test.pkl\", allow_pickle=True)\n\n encoder = preprocessing.LabelEncoder()\n y_train = encoder.fit_transform(y_train)\n y_valid = encoder.transform(y_valid)\n\n x_train, x_val, X_test = preprocess(x_train, x_val, X_test, y_train)\n\n model = trained_model(((x_train, y_train), (x_val, y_valid)),\n learning_rate=1e-3,\n epochs=50,\n batch_size=128,\n hidden_layers=1,\n units=32,\n dropout_rate=0.5)\n\n preds = majority_predictions(X_test, [model])\n print('\\n', preds)\n print('\\n', encoder.inverse_transform(preds))\n create_submission(encoder.inverse_transform(preds))\n","repo_name":"AndyBaiMQC/ift6390_kaggle_project","sub_path":"nn_submission.py","file_name":"nn_submission.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25322235618","text":"import logging\nimport os\nimport subprocess\nfrom typing import Any, Dict, List, Optional\n\nimport yaml\nfrom charmhelpers.fetch import snap\n\n# Log messages can be retrieved using juju debug-log\nlogger = logging.getLogger(__name__)\n\n\nclass ExporterConfigError(Exception):\n \"\"\"Indicates problem with configuration of exporter service.\"\"\"\n\n\nclass ExporterSnap:\n \"\"\"Class that handles operations of prometheus-juju-exporter snap and related services.\"\"\"\n\n SNAP_NAME = \"prometheus-juju-exporter\"\n SNAP_CONFIG_PATH = f\"/var/snap/{SNAP_NAME}/current/config.yaml\"\n _SNAP_ACTIONS = [\n \"stop\",\n \"start\",\n \"restart\",\n ]\n _REQUIRED_CONFIG = [\n \"customer.name\",\n \"customer.cloud_name\",\n \"juju.controller_endpoint\",\n \"juju.controller_cacert\",\n \"juju.username\",\n \"juju.password\",\n \"exporter.port\",\n \"exporter.collect_interval\",\n ]\n\n def install(self, snap_path: Optional[str] = None) -> None:\n \"\"\"Install prometheus-juju-exporter snap.\n\n This method tries to install snap from local file if parameter :snap_path is provided.\n Otherwise, it'll attempt installation from snap store based on ExporterSnap.SNAP_NAME.\n\n :param snap_path: Optional parameter to provide local file as source of snap installation.\n :raises:\n snap.CouldNotAcquireLockException: In case of snap installation failure.\n \"\"\"\n if snap_path:\n logger.info(\"Installing snap %s from local resource.\", self.SNAP_NAME)\n snap.snap_install(snap_path, \"--dangerous\")\n else:\n logger.info(\"Installing %s snap from snap store.\", self.SNAP_NAME)\n snap.snap_install(self.SNAP_NAME)\n\n def _validate_required_options(self, config: Dict[str, Any]) -> List[str]:\n \"\"\"Validate that config has all required options for snap to run.\"\"\"\n missing_options = []\n for option in self._REQUIRED_CONFIG:\n config_value = config\n for identifier in option.split(\".\"):\n config_value = config_value.get(identifier, {})\n if not config_value:\n missing_options.append(option)\n\n return missing_options\n\n @staticmethod\n def _validate_option_values(config: Dict[str, Any]) -> str:\n \"\"\"Validate sane values for some of the config parameters where its feasible.\"\"\"\n errors = \"\"\n\n # Verify that 'port' is number within valid port range.\n try:\n port = int(config[\"exporter\"][\"port\"])\n if not 0 < port < 65535:\n errors += f\"Port {port} is not valid port number.{os.linesep}\"\n except ValueError:\n errors += f\"Configuration option 'port' must be a number.{os.linesep}\"\n except KeyError:\n pass # Options was not in the config\n\n # Verify that 
'collect_interval' is positive number.\n try:\n collect_interval = int(config[\"exporter\"][\"collect_interval\"])\n if collect_interval < 1:\n errors += (\n f\"Configuration option 'collect_interval' must be a \"\n f\"positive number.{os.linesep}\"\n )\n except ValueError:\n errors += f\"Configuration option 'collect_interval' must be a number.{os.linesep}\"\n except KeyError:\n pass # Options was not in the config\n\n return errors\n\n def validate_config(self, config: Dict[str, Any]) -> None:\n \"\"\"Validate supplied config file for exporter service.\n\n :param config: config dictionary to be validated\n :raises:\n ExporterConfigError: In case the config does not pass the validation process. For\n example if the required fields are missing or values have unexpected format.\n \"\"\"\n errors = \"\"\n\n missing_options = self._validate_required_options(config)\n if missing_options:\n missing_str = \", \".join(missing_options)\n errors += f\"Following config options are missing: {missing_str}{os.linesep}\"\n\n errors += self._validate_option_values(config)\n\n if errors:\n raise ExporterConfigError(errors)\n\n def apply_config(self, exporter_config: Dict[str, Any]) -> None:\n \"\"\"Update configuration file for exporter service.\"\"\"\n self.stop()\n logger.info(\"Updating exporter service configuration.\")\n self.validate_config(exporter_config)\n\n with open(self.SNAP_CONFIG_PATH, \"w\", encoding=\"utf-8\") as config_file:\n yaml.safe_dump(exporter_config, config_file)\n\n self.start()\n logger.info(\"Exporter configuration updated.\")\n\n def restart(self) -> None:\n \"\"\"Restart exporter service.\"\"\"\n self._execute_service_action(\"restart\")\n\n def stop(self) -> None:\n \"\"\"Stop exporter service.\"\"\"\n self._execute_service_action(\"stop\")\n\n def start(self) -> None:\n \"\"\"Start exporter service.\"\"\"\n self._execute_service_action(\"start\")\n\n def _execute_service_action(self, action: str) -> None:\n \"\"\"Execute one of the supported snap service actions.\n\n Supported actions:\n - stop\n - start\n - restart\n\n :param action: snap service action to execute\n :raises:\n RuntimeError: If requested action is not supported.\n \"\"\"\n if action not in self._SNAP_ACTIONS:\n raise RuntimeError(f\"Snap service action '{action}' is not supported.\")\n logger.info(\"%s service executing action: %s\", self.SNAP_NAME, action)\n subprocess.call([\"snap\", action, self.SNAP_NAME])\n","repo_name":"mkalcok/charm-juju-machine-exporter","sub_path":"src/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43424843442","text":"from unittest import TestCase\nimport random\nimport functools\n\n################################################################################\n# 1. 
IMPLEMENT THIS HEAP\n################################################################################\nclass Heap:\n def __init__(self, key=lambda x:x):\n self.data = []\n self.key = key\n\n @staticmethod\n def _parent(idx):\n return (idx-1)//2\n\n @staticmethod\n def _left(idx):\n return idx*2+1\n\n @staticmethod\n def _right(idx):\n return idx*2+2\n\n def heapify(self, idx=0):\n ### BEGIN SOLUTION\n right = Heap._right(idx)\n left = Heap._left(idx)\n if right < len(self.data):\n if self.key(self.data[idx]) < self.key(self.data[left]) or self.key(self.data[idx]) < self.key(self.data[right]):\n if self.key(self.data[left]) > self.key(self.data[right]):\n self.data[idx], self.data[left] = self.data[left], self.data[idx]\n self.heapify(left)\n else:\n self.data[idx], self.data[right] = self.data[right], self.data[idx]\n self.heapify(right)\n elif left < len(self.data):\n if self.key(self.data[idx]) < self.key(self.data[left]):\n self.data[idx], self.data[left] = self.data[left], self.data[idx]\n ### END SOLUTION\n\n def add(self, x):\n ### BEGIN SOLUTION\n pos = len(self.data)\n self.data.append(x)\n while pos != 0 and self.key(self.data[pos]) > self.key(self.data[Heap._parent(pos)]):\n self.data[pos], self.data[Heap._parent(pos)] = self.data[Heap._parent(pos)], self.data[pos]\n pos = Heap._parent(pos)\n ### END SOLUTION\n\n def peek(self):\n return self.data[0]\n\n def pop(self):\n ret = self.data[0]\n self.data[0] = self.data[len(self.data)-1]\n del self.data[len(self.data)-1]\n self.heapify()\n return ret\n\n def __iter__(self):\n return self.data.__iter__()\n\n def __bool__(self):\n return len(self.data) > 0\n\n def __len__(self):\n return len(self.data)\n\n def __repr__(self):\n return repr(self.data)\n\n################################################################################\n# 1. IMPLEMENT THIS HEAP\n################################################################################\n\n# (6 point)\ndef test_key_heap_1():\n from unittest import TestCase\n import random\n\n tc = TestCase()\n h = Heap()\n\n random.seed(0)\n for _ in range(10):\n h.add(random.randrange(100))\n\n tc.assertEqual(h.data, [97, 61, 65, 49, 51, 53, 62, 5, 38, 33])\n\n# (6 point)\ndef test_key_heap_2():\n tc = TestCase()\n h = Heap(lambda x:-x)\n\n random.seed(0)\n for _ in range(10):\n h.add(random.randrange(100))\n\n tc.assertEqual(h.data, [5, 33, 53, 38, 49, 65, 62, 97, 51, 61])\n\n# (6 points)\ndef test_key_heap_3():\n tc = TestCase()\n h = Heap(lambda s:len(s))\n\n h.add('hello')\n h.add('hi')\n h.add('abracadabra')\n h.add('supercalifragilisticexpialidocious')\n h.add('0')\n\n tc.assertEqual(h.data,\n ['supercalifragilisticexpialidocious', 'abracadabra', 'hello', 'hi', '0'])\n\n# (6 points)\ndef test_key_heap_4():\n tc = TestCase()\n h = Heap()\n\n random.seed(0)\n lst = list(range(-1000, 1000))\n random.shuffle(lst)\n\n for x in lst:\n h.add(x)\n\n for x in range(999, -1000, -1):\n tc.assertEqual(x, h.pop())\n\n# (6 points)\ndef test_key_heap_5():\n tc = TestCase()\n h = Heap(key=lambda x:abs(x))\n\n random.seed(0)\n lst = list(range(-1000, 1000, 3))\n random.shuffle(lst)\n\n for x in lst:\n h.add(x)\n\n for x in reversed(sorted(range(-1000, 1000, 3), key=lambda x:abs(x))):\n tc.assertEqual(x, h.pop())\n\n################################################################################\n# 2. 
MEDIAN\n################################################################################\ndef running_medians(iterable):\n ### BEGIN SOLUTION\n minHeap = Heap()\n maxHeap = Heap(key=lambda x: -x)\n itList = list(iterable)\n medianList = [itList[0]] \n minHeap.add(itList[0]) \n for n in itList[1:]:\n if n > minHeap.peek():\n maxHeap.add(n)\n else:\n minHeap.add(n)\n if len(minHeap) > len(maxHeap) + 1:\n maxHeap.add(minHeap.pop())\n elif len(maxHeap) > len(minHeap) + 1:\n minHeap.add(maxHeap.pop())\n if len(minHeap) == len(maxHeap):\n medianList.append((minHeap.peek()+maxHeap.peek())/2)\n elif len(minHeap) > len(maxHeap):\n medianList.append(minHeap.peek())\n else:\n medianList.append(maxHeap.peek())\n return medianList\n ### END SOLUTION\n\n################################################################################\n# TESTS\n################################################################################\ndef running_medians_naive(iterable):\n values = []\n medians = []\n for i, x in enumerate(iterable):\n values.append(x)\n values.sort()\n if i%2 == 0:\n medians.append(values[i//2])\n else:\n medians.append((values[i//2] + values[i//2+1]) / 2)\n return medians\n\n# (13 points)\ndef test_median_1():\n tc = TestCase()\n tc.assertEqual([3, 2.0, 3, 6.0, 9], running_medians([3, 1, 9, 25, 12]))\n\n# (13 points)\ndef test_median_2():\n tc = TestCase()\n vals = [random.randrange(10000) for _ in range(1000)]\n tc.assertEqual(running_medians_naive(vals), running_medians(vals))\n\n# MUST COMPLETE IN UNDER 10 seconds!\n# (14 points)\ndef test_median_3():\n tc = TestCase()\n vals = [random.randrange(100000) for _ in range(100001)]\n m_mid = sorted(vals[:50001])[50001//2]\n m_final = sorted(vals)[len(vals)//2]\n running = running_medians(vals)\n tc.assertEqual(m_mid, running[50000])\n tc.assertEqual(m_final, running[-1])\n\n################################################################################\n# 3. 
TOP-K\n################################################################################\ndef topk(items, k, keyf):\n ### BEGIN SOLUTION\n revkey = lambda x: keyf(x) * -1\n minHeap = Heap(key = revkey)\n for item in items:\n if len(minHeap) < k:\n minHeap.add(item)\n elif keyf(minHeap.peek()) < keyf(item):\n minHeap.pop()\n minHeap.add(item)\n return sorted(minHeap.data, key=revkey)\n ### END SOLUTION\n\n################################################################################\n# TESTS\n################################################################################\ndef get_age(s):\n return s[1]\n\ndef naive_topk(l,k,keyf):\n revkey = lambda x: keyf(x) * -1\n return sorted(l, key=revkey)[0:k]\n\n# (30 points)\ndef test_topk_students():\n tc = TestCase()\n students = [ ('Peter', 33), ('Bob', 23), ('Alice', 21), ('Gertrud', 53) ]\n\n tc.assertEqual(naive_topk(students, 2, get_age),\n topk(students, 2, get_age))\n\n tc.assertEqual(naive_topk(students, 1, get_age),\n topk(students, 1, get_age))\n\n tc.assertEqual(naive_topk(students, 3, get_age),\n topk(students, 3, get_age))\n\n################################################################################\n# TEST HELPERS\n################################################################################\ndef say_test(f):\n print(80 * \"*\" + \"\\n\" + f.__name__)\n\ndef say_success():\n print(\"SUCCESS\")\n\n################################################################################\n# MAIN\n################################################################################\ndef main():\n for t in [test_key_heap_1,\n test_key_heap_2,\n test_key_heap_3,\n test_key_heap_4,\n test_key_heap_5,\n test_median_1,\n test_median_2,\n test_median_3,\n test_topk_students\n ]:\n say_test(t)\n t()\n say_success()\n\nif __name__ == '__main__':\n main()\n","repo_name":"jsnow06/cs331-s21-singino","sub_path":"lab08/lab08.py","file_name":"lab08.py","file_ext":"py","file_size_in_byte":7972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4685967875","text":"import unittest\n\nimport dash\nimport dash_html_components as html\nfrom days_until_vac import DaysUntilVac, DaysUntilVacDash\n\n\nclass DaysUntilVacTest(unittest.TestCase):\n def test_days_until_vac(self):\n chart = DaysUntilVac()\n chart.show()\n\n def test_days_until_vac_dash(self):\n external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n chart = DaysUntilVacDash(app)\n components_html = [html.Div([html.H1(\"Visualizações sobre Covid-19\"),\n html.H6(\n \"Neste trabalho, são apresentadas visualizações sobre os casos de Covid-19 e as vacinas aplicadas no Brasil.\")]),\n chart.component_html]\n app.layout = html.Div(components_html)\n app.run_server(debug=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"claudiovaliense/visualizacao_covid","sub_path":"veloso/test_days_until_vac.py","file_name":"test_days_until_vac.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21926699794","text":"def read_data(path):\n \"\"\"Reads comma separated data from path.\"\"\"\n infile = open(path)\n infile.readline() # discard headers\n for line in infile:\n line = line.strip()\n row = line.split(',')\n 
print(row)\n\nread_data('ice-cream.csv')\n","repo_name":"mwartell/gdi-ct","sub_path":"read2.py","file_name":"read2.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74502815527","text":"# https://leetcode.com/problems/path-sum/discuss/36486/Python-solutions-(DFS-recursively-DFS%2Bstack-BFS%2Bqueue)\n# https://leetcode.com/problems/path-sum/submissions/\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n # this is recursive, DFS\n def isLeaf(self, node):\n # if not node:\n # return True\n return not node.left and not node.right\n \n def DFS(self, node, sumsofar, target):\n if not node:\n return False\n sumsofar += node.val\n if sumsofar == target and self.isLeaf(node):\n return True\n \n found = False\n if node.left:\n found = self.DFS(node.left, sumsofar, target)\n if node.right and not found:\n found = self.DFS(node.right, sumsofar, target)\n \n return found\n \n def hasPathSum(self, root: TreeNode, targetSum: int) -> bool:\n return self.DFS(root, 0, targetSum)","repo_name":"linminhtoo/algorithms","sub_path":"BinarySearchTree/easy/PathSumI.py","file_name":"PathSumI.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15462629732","text":"#frankenstein code\r\n\r\nclass Doctor:\r\n\r\n def __init__(self, id, name, specialization, schedule, qualification, room):\r\n\r\n self.id = id\r\n self.name = name\r\n self.specialization = specialization\r\n self.schedule = schedule\r\n self.qualification = qualification\r\n self.room = room\r\n\r\n\r\n def format_data_print():\r\n\r\n with open('doctors.txt') as f:\r\n lines = f.readlines()\r\n\r\n for line in lines:\r\n line = line.strip().split('_')\r\n print(line[0] + ' ' + line[1] + ' ' + line[2] + ' ' + line[3] + ' ' + line[4] + ' ' + line[5])\r\n\r\n\r\n def check_id():\r\n id = (input(\"Enter the doctor ID:\\n\"))\r\n\r\n with open('doctors.txt') as f:\r\n lines = f.readlines()\r\n\r\n line_number_total = 0\r\n for line in lines:\r\n line = line.strip().split(\"_\")\r\n line_number_total += 1\r\n line_number_check = 0\r\n\r\n for line[0] in line:\r\n \r\n while line_number_check != line_number_total:\r\n try:\r\n if line[0] == id:\r\n print('\\n'+line[0]+'\\t'+line[1]+'\\t'+line[2]+'\\t\\t'+line[3]+'\\t'+line[4]+'\\t\\t'+line[5])\r\n return\r\n\r\n elif line[0] != id:\r\n line_number_check += 1\r\n\r\n except:\r\n continue\r\n\r\n if line_number_check == line_number_total:\r\n print(\"Can't find the doctor with the same ID on the system\")\r\n\r\n\r\n def check_name():\r\n name = (input(\"Enter the doctor name:\\n\"))\r\n\r\n with open('doctors.txt') as f:\r\n lines = f.readlines()\r\n\r\n line_number_total = 0\r\n for line in lines:\r\n line = line.strip().split(\"_\")\r\n line_number_total += 1\r\n line_number_check = 0\r\n\r\n for line[0] in line:\r\n \r\n while line_number_check != line_number_total:\r\n try:\r\n if line[1] == name:\r\n print('\\n'+line[0]+'\\t'+line[1]+'\\t'+line[2]+'\\t\\t'+line[3]+'\\t'+line[4]+'\\t\\t'+line[5])\r\n return\r\n\r\n elif line[1] != name:\r\n line_number_check += 1\r\n\r\n except:\r\n continue\r\n\r\n if line_number_check == line_number_total:\r\n print(\"Can't find the doctor with the same name on the system\")\r\n\r\n\r\n def append_text_data(self):\r\n text_data = 
object.id + '_' + object.name + '_' + object.specialization + '_' + object.schedule + '_' + object.qualification + '_' + object.room\r\n with open('doctors.txt', 'a') as f:\r\n f.write('\\n' + text_data)\r\n\r\n\r\n def edit_id():\r\n id = input(\"Please enter the id of the doctor that you want to edit their information:\\n\")\r\n\r\n with open('doctors.txt','r+') as f:\r\n lines = f.readlines()\r\n\r\n global line_number_total\r\n line_number_total = 0\r\n for line in lines:\r\n line = line.strip().split(\"_\")\r\n line_number_total += 1\r\n line_number_check = 0\r\n \r\n\r\n for line[0] in line:\r\n \r\n while line_number_check != line_number_total:\r\n try:\r\n if line[0] == id:\r\n line[1] = input('Enter new name:\\n')\r\n line[2] = input('Enter new specialization:\\n')\r\n line[3] = input('Enter new schedule:\\n')\r\n line[4] = input('Enter new qualification:\\n')\r\n line[5] = input('Enter new room number:\\n')\r\n global edited_text\r\n edited_text = (line[0] + '_' + line[1] + '_' + line[2] + '_' + line[3] + '_' + line[4] + '_' + line[5])\r\n return\r\n\r\n elif line[0] != id:\r\n line_number_check += 1\r\n\r\n except:\r\n continue\r\n \r\n if line_number_check == line_number_total:\r\n print(\"Can't find the Patient with the same id on the system\")\r\n\r\n\r\n def menu():\r\n while True:\r\n try:\r\n option = int(input('\\n1 - Display Doctors list\\n2 - Search for doctor by ID\\n3 - Search for doctor by name\\n4 - Add doctor\\n5 - Edit doctor info\\n6 - Back to the Main Menu\\n\\n'))\r\n \r\n if option == 1:\r\n print('\\n')\r\n Doctor.format_data_print()\r\n print('\\nBack to the previous Menu')\r\n continue\r\n\r\n elif option == 2:\r\n print('\\n')\r\n Doctor.check_id()\r\n continue\r\n\r\n elif option == 3:\r\n print('\\n')\r\n Doctor.check_name()\r\n continue\r\n\r\n elif option == 4:\r\n print('\\n')\r\n Doctor.append_text_data()\r\n continue\r\n\r\n elif option == 5:\r\n print('\\n')\r\n global object\r\n object = Doctor(input(\"Enter the doctor's ID: \\n\"), input(\"Enter the doctor's name (Dr.name): \\n\"), input(\"Enter the doctor's specialty: \\n\"), input(\"Enter the doctor's schedule (e.g. 
7AM-10PM): \\n\"), input(\"Enter the doctor's qualification: \\n\"), input(\"Enter the doctor's room number: \\n\"))\r\n object.edit_id()\r\n Doctor.format_data_print()\r\n print('\\nBack to the previous Menu')\r\n continue\r\n\r\n elif option == 6:\r\n #return to main menu of classes\r\n break\r\n except:\r\n continue","repo_name":"Gordly/OOP_Final_Project","sub_path":"doctor.py","file_name":"doctor.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26792342096","text":"# -*- coding:utf-8 -*-\r\n\"\"\"\r\n@FileName :md.py\r\n@Time :2022/11/8 17:14\r\n@Author :fsksf\r\n\"\"\"\r\n\r\nfrom vnpy.trader.object import (\r\n CancelRequest, OrderRequest, SubscribeRequest, TickData,\r\n ContractData\r\n)\r\nimport xtquant.xtdata\r\nimport xtquant.xttrader\r\nimport xtquant.xttype\r\nfrom vnpy_qmt.utils import (\r\n From_VN_Exchange_map, TO_VN_Exchange_map, to_vn_contract,\r\n TO_VN_Product, to_vn_product, timestamp_to_datetime,\r\n to_qmt_code\r\n)\r\n\r\n\r\nclass MD:\r\n\r\n def __init__(self, gateway):\r\n self.gateway = gateway\r\n self.th = None\r\n self.limit_ups = {}\r\n self.limit_downs = {}\r\n\r\n def close(self) -> None:\r\n pass\r\n\r\n def subscribe(self, req: SubscribeRequest) -> None:\r\n\r\n return xtquant.xtdata.subscribe_quote(\r\n stock_code=f'{req.symbol}.{From_VN_Exchange_map[req.exchange]}',\r\n period='tick',\r\n callback=self.on_tick\r\n )\r\n\r\n def connect(self, setting: dict) -> None:\r\n self.get_contract()\r\n return\r\n\r\n def get_contract(self):\r\n self.write_log('开始获取标的信息')\r\n contract_ids = set()\r\n bk = ['上期所', '上证A股', '上证B股', '中金所', '创业板', '大商所',\r\n '沪市ETF', '沪市指数', '沪深A股',\r\n '沪深B股', '沪深ETF', '沪深指数', '深市ETF',\r\n '深市基金', '深市指数', '深证A股', '深证B股', '科创板', '科创板CDR',\r\n ]\r\n for sector in bk:\r\n print(sector)\r\n stock_list = xtquant.xtdata.get_stock_list_in_sector(sector_name=sector)\r\n for symbol in stock_list:\r\n if symbol in contract_ids:\r\n continue\r\n contract_ids.add(symbol)\r\n info = xtquant.xtdata.get_instrument_detail(symbol)\r\n contract_type = xtquant.xtdata.get_instrument_type(symbol)\r\n if info is None or contract_type is None:\r\n continue\r\n try:\r\n exchange = TO_VN_Exchange_map[info['ExchangeID']]\r\n except KeyError:\r\n print('本gateway不支持的标的', symbol)\r\n continue\r\n if exchange not in self.gateway.exchanges:\r\n continue\r\n product = to_vn_product(contract_type)\r\n if product not in self.gateway.TRADE_TYPE:\r\n continue\r\n\r\n c = ContractData(\r\n gateway_name=self.gateway.gateway_name,\r\n symbol=info['InstrumentID'],\r\n exchange=exchange,\r\n name=info['InstrumentName'],\r\n product=product,\r\n pricetick=info['PriceTick'],\r\n size=100,\r\n min_volume=100\r\n )\r\n self.limit_ups[c.vt_symbol] = info['UpStopPrice']\r\n self.limit_downs[c.vt_symbol] = info['DownStopPrice']\r\n self.gateway.on_contract(c)\r\n self.write_log('获取标的信息完成')\r\n\r\n def on_tick(self, datas):\r\n for code, data_list in datas.items():\r\n symbol, suffix = code.rsplit('.')\r\n exchange = TO_VN_Exchange_map[suffix]\r\n for data in data_list:\r\n ask_price = data['askPrice']\r\n ask_vol = data['askVol']\r\n bid_price = data['bidPrice']\r\n bid_vol = data['bidVol']\r\n\r\n tick = TickData(\r\n gateway_name=self.gateway.gateway_name,\r\n symbol=symbol,\r\n exchange=exchange,\r\n datetime=timestamp_to_datetime(data['time']),\r\n last_price=data['lastPrice'],\r\n volume=data['volume'],\r\n open_price=data['open'],\r\n high_price=data['high'],\r\n 
low_price=data['low'],\r\n pre_close=data['lastClose'],\r\n limit_down=0,\r\n limit_up=0,\r\n ask_price_1=ask_price[0],\r\n ask_price_2=ask_price[1],\r\n ask_price_3=ask_price[2],\r\n ask_price_4=ask_price[3],\r\n ask_price_5=ask_price[4],\r\n\r\n ask_volume_1=ask_vol[0],\r\n ask_volume_2=ask_vol[1],\r\n ask_volume_3=ask_vol[2],\r\n ask_volume_4=ask_vol[3],\r\n ask_volume_5=ask_vol[4],\r\n\r\n bid_price_1=bid_price[0],\r\n bid_price_2=bid_price[1],\r\n bid_price_3=bid_price[2],\r\n bid_price_4=bid_price[3],\r\n bid_price_5=bid_price[4],\r\n\r\n bid_volume_1=bid_vol[0],\r\n bid_volume_2=bid_vol[1],\r\n bid_volume_3=bid_vol[2],\r\n bid_volume_4=bid_vol[3],\r\n bid_volume_5=bid_vol[4],\r\n )\r\n contract = self.gateway.get_contract(tick.vt_symbol)\r\n if contract:\r\n tick.name = contract.name\r\n tick.limit_up = self.limit_ups.get(tick.vt_symbol, None)\r\n tick.limit_down = self.limit_downs.get(tick.vt_symbol, None)\r\n self.gateway.on_tick(tick)\r\n def write_log(self, msg):\r\n self.gateway.write_log(f\"[ md ] {msg}\")","repo_name":"ruyisee/vnpy_qmt","sub_path":"vnpy_qmt/md.py","file_name":"md.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"75146750248","text":"\"\"\"\nVarious utility functions for chall-architect\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\n\nfrom yaml import safe_load\n\nfrom ctf_architect.core.models import Config\n\n\ndef get_config(path: str | Path) -> Config:\n \"\"\"\n Gets the config from a given path.\n \"\"\"\n if isinstance(path, str):\n path = Path(path)\n\n # Ensure path is a file\n if not path.is_file():\n raise ValueError(\"Path must be a file\")\n \n # Ensure path is a yaml file\n if path.suffix != \".yaml\":\n raise ValueError(\"Path must be a yaml file\")\n \n # Load the config\n with path.open(\"r\") as f:\n data = safe_load(path.open(\"r\"))\n\n config = data.get(\"config\")\n\n # Ensure config is present\n if config is None:\n raise ValueError(\"Config not found in yaml file\")\n \n config = Config(**config)\n\n return config\n\n\ndef is_valid_service_folder(path: str | Path) -> bool:\n \"\"\"\n Checks if the given path is a valid service folder.\n\n A service folder is considered valid if it has a dockerfile.\n \"\"\"\n if isinstance(path, str):\n path = Path(path)\n\n # Check if there is a docker file in the folder, case-insensitive\n return any(file.name.lower() == \"dockerfile\" for file in path.iterdir())","repo_name":"Jus-Codin/CTF-Architect","sub_path":"ctf_architect/chall_architect/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"13582795863","text":"from collections import defaultdict\nimport random\n\nfrom fortypoints.cards import Card, constants as CARD, GameCard\nfrom fortypoints.core import db\nfrom fortypoints.models import ModelMixin\nfrom fortypoints.games import constants as GAME\nfrom fortypoints.games.exceptions import GameError\nfrom fortypoints.players import get_player, get_player_by_id\n\n\nclass Game(db.Model, ModelMixin):\n __tablename__ = 'game'\n id = db.Column(db.Integer(unsigned=True), primary_key=True)\n trump_number = db.Column(db.SmallInteger(unsigned=True), nullable=False)\n trump_suit = db.Column(db.SmallInteger(unsigned=True), nullable=True)\n size = db.Column(db.SmallInteger(unsigned=True))\n first = db.Column(db.Boolean, nullable=False, 
default=False)\n _state = db.Column('state', db.SmallInteger(unsigned=True), nullable=False)\n next_game_id = db.Column(db.Integer(unsigned=True), db.ForeignKey(id), index=True)\n\n next_game = db.relationship('Game',\n uselist=False,\n remote_side=[id],\n backref=db.backref('previous_game', uselist=False))\n\n def __init__(self, num_players, level, first=False):\n self.size = num_players\n self.trump_number = level\n self.first = first\n self.state = GAME.DRAWING\n\n @property\n def name(self):\n return 'Game #{game_id}'.format(game_id=self.id)\n\n @property\n def state(self):\n return self._state\n\n @state.setter\n def state(self, game_state):\n if game_state not in GAME.STATES:\n raise GameError('Unknown game state {0}'.format(game_state))\n self._state = game_state\n\n @property\n def trump(self):\n return Card(self.trump_number, self.trump_suit)\n\n @trump.setter\n def trump(self, card):\n self.trump_number = card.num\n self.trump_suit = card.suit\n\n @property\n def trump_letters(self):\n if self.trump_suit is None:\n return ''\n if self.trump_number == CARD.SMALL_JOKER:\n return 'SJ'\n elif self.trump_number == CARD.BIG_JOKER:\n return 'BJ'\n else:\n return '{0}{1}'.format(CARD.NUMBER[self.trump_number],\n CARD.SUIT[self.trump_suit]).upper()\n\n @property\n def current_player(self):\n active = lambda p: p.active\n active_players = filter(active, self.players)\n return active_players[0] if active_players else None\n\n @current_player.setter\n def current_player(self, player):\n self.current_player_id = player.id\n\n @property\n def house_players(self):\n return filter(lambda p: p.house, self.players)\n\n @property\n def house_lead(self):\n lead = filter(lambda p: p.lead, self.players)\n return lead[0] if lead else None\n\n @house_lead.setter\n def house_lead(self, player):\n for other_player in self.players:\n other_player.lead = False\n player.lead = True\n\n def get_player(self, user):\n return get_player(self, user)\n\n @property\n def round(self):\n if not list(self.plays):\n return 1\n current_round = max([play.round for play in self.plays])\n plays = filter(lambda p: p.round == current_round, self.plays)\n if len(plays) == len(list(self.players)):\n return current_round + 1\n return current_round\n\n @property\n def round_plays(self):\n plays = filter(lambda p: p.round == self.round, self.plays)\n return sorted(plays, key=lambda p: p.number)\n\n @property\n def round_suit(self):\n if not self.round_plays:\n return None\n return self.round_plays[0].cards[0].suit\n\n @property\n def deck(self):\n return self.cards\n\n @property\n def undealt_cards(self):\n return filter(lambda c: c.player_id is None, self.deck)\n\n @property\n def hand_cards(self):\n cards = []\n for player in self.players:\n cards.extend(player.hand)\n return cards\n\n @property\n def flipped_cards(self):\n return filter(lambda c: c.flipped, self.deck)\n\n @property\n def bottom_size(self):\n num_players = len(list(self.players))\n num_left = len(list(self.deck)) % num_players\n return num_left % num_players + num_players\n\n def deal(self, player):\n card = random.choice(self.undealt_cards)\n card.player_id = player.id\n return card\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'trump_number': self.trump_number,\n 'trump_suit': self.trump_suit,\n 'trump_letters': self.trump_letters,\n 'size': self.size,\n 'first': self.first,\n 'state': self.state,\n 'next_game_id': self.next_game_id\n }\n\n def __repr__(self):\n return ''.format(\n size=self.size,\n num=self.trump_number,\n suit=self.trump_suit\n 
)","repo_name":"melvindu/40-Points","sub_path":"fortypoints/games/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9022210166","text":"from functools import reduce\nfrom matplotlib.colors import LinearSegmentedColormap\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_alpha(alpha):\n fig, ax = plt.subplots(1, 5, figsize=(15, 10))\n for i, img in enumerate(alpha.reshape(5, 38, 10)):\n ax[i].imshow(img, cmap='gray')\n ax[i].axis('off')\n \n return fig, ax\n\ndef plot_exp_vs_sim(rvae, zexp, zsim, aexp, asim, criterion, nmax=10):\n xyz = reduce((lambda x, y: x * y), zexp.shape[:3])\n # xyz //= 2\n tot = 0\n fig, ax = plt.subplots(1, 4, figsize=(5,2))\n for n in range(xyz):\n distances = np.linalg.norm(zsim - zexp[n], axis=1)\n # distances += np.abs(z31 - z11[n])\n # distances += np.linalg.norm(z33 - z13[n + xyz], axis=1)\n # distances += np.abs(z31 - z11[n + xyz])\n if np.min(distances) > criterion:\n continue\n tot += 1\n if tot > nmax:\n break\n nearest_neighbor_index = np.argmin(distances)\n # fig.suptitle(f'{n} {nearest_neighbor_index}')\n fig.suptitle('exp vs sim')\n ax[0].imshow(rvae.decode(np.array([*zexp[n], *aexp[n]]))[0])\n ax[0].axis('off')\n ax[1].imshow(rvae.decode(np.array([*zsim[nearest_neighbor_index], *asim[nearest_neighbor_index]]))[0])\n ax[1].axis('off')\n\n return fig, ax\n\n\ndef plot_tk(data, vmax=None, vmin=None):\n\n import tkinter\n\n from matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\n # Implement the default Matplotlib key bindings.\n from matplotlib.backend_bases import key_press_handler\n from matplotlib.figure import Figure\n\n import numpy as np\n\n root = tkinter.Tk()\n root.wm_title(\"Embedding in Tk\")\n\n fig, ax = plt.subplots(1, 1, figsize=(3, 3), dpi=100)\n colors = ['#00000F', '#0000FF','#00FF00', '#FF0000', '#FFFF00', '#FFFFFF']\n cmap = LinearSegmentedColormap.from_list('mycmap', colors, N=256)\n\n ax.imshow(data[0], cmap=cmap)\n ax.axis('off')\n canvas = FigureCanvasTkAgg(fig, master=root) # A tk.DrawingArea.\n canvas.draw()\n\n # pack_toolbar=False will make it easier to use a layout manager later on.\n toolbar = NavigationToolbar2Tk(canvas, root, pack_toolbar=False)\n toolbar.update()\n\n canvas.mpl_connect(\n \"key_press_event\", lambda event: print(f\"you pressed {event.key}\"))\n canvas.mpl_connect(\"key_press_event\", key_press_handler)\n\n button_quit = tkinter.Button(master=root, text=\"Quit\", command=root.destroy)\n\n def update_frequency(new_val):\n new_val = int(new_val)\n ax.clear()\n # ax.imshow(imutils.rotate(data[0, 20, 0], int(new_val), com[200]))\n # ax.imshow(imutils.rotate(data.sum(axis=2)[2, new_val], int(82), com[200]))\n if vmax is None or vmin is None:\n ax.imshow(data[new_val], cmap=cmap)\n else:\n ax.imshow(data[new_val], cmap=cmap, vmax=vmax, vmin=vmin)\n ax.axis('off')\n # required to update canvas and attached toolbar!\n canvas.draw()\n\n slider_update = tkinter.Scale(root, from_=0, to=len(data), orient=tkinter.HORIZONTAL,\n command=update_frequency, label=\"Frequency [Hz]\")\n\n # Packing order is important. 
Widgets are processed sequentially and if there\n # is no space left, because the window is too small, they are not displayed.\n # The canvas is rather flexible in its size, so we pack it last which makes\n # sure the UI controls are displayed as long as possible.\n button_quit.pack(side=tkinter.BOTTOM)\n slider_update.pack(side=tkinter.BOTTOM, fill=tkinter.BOTH)\n toolbar.pack(side=tkinter.BOTTOM, fill=tkinter.X)\n canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=True)\n\n tkinter.mainloop()\n plt.show(block=False)\n\ndef plot_random(data):\n fig, ax = plt.subplots(3, 3, figsize=(3, 3), dpi=100)\n for i in range(3):\n for j in range(3):\n ax[i, j].imshow(data[random.randint(0, len(data))])\n ax[i, j].axis('off')\n plt.show()","repo_name":"material-JH/polar_clustering","sub_path":"lib/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23232597954","text":"\na1 = int(input())\na2 = int(input())\nn = int(input())\n\nfor first in range(a1, a2):\n for secont in range(1, n):\n for third in range(1, int(n/2)):\n fourth = first\n sum = secont + third + fourth\n if first % 2 != 0 and sum % 2 != 0:\n print(f\"{chr(first)}-{secont}{third}{fourth}\")","repo_name":"Nedelchev86/Python-Basic-SoftUni","sub_path":"Online_Exam_15_and_16_June_2019/06_Movie_Tickets.py","file_name":"06_Movie_Tickets.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4263616351","text":"from cinderclient.tests.v1 import fakes\nfrom cinderclient.tests.v1.test_volumes import VolumesTest\nfrom cinderclient.tests.v1.test_types import TypesTest\nfrom cinderclient.tests import utils\nfrom cinderclient.v1.volumes import Volume\nfrom cinderclient.v1.volume_types import VolumeType\nfrom cinderclient.v1.volume_types import VolumeTypeManager\nfrom powervc.common.client.extensions import cinder as ext_cinder\nfrom powervc.common.client import delegate\nfrom powervc.common import utils as commonutils\n\nimport mock\nimport sys\n\n\n\"\"\"\n This class similarly extend the current cinder client test cases\n and also provided are examples of how someone can override and existing\n method in the event we need to test something unique to powerVC.\n\n The current methods that are overridden expect the same results as the base\n class test cases and are only provided for example.\n\n For specific PowerVC data model, just override the parent fake data\n structure and corresponding testcase methods logic that could verify\n the functions.\n\n To run the testcases, alternatively:\n 1. Right click the TestCinderClient.py --> Run As --> Python unit-test\n or\n 2. 
Refer to this link for detail UT running information:\n https://jazz04.rchland.ibm.com:9443/jazz/service/ +\n com.ibm.team.workitem.common.internal.rest.IAttachmentRestService/ +\n itemName/com.ibm.team.workitem.Attachment/67843\n\n All the testcases should be run successfully.\n\"\"\"\n\n\nclass PVCFakeClient(fakes.FakeClient):\n\n \"\"\"\n This PVCFakeClient class extends the current cinder FakeClient,\n and pvccinderclient.CinderClient.\n aiming to set the self client variable to PVCFakeHTTPClient\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n fakes.FakeClient.__init__(self, *args, **kwargs)\n self.client = PVCFakeHTTPClient(**kwargs)\n sys.modules['powervc.common.client.factory'] = mock.MagicMock()\n\n\nclass PVCFakeHTTPClient(fakes.FakeHTTPClient):\n\n \"\"\"\n This PVCFakeHTTPClient class extends the current cinder FakeHTTPClient.\n For all the HTTP requests in this class, it returns a fake json data\n as specified beforehand instead of requesting to a real environment.\n Ex, to test if json data from powerVC volume RESTAPI:\n 1. Add expected powerVC volumes json raw data into\n get_volumes_detail() method\n 2. Add get_volumes_{volume_id} method to return the volume\n 3. Add post_volumes_{volume_id}_action to handle post logic\n 4. Add testcase and new added methods will be called\n \"\"\"\n\n def __init__(self, **kwargs):\n fakes.FakeHTTPClient.__init__(self, **kwargs)\n\n def get_volumes_pvcvolume(self, **kw):\n r = {'volume': self.get_volumes_detail()[2]['volumes'][1]}\n return (200, {}, r)\n\n def get_volumes_detail(self, **kw):\n \"\"\"\n Override the parent method to a new powerVC specified volume,\n Here is the same structure as OpenStack one for example.\n \"\"\"\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})\n\n def post_volumes_pvcvolume_action(self, body, **kw):\n \"\"\"\n Add this method to handle powerVC volume post actions\n Here is the same logic as OpenStack one for example.\n \"\"\"\n _body = None\n resp = 202\n assert len(list(body.keys())) == 1\n action = list(body.keys())[0]\n if action == 'os-attach':\n assert sorted(list(body[action])) == ['instance_uuid',\n 'mode',\n 'mountpoint']\n elif action == 'os-detach':\n assert body[action] is None\n elif action == 'os-reserve':\n assert body[action] is None\n elif action == 'os-unreserve':\n assert body[action] is None\n elif action == 'os-initialize_connection':\n assert list(body[action].keys()) == ['connector']\n return (202, {}, {'connection_info': 'foos'})\n elif action == 'os-terminate_connection':\n assert list(body[action].keys()) == ['connector']\n elif action == 'os-begin_detaching':\n assert body[action] is None\n elif action == 'os-roll_detaching':\n assert body[action] is None\n elif action == 'os-reset_status':\n assert 'status' in body[action]\n else:\n raise AssertionError(\"Unexpected action: %s\" % action)\n return (resp, {}, _body)\n\n def get_storage_providers_2(self, **kw):\n \"\"\"\n To get a fake detail storage_providers\n \"\"\"\n return (200, {}, {\"storage_provider\":\n {\n \"backend_type\": \"svc\",\n \"volume_count\": \"null\",\n \"service\": {\n \"host_display_name\": \"shared_v7000_1\",\n \"host\": \"shared_v7000_1\",\n \"id\": 4\n },\n \"backend_id\": \"00000200A0204C30\",\n \"health_status\": {\n \"health_value\": \"OK\"\n },\n \"free_capacity_gb\": 873.5,\n \"total_capacity_gb\": 1115.5,\n 
\"storage_hostname\": \"shared_v7000_1\",\n \"id\": 2,\n \"backend_state\": \"running\"\n }})\n\n def get_storage_providers_detail(self, **kw):\n \"\"\"\n To return a fake detail storage_providers\n \"\"\"\n return (200, {}, {\"storage_providers\": [\n {\n \"backend_type\": \"svc\",\n \"volume_count\": \"null\",\n \"service\": {\n \"host_display_name\": \"shared_v7000_1\",\n \"host\": \"shared_v7000_1\",\n \"id\": 4\n },\n \"backend_id\": \"00000200A0204C30\",\n \"health_status\": {\n \"health_value\": \"OK\"\n },\n \"free_capacity_gb\": 873.5,\n \"total_capacity_gb\": 1115.5,\n \"storage_hostname\": \"shared_v7000_1\",\n \"id\": 2,\n \"backend_state\": \"running\"\n },\n {\n \"backend_type\": \"fc\",\n \"volume_count\": \"null\",\n \"service\": {\n \"host_display_name\": \"shared_v7000_1\",\n \"host\": \"shared_v7000_1\",\n \"id\": 4\n },\n \"backend_id\": \"00000200A0204C31\",\n \"health_status\": {\n \"health_value\": \"OK\"\n },\n \"free_capacity_gb\": 73.5,\n \"total_capacity_gb\": 115.5,\n \"storage_hostname\": \"shared_v7000_2\",\n \"id\": 3,\n \"backend_state\": \"running\"\n }\n ]})\n\n def get_types(self, **kw):\n return (200, {}, {\n \"volume_types\": [\n {\n \"extra_specs\": {\n \"drivers:storage_pool\": \"P-NGP01-pool\",\n \"capabilities:volume_backend_name\": \"shared_v7000_1\",\n \"drivers:rsize\": \"-1\"\n },\n \"name\": \"shared_v7000_1-default\",\n \"id\": \"6627888e-9f59-4996-8c22-5d528c3273f0\"\n },\n {\n \"extra_specs\": {},\n \"name\": \"dm-crypt\",\n \"id\": \"a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb\"\n },\n {\n \"extra_specs\": {},\n \"name\": \"LUKS\",\n \"id\": \"291f81a2-591b-4164-b2b2-829abc935573\"\n }\n ]\n })\n\n\nclass PVCCinderVolumesTest(VolumesTest):\n\n \"\"\"\n This PVCCinderVolumesTest class extends the current cinder\n VolumesTest class to provide volume related UT cases.\n \"\"\"\n\n volume_list = [\n {\n 'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {\n 'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]\n }]\n\n def setUp(self):\n super(PVCCinderVolumesTest, self).setUp()\n # get cinder client\n cinder_fakeclient = PVCFakeClient('r', 'p')\n # delegate to nova extension class\n cinder_client = delegate.new_composite_deletgate(\n [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient])\n self.cs = cinder_client\n\n def tearDown(self):\n super(PVCCinderVolumesTest, self).tearDown()\n\n def test_pvcvolume_attach(self):\n \"\"\"\n Add this method to test if powerVC volume attach functions\n Here is the same logic as OpenStack for example.\n \"\"\"\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')\n\n def test_list_all_volumes(self):\n resluts = self.cs.volumes.list_all_volumes()\n\n self.cs.assert_called('GET', '/volumes/detail')\n self.assertEqual(resluts[0].id, 1234)\n self.assertEqual(resluts[1].name, 'pvc sample-volume for cinder')\n\n def test_list_volumes_1(self):\n returnvalues = [Volume(self, res, loaded=True)\n for res in self.volume_list if res]\n commonutils.get_utils().get_multi_scg_accessible_volumes = \\\n mock.MagicMock(return_value=returnvalues)\n result = self.cs.volumes.list()\n\n self.assertEquals(result[0].id, 1234)\n self.assertEquals(result[1].name, \"pvc sample-volume for cinder\")\n\n def test_list_volumes_2(self):\n returnvalues = [Volume(self, res, loaded=True)\n for res in self.volume_list if res]\n 
commonutils.get_utils().get_scg_accessible_volumes = \\\n mock.MagicMock(return_value=returnvalues)\n\n result = self.cs.volumes.list(True, None, 'SCGUUID', None)\n self.assertEquals(result[0].name, \"sample-volume for cinder\")\n\n\nclass PVCCinderTypesTest(TypesTest):\n\n \"\"\"\n This PVCCinderTypesTest class extends the current cinder\n TypesTest class to provide volume Type related UT cases.\n \"\"\"\n volumes_type_list = [\n {\n \"extra_specs\": {\n \"drivers:storage_pool\": \"P-NGP01-pool\",\n \"capabilities:volume_backend_name\": \"shared_v7000_1\",\n \"drivers:rsize\": \"-1\"\n },\n \"name\": \"shared_v7000_1-default\",\n \"id\": \"6627888e-9f59-4996-8c22-5d528c3273f\"\n },\n {\n \"extra_specs\": {},\n \"name\": \"dm-crypt\",\n \"id\": \"a3ae95f6-4aab-4446-b1d2-0fc2f60a89b\"\n },\n {\n \"extra_specs\": {},\n \"name\": \"LUKS\",\n \"id\": \"291f81a2-591b-4164-b2b2-829abc93557\"\n }]\n\n def setUp(self):\n super(PVCCinderTypesTest, self).setUp()\n # get cinder client\n cinder_fakeclient = PVCFakeClient('r', 'p')\n # delegate to nova extension class\n cinder_client = delegate.new_composite_deletgate(\n [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient])\n self.cs = cinder_client\n\n def tearDown(self):\n super(PVCCinderTypesTest, self).tearDown()\n\n def test_list_all_storage_templates(self):\n\n reslut = self.cs.volume_types.list_all_storage_templates()\n\n self.assertEqual(reslut[0].name, \"shared_v7000_1-default\")\n\n def test_list_storage_templates_1(self):\n returnvalues = [VolumeType(VolumeTypeManager, res, loaded=True)\n for res in self.volumes_type_list if res]\n\n commonutils.get_utils().get_multi_scg_accessible_storage_templates = \\\n mock.MagicMock(return_value=returnvalues)\n result = self.cs.volume_types.list()\n\n self.assertEquals(result[0].id, \"6627888e-9f59-4996-8c22-5d528c3273f\")\n self.assertEquals(result[1].name, \"dm-crypt\")\n self.assertEquals(result[2].name, \"LUKS\")\n\n def test_list_storage_templates_2(self):\n data = self.volumes_type_list[2]\n returnvalues = [VolumeType(VolumeTypeManager, res, loaded=True)\n for res in [data] if res]\n\n commonutils.get_utils().get_scg_accessible_storage_templates = \\\n mock.MagicMock(return_value=returnvalues)\n result = self.cs.volume_types.list(\"SCGUUID\", None)\n\n self.assertEquals(result[0].name, \"LUKS\")\n\n\nclass PVCStorageProvidersTest(utils.TestCase):\n\n \"\"\"\n Class PVCStorageProvidersTest is used to provide\n Storage Providers related UT cases.\n \"\"\"\n expected_sp = [\n dict(\n backend_type=\"svc\",\n volume_count=\"null\",\n service=dict(\n host_display_name=\"shared_v7000_1\",\n host=\"shared_v7000_1\",\n id=4),\n backend_id=\"00000200A0204C30\",\n health_status=dict(health_value=\"OK\"),\n free_capacity_gb=873.5,\n total_capacity_gb=1115.5,\n storage_hostname=\"shared_v7000_1\",\n id=2,\n backend_state=\"running\",\n storage_type=\"fc\")]\n\n def setUp(self):\n super(PVCStorageProvidersTest, self).setUp()\n # get cinder client\n cinder_fakeclient = PVCFakeClient('r', 'p')\n # delegate to nova extension class\n cinder_client = delegate.new_composite_deletgate(\n [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient])\n self.cs = cinder_client\n\n def tearDown(self):\n super(PVCStorageProvidersTest, self).tearDown()\n\n def compare_to_expected(self, expected, hyper):\n for key, value in expected.items():\n self.assertEqual(getattr(hyper, key), value)\n\n def test_get_detail_SPs(self):\n expected = [\n dict(id=2,\n backend_type=\"svc\",\n backend_id=\"00000200A0204C30\",\n 
free_capacity_gb=873.5,\n total_capacity_gb=1115.5,\n storage_hostname=\"shared_v7000_1\",\n backend_state=\"running\"),\n dict(id=3,\n backend_type=\"fc\",\n backend_id=\"00000200A0204C31\",\n free_capacity_gb=73.5,\n total_capacity_gb=115.5,\n storage_hostname=\"shared_v7000_2\",\n backend_state=\"running\")]\n\n result = self.cs.storage_providers.list_all_providers()\n self.cs.assert_called('GET', '/storage-providers/detail')\n\n for idx, hyper in enumerate(result):\n self.compare_to_expected(expected[idx], hyper)\n\n def test_get_storage_provider(self):\n expected = dict(id=2,\n backend_type=\"svc\",\n backend_id=\"00000200A0204C30\",\n free_capacity_gb=873.5,\n total_capacity_gb=1115.5,\n storage_hostname=\"shared_v7000_1\",\n backend_state=\"running\")\n\n result = self.cs.storage_providers.get(2)\n self.cs.assert_called('GET',\n '/storage-providers/2')\n\n self.compare_to_expected(expected, result)\n\n def test_list_SP_1(self):\n expected = self.expected_sp\n returnvalue = [ext_cinder.StorageProvider(None, expected[0], True)]\n\n commonutils.get_utils().get_scg_accessible_storage_providers = \\\n mock.MagicMock(return_value=returnvalue)\n result = self.cs.storage_providers.list(True, None, \"SCGUUID\", None)\n\n for idx, hyper in enumerate(result):\n self.compare_to_expected(expected[idx], hyper)\n\n def test_list_SP_2(self):\n expected = self.expected_sp\n returnvalue = [ext_cinder.StorageProvider(None, expected[0], True)]\n\n commonutils.get_utils().get_multi_scg_accessible_storage_providers = \\\n mock.MagicMock(return_value=returnvalue)\n result = self.cs.storage_providers.list()\n\n for idx, hyper in enumerate(result):\n self.compare_to_expected(expected[idx], hyper)\n","repo_name":"Gokulk7/PowerVC-Drivers-clone","sub_path":"common-powervc/test/common/client/test_cinder.py","file_name":"test_cinder.py","file_ext":"py","file_size_in_byte":16678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41146886877","text":"import json\nimport os\nfrom flask import Flask, render_template, make_response, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return render_template('main.html')\n\n@app.route('/data1', methods=['POST'])\ndef data1():\n data = [[\"date\", \"power_consumption\", {'role': 'style'}]]\n with open(os.path.join('C:/Users/user/PycharmProjects/untitled2\\data', request.form['m'],\n 'output{}.txt'.format(request.form['m']))) as fp:\n lines = fp.readlines()\n\n n = 0\n temp = 0\n for line in lines:\n #n += 1\n line = line.strip()\n mon, day, _, power = line.split()\n #data.append([\"{}월{}일\".format(mon, day), float(power), \"#FF0000\"])\n temp +=float(power)\n\n if((n+1) % 24 ==0):\n data.append([\"{}월{}일\".format(mon, day), float(power), \"#FF0000\"])\n temp = 0\n n += 1\n\n resp = make_response(json.dumps({'data': data}))\n resp.status_code = 200\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\n@app.route('/data2', methods=['POST'])\ndef data2():\n data = [[\"date\", \"power_consumption\", {'role': 'style'}]]\n with open(os.path.join('C:/Users/user/PycharmProjects/untitled2\\data', request.form['m'],\n 'output{}월{}일.txt'.format(request.form['m'], request.form['d']))) as fp:\n lines = fp.readlines()\n\n n = 0\n temp = 0\n for line in lines:\n\n line = line.strip()\n mon, day, _, power = line.split()\n temp += float(power)\n if((n+1) % 4 == 0):\n data.append([\"{}월{}일{}시\".format(mon, day, int(n/4)), float(temp), \"#FF0000\"])\n temp = 0\n n += 1\n\n resp = 
make_response(json.dumps({'data': data}))\n resp.status_code = 200\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"Ryudongki/htmlserver","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13684241145","text":"\"\"\"\nEesa Aamer\nDate Created: 11/04/20\nLast Modified: 13/04/20\n\nA python script that can read and send emails, while also being\nable to download attachments directly onto the device.\n\n\"\"\"\nimport ezgmail\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Enter valid email information\")\nparser.add_argument('Email', type=str, help='Receiver email address')\nparser.add_argument('SubjectLine', type=str, help='Subject Line of Email')\nparser.add_argument('Body', type=str, help='Body of Email')\n\nargs = parser.parse_args()\n\nclass SendWithNoAttachment:\n \"\"\" Sends emails without attachments \"\"\" \n def __init__(self, email, subjectLine, body):\n self.email = email\n self.subjectLine = subjectLine\n self.body = body \n \n def sender(self):\n ezgmail.send(self.email, self.subjectLine, self.body)\n \n\nclass SendWithAttachments(SendWithNoAttachment):\n \"\"\" Sends emails with variable amount of attachments\"\"\"\n def __init__(self, email, subjectLine, body, attachments):\n super().__init__(email, subjectLine, body) # Inherits from SendWithNoAttachment class\n self.attachments = attachments\n\n def senderWithAttach(self):\n ezgmail.send(self.email, self.subjectLine, self.body, self.attachments) # Command to send email\n\nclass Reader:\n \"\"\" Reads most recent unread emails \"\"\"\n def __init__(self):\n unreadThreads = ezgmail.unread() # Collects unread email into lsit\n print(\"You have {} new emails\".format(len(unreadThreads)))\n ezgmail.summary(unreadThreads) # Command that provides name, subject line, and body of unread emails\n \n\nclass Downloader:\n \"\"\" Downloads attachments from select emails \"\"\"\n def __init__(self, subjectLine):\n self.subjectLine = subjectLine\n \n def mailFinder(self):\n mail = ezgmail.search(self.subjectLine) # Collects emails that have a certain subject line\n return mail\n\n def downloadOneAttachment(self, files):\n filename = input(\"What is the name of the file?: \")\n files[0].messages[0].downloadAttachment(filename) # Command to download a specific attachment\n\n def downloadAllAttachments(self, files):\n files[0].messages[0].downloadAllAttachments() # Command to download all attachments\n\n\nclass Introduction:\n \"\"\" Initial user interface \"\"\"\n def __init__(self):\n pass\n def start(self):\n # Takes in user choice\n print(\"Welcome to the automated email system!\")\n initialResp = input(\"[S]end without attachments, [W]ith attachments, [R]ead?, or [D]ownload?: \")\n return initialResp\n\n\nif __name__ == \"__main__\":\n begin = Introduction() # Introduction object acts as starting screen\n initial = begin.start()\n if initial == \"S\": # User wants to send an email\n newSenderWithNone = SendWithNoAttachment(args.Email, args.SubjectLine, args.Body).sender()\n elif initial == \"W\": # User wants to send email with attachments\n print(\"For attachments, please list all attachments seperated with a space\")\n attaches = input(\"Attachments?: \")\n new_list = [attach for attach in attaches.split(\" \")]\n newSenderWithSome = SendWithAttachments(args.Email, args.SubjectLine, args.Body, 
new_list).senderWithAttach()\n elif initial == \"R\": # User wants to view recent unread emails\n newRead = Reader()\n elif initial == \"D\": # User wants to download attachments from file\n desiredEmail = input(\"What is the subject line?: \")\n newDownload = Downloader(desiredEmail)\n user_choice = input(\"[O]ne file or [A]ll?: \")\n if user_choice == \"O\":\n newDownload.downloadOneAttachment(newDownload.mailFinder())\n elif user_choice == \"A\":\n newDownload.downloadAllAttachments(newDownload.mailFinder())\n\n\n","repo_name":"eesaaamer3/EmailAutomation","sub_path":"quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72244958889","text":"import csv\nimport sys\n\n# c.f., https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072\ndef set_csv_field_size_limit(maxsize: int = sys.maxsize) -> None:\n decrement = True\n\n while decrement:\n try:\n csv.field_size_limit(maxsize)\n except OverflowError:\n maxsize = int(maxsize / 10)\n\n decrement = True\n else:\n decrement = False\n\n return\n","repo_name":"pnnl/buildingid-py","sub_path":"buildingid/command_line/set_csv_field_size_limit.py","file_name":"set_csv_field_size_limit.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"27506273523","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport time\nimport numpy as np\nimport tensorflow_model_optimization as tfmot\n\ndef load_mnist_data():\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train, x_test = x_train / 255.0, x_test / 255.0\n return x_train,y_train,x_test,y_test\n\ndef mnist_model():\n '''\n mnist model build\n '''\n x_train,y_train,x_test,y_test = load_mnist_data()\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=(28,28)))\n model.add(tf.keras.layers.Dense(512, activation='relu'))\n model.add(tf.keras.layers.Dropout(0.2))\n model.add(tf.keras.layers.Dense(10, activation='softmax'))\n\n model.summary()\n model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])\n model.fit(x_train, y_train, batch_size=64,epochs=1)\n score = model.evaluate(x_test, y_test, verbose=2)\n print('loss:',score[0])\n print('accuracy:',score[1])\n # model.save('./model/tf_model',save_format = 'tf')\n tf.saved_model.save(model,'./model/tf_model')\n tf.keras.utils.plot_model(model,'model_info.png',show_shapes = True)\n\ndef test(model_path='./model/tf_model'):\n 'keras model 推理'\n x_train,y_train,x_test,y_test = load_mnist_data()\n model = tf.keras.models.load_model(model_path)\n\n t=time.time()\n output = model(x_test)\n print(output[0],'\\n',time.time()-t)\n\ndef trt(trt_opt):\n '''\n 使用tensorRT优化模型\n '''\n converter = tf.experimental.tensorrt.Converter(input_saved_model_dir='./model/tf_model')\n converter.convert()#完成转换,但是此时没有进行优化,优化在执行推理时完成\n if trt_opt == True:\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_test = x_test.astype('float32')\n x_test = x_test / 255.0\n def input_fn():\n yield (x_test[:1])\n converter.build(input_fn) #优化后保存\n converter.save('trt_model_opt')\n 
else:\n converter.save('trt_model')\n\ndef trt_test(model_path='./trt_model_opt'):\n 'trt model 推理'\n x_train,y_train,x_test,y_test = load_mnist_data()\n model_loaded = tf.saved_model.load(model_path)#读取模型\n\n graph_func = model_loaded.signatures['serving_default']#获取推理函数\n t=time.time()\n #output = graph_func(tf.constant(x_test))\n output = model_loaded(x_test)\n print(output[0],'\\n',time.time()-t)\n\ndef tflite():\n 'keras model -> tflite 训练后量化' \n converter = tf.lite.TFLiteConverter.from_saved_model(\"./model/tf_model\")\n # 量化权重:16位浮点数用于GPU加速,而8位整数则用于CPU执行。\n converter.optimizations = [tf.lite.Optimize.DEFAULT] # 训练后量化\n #converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] # 指定8位整数权重量化\n tflite_quantized_model = converter.convert()\n\n # SavedModel to TensorFlow Lite\n open(\"./model/quantized_converted_model.tflite\", \"wb\").write(tflite_quantized_model)\n\ndef tflite_run(model_path=\"./model/quantized_converted_model.tflite\"):\n 'tflite 推理'\n # Load the TFLite model and allocate tensors.\n interpreter = tf.lite.Interpreter(model_path)\n interpreter.allocate_tensors()\n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n # Test the model on random input data.\n input_shape = input_details[0]['shape']\n input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n\n # The function `get_tensor()` returns a copy of the tensor data.\n # Use `tensor()` in order to get a pointer to the tensor.\n output_data = interpreter.get_tensor(output_details[0]['index'])\n print(output_data)\n\n\nif __name__ == '__main__':\n print(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n # mnist_model()\n # trt(True)\n # trt_test()\n # tflite()\n # test('./model/tf_model')\n # tflite_run(\"./model/quantized_converted_model.tflite\")","repo_name":"Acemyzoe/TFL","sub_path":"tfmodel_opt/mnist_model.py","file_name":"mnist_model.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16799990129","text":"i=0\nlist = []\nwhile i<100:\n\tlist.append(100*i)\n\ti=i+1\n\t\n\ndef addiTaunt(nb1,nb2):\n\tj=0\n\tt=0\n\tres = nb1 + nb2\n\tfor j in list:\n\t\tif res > j:\n\t\t\tprint('Difficulté',t)\n\t\t\tbreak\n\t\telse :\n\t\t\tprint(j)\n\t\tt=t+1\n\naddiTaunt(10,10)\n","repo_name":"etouss/Python","sub_path":"TP1/Gmail/testPy.py","file_name":"testPy.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74222428008","text":"class Solution:\n def findComplement(self, num: int) -> int:\n ans=bin(num)\n \n ans=ans[2:]\n l=list(ans)\n for i in range(len(ans)):\n if l[i]=='0':\n l[i]='1'\n else:\n l[i]='0'\n \n return int(''.join(l),2)","repo_name":"abhi-apple/leetcode","sub_path":"0476-number-complement/0476-number-complement.py","file_name":"0476-number-complement.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"32403198006","text":"from collections import OrderedDict, namedtuple\nimport logging\nimport os\n\nfrom tensorboardX import SummaryWriter\nfrom tensorboardX.embedding import make_sprite, make_mat, append_pbtxt\nimport torch\n\nlog = logging.getLogger('main')\n\n\nclass 
ResultWriter(object):\n # result manager\n Embedding = namedtuple(\"Embedding\", \"embed, text\")\n\n def __init__(self, cfg):\n self._outdir = cfg.name\n tf_events_path = os.path.join(cfg.log_dir, 'tf_events')\n self._writer = MySummaryWriter(tf_events_path)\n self.initialize()\n\n def initialize(self):\n \"\"\"To initialize instance from outside\"\"\"\n self._scalar = OrderedDict()\n self._text = OrderedDict()\n self._embedding = OrderedDict()\n\n def __repr__(self):\n return self.__dict__.__repr__()\n\n def add(self, name, update_dict):\n \"\"\"Update result dictionaries considering type of each update item.\n scalars, text : updated in a hierarchical dict structure\n embeddings : updated to only one-depth dict\n (as they have to be projected to the same space altogether.)\n\n \"\"\"\n scalar = OrderedDict()\n text = OrderedDict()\n\n for sub_name, sub_value in update_dict.items():\n if type(sub_value) in (int, float):\n scalar.update({sub_name: sub_value})\n elif type(sub_value) is str:\n text.update({sub_name: sub_value})\n elif isinstance(sub_value, self.Embedding):\n self._embedding.update({name+'/'+sub_name: sub_value})\n else:\n raise Exception('Unknown type : %s' % type(sub_value))\n\n if scalar: self._scalar.update({name: scalar})\n if text: self._text.update({name: text})\n\n def log_scalar(self):\n if not self._scalar: return\n print_str = \"\"\n for name, scalar in self._scalar.items():\n print_str = \"| %s | %s |\" % (self._outdir, name)\n for sub_name, sub_scalar in scalar.items():\n print_str += \" %s : %.8f |\" % (sub_name, sub_scalar)\n log.info(print_str)\n\n def log_text(self):\n if not self._text: return\n print_str = \"\"\n for name, text in self._text.items():\n # print_str = \"| %s | %s |\\n\" % (self._outdir, name)\n for sub_name, sub_text in text.items():\n print_str += sub_text\n log.info(print_str)\n\n def save_scalar(self, step):\n if not self._scalar: return\n for name, scalar in self._scalar.items():\n for sub_name, sub_scalar in scalar.items():\n tag = \"%s/%s\" % (name, sub_name)\n self._writer.add_scalar(tag, sub_scalar, step)\n\n def save_text(self, step):\n if not self._text: return\n\n for name, text in self._text.items():\n for sub_name, sub_text in text.items():\n tag = \"%s/%s\" % (name, sub_name)\n self._writer.add_text(tag, sub_text, step)\n\n def save_embedding(self, step):\n \"\"\"Append all embeddings with the same tag but different metadata\"\"\"\n if not self._embedding: return\n all_embed = []\n all_label = []\n all_id = []\n all_text = []\n for name, (embed, text) in self._embedding.items():\n if type(embed) is torch.cuda.LongTensor:\n embed = embed.cpu() # better do this only when necessary\n all_embed.append(embed)\n all_label += ([name] * embed.size(0))\n all_id += [\"[%s]\" % str(i) for i in range(embed.size(0))]\n all_text += text\n all_embed = torch.cat(all_embed, dim=0)\n metadata = dict(label=all_label, id=all_id, text=all_text)\n self._writer.add_embedding(all_embed, metadata,\n global_step=step, tag='embedding')\n\n\nclass MySummaryWriter(SummaryWriter):\n def add_embedding(self, mat, metadata=None, label_img=None,\n global_step=None, tag='default'):\n \"\"\"Override make_tsv function in order to handle multi-column tsv file.\n\n Args(changed):\n metadata (dict): Keys -> headers of metadata\n Values -> values of metatdata\n \"\"\"\n if global_step is None:\n global_step = 0\n\n save_path = os.path.join(self.file_writer.get_logdir(),\n str(global_step).zfill(5))\n try:\n os.makedirs(save_path)\n except OSError:\n # to control log 
level\n info.warning('warning: Embedding dir exists, '\n 'did you set global_step for add_embedding()?')\n\n if metadata is not None:\n assert all(mat.size(0) == len(d) for d in metadata.values()), \\\n '#labels should equal with #data points'\n make_tsv(metadata, save_path)\n\n if label_img is not None:\n assert mat.size(0) == label_img.size(0), \\\n '#images should equal with #data points'\n make_sprite(label_img, save_path)\n\n assert mat.dim() == 2, \\\n 'mat should be 2D and mat.size(0) is the number of data points'\n make_mat(mat.tolist(), save_path)\n # new funcion to append to the config file a new embedding\n append_pbtxt(metadata, label_img, self.file_writer.get_logdir(),\n str(global_step).zfill(5), tag)\n\n\ndef make_tsv(metadata, save_path):\n \"\"\"Only used in MySummaryWriter. Write multi-column tsv metadata file\"\"\"\n metadata_str = []\n process = lambda data : '\\t'.join([str(d) for d in data])\n metadata_str.append(process(metadata.keys()))\n\n for data in zip(*[value for value in metadata.values()]):\n metadata_str.append(process(data))\n\n with open(os.path.join(save_path, 'metadata.tsv'), 'w') as f:\n for x in metadata_str:\n f.write(x + '\\n')\n","repo_name":"ricoshin/textgen","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"3507540745","text":"from utils import fetch_air_quality_data, read_air_quality_data, calculate_average_pm2_5_by_site, calculate_monthly_average_pm2_5,month_unique\nfrom datetime import datetime\n\n# Specify the airqloud_id\ngrid_id = \"6542358ddcd81300139b4c1b\"\nstart_time = datetime(2023, 7, 1, 9, 0, 0)\nend_time = datetime(2023, 10, 30, 9, 0, 0)\npage = 1\ntop_location = 6\nleast_location = 1\n\n# Call the function with the desired start and end times\ndata = fetch_air_quality_data(grid_id, start_time, end_time, page)\n\n# Check if data is not empty before generating the report\nif data:\n # Generate the report\n air_quality_data = read_air_quality_data(data)\n avg_pm2_5_by_site = calculate_average_pm2_5_by_site(air_quality_data)\n\n # Identify the top 5 sites with the highest average PM2.5 values\n top_PM_sites = avg_pm2_5_by_site.nlargest(top_location, \"pm2_5_value\") # Adjust column name if needed\n\n # Identify the least 3 sites with the lowest average PM2.5 values\n least_PM_sites = avg_pm2_5_by_site.nsmallest(least_location, \"pm2_5_value\") # Adjust column name if needed\n\n # Print or use the generated report data as needed\n print(f\"Top {top_location} sites with highest average PM2.5 values:\")\n print(top_PM_sites)\n\n print(f\"\\nLeast {least_location} sites with lowest average PM2.5 values:\")\n print(least_PM_sites)\n\n print(f\"\\nLeast monthly sites with lowest average PM2.5 values:\")\n monthly_avg_pm2_5 = calculate_monthly_average_pm2_5(air_quality_data)\n print(monthly_avg_pm2_5)\n\n print(f\"\\nMonths under study:\")\n month_unique_place = month_unique(air_quality_data)\n print(month_unique_place)\nelse:\n print(\"No data available for the specified time range.\")\n\n","repo_name":"wabinyai/My_research-lab","sub_path":"src/report/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37026352991","text":"import tensorflow as tf\nimport sonnet as snt\n\nfrom vaeseq import codec\nfrom vaeseq import context as context_mod\nfrom vaeseq import hparams as 
hparams_mod\nfrom vaeseq import util\nfrom vaeseq import vae as vae_mod\n\n\ndef _inputs_and_vae(hparams):\n \"\"\"Constructs a VAE.\"\"\"\n obs_encoder = codec.MLPObsEncoder(hparams)\n obs_decoder = codec.MLPObsDecoder(\n hparams,\n codec.BernoulliDecoder(squeeze_input=True),\n param_size=1)\n inputs = context_mod.EncodeObserved(obs_encoder)\n vae = vae_mod.make(hparams, obs_encoder, obs_decoder)\n return inputs, vae\n\n\ndef _observed(hparams):\n \"\"\"Test observations.\"\"\"\n return tf.zeros([util.batch_size(hparams), util.sequence_size(hparams)],\n dtype=tf.int32, name=\"test_obs\")\n\n\ndef _inf_tensors(hparams, inputs, vae):\n \"\"\"Simple inference graph.\"\"\"\n with tf.name_scope(\"inf\"):\n observed = _observed(hparams)\n latents, divs = vae.infer_latents(inputs, observed)\n log_probs = vae.evaluate(inputs, observed, latents=latents)\n elbo = tf.reduce_sum(log_probs - divs)\n return [observed, latents, divs, log_probs, elbo]\n\n\ndef _gen_tensors(hparams, inputs, vae):\n \"\"\"Samples observations and latent variables from the VAE.\"\"\"\n del hparams # Unused, just passed for consistency.\n with tf.name_scope(\"gen\"):\n generated, latents = vae.generate(inputs)\n return [generated, latents]\n\n\ndef _eval_tensors(hparams, inputs, vae):\n \"\"\"Calculates the log-probabilities of the observations.\"\"\"\n with tf.name_scope(\"eval\"):\n observed = _observed(hparams)\n log_probs = vae.evaluate(inputs, observed, samples=100)\n return [log_probs]\n\n\ndef _test_assertions(inf_tensors, gen_tensors, eval_tensors):\n \"\"\"Returns in-graph assertions for testing.\"\"\"\n observed, latents, divs, log_probs, elbo = inf_tensors\n generated, sampled_latents = gen_tensors\n eval_log_probs, = eval_tensors\n\n # For RNN, we return None from infer_latents as an optimization.\n if latents is None:\n latents = sampled_latents\n\n def _same_batch_and_sequence_size_asserts(t1, name1, t2, name2):\n return [\n tf.assert_equal(\n util.batch_size_from_nested_tensors(t1),\n util.batch_size_from_nested_tensors(t2),\n message=\"Batch: \" + name1 + \" vs \" + name2),\n tf.assert_equal(\n util.sequence_size_from_nested_tensors(t1),\n util.sequence_size_from_nested_tensors(t2),\n message=\"Steps: \" + name1 + \" vs \" + name2),\n ]\n\n def _same_shapes(nested1, nested2):\n return snt.nest.flatten(snt.nest.map(\n lambda t1, t2: tf.assert_equal(\n tf.shape(t1), tf.shape(t2),\n message=\"Shapes: \" + t1.name + \" vs \" + t2.name),\n nested1, nested2))\n\n def _all_same_batch_and_sequence_sizes(nested):\n batch_size = util.batch_size_from_nested_tensors(nested)\n sequence_size = util.sequence_size_from_nested_tensors(nested)\n return [\n tf.assert_equal(tf.shape(tensor)[0], batch_size,\n message=\"Batch: \" + tensor.name)\n for tensor in snt.nest.flatten(nested)\n ] + [\n tf.assert_equal(tf.shape(tensor)[1], sequence_size,\n message=\"Steps: \" + tensor.name)\n for tensor in snt.nest.flatten(nested)\n ]\n\n assertions = [\n tf.assert_non_negative(divs),\n tf.assert_non_positive(log_probs),\n ] + _same_shapes(\n (log_probs, log_probs, observed, latents),\n (divs, eval_log_probs, generated, sampled_latents)\n ) + _all_same_batch_and_sequence_sizes(\n (observed, latents, divs)\n ) + _all_same_batch_and_sequence_sizes(\n (generated, sampled_latents)\n )\n vars_ = tf.trainable_variables()\n grads = tf.gradients(-elbo, vars_)\n for (var, grad) in zip(vars_, grads):\n assertions.append(tf.check_numerics(grad, \"Gradient for \" + var.name))\n return assertions\n\n\ndef _all_tensors(hparams, inputs, vae):\n \"\"\"All 
tensors to evaluate in tests.\"\"\"\n gen_tensors = _gen_tensors(hparams, inputs, vae)\n inf_tensors = _inf_tensors(hparams, inputs, vae)\n eval_tensors = _eval_tensors(hparams, inputs, vae)\n assertions = _test_assertions(inf_tensors, gen_tensors, eval_tensors)\n all_tensors = inf_tensors + gen_tensors + eval_tensors + assertions\n return [x for x in all_tensors if x is not None]\n\n\nclass VAETest(tf.test.TestCase):\n\n def _test_vae(self, vae_type):\n \"\"\"Make sure that all tensors and assertions evaluate without error.\"\"\"\n hparams = hparams_mod.make_hparams(vae_type=vae_type)\n inputs, vae = _inputs_and_vae(hparams)\n tensors = _all_tensors(hparams, inputs, vae)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tensors)\n\n def test_iseq(self):\n self._test_vae(\"ISEQ\")\n\n def test_rnn(self):\n self._test_vae(\"RNN\")\n\n def test_srnn(self):\n self._test_vae(\"SRNN\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","repo_name":"google/vae-seq","sub_path":"vaeseq/vae/vae_test.py","file_name":"vae_test.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"53"} +{"seq_id":"9772327469","text":"# This example is not working in Spyder directly (F5 or Run)\r\n# Please type '!python turtle_runaway.py' on IPython console in your Spyder.\r\nimport turtle, random, time\r\n\r\nclass RunawayGame:\r\n def __init__(self, canvas, runner, chaser, catch_radius=50):\r\n self.canvas = canvas\r\n self.runner = runner\r\n self.chaser = chaser\r\n self.catch_radius2 = catch_radius**2\r\n self.start_time = time.time()\r\n self.point = 0\r\n\r\n # Initialize 'runner' and 'chaser'\r\n self.runner.shape('turtle')\r\n self.runner.color('blue')\r\n self.runner.penup()\r\n\r\n self.chaser.shape('turtle')\r\n self.chaser.color('red')\r\n self.chaser.penup()\r\n\r\n # Instantiate an another turtle for drawing\r\n self.drawer = turtle.RawTurtle(canvas)\r\n self.drawer.hideturtle()\r\n self.drawer.penup()\r\n\r\n def is_catched(self):\r\n p = self.runner.pos()\r\n q = self.chaser.pos()\r\n dx, dy = p[0] - q[0], p[1] - q[1]\r\n return dx**2 + dy**2 < self.catch_radius2\r\n\r\n def start(self, init_dist=400, ai_timer_msec=100):\r\n self.runner.setpos((-init_dist / 2, 0))\r\n self.runner.setheading(0)\r\n self.chaser.setpos((+init_dist / 2, 0))\r\n self.chaser.setheading(180)\r\n\r\n self.ai_timer_msec = ai_timer_msec\r\n self.canvas.ontimer(self.step, self.ai_timer_msec)\r\n\r\n def step(self):\r\n self.runner.run_ai(self.chaser.pos(), self.chaser.heading())\r\n self.chaser.run_ai(self.runner.pos(), self.runner.heading())\r\n\r\n #시간\r\n diff_time = time.time() - self.start_time\r\n diff_time = round(diff_time,1)\r\n \r\n is_catched = self.is_catched()\r\n \r\n #point\r\n if (is_catched):\r\n self.point += 1\r\n position = random.randint(-300,300)\r\n self.runner.goto(position,position)\r\n \r\n if(self.point > 5):\r\n self.runner.speed(8)\r\n self.runner.color('green')\r\n self.runner.shapesize(2,2,2)\r\n \r\n \r\n self.drawer.undo()\r\n self.drawer.penup()\r\n self.drawer.setpos(-300, 300)\r\n self.drawer.write(f'Is catched? 
{is_catched} Counting Time : {diff_time}\\nYour score is {self.point}',font=(\"\",20))\r\n\r\n self.canvas.ontimer(self.step, self.ai_timer_msec)\r\n\r\nclass ManualMover(turtle.RawTurtle):\r\n def __init__(self, canvas, step_move=10, step_turn=10):\r\n super().__init__(canvas)\r\n self.step_move = step_move\r\n self.step_turn = step_turn\r\n\r\n # Register event handlers\r\n canvas.onkeypress(lambda: self.forward(self.step_move), 'Up')\r\n canvas.onkeypress(lambda: self.backward(self.step_move), 'Down')\r\n canvas.onkeypress(lambda: self.left(self.step_turn), 'Left')\r\n canvas.onkeypress(lambda: self.right(self.step_turn), 'Right')\r\n canvas.listen()\r\n\r\n def run_ai(self, opp_pos, opp_heading):\r\n pass\r\n\r\nclass RandomMover(turtle.RawTurtle):\r\n def __init__(self, canvas, step_move=10, step_turn=10):\r\n super().__init__(canvas)\r\n self.step_move = step_move\r\n self.step_turn = step_turn\r\n\r\n def run_ai(self, opp_pos, opp_heading):\r\n mode = random.randint(0, 2)\r\n if mode == 0:\r\n self.forward(self.step_move)\r\n elif mode == 1:\r\n self.left(self.step_turn)\r\n elif mode == 2:\r\n self.right(self.step_turn)\r\n\r\nclass MyMover(turtle.RawTurtle):\r\n def __init__(self, canvas, step_move=10, step_turn=10):\r\n super().__init__(canvas)\r\n dist = random.randint(10,30)\r\n self.step_move = dist\r\n angle = random.randint(-90,90)\r\n self.step_turn = angle\r\n\r\n def run_ai(self, opp_pos, opp_heading): \r\n mode = random.randint(0, 2)\r\n if mode == 0:\r\n self.forward(self.step_move)\r\n elif mode == 1:\r\n self.left(self.step_turn)\r\n elif mode == 2:\r\n self.right(self.step_turn)\r\n\r\n\r\nif __name__ == '__main__':\r\n canvas = turtle.Screen()\r\n canvas.bgcolor('#DDFFFF')\r\n runner = MyMover(canvas)\r\n chaser = ManualMover(canvas)\r\n\r\n game = RunawayGame(canvas, runner, chaser)\r\n game.start()\r\n canvas.mainloop()","repo_name":"Seeooo-0/Turtle_game","sub_path":"turtle_runaway.py","file_name":"turtle_runaway.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2141063735","text":"import logging\n\nimport mdig.grass as grass\n\nfrom mdig.instance import DispersalInstance\nfrom mdig.replicate import Replicate\n\n\ndef create_filename(rep):\n if isinstance(rep, Replicate):\n fn = rep.get_map_name_base()\n elif isinstance(rep, DispersalInstance):\n i = rep\n fn = i.get_map_name_base()\n else:\n logging.getLogger('mdig').error(\"Unknown object to create filename for.\")\n fn = None\n return fn\n \n\nclass BaseOutput(object):\n\n def replicate_update(self, rep, t):\n return NotImplemented\n\n def create_filename(self, rep):\n return create_filename(rep)\n\n\nclass PngOutput(BaseOutput):\n\n def __init__(self, node):\n self.interval = 1\n self.show_year = False\n self.show_grid = False\n self.log = logging.getLogger(\"mdig.pngOutput\")\n \n self.log.debug(\"Initialised pngOutput\")\n \n for child in node:\n if child.tag == \"interval\":\n self.interval = int(child.text)\n elif child.tag == \"showTime\":\n if child.text.lower() == \"true\":\n self.show_year = True\n elif child.tag == \"showGrid\":\n if child.text.lower() == \"true\":\n self.show_year = True\n \n self.listeningTo = []\n \n def replicate_update(self, rep, t):\n g = grass.get_g()\n \n fn = None\n \n if rep.instance.experiment.interval_modulus(self.interval, t) == 0:\n fn = self.create_filename(rep)\n fn += \"_\" + repr(t) + \".png\"\n self.log.debug(\"Writing PNG %s\" % fn)\n \n g.set_output(fn, 
display=None)\n g.clear_monitor()\n \n current_region = rep.instance.experiment.get_region(rep.instance.r_id)\n \n if current_region.getBackgroundMap():\n g.paint_map(current_region.getBackgroundMap().get_map_filename())\n \n for l in rep.temp_map_names.keys():\n g.paint_map(rep.temp_map_names[l][0])\n \n if self.show_grid:\n g.paint_grid(5)\n if self.show_year:\n g.paint_year(t)\n \n self.last_output = t\n g.close_output()\n \n return [None, fn]\n \n\nclass RasterOutput(BaseOutput):\n _tag=\"RasterOutput\"\n \n def __init__(self, node):\n self.interval = 1\n self.lifestage = None\n self.log = logging.getLogger(\"mdig.RasterOutput\")\n \n for child in node:\n if child.tag == \"interval\":\n self.interval = int(child.text)\n elif child.tag == \"lifestage\":\n self.lifestage = child.text\n \n self.listeningTo = []\n \n def replicate_update(self, rep, t):\n g = grass.get_g()\n fn = None\n\n if rep.instance.experiment.interval_modulus(self.interval, t) == 0:\n for l in rep.temp_map_names.keys():\n if self.lifestage == l:\n fn = self.create_filename(rep)\n fn += \"_ls_\" + l + \"_\" + repr(t)\n self.log.debug(\"Writing raster %s\" % fn)\n g.copy_map(rep.temp_map_names[l][0], fn, True)\n self.last_output = t\n \n return [self.lifestage, fn]\n","repo_name":"ferrouswheel/mdig","sub_path":"mdig/mdig/outputformats.py","file_name":"outputformats.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"9210329936","text":"from pycocotools.coco import COCO\r\nimport requests\r\nimport os\r\nimport argparse\r\nfrom xml.etree import ElementTree\r\nfrom xml.etree.ElementTree import Element, SubElement\r\nfrom lxml import etree\r\nimport codecs\r\nimport cv2\r\nimport time\r\n\r\nXML_EXT = '.xml'\r\nENCODE_METHOD = 'utf-8'\r\n\r\nclass PascalVocWriter:\r\n\r\n def __init__(self, foldername, filename, imgSize,databaseSrc='detection_objects', localImgPath=None):\r\n self.foldername = foldername\r\n self.filename = filename\r\n self.databaseSrc = databaseSrc\r\n self.imgSize = imgSize\r\n self.boxlist = []\r\n self.localImgPath = localImgPath\r\n self.verified = False\r\n\r\n def prettify(self, elem):\r\n \"\"\"\r\n Return a pretty-printed XML string for the Element.\r\n \"\"\"\r\n rough_string = ElementTree.tostring(elem, 'utf8')\r\n root = etree.fromstring(rough_string)\r\n return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(\" \".encode(), \"\\t\".encode())\r\n\r\n def genXML(self):\r\n \"\"\"\r\n Return XML root\r\n \"\"\"\r\n # Check conditions\r\n if self.filename is None or \\\r\n self.foldername is None or \\\r\n self.imgSize is None:\r\n return None\r\n\r\n top = Element('annotation')\r\n if self.verified:\r\n top.set('verified', 'yes')\r\n\r\n folder = SubElement(top, 'folder')\r\n folder.text = self.foldername\r\n\r\n filename = SubElement(top, 'filename')\r\n filename.text = self.filename\r\n\r\n if self.localImgPath is not None:\r\n localImgPath = SubElement(top, 'path')\r\n localImgPath.text = self.localImgPath\r\n\r\n source = SubElement(top, 'source')\r\n database = SubElement(source, 'database')\r\n database.text = self.databaseSrc\r\n\r\n size_part = SubElement(top, 'size')\r\n width = SubElement(size_part, 'width')\r\n height = SubElement(size_part, 'height')\r\n depth = SubElement(size_part, 'depth')\r\n width.text = str(self.imgSize[1])\r\n height.text = str(self.imgSize[0])\r\n if len(self.imgSize) == 3:\r\n depth.text = str(self.imgSize[2])\r\n else:\r\n 
depth.text = '1'\r\n\r\n segmented = SubElement(top, 'segmented')\r\n segmented.text = '0'\r\n return top\r\n\r\n def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult):\r\n bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}\r\n bndbox['name'] = name\r\n bndbox['difficult'] = difficult\r\n self.boxlist.append(bndbox)\r\n\r\n def appendObjects(self, top):\r\n for each_object in self.boxlist:\r\n object_item = SubElement(top, 'object')\r\n name = SubElement(object_item, 'name')\r\n name.text = each_object['name']\r\n pose = SubElement(object_item, 'pose')\r\n pose.text = \"Unspecified\"\r\n truncated = SubElement(object_item, 'truncated')\r\n if int(each_object['ymax']) == int(self.imgSize[0]) or (int(each_object['ymin'])== 1):\r\n truncated.text = \"1\" # max == height or min\r\n elif (int(each_object['xmax'])==int(self.imgSize[1])) or (int(each_object['xmin'])== 1):\r\n truncated.text = \"1\" # max == width or min\r\n else:\r\n truncated.text = \"0\"\r\n difficult = SubElement(object_item, 'difficult')\r\n difficult.text = str( bool(each_object['difficult']) & 1 )\r\n bndbox = SubElement(object_item, 'bndbox')\r\n xmin = SubElement(bndbox, 'xmin')\r\n xmin.text = str(each_object['xmin'])\r\n ymin = SubElement(bndbox, 'ymin')\r\n ymin.text = str(each_object['ymin'])\r\n xmax = SubElement(bndbox, 'xmax')\r\n xmax.text = str(each_object['xmax'])\r\n ymax = SubElement(bndbox, 'ymax')\r\n ymax.text = str(each_object['ymax'])\r\n\r\n def save(self, targetFile=None):\r\n root = self.genXML()\r\n self.appendObjects(root)\r\n out_file = None\r\n if targetFile is None:\r\n out_file = codecs.open(\r\n self.filename + XML_EXT, 'w', encoding=ENCODE_METHOD)\r\n else:\r\n out_file = codecs.open(targetFile, 'w', encoding=ENCODE_METHOD)\r\n\r\n prettifyResult = self.prettify(root)\r\n out_file.write(prettifyResult.decode('utf8'))\r\n out_file.close()\r\n\r\ndef create_labelsfile(coco):\r\n cats = coco.loadCats(coco.getCatIds())\r\n classes = [cat['name'] for cat in cats]\r\n filename = \"obj.names\"\r\n with open(filename, 'w') as f:\r\n for cat in classes:\r\n f.write(\"%s\\n\" % cat)\r\n f.close()\r\n return classes\r\n\r\ndef create_pascalfromcoco(imagefolder_path, cocojson_path, output_directory):\r\n os.makedirs(output_directory, exist_ok=True)\r\n\r\n coco = COCO(cocojson_path)\r\n create_labelsfile(coco)\r\n imgIds = coco.getImgIds()\r\n for i in imgIds:\r\n img = coco.loadImgs(i)[0]\r\n imgFileName = img['file_name']\r\n imagePath = os.path.join(imagefolder_path, imgFileName)\r\n annotation_no_txt = os.path.splitext(imgFileName)[0]\r\n imgFolderName = os.path.basename(imagefolder_path)\r\n\r\n image = cv2.imread(imagePath)\r\n writer = PascalVocWriter(imgFolderName, imgFileName, image.shape, localImgPath=imagePath)\r\n\r\n annIds = coco.getAnnIds(imgIds=i)\r\n anns = coco.loadAnns(annIds)\r\n for ann in anns:\r\n label = coco.loadCats(ann[\"category_id\"])[0]['name']\r\n bbox = ann['bbox']\r\n xmin = bbox[0]\r\n ymin = bbox[1]\r\n x_max = bbox[2] + bbox[0]\r\n y_max = bbox[3] + bbox[1]\r\n\r\n writer.addBndBox(xmin, ymin, x_max, y_max, label, 0)\r\n\r\n writer.save(targetFile= output_directory+ \"/\" + annotation_no_txt + \".xml\")\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser('COCO annotations to ParscalVOC annotation converter helper')\r\n parser.add_argument('-p', '--path', type=str, required=True, help='(Absolute) path for folder containing image files')\r\n parser.add_argument('-j', '--json', type=str, required=True ,help='(Absolute) path to COCO annotated 
json file')\r\n parser.add_argument('-o', '--output', default=\"pascal\", type=str, help='Name of the directory to store PascalVOC VML files')\r\n\r\n args = parser.parse_args()\r\n return args\r\n\r\nif __name__ == '__main__':\r\n start = time.time()\r\n opt = get_args()\r\n imagefolder_path = opt.path\r\n cocojson_path = opt.json\r\n output_directory = opt.output\r\n create_pascalfromcoco(imagefolder_path, cocojson_path, output_directory)\r\n print(\"COCO annotation converted to PascalVOC annotations in: \" + output_directory+\" folder\")\r\n print(\"Conversion processed in \" + str(float(time.time()-start)) + \" seconds\")","repo_name":"alsontay/COCO-YOLO-Pascal-DetectionAnnotationConverter","sub_path":"coco2pascal.py","file_name":"coco2pascal.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32200476806","text":"import socket, argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--url\")\nparser.add_argument(\"--remotefile\")\nargs = parser.parse_args()\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nurl = args.url\nfilepath = args.remotefile\nimage_name = filepath.split(\"/\")[-1]\nget_url = \"\"\ni = 8\nif url[0:7] == \"http://\":\n i = 7\nget_url += url[i:(len(url)-1)]\ns.connect((get_url, 80))\n\nrequest = \"GET \" + filepath + \" HTTP/1.1\\r\\nHost: \" + get_url + \"\\r\\n\\r\\n\"\ns.send(request.encode())\nresponse = b''\nwhile True:\n\trespons = s.recv(2048)\n\tif not respons:\n\t\tbreak\n\tresponse += respons\ns.close()\nresponse = response.decode('iso-8859-1')\nif \"HTTP/1.1 200 OK\" in response:\n image_len = len(response.split('\\r\\n\\r\\n')[1].encode('iso-8859-1'))\n print(\"Kich thuoc file anh: \" + str(image_len) + \" bytes\")\n image_type = response.split(\"\\r\\n\\r\\n\")[-1]\n image_url = image_name\n open(image_url, \"wb\").write(image_type.encode('iso-8859-1'))\nelse:\n print(\"Khong ton tai file anh\")\n exit(0)\n","repo_name":"ayaka312/prog04_trag","sub_path":"httpdownload.py","file_name":"httpdownload.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36367902927","text":"import cv2\nimport torch, torchvision\nfrom efficientnet_pytorch import EfficientNet\nimport torchvision.transforms as T\nfrom torch.utils.data import Dataset, DataLoader\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom ultralytics import YOLO\nimport numpy as np\nimport os\n\nRC_MODEL_PATH = './models/RC.pt'\nLD_MODEL_PRE = './models/'\nLD_MODEL_SUF = '.pt'\nLANDMARKS_PRE = './landmarks/'\nLANDMARKS_SUF = '_outboxes.npy'\nCLASS_DICT_PATH = 'class_dict.npy'\n\ndef im_pipe(im):\n # assert im.width == 2592 and im.height == 1944\n proceed = check_zeros(im)\n if not proceed:\n return [], []\n region, conf = check_region(im)\n class_dict = np.load(CLASS_DICT_PATH, allow_pickle=True).item()\n class_dict_rev = dict((value, key) for key, value in class_dict.items())\n regions = [x for x in region]\n keys = [class_dict_rev[x] for x in regions]\n all_landmarks = []\n for key in keys:\n landmarks = landmark_detection(key, im) \n lonlat_landmarks = class2lonlat(key, landmarks)\n all_landmarks += lonlat_landmarks\n return keys, all_landmarks\n\ndef class2lonlat(key, landmarks):\n path = LANDMARKS_PRE + key + LANDMARKS_SUF\n lds = np.load(path)\n ld_lons = (lds[:,0] + lds[:,2])/2\n ld_lats = (lds[:,1] + lds[:,3])/2\n lonlat_landmarks = []\n for landmark in landmarks:\n 
ld_cls = landmark[2]\n ld_lon = ld_lons[ld_cls]\n ld_lat = ld_lats[ld_cls]\n lonlat_landmarks.append([landmark[0], landmark[1], (ld_lon, ld_lat),\n landmark[3]])\n return lonlat_landmarks\n \n\ndef landmark_detection(key, im):\n model = YOLO(LD_MODEL_PRE + key + LD_MODEL_SUF)\n sections = section_image(im)\n results = model(sections)\n box_list = []\n i = 0\n for result in results:\n boxes = result.boxes\n for box in boxes:\n x,y,w,h = box.xywh[0]\n if i == 1:\n x = x + 1280\n if i == 2:\n y = y + 632\n if i == 3:\n x = x + 1280\n y = y + 632\n cl = box.cls[0]\n conf = box.conf[0]\n box_list.append([x,y,cl,conf])\n i += 1\n box_list = torch.tensor(box_list)\n out_list = []\n if len(box_list) > 0:\n for i in range(model.model.model[22].dfl.conv.in_channels):\n idxs = torch.where(box_list[:,2] == i)\n if len(idxs[0]) > 0:\n cl_i = box_list[idxs]\n idx = cl_i[:,2].argmax()\n x_px = round(cl_i[idx][0].item())\n y_px = round(cl_i[idx][1].item())\n cl = int(cl_i[idx][2].item())\n conf = cl_i[idx][3].item()\n out_list.append([x_px, y_px, cl, conf])\n return out_list\n \n \n \ndef check_region(im):\n model_path = RC_MODEL_PATH\n model_dict = torch.load(model_path)\n model_state_dict = model_dict#model_dict['model_state_dict']\n model = EfficientNet.from_name('efficientnet-b0')\n model._fc = torch.nn.Linear(1280, 16, bias=True)\n model.load_state_dict(model_state_dict)\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n model.to(device) \n transform = T.Compose(\n [\n T.Resize(360),\n T.CenterCrop(480),\n T.ToTensor(),\n T.Normalize((0.09795914, 0.10673781, 0.11483832), \n (0.17475154, 0.16193452, 0.16501454))\n ]\n )\n x = transform(im).to(device)\n x.unsqueeze_(0)\n model.eval()\n scores = model(x)\n sigscores = torch.nn.Sigmoid()(scores)\n preds = (sigscores.data > 0.9) * 1\n pred = preds[0].cpu()\n pred = [x.item() for x in pred.nonzero()]\n conf = [sigscores[0][x].item() for x in pred]\n return pred, conf\n \ndef check_zeros(im):\n im_sum = np.sum(im, axis=2)\n if (im_sum == 0).sum() > (im.height*im.width)//10:\n return False\n else:\n return True\n\ndef section_image(im):\n s1 = im.crop((0,0,1312,1312))\n s2 = im.crop((1280,0,2592,1312))\n s3 = im.crop((0, 632, 1312, 1944))\n s4 = im.crop((1280, 632, 2592, 1944))\n return [s1,s2,s3,s4]\n\n\n\n\n\n\n\n# base_path = './12R_orb_bing'\n# frame_list = []\n# # image_path = 'orb3 (98).png'\n# image_paths = os.listdir(base_path)\n# all_landmarks = []\n# # nimp = []\n# # for image_path in image_paths:\n# # if image_path.endswith('png'):\n# # val = image_path[6:-5]\n# # # val = image_path[:-4]\n# # newname = str(val).zfill(3)\n# # os.rename(os.path.join(base_path, image_path),\n# # os.path.join(base_path, newname + '.png'))\n# # os.rename(os.path.join(base_path, image_path[:-4] + '.npy'),\n# # os.path.join(base_path, newname + '.npy'))\n# # nimp.append(newname + '.png')\n# for image_path in image_paths:\n# if image_path.endswith('png'):\n# keys, landmarks = run_pipeline(os.path.join(base_path,image_path))\n# im = cv2.imread(os.path.join(base_path,image_path)) \n# im_small = cv2.resize(im, (640,480)) \n# for landmark in landmarks:\n# x, y, lonlat, conf = landmark\n# x_sm = round(x/4.05)\n# y_sm = round(y/4.05)\n# lonlat_str = '(' + str(round(lonlat[0],4)) + ',' + str(round(lonlat[1],4)) + ')'\n# cv2.circle(im_small, (x_sm,y_sm), 5,\n# [0,0,255], thickness=-1)\n# cv2.putText(im_small,\n# lonlat_str + \" \" + str(round(conf,2)),\n# (x_sm, y_sm), 0, 0.5, [0,0,255])\n# s = ''.join(str(key) + ' ' for key in keys)\n# 
cv2.putText(im_small,\n# s,\n# (320, 50), 0, 1, [255, 200, 255], thickness=2)\n# frame_list.append(im_small)\n# all_landmarks.append([image_path, landmarks])\n# np.save('all_landmarks.npy', np.asanyarray(all_landmarks,dtype=object))\n# vid = cv2.VideoWriter('vid_bing_3.mp4',cv2.VideoWriter_fourcc(*'mp4v'),\n# 2.0, (640,480)) \n# for frame in frame_list:\n# vid.write(frame)\n \n \n ","repo_name":"kyledmccleary/vlr-project","sub_path":"code/im_pipe.py","file_name":"im_pipe.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6916674585","text":"import colorama\nimport os\nimport time\n\n\n# Объявляем цвета\n\ncolorama.init()\ngreen = colorama.Fore.GREEN\nred = colorama.Fore.RED\nblue = colorama.Fore.BLUE\nreset = colorama.Fore.RESET\n\n\n\nboard = list(range(1, 10))\n\n# print(board)\n\n# Параметры поля\ncells = 3\ndashes = 13\nspaces = 4\n\n# Счетчик ходов\ncounter = 0\n\nis_win = False\n\n# Условия победы\nwin_coords = (\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n)\n\n# Список токенов игроков\ntokens_list = [red + 'X' + reset, green + 'O' + reset]\n\n\n\ndef draw_board():\n '''Отрисовка поля в консоли'''\n\n os.system('cls||clear')\n\n print(green + \"\\n* Игра К��естики-нолики *\\n\"+reset)\n for i in range(3):\n shift = i * 3\n print(spaces * ' ', end=\"\")\n print('-' * dashes)\n print(spaces * ' ', end=\"\")\n print(\n f'| {board[0 + shift]} | {board[1 + shift]} | {board[2 + shift]} |')\n\n print(spaces * ' ', end=\"\")\n print('-' * dashes + \"\\n\")\n\n\n\n# Игровой цикл\n\nwhile not is_win:\n\n draw_board()\n\n if counter % 2 == 0:\n player_token = tokens_list[0]\n else:\n player_token = tokens_list[1]\n\n player_answer = input(f'{reset}Куда ставим {player_token}?: ')\n\n # Выполняем валидацию (проверку) ввода\n if not player_answer.isdigit():\n print(\"Ошибка ввода. Можно вводить только числа\")\n time.sleep(0.6)\n continue\n\n if not 0 < int(player_answer) < 10:\n print(\"Ошибка ввода. Введите число от 1 до 9\")\n time.sleep(0.6)\n continue \n\n\n # Делаем поправку \"-1\" для учета индексации с 0.\n player_answer = int(player_answer) - 1\n\n\n if str(board[player_answer]) not in tokens_list:\n board[player_answer] = player_token\n else:\n print(red + 'Эта ячейка уже занята!' + reset)\n time.sleep(0.6)\n continue\n\n counter += 1\n\n if counter > 4:\n for each in win_coords:\n # Проверяем наличие выигрышной комбинации (для X или O)\n if board[each[0]] == board[each[1]] == board[each[2]]:\n is_win = True\n break\n\n if is_win:\n draw_board()\n print(blue + f'* Победил {player_token + blue}! Поздравляем! 
*\\n')\n break\n\n if counter == 9:\n draw_board()\n print(blue + 'Победила дружба :)\\n')\n break\n","repo_name":"aleksmn/PythonLessons","sub_path":"tic-tac-toe/main_colors.py","file_name":"main_colors.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37177485265","text":"# Filename: leap_year.py\n# Author: Megan Black\n# Description: Program that says if the year entered by the user is a leap year or not.\n\n# Function to error check user input\ndef check():\n year = input(\"Please enter a year: \")\n while(1):\n if year.isdigit():\n if len(year) == 4:\n return int(year)\n else:\n year = input(\"You didn't enter a good value, enter a valid year: \")\n else:\n year = input(\"You didn't enter a good value, enter a valid year: \")\n \n\n\nyear = check()\nif year % 4 == 0 and year % 100 == 0 and year % 400 == 0:\n print(year + \" is a leap year\")\nelif year % 4 == 0 and year % 100 != 0:\n print(str(year) + \" is a leap year\")\nelse:\n print(str(year) + \" is not a leap year\") \n","repo_name":"megan-black/362-hw3","sub_path":"leap_year.py","file_name":"leap_year.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23770371885","text":"#!/usr/bin/env micropython\nfrom ev3dev2.motor import LargeMotor, OUTPUT_C, OUTPUT_B, SpeedPercent, MoveTank, follow_for_ms\nfrom ev3dev2.sensor import INPUT_2\nfrom ev3dev2.sensor.lego import TouchSensor, ColorSensor\nfrom ev3dev2.led import Leds\nimport time\n\n#m1 = LargeMotor(OUTPUT_C)\n#m2 = LargeMotor(OUTPUT_B)\n\ntank = MoveTank(OUTPUT_C, OUTPUT_B)\ntank.cs = ColorSensor()\n\nprint(\"Calibrate\")\ntime.sleep(2)\ntank.calibrate_white()\nprint(\"Done, following\")\n\n#tank_drive = MoveTank(OUTPUT_C, OUTPUT_B).follow_line(1, 1, 1, SpeedPercent(10))\n\ntry:\n # Follow the line for 4500ms\n tank.follow_line(\n kp=11.3, ki=0.05, kd=3.2,\n speed=SpeedPercent(30),\n ms=4500\n )\nexcept Exception as e:\n print(e)\n tank.stop()\n raise\n\n","repo_name":"markusleh/roboto","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"908976596","text":"number = input(\"insert 5-digits number: \")\n\n\ndef blabla(num):\n print(\"your number is: \"+num)\n newNum = str(num)\n sum=0\n print (\"Digits in your number are: \")\n for i in range(0, 5):\n print(newNum[i])\n sum += int(newNum[i])\n print(f\"Sum of digits: {sum}\")\n \nblabla(number)\n\n \n\n\n","repo_name":"lioralehrer/appleseeds","sub_path":"week9/python/ex-1-intro-python/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35228227933","text":"from gui_filter_window import Ui_MainWindow as main_gui\nfrom gui_spell_dialog import Ui_Dialog as spell_gui\n\nfrom PyQt5.QtWidgets import QMainWindow, QDialog, QApplication, QWidget\nfrom PyQt5 import QtCore, QtGui\n\n\"\"\"\nA GUI for searching through the DnD 5E spell list! \n\"\"\"\n\nimport sys, httplib2, os\nimport json\n\nclass SpellItem(QtGui.QStandardItem):\n \"\"\"\n These are the items added to the great big list of spells. 
\n\n Each carries an attribute (\"this_path\"), that specifies which json file correlates with this entry\n \"\"\"\n\n def __init__(self, value, this_path):\n super(SpellItem,self).__init__(value)\n self._this_path = this_path\n\n @property\n def path(self):\n return(self._this_path)\n\nclass main_window(QMainWindow):\n \"\"\"\n The main filter window\n \"\"\"\n\n def __init__(self,parent=None):\n QWidget.__init__(self, parent)\n self.ui = main_gui()\n self.ui.setupUi(self)\n\n # which api to use\n # create a machine for Interneting \n self.addr = 'http://www.dnd5eapi.co'\n self.http = httplib2.Http()\n \n # decide where to store the data\n if sys.platform=='linux':\n self.datadir = os.path.join(os.path.expandvars(\"$HOME\"), '.local', 'SuperSpell')\n elif sys.platform=='darwin': #macOS -- needs testing. I don't know if the .local folder exists in $HOME on macs \n self.datadir = os.path.join(os.path.expandvars(\"$HOME\"), 'SuperSpell')\n elif sys.platform=='win32' or sys.platform=='cygwin': #not actually sure if this works on cygwin\n self.datadir = os.path.join(os.path.expandvars(\"%AppData%\"), 'SuperSpell')\n else:\n raise NotImplementedError(\"{} not a supported platform\".format(sys.platform))\n\n\n # make sure the master spell list is there, download if it isn't\n self.spell_list = os.path.join( self.datadir, \"master.json\")\n self.check_spell_list()\n\n # set up the menu with all the spell results\n self.ui.spell_list_entry = QtGui.QStandardItemModel()\n self.ui.listView.setModel( self.ui.spell_list_entry )\n self.ui.listView.clicked[QtCore.QModelIndex].connect( self.spell_item_clicked )\n\n # update the gui whenever an entry is changed\n self.ui.comboBox.currentIndexChanged.connect( self.update )\n self.ui.comboBox_2.currentIndexChanged.connect( self.update )\n self.ui.lineEdit.textChanged.connect(self.update)\n self.ui.check_name.stateChanged.connect(self.update)\n self.ui.check_desc.stateChanged.connect(self.update)\n\n # load in all the spells! \n self.ui.check_name.setChecked(True)\n self.update()\n\n def get_number_append(self, number):\n \"\"\"\n So, like, this takes a stringized number\n\n And it returns the letters at the end... so, like, 1st 2nd 3rd etc... \n \"\"\"\n last = number[-1]\n if last=='1':\n return('st')\n if last=='2':\n return('nd')\n if last=='3':\n return('rd')\n else:\n return('th')\n\n def spell_item_clicked(self, index):\n \"\"\"\n This is called whenever a spell is clicked. It opens up the spell dialog and sets the relevant values to how they should be! 
\n \"\"\"\n item = self.ui.spell_list_entry.itemFromIndex( index )\n where = item.path\n\n f = open(where)\n data = json.load(f)\n f.close()\n\n dialog = spell_dialog(self)\n dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose )\n \n dialog.ui.label.setText( data[\"name\"] )\n fancy = str(data['level'])\n fancy += self.get_number_append(fancy)\n fancy += \" level \"+data['school']['name']\n dialog.ui.label_2.setText(fancy)\n \n dialog.ui.casting_time_val.setText(data['casting_time'])\n dialog.ui.range_text.setText(data['range'])\n \n comp = \"\"\n for compo in data['components']:\n comp+=compo+\" \"\n if 'material' in data:\n comp+= \": \" +data['material']\n\n dialog.ui.comp_text.setText(comp)\n dialog.ui.dur_text.setText(data['duration'])\n \n classes_block = \"\"\n for i in data['classes']:\n if classes_block!='':\n classes_block+=\", \"\n classes_block+= i['name']\n dialog.ui.class_text.setText(classes_block)\n dialog.ui.textBrowser.setText(\" \".join(data['desc']))\n\n dialog.exec_()\n\n\n def check_spell_list(self):\n \"\"\"\n Verifies that there is a local copy of the master spell list \n \"\"\"\n\n # make sure the data directory exists \n if os.path.exists(self.datadir):\n pass\n else:\n os.mkdir(self.datadir)\n\n if os.path.isfile( self.spell_list ):\n # file is already there, so that's good \n # TODO: check if file is gucci \n pass\n else:\n # download and write the json file \n content = self.http.request( self.addr + \"/api/spells\" )\n f = open( self.spell_list, 'wb')\n f.write(content[1])\n f.close()\n\n def check_spell(self, api_url):\n \"\"\"\n Checks if the spell for that api_url is there\n Downloads it if not \n \"\"\"\n file_name = \"spell_\" + api_url.split(\"/\")[-1] +\".json\"\n full_path = os.path.join( self.datadir, file_name)\n\n if not os.path.isfile( full_path ):\n f = open( full_path, 'wb')\n content = self.http.request( self.addr + api_url )\n f.write(content[1])\n f.close()\n\n return(full_path)\n\n\n def satisfies(self, spell_fl):\n \"\"\"\n Checks whether the json-spell-entry satisfies the current filters\n\n spell_fl is a very specifically formated dicitonary as loaded by json from the spell files \n \"\"\"\n level = str(spell_fl['level'])\n casting = [ i['name'] for i in spell_fl['classes'] ]\n \n search = self.ui.lineEdit.text().lower()\n\n if not (search==\"\"):\n if (self.ui.check_name.isChecked() or self.ui.check_desc.isChecked()):\n # check if what's there matche\n if self.ui.check_name.isChecked():\n sat_name = search in spell_fl['name'].lower()\n else:\n sat_name = False\n if self.ui.check_desc.isChecked():\n sat_desc = search in (\" \".join(spell_fl['desc'])).lower()\n else:\n sat_desc = False\n\n if not (sat_name or sat_desc):\n return(False)\n\n if not( self.ui.comboBox.currentText()==\"Any\" or self.ui.comboBox.currentText() in casting ):\n return(False)\n if not( self.ui.comboBox_2.currentText()==\"Any\" or self.ui.comboBox_2.currentText()==level):\n return(False)\n return(True)\n\n\n def update(self):\n \"\"\"\n Called whenever the filters have changed and we need to load up a new list of spells \n \"\"\"\n \n # pass over master spell list\n self.check_spell_list()\n f = open( self.spell_list, 'r')\n master_list = json.load(f)\n f.close()\n \n # clear the list of spells in the list \n self.ui.spell_list_entry.clear()\n\n # go over all the spells in the master list \n for entry in master_list['results']:\n # make sure the spell we're iterating over has a correlating data file, get its location\n path = self.check_spell( entry['url'])\n f = 
open(path,'r')\n data = json.load(f)\n f.close()\n\n # if it satisfies the filters, add it to the shown list \n if self.satisfies(data):\n # add it to the thing\n self.ui.spell_list_entry.appendRow(SpellItem(entry['name'],path))\n\n \n\nclass spell_dialog(QDialog):\n \"\"\"\n Very basic little dialog box for the spells when you click on them\n \"\"\"\n def __init__(self,parent):\n super(spell_dialog,self).__init__(parent)\n self.ui = spell_gui()\n self.ui.setupUi(self)\n\n\n# launch the app\napp = QApplication(sys.argv)\napp_instance = main_window()\nif __name__==\"__main__\":\n app_instance.show()\n sys.exit(app.exec_())\n","repo_name":"BenSmithers/SuperSpell","sub_path":"main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"32163239139","text":"import whisper\nimport json\n\ndef main(): \n print(\"loading model...\") \n model = whisper.load_model(\"base\")\n\n print(\"predicting...\")\n #result = model.transcribe(\"861b9e79-dbac-4a63-bfb7-c06e7f82e1c3.mp3\")\n result = model.transcribe(\"https://augie-public-test.s3.amazonaws.com/27a8d8a9-5624-4b2f-a425-4fb99a891779/ff23affc-9ab7-458f-9792-d7b5a19ef223/861b9e79-dbac-4a63-bfb7-c06e7f82e1c3.mp3\") \n\n json_object = json.dumps(result, indent=4)\n \n # write to disk\n with open(\"data.json\", \"w\") as outfile:\n outfile.write(json_object)\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"augxlabs/openai-whisper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"19348143696","text":"from ApiHelper import scoring_cats\nfrom statistics import stdev, mean\nfrom collections import defaultdict, OrderedDict\nimport operator\n\nutil_cats = [\"FGA\",\"FGM\",\"FTA\",\"FTM\"]\n#list of idealized values for calculation of relative stdev, ripping from bballmonster\n#they seem to help improve the rankings vs. 
using the league average in certain cases\nideal_FGP = 0.472\nideal_FTP = 0.797\n\nBLK_SCAL = 1.9\nREB_SCAL = 1.3\nFT_SCAL = 1.12\nFG_SCAL = 1.5\nTPM_SCAL = 1.25\nSTL_SCAL = 1.2\nPTS_SCAL = 2.4\nAST_SCAL = 1.15\nTOV_SCAL = 1.1\n\nclass MathHelper:\n \n pkey_total_score_index = {}\n player_total_score_index = {}\n \n pkey_pg_score_index = {}\n player_pg_score_index = {}\n \n def __init__(self, players):\n self.players = players\n \n #calculate the standard deviation for a given set of players / set of stats\n #uses all of a particular kind of category by default\n def all_player_stdev(self, players, stats=[], pergame=True):\n stdv_mean_map = defaultdict(list)\n for player in players : \n for stat in stats if stats else scoring_cats:\n if pergame :\n if stat in [\"FG%\",\"FT%\"]: stdv_mean_map[stat].append(player.get(stat))\n else: stdv_mean_map[stat].append(player.get_pg_stat(stat))\n else: stdv_mean_map[stat].append(player.get(stat))\n for stat, data in stdv_mean_map.items():\n stdv_mean_map[stat] = [round(stdev(data),5),round(mean(data),5)]\n return stdv_mean_map #for each stat, a tuple of the standard deviation and mean for the entire league\n \n def rel_stdev(self, player, stdv_mean_map, pergame=True):\n player_stdev = defaultdict(list)\n for stat, dev_mean in stdv_mean_map.items():\n if pergame:\n if stat == \"FG%\":\n base = (player.get(stat) - ideal_FGP)\n player_stdev[stat] = round(base * player.get_pg_stat(\"FGA\") * 1.7,5)\n elif stat == \"FT%\":\n base = (player.get(stat) - ideal_FTP)\n player_stdev[stat] = round(base * player.get_pg_stat(\"FTA\") * 3.2,5)\n elif stat == \"3PM\":\n base = (player.get_pg_stat(stat) - dev_mean[1]*1.8)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"PTS\":\n base = (player.get_pg_stat(stat) - dev_mean[1]*1.85)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"AST\":\n base = (player.get_pg_stat(stat) - dev_mean[1]*1.85)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"TOV\":\n base = (player.get_pg_stat(stat) - dev_mean[1]*1.75)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"STL\":\n base = (player.get_pg_stat(stat) - dev_mean[1]*1.7)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"BLK\":\n base = (player.get_pg_stat(stat) - dev_mean[1]*1.6)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n else :\n player_stdev[stat] = round((player.get_pg_stat(stat) - dev_mean[1]) / dev_mean[0], 3)\n else:\n if stat == \"FG%\":\n base = (player.get(stat) - ideal_FGP)\n player_stdev[stat] = round(base * player.get(\"FGA\") * dev_mean[0],5)\n elif stat == \"FT%\":\n base = (player.get(stat) - ideal_FTP)\n player_stdev[stat] = round(base * player.get(\"FTA\") * dev_mean[0],5)\n elif stat == \"3PM\":\n base = (player.get(stat) - dev_mean[1]*1.8) #the hard-coded scalars here slightly amplify the league average for this cat\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"PTS\":\n base = (player.get(stat) - dev_mean[1]*1.85) #a smaller scalar means the player's relative \"score\" will increase, and vice versa\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"AST\":\n base = (player.get(stat) - dev_mean[1]*1.85) #sort of a coarser control valve in comparison to the scalars applied at the end\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"TOV\":\n base = (player.get(stat) - dev_mean[1]*1.75) #and yes they're all totally arbitrary\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"STL\":\n 
base = (player.get(stat) - dev_mean[1]*1.7)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n elif stat == \"BLK\":\n base = (player.get(stat) - dev_mean[1]*1.6)\n player_stdev[stat] = round(base / dev_mean[0], 3)\n else :\n player_stdev[stat] = round((player.get(stat) - dev_mean[1]) / dev_mean[0], 5)\n if pergame: player.pg_stdev_map = player_stdev #map of player stat cats to player standard deviation relative to league per cat\n else: player.total_stdev_map = player_stdev\n \n def simple_eval_player(self, player, stats=[]):\n player.score = round(sum([val for key, val in player.stdev_map.items() if key not in util_cats]) - round(2*player.stdev_map[\"TOV\"],3) if \"TOV\" in stats else 0, 5)\n \n def weighted_eval_player(self, player, stdev_map, pergame=True, stats=[], weights={}):\n stats = stats if not stats else scoring_cats\n total_score = 0\n score_map = {}\n total = sum(x[0] for x in {k:v for k, v in stdev_map.items() if k not in util_cats}.values())\n for cat, stdev in player.pg_stdev_map.items() if pergame else player.total_stdev_map.items():\n #try and weigh the scalar by the relative deviation for this cat\n scalar = 1-(stdev_map[cat][0]/total)\n #these scalars are just a last step to \"polish\" the numbers to more closely resemble values for other ranking sites lol im such a hack\n if cat == \"BLK\" : scalar /= BLK_SCAL\n elif cat == \"REB\" : scalar /= REB_SCAL\n elif cat == \"3PM\" : scalar /= TPM_SCAL\n elif cat == \"AST\" : scalar /= AST_SCAL\n elif cat == \"FG%\" : scalar /= FG_SCAL\n elif cat == \"FT%\" : scalar /= FT_SCAL\n elif cat == \"STL\" : scalar *= STL_SCAL\n elif cat == \"TOV\" : scalar *= TOV_SCAL\n elif cat == \"PTS\" : scalar *= PTS_SCAL\n if weights:\n scalar *= weights[cat] if cat in weights else 1\n should_omit = (cat in weights and weights[cat] <= 0) or cat in util_cats #omit mades and attempts\n else :\n should_omit = cat in util_cats\n \n score = 0 if should_omit else stdev + (stdev * scalar)\n if cat == \"TOV\" : score = score*-1\n score_map[cat] = round(score/2,2)\n total_score += round(score/2,2)\n if pergame:\n player.pg_score_map = score_map\n player.pg_score_map[\"OVR\"] = round(total_score/10,3)\n else:\n player.total_score_map = score_map\n player.total_score_map[\"OVR\"] = round(total_score/10,3)\n \n \n #just sums the standard deviations for whatever stats are given\n #indexes: {player_key : score_map}, {player_key : total_score} hmm should I just combine them\n def rank_players(self, players, stats=[], weights={}, pergame=True):\n stats = scoring_cats if not stats else stats\n score_map = {}\n stdev_map = self.all_player_stdev(players, stats, pergame)\n for player in players:\n self.rel_stdev(player, stdev_map, pergame)\n self.weighted_eval_player(player, stdev_map, pergame, stats, weights)\n score_map[player] = player.get_score(pergame)\n score_map = OrderedDict(sorted(score_map.items(), key=operator.itemgetter(1), reverse=True))\n pkey_score_index = {k.get(\"PKEY\") : v for k, v in score_map.items()}\n if pergame:\n self.player_pg_score_index = score_map\n self.pkey_pg_score_index = pkey_score_index\n else:\n self.player_total_score_index = score_map\n self.pkey_total_score_index = pkey_score_index\n return score_map\n \n def rank_and_print_players(self, players, stats=[], weights={}, pergame=True, topRank=None):\n topRank = 50 if topRank is None else topRank\n score_map = self.rank_players(players, stats, weights, pergame)\n self.pretty_print_player_map(score_map, topRank, pergame) # return something eventually\n \n \n def 
pretty_print_player_map(self, player_map, top, pergame=True):\n rank=0\n for player in player_map.keys() :\n rank+=1\n player.pretty_print(player.pg_score_map if pergame else player.total_score_map, rank)\n # player.pretty_print_rank_name_only(rank)\n if rank == top: break","repo_name":"smadigan91/pyathono","sub_path":"bot/MathHelper.py","file_name":"MathHelper.py","file_ext":"py","file_size_in_byte":9189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12114599307","text":"from neo4j import GraphDatabase\nimport src.utils.docker as docker\nimport time\n\n\nclass Neo4j():\n def __init__(self, uri, user, password):\n self.uri = uri\n self.user = user\n self.password = password\n self._driver = None\n\n def connect(self):\n self._driver = GraphDatabase.driver(self.uri, auth=(self.user, self.password))\n\n def disconnect(self):\n self._driver.close()\n\n def wait_and_connect(self, max_retry=10):\n print('connecting to neo4j... [.', end='')\n count = 0\n while True:\n try:\n count += 1\n if count > max_retry:\n break\n self.connect()\n break\n except Exception:\n print('.', end='')\n time.sleep(2)\n print('] done')\n\n def restart_neo4j(self, max_retry=10):\n self.disconnect()\n docker.docker_compose('restart neo4j', './docker')\n self.wait_and_connect(max_retry=max_retry)\n\n def run_query(self, query, query_type='read'):\n \"\"\"\n\n :param query:\n :param type: 'no':no_transaction, 'read':transaction_read, 'write':transaction_write\n :return:\n \"\"\"\n with self._driver.session() as session:\n if query_type is 'no':\n result = session.run(query)\n elif query_type is 'read':\n result = session.read_transaction(self._run_query, query)\n elif query_type is 'write':\n result = session.write_transaction(self._run_query, query)\n else:\n raise ValueError(\"wrong parameter query_type\")\n\n return result\n\n @staticmethod\n def _run_query(tx, query):\n result = tx.run(query)\n return result\n\n def clean_db(self):\n self.run_query(\"MATCH (n) DETACH DELETE n\", query_type='no')\n\n def import_vertices_csv(self, csv_name):\n self.run_query(\"USING PERIODIC COMMIT 500 \"\n \"LOAD CSV WITH HEADERS FROM 'file:///freebase_data/%s.csv' AS line\t\"\n \"CREATE (:Entity{node_id:line.node_id, text:line.node_text})\" % (csv_name), query_type='no')\n \n def import_edges_csv(self, csv_name):\n self.run_query(\"USING PERIODIC COMMIT 500 \"\n \"LOAD CSV WITH HEADERS FROM 'file:///freebase_data/%s.csv' AS line \"\n \"MATCH (a:Entity), (b:Entity) \"\n \"WHERE a.node_id = line.source_node_id AND b.node_id = line.dest_node_id \"\n \"CREATE (a)-[r:Relation {edge_id:line.edge_type_id, text:line.edge_type_text}]->(b)\" % (csv_name), query_type='no')\n \n def create_index(self, label, attribute, wait_finish=True):\n print('creating graphdb index on {}:{}...'.format(label, attribute))\n self.run_query(\"CREATE INDEX ON :{}({})\".format(label, attribute), query_type='no')\n\n if wait_finish:\n self.run_query(\"CALL db.awaitIndexes(120)\", query_type='no')\n print(' done')\n\n def create_node(self, properties, label='CommonNode'):\n properties_query = \"{\"\n for property_key, property_value in properties.items():\n if isinstance(property_value, int):\n properties_query += '{}:{}, '.format(property_key, property_value)\n else:\n properties_query += '{}:\"{}\", '.format(property_key, property_value)\n properties_query = properties_query[:-2]\n properties_query += \"}\"\n\n query = \"CREATE(:%s %s)\" % (label, properties_query)\n self.run_query(query, 
query_type='no')\n\n def create_edge(self, source_node_id, dest_node_id, properties, label='CommonRelation'):\n properties_query = \"{\"\n for property_key, property_value in properties.items():\n if isinstance(property_value, int):\n properties_query += '{}:{}, '.format(property_key, property_value)\n else:\n properties_query += '{}:\"{}\", '.format(property_key, property_value)\n properties_query = properties_query[:-2]\n properties_query += \"}\"\n\n query = \"\"\"MATCH (a:CommonNode), (b:CommonNode)\n WHERE a.node_id = {} AND b.node_id = {}\n CREATE (a)-[r:{} {} ]->(b)\n RETURN r\"\"\".format(source_node_id, dest_node_id, label, properties_query)\n self.run_query(query, query_type='no')\n\n def get_max_node_property_value(self, edge_property):\n query = \"MATCH (n) RETURN MAX(n.{})\".format(edge_property)\n return self.run_query(query, query_type='no')\n\n def get_max_edge_property_value(self, edge_property):\n query = \"MATCH (n:CommonNode)-[r:CommonRelation]->(t:CommonNode) RETURN MAX(r.{})\".format(edge_property)\n return self.run_query(query, query_type='no')\n\n\n\n","repo_name":"andrea-pustina/Elicio","sub_path":"src/driver/neo4j.py","file_name":"neo4j.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22544092389","text":"import sys, os\n\n#Primera Caracteristica: Uno si la primera letra es Mayuscula, 0 lo contrario\ndef quitartab():\n\tfilas=open(\"salida.txt\", 'r')\n\tsalida=open(\"nueva_salida\", 'w')\n\ttexto=filas.read()\n\ttexto=texto.replace(\"\\t\",\" \")\n\tsalida.write(texto)\n\tsalida.close()\n\tfilas.close()\n\n#Segunda Caracteristica: Agrega el tipo\n#addSecondCaracter()\n\n\nif __name__ == '__main__':\n\tquitartab()","repo_name":"nelson-portilla/NER-conll2002-con-CRF","sub_path":"proyecto-conll-2002/modelo1/quitartab.py","file_name":"quitartab.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25435258619","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\n\n\n# hold_out法\ndef hold_out(st, df):\n test_rate = st.number_input(\n \"test_sizeを指定(0.0~1.0)\", min_value=0.0, max_value=1.0, value=0.25\n )\n is_shuffle = st.checkbox(\"データをシャッフルするか?\")\n if is_shuffle:\n seed_value = st.number_input(\n \"seed値を指定(0~100)\", min_value=0, max_value=100, value=10\n )\n else:\n seed_value = 0\n is_stf = st.checkbox(\"層化抽出するか?\")\n if is_stf:\n # \"species\"(目的変数)は適宜調整すること\n target_stf = df[\"species\"]\n else:\n target_stf = None\n\n # \"species\"(目的変数)は適宜調整すること\n X_train, X_test, y_train, y_test = train_test_split(\n df.drop(\"species\", axis=1),\n df[\"species\"],\n test_size=test_rate,\n shuffle=is_shuffle,\n random_state=seed_value,\n stratify=target_stf,\n )\n df_train = pd.concat([X_train, y_train], axis=1)\n df_test = pd.concat([X_test, y_test], axis=1)\n\n return df_train, df_test\n\n\n# K-分割交差検証\ndef kfold(st, df):\n n_splits = st.number_input(\"n_splitsを指定(2~10)\", min_value=2, max_value=10, value=5)\n is_shuffle = st.checkbox(\"データをシャッフルするか?\")\n if is_shuffle:\n seed_value = st.number_input(\n \"seed値を指定(0~100)\", min_value=0, max_value=100, value=10\n )\n else:\n seed_value = None\n\n kf = KFold(n_splits=n_splits, shuffle=is_shuffle, random_state=seed_value)\n df_train_list = []\n df_test_list = []\n for train_index, test_index in kf.split(df):\n df_train = df.iloc[train_index]\n df_test 
= df.iloc[test_index]\n df_train_list.append(df_train)\n df_test_list.append(df_test)\n\n n_fold = st.selectbox(\"何番目の分割データを見るか?\", range(len(df_train_list)))\n\n return df_train_list[n_fold], df_test_list[n_fold]\n\n\n# 層化K-分割交差検証\n","repo_name":"yonesuke0716/dataset_viz","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26268876277","text":"import argparse\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import linspace, pi, cos, split\nfrom scipy import signal\nfrom scipy.fftpack import fft, fftshift\n\n\n# Line colors in plots\ncolors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\n '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\n '#bcbd22', '#17becf']\n\n\n# Parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--pd', type=int, default=50,\n help='Enter the pulse duration in milliseconds.')\nargs = parser.parse_args()\n\nconstant = args.pd\n\n\n# Data\nsignal_start = -0.05\nsignal_end = 0.05\nsignal_amplitude = 1\nsamples_per_period = 1000\n\n# Calcualtions\npulse_duration = pow(10, -3) * constant\nsingal_samples = ((abs(signal_start)+signal_end)/0.01)*samples_per_period\nsignal_period = 2 * pulse_duration\nsignal_frequency = 1 / signal_period\n\ntime = linspace(signal_start, signal_end, singal_samples)\nrectangle_wave = signal_amplitude * \\\n signal.square(2*pi*signal_frequency*(time+(pulse_duration/2)))\n\nfourier_transform = fft(rectangle_wave)\nnormalized_fourier_transform = abs(fourier_transform) / len(time)\n\nif len(time) % 2 == 0:\n frequency_range = linspace(-len(time)/20, len(time)/20-1, len(time))\nelse:\n frequency_range = linspace(-(len(time)-1)/20, (len(time)-1)/20, len(time))\n\nduration = len(time)*(signal_period/samples_per_period)\nfrequency_range = frequency_range*(1/duration)\n\nif constant == 10:\n bra = -2500\n ket = 2500\n step = 500\nelif constant == 50:\n bra = -500\n ket = 500\n step = 100\n\n\n# Plots\nplt.figure(1, figsize=[6, 4], dpi=400)\nplt.plot(time, rectangle_wave, colors[0])\nplt.xlabel('$Time (sec)$')\nplt.xticks([i/100 for i in range(-5, 6)])\nplt.ylim(bottom=-2, top=2)\nplt.ylabel('$Frequency (Hz)$')\nplt.grid()\nplt.title('Rectangle wave')\nplt.tight_layout()\nplt.savefig(f'241_{str(constant)}.png')\n\nplt.figure(2, figsize=[6, 7], dpi=400)\nplt.subplot(2, 1, 1)\nplt.plot(frequency_range, normalized_fourier_transform, colors[0])\nplt.xlabel('$Frequency (Hz)$')\nplt.xticks([i for i in range(bra, ket+1, step)])\nplt.ylabel('$Amplitude (V)$')\nplt.yticks([i/100 for i in range(0, 21,5)])\nplt.ylim(bottom=0, top=0.2)\nplt.title('Two-sided spectrum')\nplt.grid()\nplt.tight_layout()\nplt.savefig(f'242_{str(constant)}.png')\nplt.subplot(2, 1, 2)\nplt.plot(split(frequency_range, 2)[-1],\n split(normalized_fourier_transform, 2)[-1], colors[0])\nplt.xlabel('$Frequency (Hz)$')\nplt.ylabel('$Amplitude (V)$')\nplt.yticks([i/100 for i in range(0, 21,5)])\nplt.ylim(bottom=0, top=0.21)\nplt.title('Single-sided spectrum')\nplt.grid()\nplt.tight_layout()\nplt.savefig(f'242_{str(constant)}.png')\n","repo_name":"mikezamayias/glowing-sniffle","sub_path":"Past Courses/Introduction to Telecommunications/lab/Task 2/task_2_d.py","file_name":"task_2_d.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28147631394","text":"# Author: Xiangtai Li\n# Implement Temporal 
Tube-Link.\n\nimport copy\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmdet.utils import get_root_logger\n\nfrom mmdet.core import INSTANCE_OFFSET, bbox2result, encode_mask_results\nfrom mmdet.core.visualization import imshow_det_bboxes\nfrom mmdet.models.builder import DETECTORS, build_backbone, build_head, build_neck\nfrom mmdet.models.detectors.single_stage import SingleStageDetector\nfrom datasets.datasets.kitti_step_dvps import kitt_step_train_id_2_cat_id\n\n\nfrom tracker.qdtrack.builder import build_tracker\nfrom tracker.unitrack.utils.mask import tensor_mask2box\n\n\ndef mapping_train_id_to_cat_id_segm(\n sem_seg_result,\n train_id_2_cat_id=kitt_step_train_id_2_cat_id\n):\n labels = np.unique(sem_seg_result)\n sem_seg_result_new = np.ones_like(sem_seg_result) * 255\n\n for i in labels:\n if i == 255:\n continue\n else:\n masks = sem_seg_result == i\n sem_seg_result_new[masks] = train_id_2_cat_id[i]\n\n return sem_seg_result_new\n\n\n@DETECTORS.register_module()\nclass Mask2FormerVideoCustomMatching(SingleStageDetector):\n r\"\"\"Implementation of `Per-Pixel Classification is\n NOT All You Need for Semantic Segmentation\n `_.\"\"\"\n\n async def async_simple_test(self, img, img_metas, **kwargs):\n raise NotImplementedError\n\n def __init__(self,\n backbone,\n neck=None,\n panoptic_head=None,\n panoptic_fusion_head=None,\n train_cfg=None,\n test_cfg=None,\n init_cfg=None,\n dataset=\"kitti-step\",\n tracker=None,\n **kwargs\n ):\n super(SingleStageDetector, self).__init__(init_cfg=init_cfg)\n self.backbone = build_backbone(backbone)\n if neck is not None:\n self.neck = build_neck(neck)\n\n panoptic_head_ = copy.deepcopy(panoptic_head)\n panoptic_head_.update(train_cfg=train_cfg)\n panoptic_head_.update(test_cfg=test_cfg)\n self.panoptic_head = build_head(panoptic_head_)\n\n panoptic_fusion_head_ = copy.deepcopy(panoptic_fusion_head)\n panoptic_fusion_head_.update(test_cfg=test_cfg)\n self.panoptic_fusion_head = build_head(panoptic_fusion_head_)\n\n self.num_things_classes = self.panoptic_head.num_things_classes\n self.num_stuff_classes = self.panoptic_head.num_stuff_classes\n self.num_classes = self.panoptic_head.num_classes\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.dataset = dataset\n\n # BaseDetector.show_result default for instance segmentation\n if self.num_stuff_classes > 0:\n self.show_result = self._show_pan_result\n\n self.logger = get_root_logger()\n self.logger.info(\"[Unified Video Segmentation] Using customized tube_link_vps segmentor.\")\n\n self.tracker = None\n self.tracker_cfg = tracker\n self.frame_id = -1\n\n def forward_dummy(self, img, img_metas):\n raise NotImplementedError\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_masks,\n gt_semantic_seg=None,\n gt_bboxes_ignore=None,\n *,\n ref_img=None,\n ref_img_metas=None,\n ref_gt_bboxes=None,\n ref_gt_labels=None,\n ref_gt_bboxes_ignore=None,\n ref_gt_masks=None,\n ref_gt_semantic_seg=None,\n ref_gt_instance_ids=None,\n **kargs):\n super(SingleStageDetector, self).forward_train(ref_img[0], ref_img_metas[0])\n # add batch_input_shape in img_metas\n # super(SingleStageDetector, self).forward_train(img, img_metas)\n # step 1 : extract volume img features\n bs, num_frame, three, h, w = ref_img.size() # (b,T,3,h,w)\n\n ref_video = ref_img.reshape((bs * num_frame, three, h, w))\n video_x = self.extract_feat(ref_video)\n # step 2: forward the volume features\n losses = self.panoptic_head.forward_train(\n video_x,\n ref_img_metas,\n ref_gt_bboxes,\n 
ref_gt_labels,\n ref_gt_masks,\n ref_gt_semantic_seg,\n ref_gt_instance_ids,\n gt_bboxes_ignore=None\n )\n\n return losses\n\n def init_memory(self):\n self.logger.info(\"[Unified Video Segmentation] Reset tracker.\")\n self.tracker = build_tracker(self.tracker_cfg)\n self.frame_id = 0\n\n def simple_test(self, img, img_metas, ref_img, ref_img_metas, **kwargs):\n\n bs, num_frame, three, h, w = ref_img.size()\n # (b, t, 3, h, w)\n ref_video = ref_img.reshape((bs * num_frame, three, h, w))\n video_x = self.extract_feat(ref_video)\n\n mask_cls_results, mask_pred_results, query_feats = \\\n self.panoptic_head.simple_test_with_query(video_x, ref_img_metas, **kwargs)\n # whether the first frame\n frame_id = ref_img_metas[0][0][\"img_id\"]\n is_first = frame_id == 0\n\n self.frame_id = frame_id + 1\n\n if is_first:\n self.init_memory()\n\n results = [[] for _ in range(bs)]\n\n # for each frame results\n for frame_id in range(num_frame):\n # fuse the final panoptic segmentation results.\n # assert self.panoptic_fusion_head.panoptic_mode in ['with_query', 'sort_with_query']\n result, query_lists = self.panoptic_fusion_head.simple_test(\n mask_cls_results,\n mask_pred_results[:, frame_id],\n [ref_img_metas[idx][frame_id] for idx in range(bs)],\n **kwargs\n )\n\n for i in range(len(result)):\n if 'pan_results' in result[i]:\n\n result[i]['pan_results'] = result[i]['pan_results'].detach(\n ).cpu().numpy()\n result[i]['query_list'] = query_lists[i]\n\n # add the sem_seg results for vps and vss evaluation\n if self.dataset == \"kitti-step\":\n result[i]['sem_results'] = mapping_train_id_to_cat_id_segm(result[i]['sem_results'],\n train_id_2_cat_id=kitt_step_train_id_2_cat_id)\n\n if 'ins_results' in result[i]:\n labels_per_image, bboxes, mask_pred_binary = result[i]['ins_results']\n # add the id in the box field.\n bboxes = torch.cat(\n [torch.arange(len(bboxes), dtype=bboxes.dtype, device=bboxes.device)[:, None] + 1,\n bboxes], dim=1)\n # sort by the score\n inds = torch.argsort(bboxes[:, -1], descending=True)\n labels_per_image = labels_per_image[inds][:10] # only keep final top-10 in each image\n bboxes = bboxes[inds][:10]\n mask_pred_binary = mask_pred_binary[inds][:10]\n bbox_results = bbox2result(bboxes, labels_per_image, self.num_things_classes)\n mask_results = [[] for _ in range(self.num_things_classes)]\n for j, label in enumerate(labels_per_image):\n mask = mask_pred_binary[j].detach().cpu().numpy()\n mask_results[label].append(mask)\n result[i]['ins_results'] = bbox_results, mask_results # default format as instance segmentation.\n\n results[i].append(result[i])\n\n if self.num_stuff_classes == 0:\n # HY : starting from here, the codes are for video instance segmentation.\n # THe codes for vis does not support vps anymore.\n for i in range(len(results)):\n for j in range(len(results[i])):\n bbox_results, mask_results = results[i][j]['ins_results']\n results[i][j]['ins_results'] = (bbox_results, encode_mask_results(mask_results))\n\n results = self.match_panoptic(results, query_feats.cpu(), mask_cls_results.cpu(), mask_pred_results.cpu())\n\n return results\n\n def match_panoptic(self, results, query_feats, mask_cls_results, mask_pred_results):\n # mask_cls_results, mask_pred_results are for bbox generation\n assert len(results) == 1\n assert self.frame_id != -1, \"Not initialized\"\n result = results[0]\n query_feats = query_feats[:, 0]\n clip_query_inds = None\n clip_pan_ids = None\n\n for frame in result:\n query_list = frame.pop('query_list')\n query_inds = torch.tensor([itm[0] for itm in 
query_list])\n pan_ids = torch.tensor([itm[1] for itm in query_list])\n if clip_query_inds is None and len(query_list) != 0:\n clip_query_inds = torch.unique(query_inds)\n clip_pan_ids = torch.unique(pan_ids)\n elif len(query_list) != 0:\n clip_query_inds = torch.unique(torch.cat([clip_query_inds, query_inds]))\n clip_pan_ids = torch.unique(torch.cat([clip_pan_ids, pan_ids]))\n else:\n clip_query_inds = None\n clip_pan_ids = None\n\n if clip_query_inds is None:\n # no thing founded\n return results\n\n clip_obj_feats = query_feats[clip_query_inds]\n clip_labels = clip_pan_ids % INSTANCE_OFFSET\n\n # get bbox for tracking\n mask_cls_results = mask_cls_results.softmax(dim=-1)\n frame_bbox = 0\n bbox = torch.zeros((len(clip_labels), 5), dtype=torch.float)\n bbox[:, 4] = torch.tensor(mask_cls_results[0][clip_query_inds][torch.arange(len(clip_labels)), clip_labels])\n tracking_masks = [torch.tensor(np.equal(result[frame_bbox]['pan_results'], itm), dtype=torch.float32)\n for itm in clip_pan_ids]\n tracking_masks = torch.stack(tracking_masks)\n bbox[:, :4] = torch.tensor(tensor_mask2box(tracking_masks))\n\n # TODO : how to design online tracker ? qdtrack ? simple matching\n bboxes, labels, new_ids = self.tracker.match(\n bboxes=bbox,\n labels=clip_labels,\n masks=tracking_masks.unsqueeze(1),\n track_feats=clip_obj_feats,\n frame_id=self.frame_id)\n\n new_ids += 1\n new_ids[new_ids == -1] = 0\n new_ids_len = len(new_ids)\n\n for result_per_frame in result:\n new_pan_map = copy.deepcopy(result_per_frame['pan_results'])\n for idx, clip_pan_id in enumerate(clip_pan_ids):\n clip_label = clip_pan_id % INSTANCE_OFFSET\n if idx < new_ids_len: # only keep the remaining tracked id.\n new_pan_map[result_per_frame['pan_results'] == clip_pan_id.item()] = \\\n clip_label.item() + new_ids[idx].item() * INSTANCE_OFFSET\n else:\n new_pan_map[result_per_frame['pan_results'] == clip_pan_id.item()] = \\\n clip_label.item() + 0 * INSTANCE_OFFSET\n result_per_frame['pan_results'] = new_pan_map\n\n results[0] = result\n\n return results\n\n def forward_test(self, imgs, img_metas, **kwargs):\n \"\"\"Currently video seg model does not support aug test.\n So we only add batch input shape here\n \"\"\"\n for img, img_meta in zip(imgs, img_metas):\n batch_size = len(img_metas)\n for img_id in range(batch_size):\n img_metas[img_id]['batch_input_shape'] = tuple(img.size()[-2:])\n for ref_img, ref_img_meta in zip(kwargs['ref_img'], kwargs['ref_img_metas']):\n batch_size = len(kwargs['ref_img_metas'])\n for batch_id in range(batch_size):\n num_frame = len(ref_img_meta)\n for frame_id in range(num_frame):\n kwargs['ref_img_metas'][batch_id][frame_id]['batch_input_shape'] = tuple(ref_img.size()[-2:])\n\n return self.simple_test(img=imgs, img_metas=img_metas, **kwargs)\n\n def aug_test(self, imgs, img_metas, **kwargs):\n raise NotImplementedError\n\n def _show_pan_result(self,\n img,\n result,\n score_thr=0.3,\n bbox_color=(72, 101, 241),\n text_color=(72, 101, 241),\n mask_color=None,\n thickness=2,\n font_size=13,\n win_name='',\n show=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `panoptic result` over `img`.\n\n Args:\n img (str or Tensor): The image to be displayed.\n result (dict): The results.\n\n score_thr (float, optional): Minimum score of bboxes to be shown.\n Default: 0.3.\n bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.\n The tuple of color should be in BGR order. Default: 'green'.\n text_color (str or tuple(int) or :obj:`Color`):Color of texts.\n The tuple of color should be in BGR order. 
Default: 'green'.\n mask_color (None or str or tuple(int) or :obj:`Color`):\n Color of masks. The tuple of color should be in BGR order.\n Default: None.\n thickness (int): Thickness of lines. Default: 2.\n font_size (int): Font size of texts. Default: 13.\n win_name (str): The window name. Default: ''.\n wait_time (float): Value of waitKey param.\n Default: 0.\n show (bool): Whether to show the image.\n Default: False.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n img (Tensor): Only if not `show` or `out_file`.\n \"\"\"\n img = mmcv.imread(img)\n img = img.copy()\n pan_results = result['pan_results']\n # keep objects ahead\n ids = np.unique(pan_results)[::-1]\n legal_indices = ids != self.num_classes # for VOID label\n ids = ids[legal_indices]\n labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n segms = (pan_results[None] == ids[:, None, None])\n\n # if out_file specified, do not show image in window\n if out_file is not None:\n show = False\n # draw bounding boxes\n img = imshow_det_bboxes(\n img,\n segms=segms,\n labels=labels,\n class_names=self.CLASSES,\n bbox_color=bbox_color,\n text_color=text_color,\n mask_color=mask_color,\n thickness=thickness,\n font_size=font_size,\n win_name=win_name,\n show=show,\n wait_time=wait_time,\n out_file=out_file)\n\n if not (show or out_file):\n return img","repo_name":"lxtGH/Tube-Link","sub_path":"models/video/tube_link_vps/mask2former_matching.py","file_name":"mask2former_matching.py","file_ext":"py","file_size_in_byte":15219,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"53"} +{"seq_id":"17067499829","text":"\"\"\"\na path is a list of arcs, segments or vertical paths\n\"\"\"\nimport os\nfrom math import floor\nfrom collections import defaultdict\nfrom sortedcontainers import SortedDict\nfrom jimn.utils.coordinates_hash import CoordinatesHash\nfrom jimn.vertical_path import VerticalPath\nfrom jimn.bounding_box import BoundingBox\nfrom jimn.point import Point\nfrom jimn.segment import Segment\nfrom jimn.arc import Arc\nfrom jimn.envelope import Envelope\nfrom jimn.elementary_path import ElementaryPath\nfrom jimn.displayable import tycat\n\n\nPATH_IMAGES = os.environ.get(\"JIMN_PATH_ANIMATION\")\nif PATH_IMAGES is None:\n PATH_IMAGES = 10\nelse:\n PATH_IMAGES = int(PATH_IMAGES)\n\n\nclass Path:\n \"\"\"\n a path is a list of arcs, segments or vertical paths\n \"\"\"\n def __init__(self, elementary_paths):\n self.elementary_paths = elementary_paths\n\n def copy(self):\n \"\"\"\n return a deepcopy of given path.\n \"\"\"\n return Path([p.copy() for p in self.elementary_paths])\n\n def get_first_point(self):\n \"\"\"\n return first point in path\n \"\"\"\n return self.elementary_paths[0].endpoints[0]\n\n def translate(self, translation):\n \"\"\"\n translates the whole path by a given translation vector.\n returns new path if obtained path is different and same path\n if translation vector is (0,0)\n \"\"\"\n if translation.is_almost(Point([0, 0])):\n return self\n return Path([p.translate(translation) for p in self.elementary_paths])\n\n def set_elementary_paths(self, elementary_paths):\n \"\"\"\n replace elementary paths in self by given paths\n \"\"\"\n self.elementary_paths = elementary_paths\n\n def get_bounding_box(self):\n \"\"\"\n min bounding box for whole path\n \"\"\"\n box = BoundingBox.empty_box(2)\n for path in self.elementary_paths:\n box.update(path.get_bounding_box())\n return box\n\n def save_svg_content(self, 
display):\n \"\"\"\n svg for tycat\n \"\"\"\n raise Exception(\"TODO\")\n self.get_first_point().save_svg_content(display)\n horizontal_paths = self.hash_horizontal_paths_by_height()\n for height in sorted(list(horizontal_paths.keys()), reverse=True):\n paths = horizontal_paths[height]\n for path in paths:\n path.save_svg_content(display)\n\n def hash_horizontal_paths_by_height(self):\n \"\"\"\n hash each horizontal path to its corresponding height\n \"\"\"\n horizontal_paths = defaultdict(list)\n current_height = 0\n for path in self.elementary_paths:\n if isinstance(path, VerticalPath):\n current_height = path.update_height(current_height)\n else:\n horizontal_paths[current_height].append(path)\n return horizontal_paths\n\n def length(self):\n \"\"\"\n return total length of path\n \"\"\"\n return sum([p.length() for p in self.elementary_paths])\n\n def append(self, elementary_path):\n \"\"\"\n add elementary path at end of path\n \"\"\"\n self.elementary_paths.append(elementary_path)\n\n def extend(self, paths):\n \"\"\"\n add elementary paths at end of path\n \"\"\"\n self.elementary_paths.extend(paths)\n\n def animate(self, milling_radius):\n \"\"\"\n step by step animation for carving the path with\n given milling radius.\n \"\"\"\n total_length = self.length()\n steps_length = total_length / PATH_IMAGES\n\n # all paths strings up to now (by height)\n current_strings = SortedDict()\n bounding_box = BoundingBox.empty_box(2) # bounding box up to now\n\n current_height = 0\n current_length = 0\n heights_hash = CoordinatesHash()\n\n for path in self.elementary_paths:\n new_length = current_length + path.length()\n\n if isinstance(path, VerticalPath):\n current_height = path.update_height(current_height)\n current_height = heights_hash.hash_coordinate(current_height)\n else:\n envelope = Envelope(path, milling_radius)\n if current_height not in current_strings:\n current_strings[current_height] = []\n current_strings[current_height].append(envelope.svg_content())\n bounding_box.update(envelope.get_bounding_box())\n\n if floor(current_length / steps_length) != \\\n floor(new_length / steps_length):\n tycat(*list(reversed(current_strings.values())), bounding_box=bounding_box)\n\n current_length = new_length\n\n def change_starting_point(self, position):\n \"\"\"\n change starting point of cycle path.\n takes a position as argument.\n \"\"\"\n start = self.elementary_paths[:position.index]\n end = self.elementary_paths[position.index+1:]\n new_cycle = []\n start_path = self.elementary_paths[position.index]\n before, after = start_path.split_around(position.point)\n\n if after is not None:\n new_cycle.append(after)\n new_cycle.extend(end)\n new_cycle.extend(start)\n if before is not None:\n new_cycle.append(before)\n self.elementary_paths = new_cycle\n\n def get_dot_label(self):\n \"\"\"\n label for displaying trees containing paths.\n (see Path_Tree class)\n \"\"\"\n return str(id(self))\n\n def get_points(self):\n \"\"\"\n iterates through all points.\n \"\"\"\n for path in self.elementary_paths:\n yield path.endpoints[0]\n\n def __hash__(self):\n return hash(id(self))\n\n def __str__(self):\n path_strings = [str(p) for p in self.elementary_paths]\n return \"Path([\\n \" + \",\\n \".join(path_strings) + \"\\n])\"\n\n\ndef __update_elementary_height(self, height):\n # pylint: disable=unused-argument\n \"\"\"\n height change by following elementary path (no change since horizontal).\n \"\"\"\n return height\n\nsetattr(ElementaryPath, \"update_height\", 
__update_elementary_height)\n","repo_name":"wagnerf42/Jimn","sub_path":"src/jimn/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73231684649","text":"import requests\nimport os\nfrom pydub.utils import mediainfo\n\n\ndef delete_file(file_path):\n try:\n os.remove(file_path)\n print(f\"Файл {file_path} успешно удален.\")\n except FileNotFoundError:\n print(f\"Файл {file_path} не найден.\")\n except Exception as e:\n print(f\"Произошла ошибка при удалении файла {file_path}: {e}\")\n\n\ndef save_audio_file(file_name, file_link):\n response = requests.get(file_link)\n # Сохраняем аудиофайл локально для последующей обработки\n sound_folder = 'sound'\n local_file_path = f\"{sound_folder}/{file_name}\"\n\n if not os.path.exists(sound_folder):\n # Если папки нет, создаем её\n os.makedirs(sound_folder)\n\n with open(local_file_path, 'wb') as local_file:\n local_file.write(response.content)\n return local_file_path\n\n\ndef save_text_to_file(file_name, trance_text):\n text_folder = 'output'\n local_file_path = f\"{text_folder}/{file_name.replace('.', '_') + '.txt'}\"\n\n if not os.path.exists(text_folder):\n # Если папки нет, создаем её\n os.makedirs(text_folder)\n # Сохраняем вывод в текстовый файл\n with open(local_file_path, \"w\", encoding=\"utf-8\") as output_file:\n output_file.write(trance_text)\n\n return local_file_path\n\n\ndef file_duration_check(file_path):\n audio_info = mediainfo(file_path)\n duration = float(audio_info['duration'])\n print(duration)\n return duration\n","repo_name":"VGCH/music_translate_AI_bot","sub_path":"file_process.py","file_name":"file_process.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25470820414","text":"\"\"\"Connection class.\n\nThis module contains the Connection class which represents two interfaces in separate components being connected with some arguments.\n\n\"\"\"\n\nclass Connection(object):\n \"\"\"A connection between interfaces\n \n This class holds data for a named connection. It also provides enforces rules on making connections between interfaces. 
For example, two interfaces with different number of ports cannot be connected.\n\n \"\"\"\n\n def __init__(self, from_interface, to_interface, name=None, **kwargs):\n \"\"\"Creates a connection.\n\n Args:\n from_interface (Interface): the interface to connect from\n to_interface (Interface): the interface to connect to\n \n \"\"\"\n\n if len(from_interface.get_ports()) != len(to_interface.get_ports()):\n raise Exception(\"Interfaces cannot be connected, number of ports is different\")\n\n self.kwargs = kwargs\n self.from_interface = from_interface\n self.to_interface = to_interface\n \n if name is None:\n self.name = \"{}->{}\".format(from_interface.get_name(), to_interface.get_name())\n else:\n self.name = name\n\n \n self.matched_ports = []\n for i in range(from_interface.get_ports()):\n self.matched_ports.append(from_interface[i], to_interface[i])\n \n def get_name(self):\n \"\"\"Returns the name of this connection.\n\n Args:\n None\n\n Returns:\n name as a string\n\n \"\"\"\n return self.name\n\n def get_port_matchings(self):\n \"\"\"Returns a list of tuples of ports that have been matched\n \n Args:\n None\n\n Returns:\n List of ordered pairs of ports\n\n \"\"\"\n return self.matched_ports\n\nif __name__ == \"__main__\":\n pass\n ","repo_name":"uclalemur/roco","sub_path":"roco/api/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13453250838","text":"from django.urls import path\nfrom . import views\n\n\napp_name = 'profiles'\n\nurlpatterns = [\n path('', views.Create.as_view(), name='create'),\n path('address/', views.AddressList.as_view(), name='address'),\n path(\n 'address/',\n views.AddressUpdate.as_view(), name='addressdetails'\n ),\n path('address//delete', views.AddressDelete.as_view(), name='delete'),\n path(\n 'address-create/',\n views.AddressCreate.as_view(), name='addresscreate'\n ),\n path('login/', views.Login.as_view(), name='login'),\n path('logout/', views.Logout.as_view(), name='logout'),\n path(\n 'address/address/', views.cep_update, name='cepupdate'\n ),\n path(\n 'address-create/address/', views.cep_update, name='cepupdate-newaddress'\n ),\n]\n","repo_name":"VenPoisen/project-Ecommerce-Django","sub_path":"profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"934165754","text":"import numpy as np\nfrom scipy.ndimage import gaussian_filter\nimport skimage.measure as skmeasure\nimport scipy.ndimage as ndi\nimport torch\n\n\nclass BBoxException(Exception):\n pass\n\n\ndef get_non_empty_min_max_idx_along_axis(mask, axis):\n \"\"\"\n Get non zero min and max index along given axis.\n :param mask:\n :param axis:\n :return:\n \"\"\"\n if isinstance(mask, torch.Tensor):\n # pytorch is the axis you want to get\n nonzero_idx = (mask != 0).nonzero()\n if len(nonzero_idx) == 0:\n min = max = 0\n else:\n max = nonzero_idx[:, axis].max()\n min = nonzero_idx[:, axis].min()\n elif isinstance(mask, np.ndarray):\n nonzero_idx = (mask != 0).nonzero()\n if len(nonzero_idx[axis]) == 0:\n min = max = 0\n else:\n max = nonzero_idx[axis].max()\n min = nonzero_idx[axis].min()\n else:\n raise BBoxException(\"Wrong type\")\n max += 1\n return min, max\n\n\ndef get_bbox_3d(mask):\n \"\"\" Input : [D, H, W] , output : ((min_x, max_x), (min_y, max_y), (min_z, max_z))\n Return non zero value's min and max index for 
a mask\n If no value exists, an array of all zero returns\n :param mask: numpy of [D, H, W]\n :return:\n \"\"\"\n assert len(mask.shape) == 3\n min_z, max_z = get_non_empty_min_max_idx_along_axis(mask, 0)\n min_y, max_y = get_non_empty_min_max_idx_along_axis(mask, 1)\n min_x, max_x = get_non_empty_min_max_idx_along_axis(mask, 2)\n\n return np.array(((min_x, max_x + 1),\n (min_y, max_y + 1),\n (min_z, max_z + 1)))\n\n\ndef pad_bbox(bbox, min_bbox, max_img):\n \"\"\"\n :param bbox: ndarray ((min_x, max_x), (min_y, max_y), (min_z, max_z))\n :param min_bbox: list (d, h, w)\n :param max_img: list (d, h, w), image shape\n :return:\n \"\"\"\n min_bbox = list(min_bbox)\n change_min_bbox = False\n for i, (min_x, max_img_x) in enumerate(zip(min_bbox, max_img)):\n if min_x > max_img_x:\n min_bbox[i] = max_img[i]\n change_min_bbox = True\n\n if change_min_bbox:\n print('min box {} is larger than max image size {}'.format(min_bbox, max_img))\n\n # z first\n bbox = np.array(bbox)[::-1, :]\n result_bbox = []\n for (min_x, max_x), min_size, max_size in zip(bbox, min_bbox, max_img):\n width = max_x - min_x\n if width < min_size:\n padding = min_size - width\n padding_left = padding // 2\n padding_right = padding - padding_left\n\n # find a best place to pad img\n while True:\n if (min_x - padding_left) < 0 and (max_x + padding_right) > max_size:\n # pad to img size\n padding_left = min_x\n padding_right = max_size - max_x\n break\n elif (min_x - padding_left) < 0:\n # right shift pad\n padding_left -= 1\n padding_right += 1\n elif (max_x + padding_right) > max_size:\n # left shift pad\n padding_left += 1\n padding_right -= 1\n else:\n # no operation to pad\n break\n min_x -= padding_left\n max_x += padding_right\n result_bbox.append((min_x, max_x))\n # x first\n return np.array(result_bbox)[::-1, :]\n\n\ndef expand_bbox(img, bbox, expand_size, min_crop_size):\n img_z, img_y, img_x = img.shape\n\n # expand [[154 371 15] [439 499 68]]\n bbox[:, 0] -= expand_size[::-1] # min (x, y, z)\n bbox[:, 1] += expand_size[::-1] # max (x, y, z)\n # prevent out of range\n bbox[0, :] = np.clip(bbox[0, :], 0, img_x)\n bbox[1, :] = np.clip(bbox[1, :], 0, img_y)\n bbox[2, :] = np.clip(bbox[2, :], 0, img_z)\n\n # expand, then pad\n bbox = pad_bbox(bbox, min_crop_size, img.shape)\n return bbox\n\n\n\ndef crop_img(img, bbox, min_crop_size):\n \"\"\" Crop image with expanded bbox.\n :param img: ndarray (D, H, W)\n :param bbox: ndarray ((min_x, max_x), (min_y, max_y), (min_z, max_z))\n :param min_crop_size: list (d, h ,w)\n :return:\n \"\"\"\n\n # extract coords\n (min_x, max_x), (min_y, max_y), (min_z, max_z) = bbox\n\n # crop\n cropped_img = img[min_z:max_z, min_y:max_y, min_x:max_x]\n\n padding = []\n for i, (cropped_width, min_width) in enumerate(zip(cropped_img.shape, min_crop_size)):\n if cropped_width < min_width:\n padding.append((0, min_width - cropped_width))\n else:\n padding.append((0, 0))\n padding = np.array(padding).astype(np.int)\n cropped_img = np.pad(cropped_img, padding, mode='constant', constant_values=0)\n return cropped_img\n\n\nfrom dipy.align.reslice import reslice\ndef resample_volume_nib(np_data, affine, spacing_old, spacing_new=(1., 1., 1.), mask=False):\n \"\"\"Resample 3D image(trilinear) and mask(nearest) to (1., 1., 1.) 
spacing.\n It seems works better than the method above, seen from generated image.\n\n :param np_data: ndarray, channel first\n :param affine: the affine returned from nibabel\n :param spacing_old: current spacing\n :param spacing_new: target spacing, default is (1., 1., 1.)\n :param mask: if set True, use nearest instead of trilinear interpolation\n :return:\n resampled data : ndarray\n affine : the modified affine.\n \"\"\"\n if not mask:\n # trilinear\n resampled_data, affine = reslice(np_data, affine, spacing_old, spacing_new, order=1)\n else:\n # nearest\n resampled_data, affine = reslice(np_data, affine, spacing_old, spacing_new, order=0)\n return resampled_data, affine","repo_name":"koncle/CoraNet","sub_path":"preprocess/preprocess_utils.py","file_name":"preprocess_utils.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"53"} +{"seq_id":"11988873978","text":"class SerVivo:\n\n def respirar(self):\n print(\"Respirando\")\n\nclass Animal:\n\n def __init__(self, edad, nombre, numero_patas=None):\n self.edad = edad\n self.numero_patas = numero_patas or 4\n\n\n def comer(self):\n if self.nombre == '':\n raise ValueError(\"El objeto no tiene definido un nombre\")\n print(f\"Comiendo ({self}): {self.nombre}\")\n\n\nclass Perro(Animal, SerVivo):\n numero_patas = 4\n\n def __init__(self, nombre, raza='', color='', edad=0):\n super().__init__(edad, nombre, numero_patas=self.numero_patas)\n print(f\"Se creo el objeto {self}\")\n self.nombre = nombre\n self.raza = raza\n self.color = color\n\n def ladrar(self):\n print(\"Wuau! (\", self.nombre, \")\")\n print(f\"Wuau! ({self.nombre})\")\n print(\"Wuau! ({0})\".format(self.nombre))\n print(\"Wuau! (\" + self.nombre + ')')\n print(\"Wuau! 
(%s)\" % self.nombre + ')')\n\n def __del__(self):\n print(f\"Se elimina el objeto {self}\")\n\n\nperro_1 = Perro('Toby', raza='Pastor Aleman', color='Negro', edad=5)\nprint(perro_1, type(perro_1), perro_1.nombre, perro_1.raza, perro_1.color)\nperro_1.ladrar()\nperro_1.comer()\nperro_1.respirar()\n","repo_name":"jesusmares82-hub/contador_de_palabras_python","sub_path":"Ejemplos/poo.py","file_name":"poo.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23723657394","text":"def ordem_inversa(lista):\n cont = 0\n cont_1 = -1\n for a in range(len(lista) // 2):\n x = lista[cont]\n lista[cont] = lista[cont_1]\n lista[cont_1] = x\n cont += 1\n cont_1 -= 1\n return lista\n\n\nsequencia = []\n\nwhile True:\n num = int(input(\"Digite um número: \"))\n if num != 0:\n sequencia.append(num)\n else:\n break\n\nsequencia_invertida = ordem_inversa(sequencia)\n\nfor n in sequencia_invertida:\n print(n)\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"ciencia_da_computacao_com_python_coursera/parte_01/pratica07_exercicio02.py","file_name":"pratica07_exercicio02.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16008067644","text":"\"\"\" instruction:\nNOT USING - same size as 5 folders, plus we'll need to support postal codes with letters of the alphabet.\nbefore running the script:\n/community/data/zip has to be created, or else the script will fail.\nthen run:\npython zipgraph_2folders.py ../zipcodes/zcta/zip_to_zcta_2018.csv ../../../community/data/zip/\n\n \n\n\nthe order CANNOT be switched, or else the script will fail.\n\"\"\"\nimport os\nimport pandas as pd\nimport sys\nimport subprocess\ndef getZipCodeList(fileDir):\n df=pd.read_csv(fileDir,sep=',')\n zipcode_list=[z for z in df['ZIP_CODE']]\n return zipcode_list\ndef zipCode_to_String(zipcode_list): #convert zip int format to string format\n zipcode_string_list=[]\n for z in zipcode_list:\n if len(str(z))==3:\n z=\"00\"+str(z)\n zipcode_string_list.append(z)\n if len(str(z))==4:\n z=\"0\"+str(z)\n zipcode_string_list.append(z)\n else:\n z=str(z)\n zipcode_string_list.append(z)\n return zipcode_string_list\ndef createPath(mainPath,zip_list):\n zip_dict={} #key is zipcode Int), value is the path of where the output file is saved\n zip_string_list=zipCode_to_String(zip_list)\n #print(zip_string_list)\n #to create zip folder like 0/0/3/0/2\n \"\"\" for i in range(len(zip_list)):\n zip_string=zip_string_list[i]\n subPath=\"\"\n for c in zip_string: #loop through each character\n subPath=os.path.join(subPath,c)\n fullPath=os.path.join(mainPath,subPath)\n zip_dict[zip_list[i]]=fullPath \"\"\"\n #to create zip folder like 30/318\n for idx,zip in enumerate(zip_list):\n zip_string=zip_string_list[idx]\n p1,p2=zip_string[0:2],zip_string[2:]\n fullPath=os.path.join(mainPath,p1,p2)\n zip_dict[zip]=fullPath\n return zip_dict\ndef createMDfile(mainPath,fileDir,zip_list):\n zip_dict=createPath(mainPath,zip_list)\n df=pd.read_csv(fileDir,sep=',')\n for row in df.itertuples():\n zip_code=row.ZIP_CODE\n if len(str(zip_code))==3:\n zipcode_=\"00\"+str(zip_code)\n if len(str(zip_code))==4:\n zipcode_=\"0\"+str(zip_code)\n else:\n zipcode_=str(zip_code)\n zcta=row.ZCTA\n outputDir=zip_dict[zip_code]\n if os.path.exists(outputDir):\n subprocess.call([\"rm\",\"-dr\",outputDir])\n command=\"mkdir -p {}\".format(outputDir)\n subprocess.call(command, 
shell=True)\n outFile=os.path.join(outputDir,\"zipinfo.md\")\n with open(outFile,\"w\") as fh:\n fh.write(\"# {}, {}, {} \\n\".format(row.PO_NAME,row.STATE,zipcode_))\n fh.write(\"ZCTA {} \\n\".format(row.ZCTA))\n fh.write(\"\".format(row.ZIP_TYPE)) \ndef main():\n fileDir=os.path.abspath(sys.argv[1])\n mainPath=os.path.abspath(sys.argv[2])\n zipcode_list=getZipCodeList(fileDir)\n createMDfile(mainPath,fileDir,zipcode_list)\nif __name__ == \"__main__\":\n main()","repo_name":"ModelEarth/community","sub_path":"prep/all/zipgraph_2folders.py","file_name":"zipgraph_2folders.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72472772967","text":"import os.path\n\nimport torch\nfrom torch import nn\nimport yaml\n\nfrom .model import Transformer\n\n\ndef load_config(save_dir):\n with open(os.path.join(save_dir, \"config.yaml\")) as fh:\n return yaml.unsafe_load(fh)\n\n\ndef load_snapshot(model: nn.Module, save_dir: str):\n state = torch.load(os.path.join(save_dir, \"model.pt\"), map_location=\"cpu\")\n model.load_state_dict(state)\n\n\ndef load_model(save_dir, device=\"cpu\"):\n config = load_config(save_dir)\n model = Transformer(config, device=device)\n load_snapshot(model, save_dir)\n return model\n\n\ndef save_model(model: Transformer, save_dir: str):\n os.makedirs(save_dir, exist_ok=True)\n\n torch.save(\n model.state_dict(),\n os.path.join(save_dir, \"model.pt\"),\n )\n with open(os.path.join(save_dir, \"config.yaml\"), \"w\") as fh:\n yaml.dump(model.cfg, fh)\n","repo_name":"nelhage/taktician","sub_path":"python/xformer/loading.py","file_name":"loading.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"53"} +{"seq_id":"18133592558","text":"#!/usr/bin/env python3\nimport os,json,time,sys,hashlib,datetime\nfrom subprocess import Popen, PIPE\n\nSETTINGS_FILE = \"settings.json\"\n\n## GLOBALS - Defined in init_settings() ##\nAPI_KEYS_FILE = \"\"\nMESSAGES_FILE = \"\"\n\ndef main():\n init_settings()\n title()\n menu()\n\ndef title():\n print(\" ▄ ▄███▄ █▄▄▄▄ █▀▄▀█ ▄█ █ █ ▄█ ████▄ ▄ \")\n print(\" █ █▀ ▀ █ ▄▀ █ █ █ ██ █ █ ██ █ █ █ \")\n print(\"█ █ ██▄▄ █▀▀▌ █ ▄ █ ██ █ █ ██ █ █ ██ █ \")\n print(\" █ █ █▄ ▄▀ █ █ █ █ ▐█ ███▄ ███▄ ▐█ ▀████ █ █ █ \")\n print(\" █ █ ▀███▀ █ █ ▐ ▀ ▀ ▐ █ █ █ \")\n print(\" █▐ ▀ ▀ █ ██ \")\n print(\" ▐ \")\n print(\" Created by Sean McElhare | github.com/thecrimsoncoder \")\n print(\"\\n\")\n\ndef menu():\n print(\"+-------------------------------------------------------+\")\n print(\"| 1. Start the Server |\")\n print(\"| 2. Stop the Server |\")\n print(\"| 3. List API Keys |\")\n print(\"| 4. Generate Client API Key |\")\n print(\"| 5. Activate Client API Key |\")\n print(\"| 6. Deactivate Client API Key |\")\n print(\"| 7. Remove an API Key |\")\n print(\"| 8. Clean up inactive API Keys (active : False) |\")\n print(\"| 9. Wipe Messages Database |\")\n print(\"| 10. 
Exit |\")\n print(\"+-------------------------------------------------------+\")\n print(\"\\n\")\n opt = input(\"[OPTION]: \")\n\n if opt == \"1\":\n if server_handler(\"start\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"2\":\n if server_handler(\"stop\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"3\":\n if api_key_handler(\"list\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"4\":\n if api_key_handler(\"create\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"5\":\n if api_key_handler(\"activate\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"6\":\n if api_key_handler(\"deactivate\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"7\":\n if api_key_handler(\"destroy\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"8\":\n if api_key_handler(\"cleanup\") == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"9\":\n if wipe_messages() == True:\n menu()\n else:\n print(\"\") ## TODO\n elif opt == \"10\":\n print(\"\") ## TODO\n sys.exit(0)\n else:\n print(\"\") ## TODO\n time.sleep(2)\n menu()\n\ndef server_handler(server_cmd):\n\n if str(server_cmd) == \"start\":\n cmd = \"\" ## TODO\n\n elif str(server_cmd) == \"stop\":\n cmd = \"\" ## TODO\n\n try:\n server_handler = Popen(cmd,stdout=\"PIPE\",stderr=\"PIPE\",shell=True)\n (cmd_output,cmd_err) = server_handler.communicate()\n cmd_rc = server_handler.poll()\n\n if cmd_rc == 0:\n print(\"[INFO] \" + str(cmd_output))\n return True\n else:\n print(str(cmd_err))\n return False\n except Exception as err:\n print(err)\n return False\n\ndef api_key_handler(cmd):\n if cmd == \"list\":\n try:\n with open(API_KEYS_FILE, \"r\") as api_key_database:\n api_keys = json.load(api_key_database)\n api_key_database.close()\n print(\"---------------------------------------------------------------------------------------------\")\n for api_key in api_keys:\n print(\"Key: \" + str(api_key['api_key']))\n print(\"Description: \" + str(api_key['description']))\n print(\"Active: \" + str(api_key['active']))\n print(\"---------------------------------------------------------------------------------------------\")\n return True\n except Exception as err:\n print(err)\n return False\n\n elif cmd == \"create\":\n try:\n with open(\"/etc/machine-id\",\"r\") as file_handler:\n system_uuid = file_handler.read() ## Grabbing UUID of system set at install time\n file_handler.close()\n\n encoded_key = (str(time.time())+str(system_uuid)).encode()\n api_token = hashlib.sha256(encoded_key).hexdigest()\n api_token_description = input(\"API KEY DESCRIPTION: \")\n\n if len(api_token) == 64 and api_token_description != \"\":\n api_key = {\n \"api_key\" : str(api_token),\n \"active\" : False,\n \"description\" : str(api_token_description)\n }\n\n with open(API_KEYS_FILE,\"r\") as api_key_database:\n api_keys = json.load(api_key_database)\n api_key_database.close()\n api_keys.append(api_key)\n\n with open(API_KEYS_FILE,\"w\") as api_key_database:\n json.dump(api_keys,api_key_database, indent=4, separators=(',', ' : '))\n api_key_database.close()\n\n print(\"\\n\")\n print(\"---------------------------------------------------------------------------------------------\")\n print(\"[API_KEY] \" + str(api_token) + \" [API_KEY]\")\n print(\"---------------------------------------------------------------------------------------------\")\n print(\"[INFO] Copy this key and distribute to a client\")\n print(\"[INFO] This key will need to be ACTIVATED 
before use!\")\n print(\"\\n\")\n return True\n \n else:\n print(\"[INFO] Please enter a description\")\n time.sleep(2)\n api_key_handler(\"create\")\n\n except Exception as err:\n print(err)\n return False\n\n elif cmd == \"activate\":\n try:\n with open(API_KEYS_FILE, \"r\") as api_key_database:\n api_keys = json.load(api_key_database)\n api_key_database.close()\n print(\"---------------------------------------------------------------------------------------------\")\n for api_key in api_keys:\n print(\"Key: \" + str(api_key['api_key']))\n print(\"Description: \" + str(api_key['description']))\n print(\"Active: \" + str(api_key['active']))\n print(\"---------------------------------------------------------------------------------------------\")\n activate_key = input(\"Paste the key you would like to activate: \")\n if len(activate_key) == 64 and helper_search_for_key(str(activate_key)) == True:\n for api_key in api_keys:\n if api_key['api_key'] == str(activate_key):\n api_key['active'] = True\n break\n with open(API_KEYS_FILE, \"w\") as api_key_database:\n json.dump(api_keys,api_key_database, indent=4, separators=(',', ' : '))\n api_key_database.close()\n return True\n else:\n print(\"Please enter a valid api key...\")\n time.sleep(2)\n api_key_handler(\"activate\")\n\n except Exception as err:\n print(err)\n return False\n\n elif cmd == \"deactivate\":\n try:\n with open(API_KEYS_FILE, \"r\") as api_key_database:\n api_keys = json.load(api_key_database)\n api_key_database.close()\n print(\"---------------------------------------------------------------------------------------------\")\n for api_key in api_keys:\n print(\"Key: \" + str(api_key['api_key']))\n print(\"Description: \" + str(api_key['description']))\n print(\"Active: \" + str(api_key['active']))\n print(\"---------------------------------------------------------------------------------------------\")\n deactivate_key = input(\"Paste the key you would like to deactivate: \")\n if len(deactivate_key) == 64 and helper_search_for_key(str(deactivate_key)) == True:\n for api_key in api_keys:\n if api_key['api_key'] == str(activate_key):\n api_key['active'] = False\n break\n with open(API_KEYS_FILE, \"w\") as api_key_database:\n json.dump(api_keys,api_key_database, indent=4, separators=(',', ' : '))\n api_key_database.close()\n\n return True\n\n else:\n print(\"Please enter a valid api key...\")\n time.sleep(2)\n api_key_handler(\"deactivate\")\n\n except Exception as err:\n print(err)\n return False\n\n elif cmd == \"destroy\":\n try:\n with open(API_KEYS_FILE, \"r\") as api_key_database:\n api_keys = json.load(api_key_database)\n api_key_database.close()\n print(\"---------------------------------------------------------------------------------------------\")\n for api_key in api_keys:\n print(\"Key: \" + str(api_key['api_key']))\n print(\"Description: \" + str(api_key['description']))\n print(\"Active: \" + str(api_key['active']))\n print(\"---------------------------------------------------------------------------------------------\")\n destroy_key = input(\"Paste the key you would like to remove: \")\n if len(destroy_key) == 64 and helper_search_for_key(str(destroy_key)) == True:\n for api_key in api_keys:\n if api_key['api_key'] == str(destroy_key):\n api_keys.remove(api_key)\n break\n with open(API_KEYS_FILE, \"w\") as api_key_database:\n json.dump(api_keys,api_key_database, indent=4, separators=(',', ' : '))\n api_key_database.close()\n\n return True\n else:\n print(\"Please enter a valid api key...\")\n time.sleep(2)\n 
api_key_handler(\"destroy\")\n \n except Exception as err:\n print(err)\n return False\n\n elif cmd == \"cleanup\":\n active_keys = []\n try:\n with open(API_KEYS_FILE, \"r\") as api_key_database:\n api_keys = json.load(api_key_database)\n api_key_database.close()\n for api_key in api_keys:\n if api_key['active'] == True:\n active_keys.append(api_key)\n with open(API_KEYS_FILE, \"w\") as api_key_database:\n json.dump(active_keys,api_key_database, indent=4, separators=(',', ' : '))\n api_key_database.close()\n return True\n except Exception as err:\n print(err)\n return False\n else:\n return False\n\ndef helper_search_for_key(search_key):\n try:\n key_found = False\n with open(API_KEYS_FILE,\"r\") as api_key_database:\n api_keys = json.load(api_key_database)\n api_key_database.close()\n for api_key in api_keys:\n if api_key['api_key'] == str(search_key):\n key_found = True\n break\n return key_found\n except Exception as err:\n print(err)\n return False\n\ndef wipe_messages():\n try:\n return True\n except Exception as err:\n print(err)\n return False\n\ndef init_settings():\n try:\n with open(globals()[\"SETTINGS_FILE\"],\"r\") as settings_file:\n json_settings = json.load(settings_file)\n \n ## Defining GLOBALS ##\n globals()[\"API_KEYS_FILE\"] = json_settings[\"API_KEYS_FILE\"]\n globals()[\"MESSAGES_FILE\"] = json_settings[\"MESSAGES_FILE\"]\n return True\n except Exception as err:\n print(err)\n return False\n\nif __name__ == \"__main__\":\n main()","repo_name":"thecrimsoncoder/vermillion","sub_path":"vermillion_server/vermillion_server_admin_tool.py","file_name":"vermillion_server_admin_tool.py","file_ext":"py","file_size_in_byte":13097,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21524068600","text":"from itertools import combinations\n\nN = int(input())\narr = list(input().split())\nK = int(input())\n\ncount = 0\ntotal = 0\n\nfor c in combinations(arr, K):\n if 'a' in c:\n count += 1\n total += 1\n\nprint(count / total)","repo_name":"uki-a/hackerrank","sub_path":"iterables_and_iterators.py","file_name":"iterables_and_iterators.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44928357070","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 3 22:59:02 2021\n\n@author: shiluzhang\nuse intersection of tfs and targets in true network and predicted network, total possible = tfs*targets - self-loops;\n\n\"\"\"\n\n\nimport os\nimport pandas as pd\nimport sys\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nfrom itertools import product\n\n\ndef main(args):\n tfile=args.gold ##prefix='chromatin_marks_atac_expr_motif'\n pfile=args.inferred\n outdir=args.outdir\n #outfile=args.outfile\n #tfile='/mnt/dv/wid/projects2/Roy-common/data/data_new/mouse/known_interactions/mesc/mESC_chipunion.txt'\n #pfile='/mnt/dv/wid/projects5/Roy-singlecell/shilu_work/integrate_scrna_scatac/networkinference/Results/scCVN_upto1transition/liger_sqrt_ncell50_k10_filterhumanbc/pg0.2_pm0.8_pr0.2_maxReg50_b2_bm4/subsample/analysis/cluster4/consensus_edges.txt'\n #regulators='/mnt/dv/wid/projects5/Roy-singlecell/shilu_work/integrate_scrna_scatac/networkinference/data/liger_sqrt_ncell50_k10_filterhumanbc/merged/merged_allregulators.txt'\n 
#targets='/mnt/dv/wid/projects5/Roy-singlecell/shilu_work/integrate_scrna_scatac/networkinference/data/liger_sqrt_ncell50_k10_filterhumanbc/merged/merged_allGenes.txt'\n regulators=pd.read_table(args.regulators, sep='\\n',header=None)\n regulators=regulators[0].values\n genes=pd.read_table(args.targets, sep='\\n',header=None)\n genes=genes[0].values\n Tnet = pd.read_table(tfile, sep='\\t',header=None)\n if Tnet.iloc[0,0]=='TF':\n Tnet = pd.read_table(tfile, sep='\\t',skiprows=1,header=None)\n Tnet['edge']=Tnet.iloc[:,0]+'->'+Tnet.iloc[:,1]\n Tnet['ytrue']=1\n\n ## Step1: filter gold standard network with tfs and targets in the dataset:\n tfs=list(set(Tnet[0]).intersection(regulators))\n targets=list(set(Tnet[1]).intersection(genes))\n id1=Tnet.index[Tnet[0].isin(tfs)]\n id2=Tnet.index[Tnet[1].isin(targets)]\n tid=id1.intersection(id2)\n if len(tid)==0:\n print(\"gold standard does not have tfs/genes present in the dataset\")\n print('{0:.1f}\\t{1:.1f}\\t{2:.1f}'.format(0, 0, 0))\n quit()\n Tnet_filter=Tnet.loc[tid,]\n ## remove self-loops in the gold standard dataset:\n idrm0=Tnet_filter.index[Tnet_filter[0]==Tnet_filter[1]]\n Tnet_filter.drop(idrm0,inplace=True)\n Tnet_filter.iloc[:,0:2].to_csv(outdir+'gold_filtrered.txt',index=None,header=None,sep='\\t',mode='w',float_format='%g')\n tfs_filter=list(set(Tnet_filter[0]))\n targets_filter=list(set(Tnet_filter[1]))\n # if len(tfs_filter)>1:\n # targets_filter=list(set(Tnet_filter[1]))\n # else:\n # targets_filter=genes\n # print(\"Only 1 tf:\"+tfs_filter[0])\n\n Pnet0=pd.read_table(pfile,header=None)\n Pnet0['edge']=Pnet0[0]+'->'+Pnet0[1]\n Pnet0.rename(columns={2:'score'},inplace=True)\n #if args.top_edges is not None:\n idx1=Pnet0.index[Pnet0[0].isin(tfs_filter)]\n idx2=Pnet0.index[Pnet0[1].isin(targets_filter)]\n pid=idx1.intersection(idx2)\n if len(pid)==0:\n print(\"predicted network does not have tfs/genes present in the gs dataset\")\n print('{0:.1f}\\t{1:.1f}\\t{2:.1f}'.format(0, 0, 0))\n quit()\n Pnet0=Pnet0.loc[pid,:]\n Pnet0.sort_values(['score'],ascending=0,axis=0,inplace=True)\n print(\"Number of Edges in filtered predicted networks:\",Pnet0.shape[0])\n Pnet0.iloc[:,0:3].to_csv(outdir+'prednet_filtrered.txt',index=None,header=None,sep='\\t',mode='w',float_format='%g')\n\n ## Compure F-score for top edges compared to gold standard dataset:\n dout=[]\n for top_edges in [100,200,300,400,500,1000,2000]: #[5000,10000,50000,100000]:\n Pnet=Pnet0.iloc[:top_edges,:] #Pnet=Pnet0.iloc[:args.top_edges,:]\n ## filter Predicted network with tfs and targets in filtered gold standard dataset, but this will make different algorithms not comparable\n #idx1=Pnet.index[Pnet[0].isin(tfs_filter)]\n #idx2=Pnet.index[Pnet[1].isin(targets_filter)]\n #pid=idx1.intersection(idx2)\n #if len(pid)==0:\n #print(\"predicted network does not have tfs/genes present in the gold standard\")\n #print('{0:.1f}\\t{1:.1f}\\t{2:.1f}'.format(0, 0, 0))\n #continue\n #Pnet_filter=Pnet.loc[pid,]\n #Pnet_filter.iloc[:,0:3].to_csv(outdir+'predicted_filtrered.txt',index=None,header=None,sep='\\t',mode='w',float_format='%g')\n # fscore:\n dp=pd.merge(Tnet_filter.loc[:,['edge','ytrue']], Pnet.loc[:,['edge','score']],on='edge',how='inner')\n n_TP = dp.shape[0]\n precision = n_TP / Pnet.shape[0]\n recall = n_TP / Tnet_filter.shape[0]\n dpp=dp['edge'].str.split('->',expand=True)\n if n_TP>0:\n tfs_overlap=len(set(dpp[0]))\n targets_overlap=len(set(dpp[1]))\n else:\n tfs_overlap=0\n targets_overlap=0\n try:\n fscore = 2 * (precision * recall) / (precision + recall)\n except 
ZeroDivisionError:\n fscore = 0\n d = {'edge': top_edges,'TP':n_TP,'tfs_overlap':tfs_overlap,'targets_overlap':targets_overlap,'gs':Tnet_filter.shape[0],'gs_tfs':len(tfs_filter),'gs_targets':len(targets_filter),'predicted':Pnet.shape[0],'predicted_tfs':len(set(Pnet[0])),'predicted_targets':len(set(Pnet[1])),'precision': precision,'recall':recall,'fscore':fscore}\n df = pd.DataFrame(d, columns=['edge','TP','tfs_overlap','targets_overlap','gs','gs_tfs','gs_targets','predicted','predicted_tfs','predicted_targets','precision','recall','fscore'], index=[0])\n print(\"Number of Edges in gold standard:\",Tnet_filter.shape[0],\" Number of edges in predicted:\",Pnet.shape[0],\"intersection:\",n_TP)\n #print(\"fscore:\",fscore)\n print('{0:.4f}\\t{1:.4f}\\t{2:.4f}'.format(precision, recall, fscore)) \n dout.append(df)\n\n dout = pd.concat(dout)\n dout.to_csv(outdir+'fscore.txt',index=None,header=True,sep='\\t',mode='w',float_format='%g')\n print(outdir+'fscore.txt')\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n\n description=__doc__,\n\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument('--gold',\n\n help='gold',\n\n type=str,\n\n default='')\n\n parser.add_argument('--inferred',\n\n help='inferred',\n\n type=str,\n\n default='')\n # parser.add_argument('--top_edges',\n # type=int, default=None,\n # help=\"Number of edges to filter to\")\n # parser.add_argument('-k', '--top-edges',\n # type=int, default=None,\n # help=\"Number of edges to filter to\")\n parser.add_argument('--outdir',\n\n help='outdir',\n\n type=str,\n\n default='')\n parser.add_argument('--regulators',\n\n help='regulators',\n\n type=str,\n\n default='')\n parser.add_argument('--targets',\n\n help='targets',\n\n type=str,\n\n default='')\n args = parser.parse_args()\n main(args)\n\n\n\n\n","repo_name":"Roy-lab/scMTNI","sub_path":"Scripts/Evaluation/fscore_filterPred.py","file_name":"fscore_filterPred.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"16746139087","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport socket\nfrom collections import namedtuple\n\nimport boto3\nimport requests\n\nInstance = namedtuple('Instance', ('ip', 'name'))\n\n\nclass AWSSession(object):\n def __init__(self, hosted_zone_id, arn, region, role_session_name):\n self.hosted_zone_id = hosted_zone_id\n self.arn = arn\n self.region = region\n self.role_session_name = role_session_name\n self._token = None\n self._r53 = None\n self._instance = None\n self._aws_access_key_id = None\n self._aws_secret_access_key = None\n self._aws_session_token = None\n\n @property\n def token(self):\n return self._token or self.role_arn_to_session()\n\n @property\n def r53(self):\n return self._r53 or self.connect_to_r53()\n\n @property\n def instance(self):\n return self._instance or self.get_instance()\n\n def role_arn_to_session(self):\n client = boto3.client('sts')\n response = client.assume_role(RoleArn=self.arn, RoleSessionName=self.role_session_name)\n self._aws_access_key_id = response['Credentials']['AccessKeyId'],\n self._aws_secret_access_key = response['Credentials']['SecretAccessKey'],\n self._aws_session_token = response['Credentials']['SessionToken'],\n self._token = boto3.Session(\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken'],\n 
region_name=self.region,\n )\n return self._token\n\n def connect_to_r53(self):\n self._r53 = boto3.client('route53',\n aws_access_key_id=self._aws_access_key_id[0],\n aws_secret_access_key=self._aws_secret_access_key[0],\n aws_session_token=self._aws_session_token[0],\n )\n return self._r53\n\n def get_instance(self):\n ec2 = boto3.resource('ec2', region_name=self.region)\n instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text\n instance = ec2.Instance(instance_id)\n instance_ip = instance.private_ip_address\n instance_name = socket.gethostname()\n self._instance = Instance(ip=instance_ip, name=instance_name)\n return self._instance\n\n\nclass DNSHandler(object):\n def dns_update(self, session):\n dns_changes = {\n 'Changes': [\n {\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': session.instance.name,\n 'Type': 'A',\n 'TTL': 300,\n 'ResourceRecords': [\n {\n 'Value': session.instance.ip\n }\n ]\n }\n }\n ]\n }\n\n response = session.r53.change_resource_record_sets(\n HostedZoneId=session.hosted_zone_id,\n ChangeBatch=dns_changes\n ) \n change_id = response['ChangeInfo']['Id']\n waiter = session.r53.get_waiter('resource_record_sets_changed')\n waiter.wait(\n Id=change_id\n )\n change_status = session.r53.get_change(\n Id=change_id\n )\n return change_status\n\n\ndef main():\n session = AWSSession(\n hosted_zone_id='Z3MG9MOXPX0FH1',\n arn='arn:aws:iam::787109557840:role/iam_sts_r53',\n region='ap-southeast-2',\n role_session_name='r53_sts',\n )\n session.role_arn_to_session()\n dns_handler = DNSHandler()\n update_record = dns_handler.dns_update(session)\n print(update_record)\n return 0\n\nif __name__ == \"__main__\":\n main()","repo_name":"merps/aws-tools","sub_path":"r53_zone.py","file_name":"r53_zone.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39056522893","text":"#!/usr/bin/python3\n\"\"\"Query first state in Database\n\"\"\"\nimport sys\nfrom model_state import Base, State\n\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import sessionmaker\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(\n sys.argv[1], sys.argv[2], sys.argv[3]), pool_pre_ping=True)\n Base.metadata.create_all(engine)\n\n Session = sessionmaker()\n session = Session(bind=engine)\n\n query = session.query(State).order_by(State.id)\n\n for obj in query:\n if 'a' in obj.name:\n print(\"{}: {}\".format(obj.id, obj.name))\n","repo_name":"stephenoba/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/9-model_state_filter_a.py","file_name":"9-model_state_filter_a.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14500336099","text":"__author__ = 'pengmessi'\n\"\"\"\nURL: http://www.spoj.com/problems/PIR/\n\nPIR - Pyramids\nno tags\nRecently in Farland, a country in Asia, the famous scientist Mr. Log Archeo discovered ancient pyramids. But unlike those in Egypt and Central America, they have a triangular (not rectangular) foundation. That is, they are tetrahedrons in the mathematical sense. In order to find out some important facts about the early society of the country (it is widely believed that the pyramid sizes are closely connected with Farland's ancient calendar), Mr. Archeo needs to know the volume of the pyramids. 
Unluckily, he has reliable data about their edge lengths only. Please, help him!\n\nInput\n\nt [number of tests to follow] In each of the next t lines six positive integer numbers not exceeding 1000 separated by spaces (each number is one of the edge lengths of the pyramid ABCD). The order of the edges is the following: AB, AC, AD, BC, BD, CD.\n\nOutput\n\nFor each test output a real number - the volume, printed accurate to four digits after decimal point.\n\nExample\n\nInput:\n\n\n2\n1 1 1 1 1 1\n1000 1000 1000 3 4 5\n\nOutput:\n\n\n0.1179\n1999.9937\n\"\"\"\nimport math\n\nif __name__ == '__main__':\n n = int(input())\n while n:\n n -= 1\n a, c, e, b, f, d = map(int, input().split())\n y = (a*a + c*c - b*b) / (2*c)\n x = math.sqrt(a*a - y*y)\n q = (c*c + e*e - d*d) / (2*c)\n p = (a*a + e*e - f*f - 2*q*y) / (2*x)\n r = math.sqrt(e*e - q*q - p*p)\n V = c * x * r / 6\n print(round(V,4))","repo_name":"pengmessi/SPOJpengmessi","sub_path":"p23.py","file_name":"p23.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35512092769","text":"# coin change 1 problem\n# it is a variation of count subset sum in unbounded knapsack\n# we have to find the count of ways in which a given sum can be formed from the infinite supply of the given denomintation of coins\n\ndef coin_change_1(arr , s , n):\n dp = [[0 for i in range(s+1)] for j in range(n+1)]\n for i in range(n+1):\n for j in range(s+1):\n if i==0:\n dp[i][j]=0\n if j==0:\n dp[i][j]=1\n elif arr[i-1]<=j:\n dp[i][j] = dp[i][j-arr[i-1]]+dp[i-1][j]\n else:\n dp[i][j] = dp[i-1][j]\n return dp[n][s]\n\narr = [1,2,3,5]\ns = 8\nn = len(arr)\nprint(coin_change_1(arr , s , n))\n","repo_name":"AbhinavSingh111/HackerRank-DS","sub_path":"coin_change_1_ubnpsk.py","file_name":"coin_change_1_ubnpsk.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72961231207","text":"\"\"\"\nAccepter les données de l'utilisateur\nProduire les résultats des matchs\nLancer de nouveaux tournois\n...\n\"\"\"\n\nimport re\nfrom models.data import Data\nfrom models.round import Round\nfrom models.player import Player\nfrom models.tournament import Tournament\nfrom view.view import View, Report, EditTournament\nfrom view.createplayer import get_players\nfrom view.createtournament import get_info_tournament, choose_players\n\n\nclass Controller:\n def __init__(self):\n \"\"\"Create a controller for the application\"\"\"\n self.db = Data()\n self.view = View()\n self.report = Report()\n self.edit = EditTournament()\n\n def load_or_create_tournament(self):\n \"\"\"Interact with Views and Models\n and returns tournament data to the Controller\"\"\"\n # Asks user to choose between loading an existing\n # or creating a new tournament\n self.tournament_user_choice = \"\"\n # Entry must be [1] (load), [2] (create)\n # or [Q] to quit the program\n while self.tournament_user_choice.upper() not in [\"1\", \"2\", \"Q\"]:\n self.tournament_user_choice = (\n self.view.load_or_create_tournament()\n ) # 1 = load, 2 = create, Q = Quit\n if self.tournament_user_choice.upper() == \"Q\":\n exit()\n elif self.tournament_user_choice == \"1\": # loading list of tournaments\n # loading all tournaments -> dict of tournaments\n self.load_all_tournaments()\n # display all tournaments\n self.display_all_tournaments()\n # user chooses one of the displayed tournaments from its id\n # returns 
selected_tournament\n self.select_a_tournament()\n # ask if user wants to edit the tournament\n self.ask_for_edit_a_tournament()\n return self.selected_tournament\n\n\n def ask_for_edit_a_tournament(self):\n \"\"\"\n Ask if the user wants to edit the selected tournament\n \"\"\"\n self.edit_tournament = \"\"\n while self.edit_tournament.upper() not in [\"Y\", \"N\"]:\n self.edit_tournament = self.view.choose_to_edit_tournament(\n self.selected_tournament.name\n )\n if self.edit_tournament.upper() == \"Y\":\n self.edit_a_tournament()\n\n def edit_a_tournament(self):\n self.edit.edit_all(self.selected_tournament)\n self.data_to_change = None\n while self.data_to_change not in [\n str(x) for x in (range(1, len(self.selected_tournament.label_attributes)))\n ]:\n self.data_to_change = self.edit.ask_for_data_to_change()\n # Determination of the attribute to modify\n self.parameter = self.selected_tournament.dict_attributes[self.data_to_change]\n self.old_value = self.selected_tournament.__getattribute__(self.parameter)\n self.new_value = self.edit.new_value_for_data(\n self.selected_tournament.label_attributes[self.parameter], self.old_value\n )\n if self.edit.confirm_new_value(self.old_value, self.new_value).upper() == \"Y\":\n self.edit.modification_validated()\n self.selected_tournament.set_new_value(self.parameter, self.new_value)\n else:\n self.edit.modification_cancelled()\n self.edit.edit_all(self.selected_tournament)\n # ask if the user wishes to proceed to more modifications\n # or wants to save the updated tournament\n self.choice_between_change_and_save = \"\"\n while self.choice_between_change_and_save.upper() not in [\"S\", \"M\"]:\n self.choice_between_change_and_save = (\n self.edit.ask_for_other_changes_or_save()\n )\n if self.choice_between_change_and_save.upper() == \"S\":\n self.db.save_tournaments(self.tournaments_to_choose)\n self.edit.save_ok()\n self.load_all_tournaments()\n self.ask_for_edit_a_tournament()\n else:\n self.edit_a_tournament()\n\n def load_all_tournaments(self):\n \"\"\"\n Data gets the dict of saved tournaments\n \"\"\"\n self.tournaments_to_choose = self.db.load_tournaments()\n\n def display_all_tournaments(self):\n \"\"\"\n View displays the list of tournaments (ended/ in progress / upcoming)\n \"\"\"\n self.report.display_tournaments_global(self.tournaments_to_choose)\n\n def select_a_tournament(self):\n # View asks the user to enter tournament id until it's a valid id\n self.selected_tournament_id = \"\"\n self.selected_tournament = None\n while (\n self.selected_tournament_id.upper() not in self.tournaments_to_choose.keys()\n and self.selected_tournament_id.upper() != \"Q\"\n ):\n self.selected_tournament_id = self.view.select_a_loaded_tournament()\n # Back to precedent menu if user enters [M]\n if self.selected_tournament_id.upper() == \"M\":\n self.load_or_create_tournament()\n # Determination of the tournament corresponding to the entered id\n else:\n self.selected_tournament = self.tournaments_to_choose[\n self.selected_tournament_id\n ]\n print(self.selected_tournament)\n\n def run(self):\n \"\"\"loading or creation of the tournament\"\"\"\n self.mytournament = self.load_or_create_tournament()\n if not self.mytournament.players:\n self.choice_add_player = \"\"\n while self.choice_add_player.upper() not in [\"A\", \"Q\", \"L\"]:\n self.choice_add_player = self.view.ask_to_add_players(self.mytournament)\n if self.choice_add_player.upper() == \"Q\":\n exit()\n elif self.choice_add_player.upper() == \"L\":\n self.display_all_tournaments()\n 
self.select_a_tournament()\n else:\n print(\"ajout des joueurs\")\n\n else:\n self.report.display_tournament_players_by_rank()\n\n exit()\n # récupération des infos sur les 8 joueurs du tournoi\n self.data_players = get_players()\n for data_player in self.data_players:\n player = Player(data_player) # création de chaque joueur\n # ajout de chaque joueur à la liste globale des joueurs\n self.db.add_players(player)\n # enregistrement de la liste des joueurs dans un fichier\n self.db.save_players()\n\n self.players = choose_players(self.db.get_players())\n for player in self.players:\n self.mytournament.add_player(player)\n\n # ajout du tournoi à la liste des tournois\n self.db.add_tournament(self.mytournament)\n # enregistrement de la liste des tournois dans un fichier\n self.db.save_tournaments()\n\n for num_round in range(1, self.mytournament.nb_rounds + 1):\n if num_round == 1:\n self.mytournament.sort_players()\n self.mytournament.first_round_sort_players()\n self.matchs = self.mytournament.first_matchs()\n self.mytournament.generate_round(Round(\"Round 1\", self.matchs))\n self.mytournament.generate_results(self.mytournament.rounds[0])\n self.mytournament.sort_players()\n self.mytournament.display_scores()\n else:\n self.matchs = self.mytournament.other_matchs()\n self.mytournament.generate_round(\n Round(f\"Round {num_round}\", self.matchs)\n )\n self.mytournament.generate_results(\n self.mytournament.rounds[num_round - 1]\n )\n self.mytournament.sort_players()\n self.mytournament.display_scores()\n","repo_name":"Olrio/OR_OC_ProjectsDAP","sub_path":"P4/controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":7748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3452075635","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\nUserModel = get_user_model()\n\n\nclass Board(models.Model):\n class Meta:\n verbose_name = 'Доска'\n verbose_name_plural = 'Доски'\n\n name = models.CharField(\n verbose_name='name',\n max_length=32,\n )\n user = models.ForeignKey(\n UserModel,\n on_delete=models.CASCADE,\n related_name='boards',\n editable=False,\n )\n\n def __str__(self):\n return self.name\n\n\nclass Column(models.Model):\n class Meta:\n verbose_name = 'Колонка'\n verbose_name_plural = 'Колонки'\n\n name = models.CharField(\n verbose_name='name',\n max_length=32,\n )\n board = models.ForeignKey(\n Board,\n on_delete=models.CASCADE,\n related_name='columns',\n editable=False,\n )\n\n def __str__(self):\n return self.name\n\n\nclass Card(models.Model):\n class Meta:\n verbose_name = 'Карточка'\n verbose_name_plural = 'Корточки'\n\n name = models.CharField(\n verbose_name='name',\n max_length=32,\n )\n column = models.ForeignKey(\n Column,\n on_delete=models.CASCADE,\n related_name='cards',\n )\n\n def __str__(self):\n return self.name\n","repo_name":"Damir-Saitov/VolHinSoft_testcase_django_backend","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37310199057","text":"import zipfile\nimport requests\nimport os\nimport json\nfrom bs4 import BeautifulSoup\nimport html2text\n\n# Load secrets from JSON file\nwith open(\".secrets.json\", \"r\") as secrets_file:\n secrets = json.load(secrets_file)\n\nconfluence_url = secrets[\"confluence_url\"]\nazure_devops_url = secrets[\"azure_devops_url\"]\nspace_key = 
secrets[\"space_key\"]\nproject_name = secrets[\"project_name\"]\nconfluence_token = secrets[\"confluence_token\"]\nazure_devops_token = secrets[\"azure_devops_token\"]\n\ndef export_import_pages(parent_id):\n # Get list of child pages for the parent page\n child_pages_url = f\"{confluence_url}/content?spaceKey={space_key}&parentId={parent_id}\"\n child_pages_response = requests.get(child_pages_url, headers={\"Authorization\": f\"Basic {confluence_token}\"})\n child_pages = child_pages_response.json()[\"results\"]\n return child_pages\n\ndef export_page(child_page):\n export_url = f\"{confluence_url}/content/{child_page['id']}/export/html\"\n response = requests.get(export_url, headers={\"Authorization\": f\"Basic {confluence_token}\"})\n open(f\"{child_page['title']}.html\", \"wb\").write(response.content)\n return child_page[\"title\"]\n\ndef convert_to_markdown(file_name):\n with open(f\"{file_name}.html\", \"r\") as html_file:\n html_content = html_file.read()\n soup = BeautifulSoup(html_content, 'html.parser')\n # Replace macro elements with their corresponding markdown representation\n for macro in soup.find_all(class_='confluence-macro'):\n macro_type = macro['data-macro-name']\n macro_body = macro.find_all('span')[0].get_text()\n \n if macro_type == 'code':\n macro_markdown = f\"```{macro_body}```\"\n elif macro_type == 'image':\n macro_markdown = f\"![image]({macro_body})\"\n else:\n macro_markdown = f\"Macro of type {macro_type} is not supported\"\n macro.replace_with(macro_markdown)\n\n # Use a library such as 'html2text' to convert the HTML to markdown\n md_content = html2text.html2text(str(soup))\n with open(f\"{file_name}.md\", \"w\") as md_file:\n md_file.write(md_content)\n os.remove(f\"{file_name}.html\")\n return file_name\n\ndef import_page(file_name):\n with open(f\"{file_name}.md\", \"r\") as md_file:\n md_content = md_file.read()\n import_url = f\"{azure_devops_url}/{project_name}/_apis/wiki/wikis/{file_name}/pages?path={file_name}&content={md_content}\"\n response = requests.put(import_url, headers={\"Authorization\": f\"Basic {azure_devops_token}\"})\n if response.status_code != 200:\n print(f\"Error: Failed to import {file_name} to Azure DevOps Wiki. 
Status code: {response.status_code}\")\n else:\n print(f\"{file_name} imported to Azure DevOps Wiki with status code {response.status_code}\")\n os.remove(f\"{file_name}.md\")\n\n# Start recursive export and import with the root page\nroot_url = f\"{confluence_url}/content?spaceKey={space_key}&expand=ancestors\"\nroot_response = requests.get(root_url, headers={\"Authorization\": f\"Basic {confluence_token}\"})\nroot_page = root_response.json()[\"results\"][0]\n\nchild_pages = export_import_pages(root_page[\"id\"])\n\nfor child_page in child_pages:\n file_name = export_page(child_page)\n file_name = convert_to_markdown(file_name)\n #import_page(file_name)\n\n","repo_name":"mihailfox/confluence2ado-wiki","sub_path":"migrate_confluence.py","file_name":"migrate_confluence.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27583339288","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport pickle\n\n'''\ndef plot_speed(laps):\n speeds = []\n for lap in laps:\n for j in range(lap.speedX.__len__()):\n speeds.append(math.sqrt(math.pow(lap.speedX[j], 2) + math.pow(lap.speedY[j], 2) + math.pow(lap.speedZ[j], 2)))\n chunks = [speeds[i:i+50] for i in range(0, len(speeds), 50)]\n mean_speeds = [] #velocità medie secondo per secondo\n for c in chunks:\n mean_speeds.append(float(np.sum(c)/len(c)))\n time = np.arange(0, len(mean_speeds)) # secondi nel range [0; end 2 giri]\n scaley = [] # scala delle velocità\n scalex = [] # scala del tempo\n for i in range(15):\n scaley.append(i * 20)\n for i in range(18):\n scalex.append(i * 5)\n plt.plot(time, mean_speeds, color=\"blue\") #axis x: time - axis y: speed\n plt.title(\"PSO: mean velocity in CG Speedway n.1\")\n #plt.title(\"PSO: mean velocity in Forza\")\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"Mean velocity (km/h)\")\n plt.yscale('linear')\n plt.yticks(scaley)\n plt.xticks(scalex)\n plt.show()\n\ndef plot_trackPos(laps):\n t_off = 0.0\n for lap in laps:\n t_off += lap.get_time_offtrack()\n positions = np.append(laps[0].trackPos, laps[1].trackPos)\n chunks = [positions[i:i + 3] for i in range(0, len(positions), 3)] # media di ogni 40 ms\n mean_pos = []\n for c in chunks:\n mean_pos.append(float(np.sum(c) / len(c)))\n time = np.arange(0, len(mean_pos)*60,60)\n\n up_bound_x = [0, len(mean_pos)*60]\n up_bound_y = [0.95, 0.95]\n down_bound_x = [0, len(mean_pos)*60]\n down_bound_y = [-0.95, -0.95]\n middle_bound_x = [0, len(mean_pos)*60]\n middle_bound_y = [0, 0]\n\n plt.plot(time, mean_pos, color=\"black\") # axis x: time - axis y: speed\n plt.plot(up_bound_x, up_bound_y, color=\"blue\")\n plt.plot(middle_bound_x, middle_bound_y, color=\"red\")\n plt.plot(down_bound_x, down_bound_y, color=\"blue\")\n plt.title(\"Profilo di posizione di CarSim in Forza\")\n plt.xlabel(\"Tempo (millisec)\")\n legend = 'Fuori pista: '+str(round(t_off,2))+' secondi.'\n plt.legend([legend], loc='lower left')\n plt.ylabel(\"Posizione nella pista (metri)\")\n plt.yscale('linear')\n plt.yticks(np.arange(-1.5, 7.5, 0.5))\n plt.show()\n'''\n\npath = ['laps_info/']\nvars = ['carsim_']\ntracks = ['forza'] #'forza'\nsuper_list = []\nfor p in path:\n for v in vars:\n for t in tracks:\n info = p + v + t\n with open(info, \"rb\") as f:\n print(info)\n super_list.append(pickle.load(f))\n\ni = 0\ncolors = ['black']# 'red', 'green']\nlabels = ['CarSim']# 'SADE', 'PSO']\n\nfor laps in super_list:\n t_off = 0.0\n for lap in laps:\n t_off += 
lap.get_time_offtrack()\n positions = np.append(laps[0].trackPos, laps[1].trackPos)\n chunks = [positions[i:i + 3] for i in range(0, len(positions), 3)] # media di ogni 40 ms\n mean_pos = []\n for c in chunks:\n mean_pos.append(float(np.sum(c) / len(c)))\n time = np.arange(0, len(mean_pos) * 60, 60)\n\n up_bound_x = [0, len(mean_pos) * 60]\n up_bound_y = [0.95, 0.95]\n down_bound_x = [0, len(mean_pos) * 60]\n down_bound_y = [-0.95, -0.95]\n middle_bound_x = [0, len(mean_pos) * 60]\n middle_bound_y = [0, 0]\n\n plt.plot(up_bound_x, up_bound_y, color=\"blue\")\n plt.plot(middle_bound_x, middle_bound_y, color=\"red\")\n plt.plot(down_bound_x, down_bound_y, color=\"blue\")\n\n plt.plot(time, mean_pos, label=\"Fuori pista \" + labels[i] + \": \" + str(round(t_off, 2)) + \" secondi.\", color=colors[i]) # axis x: time - axis y: speed\n plt.title(\"Profilo di posizione CarSim in Forza\")\n\n\n i+=1\n\n\n\nplt.xlabel(\"Tempo (millisec)\")\nplt.ylabel(\"Posizione nella pista (metri)\")\nplt.yscale('linear')\nplt.yticks(np.arange(-3.0, 8.0, 0.5))\nplt.legend(loc='lower left')\nplt.show()","repo_name":"alessandro-montefusco/Progetto-Computazione-Naturale","sub_path":"CarSim/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1187117217","text":"def Train_sublp_with_pseudo_labels(dataset, train_params, io_dir, full_data_dir, device='0'):\n \n import torch.nn as nn\n import os\n import time\n from model_inorm_mod import lpCNN\n import pytorch_ssim\n from tqdm import tqdm\n import torch\n import torch.optim as optim\n from torch.utils.data import DataLoader\n from torch.optim.lr_scheduler import MultiStepLR\n from torch.autograd import Variable\n from PLabel_Dataset import PLabel_Dataset\n from glob import glob\n from io_functions import read_img_as_tensor, save_tensor_as_img\n \n num_epochs = train_params['num_epochs_full']\n lr_decay = train_params['lr_decay_full']\n bs = train_params['batch_size']\n lr = train_params['lr']\n save_freq = num_epochs - 1\n ######### copying the real lbld data #################\n \n long_dir = io_dir + 'img_data/train_lbld/long_lp/' \n short_dir = io_dir + 'img_data/train_lbld/short_lp/' \n write_dir = io_dir + 'img_data/train_unlbld/pseudo_labels/'\n filenames = os.listdir(short_dir)\n \n for name in tqdm(filenames):\n if not dataset == 'lol':\n long_path = glob(long_dir + name[:5] + '*.png') \n im_long = read_img_as_tensor(long_path[0], '0')\n else:\n long_path = long_dir + name\n im_long = read_img_as_tensor(long_path[0], '0') \n save_tensor_as_img(im_long, write_dir + name)\n ###########################################################\n \n seed = 2222\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n short_dir = full_data_dir + 'train/short_lp/'\n long_dir = io_dir + 'img_data/train_unlbld/pseudo_labels/' \n save_dir = io_dir + 'checkpoints/lp_sub/with_plabels/'\n \n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n cuda1 = torch.device('cuda:' + device)\n \n ps = 48\n \n ## dataloader\n train_dataloader = DataLoader(PLabel_Dataset(dataset, short_dir, long_dir, ps), \n batch_size=bs, shuffle=True)\n \n model = lpCNN()\n model.train()\n model = model.cuda(cuda1)\n \n l1_err = nn.L1Loss()\n optimizer = optim.Adam(model.parameters(), lr=lr)\n scheduler = MultiStepLR(optimizer, milestones=lr_decay, gamma=0.1)\n \n for epoch in tqdm(range(num_epochs)):\n \n scheduler.step(epoch) # step to the learning rate in this 
epcoh\n \n epoch_loss = 0\n \n start_time = time.time()\n \n for n_count, batch_yx in enumerate(train_dataloader):\n \n optimizer.zero_grad()\n \n short_batch, long_batch = Variable(batch_yx[0]), Variable(batch_yx[1])\n \n out_batch = model(short_batch)\n \n l1_loss = l1_err(out_batch, long_batch)\n ssim_loss = pytorch_ssim.ssim(out_batch, long_batch)\n loss = l1_loss - ssim_loss \n \n epoch_loss += loss.item()\n \n loss.backward()\n optimizer.step()\n \n elapsed_time = time.time() - start_time\n # print('epcoh = %4d , loss = %4.4f , time = %4.2f s' % (epoch+1, epoch_loss/n_count, elapsed_time))\n if epoch%save_freq==0:\n torch.save(model, os.path.join(save_dir, 'modelpl.pth'))","repo_name":"sameerIISc/SSL-LLR","sub_path":"Train/Train_sublp_with_pseudo_labels.py","file_name":"Train_sublp_with_pseudo_labels.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26436729483","text":"from pprint import pprint\nfrom collections import deque\nimport argparse\n\n\nclass BencodeError(Exception):\n\tpass\n\n\nclass UnexpectedEnd(BencodeError):\n\tdef __init__(self, msg=\"End of stream found during parsing.\"):\n\t\tsuper(UnexpectedEnd, self).__init__(msg)\n\n\nclass Bencoder(deque):\n\tdef __init__(self, source):\n\t\tsuper(Bencoder, self).__init__(source)\n\n\tdef parse(self):\n\t\ttry:\n\t\t\tself.result = self.token()\n\t\texcept IndexError:\n\t\t\traise UnexpectedEnd()\n\t\texcept:\n\t\t\tpprint(self)\n\t\t\traise\n\n\t\treturn self.result\n\n\tdef _read_till(self, char):\n\t\tc_char = \"\"\n\t\tresult = \"\"\n\t\twhile c_char != char:\n\t\t\tresult += c_char\n\t\t\tc_char = self.popleft()\n\t\treturn result\n\n\tdef read_int(self):\n\t\treturn int(self._read_till(\"e\"))\n\n\tdef read_bytes(self):\n\t\tsize = int(self._read_till(\":\"))\n\t\treturn \"\".join(self.popleft() for i in range(size))\n\n\tdef read_list(self):\n\t\tresult = []\n\t\tnext_char = self.popleft()\n\t\twhile next_char != \"e\":\n\t\t\tself.appendleft(next_char)\n\t\t\tresult.append(self.token())\n\t\t\tnext_char = self.popleft()\n\n\t\treturn result\n\n\tdef read_dict(self):\n\t\ttokens = self.read_list()\n\t\tkeys = (k for n, k in enumerate(tokens) if not n % 2)\n\t\tvalues = (k for n, k in enumerate(tokens) if n % 2)\n\t\treturn dict(zip(keys, values))\n\n\tdef token(self):\n\t\taction = self.popleft()\n\t\ttoken = None\n\t\tif action == \"i\":\n\t\t\ttoken = self.read_int()\n\t\telif action == \"l\":\n\t\t\ttoken = self.read_list()\n\t\telif action == \"d\":\n\t\t\ttoken = self.read_dict()\n\t\telse:\n\t\t\tself.appendleft(action)\n\t\t\ttoken = self.read_bytes()\n\n\t\treturn token\n\n\ndef parse_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"torrent\", type=argparse.FileType(\"r\"))\n\treturn parser.parse_args()\n\n\ndef main(args):\n\tparser = Bencoder(args.torrent.read())\n\tpprint(parser.parse())\n\nif __name__ == \"__main__\":\n\tmain(parse_args())\n","repo_name":"NightBlues/training","sub_path":"python/bencode/torrent.py","file_name":"torrent.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3885606747","text":"from typing import Optional\nfrom fastapi import APIRouter\n\nrouter = APIRouter(prefix='/strong', tags=['Strong Numbers'])\n\ndef factorial(num):\n fact = 1\n for i in range(1,num+1):\n fact *= i\n return fact\n\ndef isStrong(value: int):\n temp = value\n sum: int = 0\n\n while 
value>0:\n digit = value%10\n sum += factorial(digit) \n value = value//10\n \n if(temp == sum):\n return True\n else:\n return False\n\n\n@router.get('/{value}')\ndef main(value: int):\n return isStrong(value)\n\n@router.get('/limit')\ndef limit(u: int, l: int):\n data = {}\n for num in range(u,l):\n data[int(num)] = isStrong(num)\n return data","repo_name":"anubhavanand3007/Maths-API","sub_path":"MathAPI/routers/strong.py","file_name":"strong.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71824332648","text":"import os\nfrom datetime import date\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\ngdrive = os.path.join(\"base/gdrive_key.json\")\n\n\ndef clean_column(df, column_name: str, character: str):\n character = character\n frame = df\n column = column_name\n frame[column] = frame[column].str.replace(character, \"\", regex=True)\n return frame\n\n\ndef google_drive_upload(df, sport: str):\n df = df\n sport = str(sport)\n savetime = date.today()\n df.to_csv(f\"{sport}.{savetime}.csv\")\n scope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\",\n \"https://www.googleapis.com/auth/drive\",\n ]\n # establishing credentials given to me by Google API\n credentials = ServiceAccountCredentials.from_json_keyfile_name(gdrive, scope)\n client = gspread.authorize(credentials)\n # opening the sheet I am keeping the scores on\n if sport == \"NFL\":\n spreadsheet = client.open(\"nfl_weekly_fanduel_scores\")\n elif sport == \"NBA\":\n spreadsheet = client.open(\"Player_Game_Records_2021\")\n # Updating sheet with my new csv\n with open(f\"{sport}.{savetime}.csv\", \"r\") as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)\n","repo_name":"CSorrel58/FanduelResearch","sub_path":"base/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10257078988","text":"from pylewm.layout import Layout\nfrom pylewm.rects import Rect, Direction\nfrom pylewm.window import Window\n\nfrom pylewm.layouts.sidebar import SidebarLayout\n\nimport math\n\nclass AutoGridLayout(Layout):\n def __init__(self):\n Layout.__init__(self)\n self.columns : list[list[Window]] = []\n self.windows : list[Window] = []\n self.need_reposition = False\n \n def is_portrait_mode(self):\n return self.rect.width < self.rect.height\n\n def get_wanted_grid_dimensions(self, window_count):\n columns = math.ceil(math.sqrt(float(window_count)))\n rows = int(math.ceil(window_count / columns))\n if window_count > 2:\n rows = max(rows, 3)\n return columns, rows\n\n def get_window_column(self, window):\n for column_index, column in enumerate(self.columns):\n if window in column:\n return column_index, column.index(window)\n return -1, -1\n\n def get_column_window(self, slot):\n if slot[0] == -1 or slot[0] >= len(self.columns):\n return None\n column = self.columns[slot[0]]\n if slot[1] == -1 or slot[1] >= len(column):\n return None\n return column[slot[1]]\n\n def get_mru_index_in_column(self, column_index):\n for window in reversed(self.focus_mru):\n if window not in self.columns[column_index]:\n continue\n index = self.columns[column_index].index(window)\n return index\n return 0\n\n def get_last_focus_column(self):\n for window in 
reversed(self.focus_mru):\n for column_index, column in enumerate(self.columns):\n if window not in column:\n continue\n slot_index = column.index(window)\n return column_index, slot_index\n return -1, -1\n\n def select_mru_span_window(self, column_index, pos_top, pos_bottom):\n candidates = []\n for window in self.columns[column_index]:\n if pos_top < window.layout_position.bottom and window.layout_position.top < pos_bottom:\n candidates.append(window)\n\n for window in reversed(self.focus_mru):\n if window in candidates:\n return window\n \n if candidates:\n return candidates[0]\n elif self.columns[column_index]:\n return self.columns[column_index][0]\n else:\n return None\n\n def add_window(self, window, at_slot=None, insert_direction=None):\n self.windows.append(window)\n self.need_reposition = True\n\n wanted_columns, wanted_rows = self.get_wanted_grid_dimensions(len(self.windows))\n\n # Find the first column that has fewer windows than the amount of rows we want\n candidate_columns = []\n for column_index, column in enumerate(self.columns):\n if len(column) < wanted_rows:\n candidate_columns.append(column_index)\n\n focus_column, focus_slot = -1, -1\n if self.focus:\n focus_column, focus_slot = self.get_window_column(self.focus)\n\n if at_slot == Direction.InsertLeft:\n if self.columns and not self.columns[0]:\n self.columns[0].append(window)\n else:\n self.columns.insert(0, [window])\n elif at_slot == Direction.InsertRight:\n if self.columns and not self.columns[-1]:\n self.columns[-1].append(window)\n else:\n self.columns.append([window])\n elif at_slot and at_slot[0] != -1:\n insert_column, insert_slot = at_slot\n if insert_slot == -1 or insert_slot >= len(self.columns[insert_column]):\n self.columns[insert_column].append(window)\n else:\n self.columns[insert_column].insert(insert_slot, window)\n elif self.is_portrait_mode() and len(self.columns) == 1 and len(self.columns[0]) <= 2:\n if insert_direction in Direction.ANY_Down:\n self.columns[0].insert(0, window)\n else:\n self.columns[0].append(window)\n elif len(self.columns) < wanted_columns and focus_column not in candidate_columns:\n # If we don't have enough columns, add the window as a new column\n if insert_direction in Direction.ANY_Right:\n self.columns.insert(0, [window])\n else:\n self.columns.append([window])\n elif insert_direction == Direction.InsertRight:\n self.columns.insert(0, [window])\n elif insert_direction == Direction.InsertLeft:\n self.columns.append([window])\n else:\n insert_column = -1\n\n # Put it in the candidate column that has focus if we can\n if focus_column in candidate_columns:\n insert_column = focus_column\n\n # Prefer leftmost column if coming in from the left\n if insert_column == -1 and insert_direction in Direction.ANY_Right:\n candidate_columns.sort(key = lambda col_index: col_index, reverse=False)\n insert_column = candidate_columns[0]\n\n # Prefer rightmost column if coming in from the right\n if insert_column == -1 and insert_direction in Direction.ANY_Left:\n candidate_columns.sort(key = lambda col_index: col_index, reverse=True)\n insert_column = candidate_columns[0]\n\n # Choose the most recently focused candidate column\n if insert_column == -1:\n for mru_window in reversed(self.focus_mru):\n mru_column, mru_slot = self.get_window_column(mru_window)\n if mru_column in candidate_columns:\n insert_column = mru_column\n break\n\n # Put it in the shortest current column\n if insert_column == -1:\n candidate_columns.sort(key = lambda col_index: len(self.columns[col_index]), 
reverse=True)\n insert_column = candidate_columns[0]\n\n # Final fallback, should only happen if inserting additional windows from a direction\n if insert_column == -1:\n if insert_direction in Direction.ANY_Right:\n insert_column = 0\n else:\n insert_column = len(self.columns)-1\n\n if insert_direction in Direction.ANY_Down:\n self.columns[insert_column].insert(0, window)\n else:\n self.columns[insert_column].append(window)\n\n def remove_window(self, window):\n self.windows.remove(window)\n\n for column in self.columns:\n if window in column:\n column.remove(window)\n if len(column) == 0:\n self.columns.remove(column)\n\n self.need_reposition = True\n\n def replace_window(self, old_window, new_window):\n index = self.windows.index(old_window)\n self.windows[index] = new_window\n\n for column in self.columns:\n if old_window in column:\n index = column.index(old_window)\n column[index] = new_window\n\n self.need_reposition = True\n\n def get_window_in_direction(self, from_window, direction):\n if not from_window:\n if not self.columns:\n return None, direction\n elif direction in Direction.ANY_Right:\n return self.columns[0][self.get_mru_index_in_column(0)], direction\n elif direction in Direction.ANY_Left:\n return self.columns[-1][self.get_mru_index_in_column(-1)], direction\n elif direction in Direction.ANY_Down:\n last_column, last_slot = self.get_last_focus_column()\n return self.columns[last_column][0], direction\n elif direction in Direction.ANY_Up:\n last_column, last_slot = self.get_last_focus_column()\n return self.columns[last_column][-1], direction\n return None, direction\n\n window_column, window_slot = self.get_window_column(from_window)\n column_length = len(self.columns[window_column])\n\n if direction in Direction.ANY_Left:\n if window_column == 0:\n return None, direction\n\n target_window = self.select_mru_span_window(window_column-1, from_window.layout_position.top, from_window.layout_position.bottom)\n return target_window, direction\n elif direction in Direction.ANY_Right:\n if window_column == len(self.columns)-1:\n return None, direction\n\n target_window = self.select_mru_span_window(window_column+1, from_window.layout_position.top, from_window.layout_position.bottom)\n return target_window, direction\n elif direction == Direction.Next:\n new_slot = (window_slot + 1) % column_length\n return self.columns[window_column][new_slot], direction\n elif direction == Direction.Previous:\n new_slot = (window_slot - 1 + column_length) % column_length\n return self.columns[window_column][new_slot], direction\n elif direction == Direction.Down:\n if (window_slot+1) < column_length:\n return self.columns[window_column][window_slot+1], direction\n else:\n return None, direction\n elif direction == Direction.Up:\n if window_slot > 0:\n return self.columns[window_column][window_slot-1], direction\n else:\n return None, direction\n\n return None, direction\n\n def move_window_to_column(self, window, to_column_index, always_insert=False):\n wanted_columns, wanted_rows = self.get_wanted_grid_dimensions(len(self.windows))\n from_column_index, from_slot_index = self.get_window_column(window)\n target_window = self.select_mru_span_window(to_column_index, window.layout_position.top, window.layout_position.bottom)\n target_column, target_slot = self.get_window_column(target_window)\n\n if (len(self.columns[target_column]) < len(self.columns[from_column_index])\n or len(self.columns) > wanted_columns) or always_insert:\n # Move the window from the larger column to the smaller column\n if 
window.layout_position.center[1] >= target_window.layout_position.center[1]:\n target_slot += 1\n self.columns[target_column].insert(target_slot, window)\n self.columns[from_column_index].remove(window)\n if not self.columns[from_column_index]:\n del self.columns[from_column_index]\n else:\n # Swap columns with the target window\n self.columns[target_column][target_slot] = window\n self.columns[from_column_index][from_slot_index] = target_window\n\n def move_window_in_direction(self, window, direction):\n window_column, window_slot = self.get_window_column(window)\n column_length = len(self.columns[window_column])\n wanted_columns, wanted_rows = self.get_wanted_grid_dimensions(len(self.windows))\n self.need_reposition = True\n\n if direction == Direction.Left:\n if window_column == 0:\n if len(self.columns) >= wanted_columns or len(self.columns[window_column]) == 1:\n return False, direction\n else:\n self.columns[window_column].remove(window)\n self.columns.insert(0, [window])\n return True, direction\n self.move_window_to_column(window, window_column-1)\n return True, direction\n elif direction == Direction.InsertLeft:\n if window_column == 0:\n if len(self.columns[window_column]) == 1:\n return False, direction\n else:\n self.columns[window_column].remove(window)\n self.columns.insert(0, [window])\n return True, direction\n \n self.move_window_to_column(window, window_column-1, always_insert=True)\n return True, direction\n elif direction == Direction.Right:\n if window_column == len(self.columns)-1:\n if len(self.columns) >= wanted_columns or len(self.columns[window_column]) == 1:\n return False, direction\n else:\n self.columns[window_column].remove(window)\n self.columns.append([window])\n return True, direction\n self.move_window_to_column(window, window_column+1)\n return True, direction\n elif direction == Direction.InsertRight:\n if window_column == len(self.columns)-1:\n if len(self.columns[window_column]) == 1:\n return False, direction\n else:\n self.columns[window_column].remove(window)\n self.columns.append([window])\n return True, direction\n \n self.move_window_to_column(window, window_column+1, always_insert=True)\n return True, direction\n elif direction == Direction.Next:\n new_slot = (window_slot + 1) % column_length\n other_window = self.columns[window_column][new_slot]\n\n self.columns[window_column][window_slot] = other_window\n self.columns[window_column][new_slot] = window\n return True, direction\n elif direction == Direction.Previous:\n new_slot = (window_slot - 1 + column_length) % column_length\n other_window = self.columns[window_column][new_slot]\n\n self.columns[window_column][window_slot] = other_window\n self.columns[window_column][new_slot] = window\n return True, direction\n elif direction == Direction.Down:\n if (window_slot+1) < column_length:\n new_slot = window_slot + 1\n other_window = self.columns[window_column][new_slot]\n\n self.columns[window_column][window_slot] = other_window\n self.columns[window_column][new_slot] = window\n return True, direction\n else:\n return None, direction\n elif direction == Direction.Up:\n if window_slot > 0:\n new_slot = window_slot - 1\n other_window = self.columns[window_column][new_slot]\n\n self.columns[window_column][window_slot] = other_window\n self.columns[window_column][new_slot] = window\n return True, direction\n else:\n return False, direction\n\n return False, direction\n\n def get_drop_slot(self, position, rect, fake_window_count=-1, allow_drop_zones=True):\n window_count = len(self.windows) + 1\n if 
fake_window_count != -1:\n window_count = fake_window_count\n\n if window_count <= 2:\n if allow_drop_zones:\n # Allow dropping at the top of an empty screen for auto-tile\n if (position[1] < self.rect.top + 100\n or position[1] > self.rect.bottom - 100\n or position[0] < self.rect.left + 50\n or position[0] > self.rect.right - 50):\n if position[0] > self.rect.center[0]:\n return Direction.InsertRight, True\n else:\n return Direction.InsertLeft, True\n\n # Allow dropping around the center of a single column to split\n if position[0] < self.rect.left + 50:\n return Direction.InsertLeft, True\n elif position[0] > self.rect.right - 50:\n return Direction.InsertRight, True\n elif position[1] < self.rect.top + 100:\n return (0, 0), True\n elif position[1] > self.rect.bottom - 100:\n return (0, 1), True\n\n if position[0] < self.rect.center[0]:\n return Direction.InsertLeft, False\n else:\n return Direction.InsertRight, False\n\n wanted_columns, wanted_rows = self.get_wanted_grid_dimensions(window_count)\n\n require_force = False\n if len(self.columns) < wanted_columns:\n # Never insert into an existing column if we want more columns to begin with\n require_force = True\n\n column_count = len(self.columns)\n if column_count == 0:\n return None, False\n\n column_splits = self.get_column_splits(len(self.columns))\n for column_index in range(0, column_count):\n # Check if the position is within this column\n if position[0] < column_splits[column_index]:\n continue\n if position[0] > column_splits[column_index+1]:\n continue\n\n # Only allow insertion into this column if the column is short of wanted height\n column_require_force = require_force\n if len(self.columns[column_index]) >= wanted_rows:\n column_require_force = True\n\n # If the column is empty always drop into it\n slot_count = len(self.columns[column_index])\n if slot_count == 0:\n return (column_index, 0), False\n\n # On the first column, dropping on the left means a new column to the left\n if column_index == 0 and position[0] < column_splits[column_index] + 50 and allow_drop_zones:\n return Direction.InsertLeft, True\n\n # On the last column, dropping on the right means a new column to the right\n if column_index == column_count-1 and position[0] > column_splits[column_index+1] - 50 and allow_drop_zones:\n return Direction.InsertRight, True\n\n # Allow force dropping at the edge of the column\n is_force_drop = (position[0] < column_splits[column_index] + 50\n or position[0] > column_splits[column_index+1] - 50\n or position[1] < self.rect.top + 100\n or position[1] > self.rect.bottom - 100)\n\n if not is_force_drop and column_require_force:\n continue\n\n slot_splits = self.get_slot_splits(slot_count + 1)\n for slot_index in range(0, slot_count+1):\n slot_start = slot_splits[slot_index]\n slot_end = slot_splits[slot_index+1]\n\n if position[1] < slot_start:\n continue\n if position[1] > slot_end:\n continue\n\n return (column_index, slot_index), is_force_drop\n\n return None, False\n\n def get_focus_window_after_removing(self, window_before_remove):\n window_column, window_slot = self.get_window_column(window_before_remove)\n if window_slot > 0:\n return self.columns[window_column][window_slot-1]\n elif window_slot+1 < len(self.columns[window_column]):\n return self.columns[window_column][window_slot+1]\n elif window_column > 0:\n return self.columns[window_column-1][0]\n elif window_column+1 < len(self.columns):\n return self.columns[window_column+1][0]\n return None\n\n def get_column_splits(self, column_count):\n column_width = 
int(float(self.rect.width) / float(column_count))\n column_splits = []\n for i in range(0, column_count):\n column_splits.append(self.rect.left + (column_width * i))\n column_splits.append(self.rect.right)\n return column_splits\n\n def get_slot_splits(self, slot_count):\n slot_height = int(float(self.rect.height) / float(slot_count))\n slot_splits = []\n for i in range(0, slot_count):\n slot_splits.append(self.rect.top + (slot_height * i))\n slot_splits.append(self.rect.bottom)\n return slot_splits\n\n def refresh_layout(self):\n self.need_reposition = True\n\n def update_layout(self):\n if not self.windows:\n return\n\n # Update all window positions\n if self.need_reposition:\n new_rect = Rect()\n self.need_reposition = False\n\n column_count = len(self.columns)\n\n pending_column, pending_slot = -1, -1\n extra_column = -1\n if self.pending_drop_slot == Direction.InsertLeft:\n column_count += 1\n extra_column = 0\n elif self.pending_drop_slot == Direction.InsertRight:\n column_count += 1\n extra_column = column_count-1\n elif self.pending_drop_slot is not None:\n pending_column, pending_slot = self.pending_drop_slot\n\n column_splits = self.get_column_splits(column_count)\n for column_index, column in enumerate(self.columns):\n column_position = column_index\n if extra_column != -1 and column_index >= extra_column:\n column_position += 1\n\n slot_count = len(column)\n if pending_column == column_index:\n slot_count += 1\n\n slot_splits = self.get_slot_splits(slot_count)\n for slot_index, window in enumerate(column):\n slot_position = slot_index\n if pending_column == column_index and slot_index >= pending_slot:\n slot_position += 1\n\n new_rect.coordinates = (\n column_splits[column_position],\n slot_splits[slot_position],\n column_splits[column_position+1],\n slot_splits[slot_position+1],\n )\n\n edges_flush = (\n column_index == 0,\n slot_index == 0,\n column_index == len(self.columns)-1,\n slot_index == len(column)-1\n )\n\n window.set_layout(new_rect, True, edges_flush)\n\n def set_pending_drop_slot(self, pending_slot):\n self.pending_drop_slot = pending_slot\n self.need_reposition = True\n\n def takeover_from_layout(self, old_layout):\n self.need_reposition = True\n\n if isinstance(old_layout, SidebarLayout):\n if not old_layout.main_window:\n return True\n\n new_windows = [old_layout.main_window]\n new_windows += old_layout.sidebar\n return self.takeover_from_windows(new_windows)\n return False\n\n def takeover_from_windows(self, window_list):\n if not window_list:\n return True\n\n window_count = len(window_list)\n column_count, row_count = self.get_wanted_grid_dimensions(window_count)\n\n # Create empty columns to fill with windows\n self.columns = []\n for i in range(0, column_count):\n self.columns.append([])\n\n # Put windows in the most appropriate positions\n for window in window_list:\n drop_slot, force_drop = self.get_drop_slot(window.real_position.center, window.real_position, fake_window_count=window_count, allow_drop_zones=False)\n self.add_window(window, at_slot=drop_slot)\n\n # Remove columns that didn't get any windows\n self.columns = [col for col in self.columns if col]\n\n return True","repo_name":"GGLucas/PyleWM","sub_path":"pylewm/layouts/autogrid.py","file_name":"autogrid.py","file_ext":"py","file_size_in_byte":23505,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"3299437028","text":"#%%\nfrom src.data import load_maggot_graph\nfrom pathlib import Path\nimport pandas as pd\nimport time\nfrom 
graspologic.cluster import DivisiveCluster, AutoGMMCluster\nfrom src.visualization import CLASS_COLOR_DICT\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom giskard.plot import crosstabplot\nimport datetime\nfrom src.visualization import set_theme\nfrom src.data import join_node_meta\nfrom src.io import savefig\nimport ast\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.preprocessing import normalize\nfrom scipy.cluster.hierarchy import linkage, fcluster\nfrom scipy.spatial.distance import squareform\nfrom graspologic.utils import symmetrize\n\nset_theme()\n\nt0 = time.time()\nCLASS_KEY = \"merge_class\"\npalette = CLASS_COLOR_DICT\n\nmg = load_maggot_graph()\nnodes = mg.nodes\n\nout_path = Path(\"./maggot_models/experiments/revamp_embed\")\n\nFORMAT = \"png\"\n\n\ndef stashfig(name, format=FORMAT, **kws):\n savefig(\n name, pathname=out_path / \"figs\", format=format, dpi=300, save_on=True, **kws\n )\n\n\ndef uncondense_series(condensed_nodes, nodes, key):\n for idx, row in condensed_nodes.iterrows():\n skids = row[\"skeleton_ids\"]\n for skid in skids:\n nodes.loc[int(skid), key] = row[key]\n\n\ncondensed_nodes = pd.read_csv(\n out_path / \"outs/condensed_nodes.csv\",\n index_col=0,\n converters=dict(skeleton_ids=ast.literal_eval),\n)\n\nn_components = 12\nlatent = condensed_nodes[[f\"latent_{i}\" for i in range(n_components)]].values\n\n\n#%%\ncurrtime = time.time()\ndc = DivisiveCluster(\n min_split=16,\n max_level=8,\n cluster_kws=dict(\n kmeans_n_init=25,\n affinity=[\"euclidean\", \"cosine\", \"none\"],\n linkage=[\"ward\", \"average\"],\n ),\n)\n# covariance_type=[\"full\", \"diag\", \"spherical\"]\nhier_labels = dc.fit_predict(latent, fcluster=True)\nprint(f\"{time.time() - currtime:.3f} seconds elapsed for divisive clustering.\")\n\n#%%\n\n\ndef cluster_crosstabplot(\n nodes,\n group=\"cluster_labels\",\n order=\"sum_signal_flow\",\n hue=\"merge_class\",\n palette=None,\n):\n group_order = (\n nodes.groupby(group)[order].agg(np.median).sort_values(ascending=False).index\n )\n\n fig, ax = plt.subplots(1, 1, figsize=(16, 8))\n crosstabplot(\n nodes,\n group=group,\n group_order=group_order,\n hue=hue,\n hue_order=order,\n palette=palette,\n outline=True,\n thickness=0.5,\n ax=ax,\n )\n ax.set(xticks=[], xlabel=\"Cluster\")\n return fig, ax\n\n\nfor i, pred_labels in enumerate(hier_labels.T):\n key = f\"dc_labels_level={i}\"\n condensed_nodes[key] = pred_labels\n fig, ax = cluster_crosstabplot(\n condensed_nodes,\n group=key,\n palette=palette,\n hue=CLASS_KEY,\n order=\"sum_signal_flow\",\n )\n ax.set_title(f\"# clusters = {len(np.unique(pred_labels))}\")\n stashfig(f\"crosstabplot-level={i}\")\n uncondense_series(condensed_nodes, nodes, key)\n join_node_meta(nodes[key], overwrite=True)\n\n#%%\n\nn_components = 14\nn_clusters = 85\nlatent = condensed_nodes[[f\"latent_{i}\" for i in range(n_components)]].values.copy()\nlatent = normalize(latent, norm=\"l2\", axis=1)\n# agg = AgglomerativeClustering(\n# n_clusters=n_clusters, affinity=\"cosine\", linkage=\"average\"\n# )\n# pred_labels = agg.fit_predict(latent)\n\ndists = symmetrize(pairwise_distances(latent, metric=\"euclidean\"))\nZ = linkage(squareform(dists), method=\"ward\")\npred_labels = fcluster(Z, n_clusters, criterion=\"maxclust\")\n\nkey = f\"agg_labels_n_clusters={n_clusters}\"\ncondensed_nodes[key] = pred_labels\nfig, ax = cluster_crosstabplot(\n condensed_nodes,\n group=key,\n palette=palette,\n hue=CLASS_KEY,\n 
order=\"sum_signal_flow\",\n)\nax.set_title(f\"# clusters = {len(np.unique(pred_labels))}\")\nstashfig(f\"agg-n_clusters={n_clusters}\")\nuncondense_series(condensed_nodes, nodes, key)\njoin_node_meta(nodes[key], overwrite=True)\n\n#%%\nagmm = AutoGMMCluster(\n min_components=n_clusters,\n max_components=n_clusters,\n label_init=pred_labels,\n n_jobs=-1,\n covariance_type=\"full\",\n)\nagmm_pred_labels = agmm.fit_predict(latent)\n\n#%%\nkey = f\"agmm_agg_n_clusters={n_clusters}\"\ncondensed_nodes[key] = agmm_pred_labels\nfig, ax = cluster_crosstabplot(\n condensed_nodes,\n group=key,\n palette=palette,\n hue=CLASS_KEY,\n order=\"sum_signal_flow\",\n)\nax.set_title(f\"# clusters = {len(np.unique(pred_labels))}\")\nstashfig(key)\nuncondense_series(condensed_nodes, nodes, key)\njoin_node_meta(nodes[key], overwrite=True)\n\n#%%\nfrom sklearn.metrics import adjusted_rand_score, rand_score\n\nadjusted_rand_score(agmm_pred_labels, pred_labels)\n\n#%%\nelapsed = time.time() - t0\ndelta = datetime.timedelta(seconds=elapsed)\nprint(\"----\")\nprint(f\"Script took {delta}\")\nprint(f\"Completed at {datetime.datetime.now()}\")\nprint(\"----\")\n","repo_name":"neurodata/maggot_models","sub_path":"experiments/revamp_gaussian_cluster/revamp_gaussian_cluster.py","file_name":"revamp_gaussian_cluster.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"39155797337","text":"from threading import RLock,Thread\nfrom typing import List\nlist_lock=RLock()\n\ndef sum_list(int_list:list[int])->int:\n with list_lock:\n if len(int_list)==0:\n print(\"finished summing\")\n return 0\n else:\n head,*tail=int_list\n print(\"summing rest of list\")\n return head+sum(tail)\n\n\nif __name__==\"__main__\":\n thread=Thread(target=sum_list,args=([1,2,3,4],))\n thread.start()\n thread.join()\n print(thread)","repo_name":"owari-taro/concurrency_in_python","sub_path":"concurrency_with_asyncio/ch7/recursion_lock.py","file_name":"recursion_lock.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29791337020","text":"import json\nimport time\nfrom .scraper.main import main\nfrom .scraper.parser import Fetch\nfrom .scraper.json import create_json\nfrom .scraper.Image import Image_size\nimport logging\nfrom threading import Thread\nfrom pymongo import MongoClient\n\n# fetching urls from file/set threading\nurl_threads = 100\n\n# output files\ninput_file = 'urls.json'\n# output_file = 'op_files/opfile.json'\nerr_file = 'op_files/log.log'\n# err_urls = 'op_files/error_urls.txt'\n# null_image = 'op_files/null_image_urls.json'\n# time_urls = 'op_files/time_urls.json'\n# status_error = 'op_files/status_error.txt'\n# status404 = 'op_files/status404.json'\n\n# mongo authentication\ndb_name = 'zembed'\ncollection_name = 'scrapper_op_100'\ncollection_status = 'scrapper_error_100'\n\nuser = 'zembed'\npassword = '$C}(38G>LTff5,pU'\nmclient = MongoClient('mongodb://'+user+':'+password+'@localhost:45651/zembed')\n# mclient = MongoClient('mongodb://'+user+':'+password+'@127.0.0.1')\n\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n filename=err_file,\n filemode='a')\n\n\ndef call(url):\n allow_types = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/webp', 'image/tiff',\n 'image/bmp']\n start = time.time()\n\n try:\n print(url)\n raw_data = 
Fetch(url, '').get_url_data() # fetch url data\n end = time.time()\n if raw_data.headers['content-type'].split(';')[0] in ['text/html', 'text/html; charset=utf-8'] or raw_data.headers['content-type'] in allow_types:\n\n if Fetch(url, raw_data).get_header() and Fetch(url, raw_data).get_header()['status'] in [200, '200, 200 OK', '200 OK']:\n\n if Fetch(url,raw_data).get_header()['type'] in allow_types:\n data = {\n \"url\": url,\n \"image\": Image_size().body_image_fetch(raw_data.url, []),\n \"time\": time.time() - start\n }\n else:\n url_parsing = Fetch(raw_data.url, \"\").expand_url(url) # URL parsing details\n meta = main(url_parsing, raw_data)\n if meta[\"image\"] in [None, [], \"\", (None,), ('',)] or not meta[\"image\"]:\n meta[\"image\"] = Image_size().body_image_fetch(\"https://logo.clearbit.com/\"+url_parsing['provider_url']+\"?s=300\", [])\n\n data = create_json(meta)\n data[\"time\"][\"page_fetch\"] = end - start\n data[\"time\"][\"total_time\"] += data[\"time\"][\"page_fetch\"]\n # print(data['image'])\n # if data['image'] is None:\n # with open(null_image, 'a') as image_file:\n # json.dump(data, image_file)\n # image_file.write('\\n')\n # elif data['time']['total_time'] > 5:\n # print(data['time']['total_time'])\n # with open(time_urls, 'a') as time_file:\n # json.dump(data, time_file)\n # time_file.write('\\n')\n\n # with open(output_file, 'a') as opfile:\n # json.dump(data, opfile)\n # opfile.write('\\n')\n try:\n client = mclient\n db = client[db_name]\n collection = db[collection_name]\n collection.insert(data)\n except Exception as e:\n print('failed ondata,', str(e))\n\n # elif Fetch(url, raw_data) and Fetch(url, raw_data).url_header() in [404, '404', \"\"]:\n elif raw_data[0].status_code if isinstance(raw_data, tuple) else raw_data.status_code in [404, '404', \"\"]:\n\n data = {\n \"url\": url,\n \"time\": time.time() - start,\n \"title\": 'Error 404: Page not found',\n \"comment\": 'Error 404'\n }\n # with open(status404, 'a') as not_found:\n # json.dump(data, not_found)\n # not_found.write('\\n')\n try:\n client = mclient\n db = client[db_name]\n collection = db[collection_status]\n collection.insert(data)\n except Exception as e:\n print('failed ondata,', str(e))\n\n\n else:\n data = {\n \"url\": url,\n \"time\": time.time() - start,\n \"title\": Fetch(url,raw_data).url_header() if (Fetch(url,raw_data) and Fetch(url,raw_data).url_header()) else 'Unknown Error code',\n \"comment\": 'Status Error'\n }\n\n # with open(status_error, 'a') as status:\n # status.write(url)\n # status.write('\\n')\n\n try:\n client = mclient\n db = client[db_name]\n collection = db[collection_status]\n collection.insert(data)\n except Exception as e:\n print('failed ondata,', str(e))\n\n\n else:\n data = {\n \"url\": url,\n \"time\": time.time() - start,\n \"title\": raw_data.headers['content-type'],\n \"comment\": 'Not suitable Webpage'\n }\n \n\n # with open(status_error, 'a') as status:\n # status.write(url)\n # status.write('\\n')\n\n try:\n client = mclient\n db = client[db_name]\n collection = db[collection_status]\n collection.insert(data)\n except Exception as e:\n print('failed ondata,', str(e))\n\n\n except Exception as e:\n # with open(err_urls,'a') as err_url:\n # err_url.write(url)\n # err_url.write('\\n')\n # with open(err_file,'a') as err:\n # err.write('error in url:'+url)\n # print(e,' error in url: ', url)\n # err.write('\\n')\n logging.info(url)\n logging.exception(e)\n\n data = {\n \"url\": url,\n \"time\": time.time() - start,\n \"title\": 'URL Error',\n \"comment\": 'URL 
Error'\n }\n try:\n client = mclient\n db = client[db_name]\n collection = db[collection_status]\n collection.insert(data)\n except Exception as e:\n print('failed ondata,', str(e))\n\n\n\nwith open(\"urls.json\", 'r') as oip_urls:\n ip_urls = []\n oip_urls = json.load(oip_urls)\n for i in oip_urls:\n if i not in ip_urls:\n ip_urls.append(i)\n print(len(ip_urls))\n\n while ip_urls:\n total_threads = []\n if len(ip_urls) > url_threads:\n url_sublist = list(ip_urls[0:url_threads])\n del (ip_urls[0:url_threads])\n else:\n url_sublist = list(ip_urls)\n ip_urls.clear()\n for url in url_sublist:\n thread = Thread(target=call, args=(url,))\n thread.start()\n total_threads.append(thread)\n\n for thread in total_threads:\n thread.join()\n time.sleep(0)\n\n\n","repo_name":"ryderaka/zembed","sub_path":"zembed/crawler/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35763933363","text":"import pickle\n\n# Load the stackoverflow dictionnary of words-vectors\nfile_Name = \"en_model\"\nfileObject = open(file_Name,'rb')\nen_model = pickle.load(fileObject)\nfileObject.close()\n\n# Turn a sentence to a vector\ndef sentence2vec(sentence):\n\ttry:\n\t\tsentence = str(sentence)\n\texcept Exception as e:\n\t\treturn None\n\tif sentence == None:\n\t\treturn None\n\tsplit = sentence.split(\" \")\n\tlength = len(split)\n\tvector = [0] * 300\n\tfor word in split:\n\t\tif word in en_model.vocab:\n\t\t\tfor i in range(300):\n\t\t\t\tvector[i] += en_model[word][i]\n\t\telse:\n\t\t\tlength -= 1\n\tif not length:\n\t\treturn None\n\tfor i in range(300):\n\t\tvector[i] /= length\n\treturn (vector)\n","repo_name":"tle-huu/xbrain-hackathon","sub_path":"algorithm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11278192121","text":"from flask import Flask\nimport threading\nfrom flask import jsonify\nfrom flask import request\nimport os\nimport json\nfrom s3utils import *\nfrom imageRecognision import imageRecognision\nfrom nlpClassification import nlpClassification\nfrom imageInfer import ImageInfer\nfrom NLPInfer import NLPInfer\nfrom chestXrayInfer import ChestXrayInfer\n\napp = Flask(__name__)\n\nfrom flask_cors import CORS\ncors = CORS(app, resources={r\"/imgrecog/*\": {\"origins\": \"*\"}})\ncors2 = CORS(app, resources={r\"/hello/*\": {\"origins\": \"*\"}})\ncors3 = CORS(app, resources={r\"/nlpclassify/*\": {\"origins\": \"*\"}})\ncors4 = CORS(app, resources={r\"/nlpinfer/*\": {\"origins\": \"*\"}})\n\n@app.route('/hello', methods=['POST'])\ndef hello():\n param = request.get_json()\n return jsonify({'status' : True})\n\n\n@app.route('/imginfer', methods=['POST'])\ndef imgInfer():\n params = request.get_json()\n response = ImageInfer(params)\n print(response)\n return response\n\n@app.route('/chestxray', methods=['POST'])\ndef chestXInfer():\n params = request.get_json()\n response = ChestXrayInfer(params)\n print(response)\n return response\n\n@app.route('/nlpinfer', methods=['POST'])\ndef nlpInfer():\n params = request.get_json()\n response = NLPInfer(params)\n print(response)\n return response\n\n@app.route('/imgrecog', methods=['POST'])\ndef imgRecog():\n params = request.get_json()\n print(params)\n ID = params[\"TOKEN_ID\"]\n epochs = int(params[\"Epochs\"])\n Ratio = int(params[\"Ratio\"])\n downloadDirectoryFroms3( 'capstone-eva', ID)\n f = 
open(ID+\"/userInfo.json\",)\n info = json.load(f)\n f.close()\n if (info[\"Project\"]==\"IMG_REC\") and (info[\"TOKEN_ID\"]==params[\"TOKEN_ID\"]):\n #thread = threading.Thread(target=imageRecognision,\n# args=(ID, epochs, Ratio, ), daemon=True)\n #thread.start()\n imageRecognision(ID, epochs, Ratio)\n return jsonify({'status' : True})\n else:\n return jsonify({'status' : False})\n\n@app.route('/nlpclassify', methods=['POST'])\ndef NLPclassify():\n params = request.get_json()\n print(params)\n ID = params[\"TOKEN_ID\"]\n epochs = int(params[\"Epochs\"])\n Ratio = int(params[\"Ratio\"])\n downloadDirectoryFroms3( 'capstone-eva', ID)\n f = open(ID+\"/userInfo.json\",)\n info = json.load(f)\n f.close()\n if (info[\"Project\"]==\"NLP_CLS\") and (info[\"TOKEN_ID\"]==params[\"TOKEN_ID\"]):\n nlpClassification(ID, epochs, Ratio)\n return jsonify({'status' : True})\n else:\n return jsonify({'status' : False})\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","repo_name":"derbidecoders/Hack-Derbi","sub_path":"Deployment/EC2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34742475806","text":"import websocket\nimport json\nimport time\nimport sys\n\ntoken = \"\"\nws = websocket.WebSocket()\nws.connect('wss://gateway.discord.gg/?v=6&encoding=json')\nhello = json.loads(ws.recv())\nheartbeat_interval = hello['d']['heartbeat_interval']\ngamejson = {\n \"name\": \"Alper\",\n \"type\": 1,\n \"url\": \"https://www.twitch.tv/Alper\"\n}\nauth = {\n \"op\": 2,\n \"d\": {\n \"token\": token,\n \"properties\": {\n \"$os\": sys.platform,\n \"$browser\": \"RTB\",\n \"$device\": f\"{sys.platform} Device\"\n },\n \"presence\": {\n \"game\": gamejson,\n \"status\": \"Online\",\n \"since\": 0,\n \"afk\": False\n }\n },\n \"s\": None,\n \"t\": None\n }\nws.send(json.dumps(auth))\nack = {\n \"op\": 1,\n \"d\": None\n }\nwhile True:\n time.sleep(heartbeat_interval/1000)\n try:\n ws.send(json.dumps(ack))\n except Exception:\n break\n","repo_name":"idklmao2/discord_tools","sub_path":"user-streaming_status.py","file_name":"user-streaming_status.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2274719189","text":"# https://adventofcode.com/2021/day/14\n\nfrom collections import Counter\nfrom functools import cache\nfrom pathlib import Path\n\n\ndef load_data(path):\n with open(path) as fd:\n template, _, *rules = fd.read().splitlines()\n rules = {k: v for k, v in (rule.split(\" -> \") for rule in rules)}\n return template, rules\n\n\ndef polymerize(template, rules, steps_num=10):\n @cache\n def count(pair, step):\n if step == steps_num or pair not in rules:\n return Counter()\n step += 1\n pair_ins = rules[pair]\n new_counter = Counter(pair_ins)\n new_counter.update(count(pair[0] + pair_ins, step))\n new_counter.update(count(pair_ins + pair[1], step))\n return new_counter\n\n counter = Counter(template)\n for left, right in zip(template[0:], template[1:]):\n counter.update(count(left + right, 0))\n return counter\n\n\ndef get_diff(counter):\n sorted_by_quantity = counter.most_common()\n return sorted_by_quantity[0][1] - sorted_by_quantity[-1][1]\n\n\ndef part_one(data):\n return get_diff(polymerize(*data))\n\n\ndef part_two(data):\n return get_diff(polymerize(*data, steps_num=40))\n\n\nif __name__ == \"__main__\":\n input_dir = Path().resolve().parent / \"inputs/14\"\n samples 
= load_data(input_dir / \"samples.in\")\n data = load_data(input_dir / \"data.in\")\n\n assert part_one(samples) == 1588\n assert part_two(samples) == 2188189693529\n\n print(part_one(data))\n print(part_two(data))\n","repo_name":"koczanm/advent-of-code","sub_path":"2021/python/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31637475952","text":"import cv2\nimport numpy as np\n\n# DFS Search\ndef DFS_Search(grid, originalmaze):\n (x,y)=grid.shape\n x-=1\n y-=1\n path=[[grid[0,0],0]]\n root=grid[0,0]\n trash=[]\n mazecopy=np.zeros((x*2+1,y*2+1),dtype='uint8')\n mazecopy[0,0]=1\n while(True):\n if(root.x==x and root.y==y): \n temp=[]\n for t in reversed(path): temp.append(t[0])\n return temp, mazecopy\n\n \n turn=0\n while(turn=len(root.connections)):\n trash.append(path.pop()[0])\n try:\n root=path[-1][0]\n except: return\n continue\n\n\n root=path[-1][0].connections[turn]\n mazecopy[root.x*2,root.y*2]=1\n mazecopy[path[-1][0].x*2 + root.x - path[-1][0].x, path[-1][0].y*2+root.y-path[-1][0].y]=1\n cv2.waitKey(1)\n cv2.imshow('Simmulate',originalmaze-cv2.resize(mazecopy*127,(600,600),interpolation=0))\n path.append([root,0])","repo_name":"HoangPi/Maze_Game","sub_path":"DFS_Search.py","file_name":"DFS_Search.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19549425742","text":"from django.db import models\r\n\r\nclass Recipe(models.Model):\r\n id = models.AutoField(primary_key=True)\r\n recipe_name = models.CharField(max_length=255)\r\n recipe = models.TextField()\r\n\r\n @classmethod\r\n def create(cls, name: str, content: str):\r\n recipe = cls(recipe_name=name, recipe=content)\r\n return recipe\r\n\r\nclass Users(models.Model):\r\n username = models.TextField()\r\n email = models.EmailField(primary_key=True)\r\n\r\n @classmethod\r\n def create(cls, username: str, email: str):\r\n user = cls(username=username, email=email)\r\n return user\r\n \r\nclass Event(models.Model):\r\n id = models.AutoField(primary_key=True)\r\n eventDate = models.DateField()\r\n eventData = models.TextField()\r\n\r\n @classmethod\r\n def create(cls, eventDate: str, eventData: str):\r\n event = cls(eventDate=eventDate, eventData=eventData)\r\n return event\r\n","repo_name":"MagKartik/myapp_django","sub_path":"myapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20751896098","text":"#!/usr/bin/env python\n# coding: utf-8\n\ndef output_values(Y_data):\n Y_t = []\n for e in Y_data:\n if e == 'passed':\n Y_t.append(1)\n else:\n Y_t.append(0) \n return Y_t\n\n\n\ndef get_pass_streak(y_project):\n p = y_project[0]\n pass_streak = [y_project[0]]\n for i in range(1, len(y_project)):\n pass_streak.append(p)\n if y_project[i] == 1:\n p += 1\n else:\n p = 0\n return pass_streak\n\n\n\ndef get_first_failures(df):\n \n results = df['tr_status'].tolist()\n length = len(results)\n verdict = ['keep']\n prev = results[0]\n \n for i in range(1, length):\n if results[i] == 0:\n if prev == 0:\n verdict.append('discard')\n #print(i+1)\n else:\n verdict.append('keep')\n else:\n verdict.append('keep')\n prev = results[i]\n \n df['verdict'] = verdict\n df = df[ df['verdict'] == 'keep' ]\n df.drop('verdict', inplace=True, axis=1)\n return df\n\n\n\ndef 
get_complete_data(p_name, first_failures=True):\n \n #open the metrics file\n filename = 'project_metrics/' + p_name.split('.')[0] + '_metrics.csv'\n project = pd.read_csv(filename)\n project = project.drop(project.columns[9], axis=1)\n project['tr_status'] = output_values(project['tr_status'])\n if first_failures:\n project = get_first_failures(project)\n return project\n\n\n\n\ndef hybrid_performance(p_name, test_builds, test_result, batchsize, ci):\n total_builds = len(test_result)\n\n bad_builds = 0\n flag = 0\n for i in range(len(test_result)):\n if flag == 1:\n if ci[i] == 1:\n bad_builds += 1\n else:\n flag == 0\n else:\n if test_result[i] == 0:\n if ci[i] == 1:\n flag = 1\n bad_builds += 1\n\n \n\n delay = []\n delay_indexes = []\n built_indexes = []\n for i in range(len(test_result)):\n if ci[i] == 0:\n built_indexes.append(i)\n if test_result[i] == 0:\n if ci[i] != 0:\n delay_indexes.append(i)\n \n num_failed = test_result.count(0)\n if num_failed == 0:\n failures_found = 100\n failures_not_found = 0\n\n else:\n num_of_failure_unidentified = len(delay_indexes)\n identified_failures = test_result.count(0) - num_of_failure_unidentified\n failures_found = 100*identified_failures/test_result.count(0)\n failures_not_found = 100*num_of_failure_unidentified/test_result.count(0)\n \n# print(delay_indexes)\n# print(built_indexes)\n from_value = 0\n \n for k in range(len(built_indexes)):\n for j in range(len(delay_indexes)):\n if delay_indexes[j] > from_value and delay_indexes[j] < built_indexes[k]:\n delay.append(built_indexes[k] - delay_indexes[j])\n from_value = built_indexes[k]\n \n if len(delay_indexes) != 0:\n final_index = len(test_result)\n for j in range(len(delay_indexes)):\n delay.append(final_index - delay_indexes[j])\n \n# print(\"===========================================\")\n# print('Total Number of builds for {} = {}'.format(p_name, total_builds))\n# print('Total % of builds required for {} = {}'.format(p_name, builds_reqd))\n# print('Total % of time required for {} = {}'.format(p_name, time_reqd))\n# print('Total delays made for {} = {}'.format(p_name, sum(delay)))\n# print('Total % of failures identified for {} = {}'.format(p_name, failures_found))\n# print('Total % of failures unidentified for {} = {}'.format(p_name, 100*num_of_failure_unidentified/test_result.count(0)))\n# print(\"===========================================\")\n \n return (sum(delay), failures_found, failures_not_found, bad_builds)\n\n\n\n\n\ndef bootstrapping(train_data, count):\n \n \n #grid search hyperparameters\n n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n max_depth = [int(x) for x in np.linspace(10, 110, num = 5)]\n \n #setting up grid search\n param_grid = {'n_estimators': n_estimators, 'max_depth': max_depth}\n forest = RandomForestClassifier()\n grid_search = GridSearchCV(estimator = forest, param_grid = param_grid, cv = 3, n_jobs = -1, verbose = 0)\n \n \n \n \n train_result = train_data['tr_status'].tolist()\n train_data['num_of_passes'] = get_pass_streak(train_result)\n\n best_n_estimators = []\n best_max_depth = []\n\n best_f1 = 0\n best_f1_sample = 0\n best_f1_sample_result = 0\n best_f1_estimator = 0\n best_thresholds = []\n\n\n #bootstrap 100 times\n for i in range(1):\n print('Bootstrapping {} for {}'.format(i, p_name))\n\n #Ensuring we get a non-zero training or testing sample\n while True:\n print('Here for {} {}'.format(i, p_name))\n sample_train = resample(train_data, replace=True, n_samples=len(train_data))\n sample_train_result = 
sample_train['tr_status']\n\n build_ids = sample_train['tr_build_id'].tolist()\n sample_test = train_data [~train_data['tr_build_id'].isin(build_ids)] \n sample_test_result = sample_test['tr_status']\n\n if len(sample_test_result) != 0:\n break\n\n #dropping result column and build ids column\n sample_train.drop('tr_status', inplace=True, axis=1)\n sample_train.drop('tr_build_id', inplace=True, axis=1)\n sample_test.drop('tr_status', inplace=True, axis=1)\n sample_test.drop('tr_build_id', inplace=True, axis=1)\n\n #training the sample\n print('Training {} for {}'.format(i, p_name))\n grid_search.fit(sample_train, sample_train_result)\n sample_pred_vals = grid_search.predict_proba(sample_test)\n\n pred_vals = sample_pred_vals[:, 1]\n fpr, tpr, t = roc_curve(sample_test_result, pred_vals)\n gmeans = sqrt(tpr * (1-fpr))\n ix = argmax(gmeans)\n bt = t[ix]\n best_thresholds.append(bt)\n\n final_pred_result = []\n #threshold setting\n for j in range(len(pred_vals)):\n if pred_vals[j] > bt:\n final_pred_result.append(1)\n else:\n final_pred_result.append(0)\n\n try:\n f1 = f1_score(sample_test_result, final_pred_result)\n except:\n print('')\n\n if f1 > best_f1:\n best_f1 = f1\n best_f1_sample = sample_train\n best_f1_sample_result = sample_train_result\n best_f1_estimator = grid_search.best_estimator_\n\n best_n_estimators.append(grid_search.best_params_['n_estimators'])\n best_max_depth.append(grid_search.best_params_['max_depth'])\n\n\n #completed with bootstrapping \n threshold = median(best_thresholds)\n n_estimator = median(best_n_estimators)\n max_depth = median(best_max_depth)\n\n #retrain to get the best model\n forest = RandomForestClassifier(n_estimators=int(n_estimator), max_depth=int(max_depth))\n forest.fit(best_f1_sample, best_f1_sample_result)\n\n file_name = 'dump_data/rq2_' + p_name + '_' + str(count) + '_best_model.pkl'\n dump_file = open(file_name, 'wb')\n pickle.dump(forest, dump_file)\n pickle.dump(threshold, dump_file)\n pickle.dump(n_estimator, dump_file)\n pickle.dump(max_depth, dump_file)\n \n \n return forest\n \n \n\n","repo_name":"SAILResearch/replication-21-divya_kamath-build_avoiding_heuristics-code","sub_path":"Rebuttal/RQ2/scripts/mlci_bootstrapping.py","file_name":"mlci_bootstrapping.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11639730850","text":"# The function is not returning the correct values. 
Can you figure out why?\n\n# def get_planet_name(id):\n# # This doesn't work; Fix it!\n# name=\"\"\n# switch id:\n# case 1: name = \"Mercury\"\n# case 2: name = \"Venus\"\n# case 3: name = \"Earth\"\n# case 4: name = \"Mars\"\n# case 5: name = \"Jupiter\"\n# case 6: name = \"Saturn\"\n# case 7: name = \"Uranus\"\n# case 8: name = \"Neptune\"\n# return name\n\ndef get_planet_name(id):\n switch_id = {\n 1:\"Mercury\",\n 2: \"Venus\",\n 3: \"Earth\",\n 4: \"Mars\",\n 5: \"Jupiter\",\n 6: \"Saturn\",\n 7: \"Uranus\",\n 8: \"Neptune\"\n }\n return switch_id.get(id)\n","repo_name":"orlando1080/codewars","sub_path":"8_kyu/get_planet_name_by_id.py","file_name":"get_planet_name_by_id.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37060058755","text":"import os\nimport torch\nimport torchvision\n\n\ndef read_image_torch(path):\n \"\"\"\n Read image in format CHW for RGB or HW for grayscale from path using PyTorch.\n\n Parameters\n ----------\n path : string\n Path to image.\n\n Returns\n -------\n 2D or 3D torch.Tensor\n Image in format CHW for RGB or HW for grayscale.\n \"\"\"\n img = torchvision.io.read_image(path, torchvision.io.ImageReadMode.RGB)\n return img\n\n\nclass OneHotEncoding(object): #TODO: better docstrings\n \"\"\"\n One-hot encode labels.\n \n Attributes\n ----------\n labels : list or tuple\n List of labels.\n \"\"\"\n def __init__(self, labels):\n self.labels = sorted(labels)\n \n def __call__(self, current_label):\n one_hot_label = torch.zeros(len(self.labels))\n one_hot_label[self.labels.index(current_label)] = 1\n return one_hot_label\n\n\nclass FruitDataset(torch.utils.data.Dataset): \n \"\"\"\n Dataset for fruit images. Inherits from torch.utils.data.Dataset.\n Images are read using read_image_torch function.\n\n Attributes\n ----------\n img_dir : string\n Path to directory with images.\n img_labels : list or tuple\n List of labels.\n test : bool, optional\n If True, dataset is used for testing, default is False.\n validation : bool, optional\n If True, dataset is used for validation, default is False.\n transform : callable, optional\n Optional transform to be applied on a sample image.\n target_transform : callable, optional\n Optional transform to be applied on a sample label.\n \"\"\"\n def __init__(self, dir, test=False, validation=False, transform=None, target_transform=None):\n self.img_dir = dir\n self.img_labels = os.listdir(dir)\n self.test = test\n self.validation = validation\n self.transform = transform\n self.target_transform = target_transform\n \n def __len__(self):\n length = 0\n for i, label in enumerate(sorted(self.img_labels)):\n if not self.test:\n length += len(os.listdir(os.path.join(self.img_dir, label)))\n elif self.validation: \n length += len(os.listdir(os.path.join(self.img_dir, label))[::2])\n else:\n length += len(os.listdir(os.path.join(self.img_dir, label))[1::2])\n \n return length\n \n def __getitem__(self, index):\n previous_length = 0\n for label in sorted(self.img_labels):\n if not self.test:\n if index > previous_length + len(os.listdir(os.path.join(self.img_dir, label))) - 1:\n previous_length += len(os.listdir(os.path.join(self.img_dir, label)))\n else:\n current_label = label\n break\n elif self.validation:\n if index > previous_length + len(os.listdir(os.path.join(self.img_dir, label))[::2]) - 1:\n previous_length += len(os.listdir(os.path.join(self.img_dir, label))[::2])\n else:\n current_label = label\n break\n else:\n if 
index > previous_length + len(os.listdir(os.path.join(self.img_dir, label))[1::2]) - 1:\n previous_length += len(os.listdir(os.path.join(self.img_dir, label))[1::2])\n else:\n current_label = label\n break\n \n current_index = index - previous_length\n img = read_image_torch(os.path.join(self.img_dir, current_label, os.listdir(os.path.join(self.img_dir, current_label))[current_index])) / 255\n \n if self.transform:\n img = self.transform(img)\n \n if self.target_transform:\n current_label = self.target_transform(current_label)\n \n return img, current_label\n \n\nclass MLP6(torch.nn.Module):\n \"\"\"\n Multilayer perceptron with 6 hidden layers.\n\n Attributes\n ----------\n input_size : int\n Size of input.\n no_classes : int\n Number of classes.\n flatten : torch.nn.Flatten\n Flatten layer.\n linear_relu_stack : torch.nn.Sequential\n Sequential layer.\n softmax : torch.nn.Softmax\n Softmax layer.\n \"\"\"\n def __init__(self, input_size, no_classes):\n super(MLP6, self).__init__()\n self.input_size = input_size\n self.no_classes = no_classes\n self.flatten = torch.nn.Flatten()\n self.linear_relu_stack = torch.nn.Sequential(\n torch.nn.Linear(self.input_size, 1024),\n torch.nn.ReLU(),\n torch.nn.Linear(1024, 512),\n torch.nn.ReLU(),\n torch.nn.Linear(512, 512),\n torch.nn.ReLU(),\n torch.nn.Linear(512, 512),\n torch.nn.ReLU(),\n torch.nn.Linear(512, 256),\n torch.nn.ReLU(),\n torch.nn.Linear(256, 256),\n torch.nn.ReLU(),\n torch.nn.Linear(256, self.no_classes),\n )\n self.softmax = torch.nn.Softmax(dim=1)\n \n def forward(self, x):\n x = self.flatten(x)\n y = self.linear_relu_stack(x)\n # y = self.softmax(y)\n \n return y\n\n\ndef train_loop(train_dataloader, model, loss_fn, optimizer, device):\n \"\"\"\n Training loop.\n\n Parameters\n ----------\n train_dataloader : torch.utils.data.DataLoader\n Training dataloader.\n model : torch.nn.Module\n Model to be trained on.\n loss_fn : torch.nn\n Loss function.\n optimizer : torch.optim\n Optimizer.\n device : torch.device\n Device to be used for training.\n \"\"\"\n train_size = len(train_dataloader.dataset)\n num_batches = len(train_dataloader)\n total_loss, correct = 0, 0\n \n for batch, (x, y) in enumerate(train_dataloader):\n # Compute prediction and loss\n x, y = x.to(device), y.to(device)\n pred = model(x)\n loss = loss_fn(pred, y)\n total_loss += loss.item()\n correct += (pred.argmax(1) == y.argmax(1)).type(torch.float).sum().item()\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 10 == 0:\n loss = loss.item()\n print(f\" Batch {batch} / {num_batches - 1} Train loss: {loss:>7f}\")\n \n total_loss /= num_batches\n correct /= train_size\n print(f\"Train Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {total_loss:>8f} \\n\")\n \n\ndef test_loop(dataloader, model, loss_fn, device, validation):\n \"\"\"\n Testing loop.\n\n Parameters\n ----------\n dataloader : torch.utils.data.DataLoader\n Testing dataloader.\n model : torch.nn.Module\n Model to be tested on.\n loss_fn : torch.nn\n Loss function.\n device : torch.device\n Device to be used for testing.\n \"\"\"\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n test_loss, correct = 0, 0\n\n with torch.no_grad():\n for x, y in dataloader:\n x, y = x.to(device), y.to(device)\n pred = model(x)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y.argmax(1)).type(torch.float).sum().item()\n\n test_loss /= num_batches\n correct /= size\n \n if validation:\n print(f\"Validation Error: \\n Accuracy: 
{(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n else:\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n","repo_name":"Cata400/fruit_classification","sub_path":"utils_torch.py","file_name":"utils_torch.py","file_ext":"py","file_size_in_byte":7564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28614037000","text":"import os\nimport argparse\nimport datetime\n\nfrom tqdm import tqdm\n\nfrom dataset import normalizer\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--tag', type=str, default='full')\n parser.add_argument('--train_start', type=str, default='2006-03-01 00:00:00')\n parser.add_argument('--train_end', type=str, default='2006-05-18 00:00:00')\n parser.add_argument('--valid_start', type=str, default='2006-05-18 00:00:00')\n parser.add_argument('--valid_end', type=str, default='2006-05-25 00:00:00')\n parser.add_argument('--test_start', type=str, default='2006-05-25 00:00:00')\n parser.add_argument('--test_end', type=str, default='2006-06-01 00:00:00')\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n splits = ['train', 'valid', 'test']\n columns = ['uid', 'query', 'time']\n fmt = '%Y-%m-%d %H:%M:%S'\n\n print(f\"Split original data into data/aol/{args.tag}\")\n itv = {s: tuple(vars(args)[f\"{s}_{i}\"] for i in ['start', 'end']) for s in splits}\n for s in splits:\n print(f\" {s:5s} data: from {itv[s][0]} until {itv[s][1]}\")\n itv = {k: tuple(datetime.datetime.strptime(x, fmt) for x in v) for k, v in itv.items()}\n valid = (itv['train'][0] < itv['train'][1] <= itv['valid'][0] < itv['valid'][1] <= itv['test'][0] < itv['test'][1])\n assert valid, \"Invalid time intervals\"\n\n # make directory and open files to write\n target_dir = f\"data/aol/{args.tag}\"\n os.makedirs(target_dir, exist_ok=True)\n f = {s: {column: open(os.path.join(target_dir, f\"{s}.{column}.txt\"), 'w') for column in columns} for s in splits}\n\n # read original AOL query log dataset and write data into files\n print(\"\")\n cnt = {s: 0 for s in splits}\n for i in range(1, 11):\n filename = f\"user-ct-test-collection-{i:02d}.txt\"\n print(f\"Reading {filename}...\")\n f_org = open(os.path.join(\"data/aol/org\", filename))\n f_org.readline()\n prev = {column: '' for column in columns}\n for line in tqdm(f_org):\n data = {column: v for column, v in zip(columns, line.strip().split('\\t')[:3])}\n # normalize queries\n data['query'] = normalizer(data['query'])\n # filter out too short queries and redundant queries\n # data['query'] == '-'\n if len(data['query']) < 3 or (data['uid'], data['query']) == (prev['uid'], prev['query']):\n continue\n t = datetime.datetime.strptime(data['time'], fmt)\n for s in splits:\n if itv[s][0] <= t < itv[s][1]:\n cnt[s] += 1\n for column in columns:\n f[s][column].write(data[column] + '\\n')\n prev = data\n\n # print total number of data in each split\n print(\"\")\n for s in splits:\n print(f\"Number of {s:5s} data: {cnt[s]:8d}\")\n\n\nif __name__ == \"__main__\":\n main(get_args())\n","repo_name":"clovaai/subword-qac","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"53"} +{"seq_id":"22454953823","text":"N, M = map(int, input().split())\n\nmiro = []\nfor i in range(N):\n miro.append(list(map(int,input())))\nvisit = [[0]*M for i in range(N)]\nstack =[[0,0]]\ndir = [[-1,0], [1,0], [0,-1], 
[0,1]]\n\nvisit[0][0] =1\nwhile stack:\n top = stack.pop()\n \n for d in dir:\n y = top[0]+d[0]; x=top[1]+d[1]\n \n if y >=N or y <0 or x>=M or x<0:\n continue\n if miro[y][x]==0:\n continue\n \n if visit[y][x]==0 or visit[y][x] >visit[top[0]][top[1]]+1:\n visit[y][x] = visit[top[0]][top[1]]+1\n stack.append([y,x])\n\nprint(visit[-1][-1])","repo_name":"parksey/baekjoon","sub_path":"BFS/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41743095897","text":"from openpyxl import Workbook\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom datetime import datetime\nimport time\n\n\nurl = \"https://search.naver.com/search.naver?where=news&sm=tab_jum&query=나이스피앤아이\"\nnewsTitleList = []\ncurTime = datetime.today()\nyear = curTime.strftime(\"%Y\")\nmonth = curTime.strftime(\"%m\")\nday = curTime.strftime(\"%d\")\n\nexcelName = \"D:\\새 폴더\\dd_\" + year + month + day + \".xlsx\"\n\n\nprint(excelName)\n\n\ntry:\n driver = webdriver.Chrome()\n driver.maximize_window()\n driver.get(url)\n \n \n \n for index in range(1,11):\n newsTitle = driver.find_element(By.XPATH,\"/html/body/div[3]/div[2]/div/div[1]/section/div/div[2]/ul/li[\"+str(index)+\"]/div[1]/div/a\").text\n newsTitleList.append(newsTitle)\n \n \n for index2 in range(0,len(newsTitleList)):\n print(newsTitleList[index2] + \"\\n\")\n \n time.sleep(5)\n \n driver.quit()\n \n workBook = Workbook()\n \n workSheet = workBook.active\n \n for index3 in range(0,len(newsTitleList)):\n workSheet[\"A\"+str(index3+1)] = newsTitleList[index3]\n \n \n \n workBook.save(excelName)\n \n \n \n \n\n\n \nexcept Exception as ex:\n print(ex)\n ","repo_name":"Stanlee904/python","sub_path":"selenium/example/naverNewsSearch.py","file_name":"naverNewsSearch.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35508181509","text":"\"\"\"El programa debe recibir una cadena de caracteres y devolver un diccionario con cada palabra que\ncontiene y el número de veces que aparece.\nOtra función que reciba el diccionario generado con la función anterior y devuelva una tupla con\nla palabra más repetida y su frecuencia\"\"\"\n\ndef actividad3A (texto):\n texto=texto.split()\n dicc_palabras={}\n for i in texto:\n if i in dicc_palabras:\n dicc_palabras[i]+=1\n else:\n dicc_palabras[i]=1\n return dicc_palabras\n\n\ndef actividad3B(dicc_palabras):\n palabra_mas_repetida=\"\"\n max_frecuencia=0\n for palabra,frecuencia in dicc_palabras.items():\n if frecuenciamax_frecuencia\n\n","repo_name":"Pablo-R-B/Tarea_Pycharm","sub_path":"Actividad 3.py","file_name":"Actividad 3.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3631743425","text":"# Tee ratkaisusi tänne\nclass Opintorekisteri:\n def __init__(self):\n self.__suoritus = {}\n\n def lisaa_suoritus(self, nimi, arvosana, op):\n if nimi in self.__suoritus:\n self.__suoritus[nimi].korota_arvosana(arvosana)\n else:\n self.__suoritus[nimi] = Kurssi(nimi, arvosana, op)\n \n def hae_tiedot(self, nimi):\n if nimi not in self.__suoritus:\n return None\n else:\n return self.__suoritus[nimi]\n\n def suoritukset(self):\n return self.__suoritus\n\nclass OpintorekisteriSovellus:\n def __init__(self):\n self.__suoritus = Opintorekisteri() \n\n def ohje(self):\n print(\"komennot: \") 
\n print(\"1 lisää suoritus\")\n print(\"2 hae suoritus\")\n print(\"3 tilastot\")\n print(\"0 lopetus\")\n\n def lisaa_suoritus(self):\n nimi = input(\"kurssi: \")\n arvosana = input(\"arvosana: \")\n op = input(\"opintopisteet: \")\n self.__suoritus.lisaa_suoritus(nimi, arvosana, op) \n\n def haku(self):\n nimi = input(\"nimi: \")\n tiedot = self.__suoritus.hae_tiedot(nimi)\n if tiedot==None:\n print(\"ei suoritusta\")\n return\n print(f\"{tiedot.nimi()} ({tiedot.op()} op) arvosana {tiedot.arvosana()}\")\n\n def tilastot(self): \n tieto = self.__suoritus.suoritukset() \n op = [tieto[kurssi].op() for kurssi in tieto]\n print(f\"suorituksia {len(tieto)} kurssilta, yhteensä {sum(op)} opintopistettä\") \n arvosanat = [tieto[kurssi].arvosana() for kurssi in tieto] \n ka = f\"{sum(arvosanat)/len(arvosanat):.1f}\"\n print(\"keskiarvo\", ka)\n print(\"arvosanajakauma\")\n jakauma = {i: arvosanat.count(i)*\"x\" for i in range(1,6)}\n for rivi in jakauma:\n print(str(rivi) + \": \" + str(jakauma[rivi]))\n \n def suorita(self):\n self.ohje()\n while True:\n print(\"\")\n komento = input(\"komento: \")\n if komento == \"0\":\n break\n elif komento == \"1\":\n self.lisaa_suoritus()\n elif komento == \"2\":\n self.haku()\n elif komento == \"3\": \n self.tilastot()\n else:\n self.ohje()\n\nclass Kurssi:\n\n def __init__(self, nimi, arvosana, op):\n self.__nimi = nimi\n self.__arvosana = arvosana \n self.__op = op\n\n def korota_arvosana(self, arvosana):\n if arvosana > self.__arvosana:\n self.__arvosana = arvosana\n\n def nimi(self):\n return self.__nimi\n\n def arvosana(self):\n return int(self.__arvosana)\n\n def op(self):\n return int(self.__op)\n\n# kun testaat, mitään muuta koodia ei saa olla luokkien ulkopuolella kuin seuraavat rivit\nsovellus = OpintorekisteriSovellus()\nsovellus.suorita()","repo_name":"sami-one/mooc-ohjelmointi-21","sub_path":"osa10-12_opintorekisteri/src/koodi.py","file_name":"koodi.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"fi","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25033092900","text":"import requests\nimport pandas as pd\nimport time, os, sys, re\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n\ndef getDataUrls(url):\n #site = 'https://tiki.vn/nha-sach-tiki/c8322?src=c.8322.hamburger_menu_fly_out_banner&order=top_seller'\n response = requests.get(url)\n page_content = BeautifulSoup(response.text, 'html.parser')\n\n # B1 -------- Lấy title sách\n # lấy tất cả thẻ div sau đó lọc lại data theo 2 cách sau\n\n # Cách 01: tiki có add tên sách vào data-title nên dùng thẻ div lấy title sách\n # product_names = page_content.find_all('div')\n # titles = []\n # for i in range(len(product_names)):\n # if product_names[i].has_attr('data-title'):\n # titles.append(product_names[i]['data-title'])\n\n # Cách 02: \n books = page_content.find_all('p', attrs={\"class\":\"title\"})\n list_of_titles = []\n for i in books:\n list_of_titles.append(i.text.strip())\n\n # B2 --- lấy giá sách:\n\n # ---- hàm lấy price:\n def get_price(str_price):\n price = 0\n str_price = (str_price.text).strip().split()\n if len(str_price) > 0:\n str_price = str_price[0].replace(\"đ\", \"\")\n price = float(str_price.replace(\".\", \"\"))\n return price\n\n # ---- lấy tất cả thẻ p chứa price-sale:\n books = page_content.find_all('p', attrs={\"class\":\"price-sale\"})\n\n # ---- list price thường và price giảm giá:\n list_of_final_prices = []\n list_of_regular_prices = []\n\n prices = page_content.find_all('p', 
attrs={\"class\":\"price-sale\"})\n\n for section in prices:\n if section.find('span', 'final-price'):\n final = get_price(section.find('span', 'final-price'))\n list_of_final_prices.append(final)\n else:\n list_of_final_prices.append(0)\n \n if section.find('span', 'price-regular'):\n regular = get_price(section.find('span', 'price-regular'))\n list_of_regular_prices.append(regular)\n else:\n list_of_regular_prices.append(0)\n\n # B3 --- lấy link image:\n images = page_content.find_all('img', attrs={\"class\":\"product-image img-responsive\"})\n\n list_of_images = []\n for image in images:\n list_of_images.append(image[\"src\"])\n\n # B4 --- tạo dataFrame --> save to file:\n if len(list_of_titles) == 0 and len(list_of_final_prices) == 0 and len(list_of_regular_prices) == 0 and len(list_of_images) == 0:\n print(url)\n print(len(list_of_titles), len(list_of_final_prices),len(list_of_regular_prices), len(list_of_images))\n print('Data doesn\\'t exist !!!')\n pass\n else:\n print(url)\n print(len(list_of_titles), len(list_of_final_prices),len(list_of_regular_prices), len(list_of_images))\n\n pwd_file = os.path.dirname(__file__)\n stamp = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n with open(pwd_file + '/' + 'temp.txt', 'a') as f:\n f.write('url: %s \\n' % url)\n f.write('len: %s %s %s %s \\n' % (len(list_of_titles), len(list_of_final_prices),len(list_of_regular_prices), len(list_of_images)))\n\n dictionary_books = {\"title\": list_of_titles,\n \"final_price\": list_of_final_prices,\n \"regular_price\": list_of_regular_prices,\n 'image': list_of_images}\n\n df = pd.DataFrame(dictionary_books)\n\n page_next = re.findall(\"page=[0-9]+\", url)\n\n try:\n file_name = 'best_seller_books_' + stamp + '_' + page_next[0] +'.xlsx'\n df.to_excel(pwd_file + '/data/' + file_name, encoding='utf-8')\n except IndexError:\n file_name = 'best_seller_books_' + stamp + '_' + 'page=1' +'.xlsx'\n df.to_excel(pwd_file + '/data/' + file_name, encoding='utf-8')\n\n time.sleep(2)\n\nif __name__ == \"__main__\":\n pwd_file = os.path.dirname(__file__)\n df = pd.read_csv(pwd_file + '/'+ \"urls.csv\")\n\n urls = []\n for i in df['url']:\n urls.append(i)\n\n # --- Get data\n for url in urls:\n getDataUrls(url)\n","repo_name":"truongdinhtrong/test_crawl","sub_path":"get_best_sellers_book_from_list_urls.py","file_name":"get_best_sellers_book_from_list_urls.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70626996647","text":"# Compiler Construction CSF363 Assignment\n# Phase 2 - Syntax Analysis\n# Made by --\n# Kumar Pranjal - 2018A7PS0163H\n# Ashna Swaika - 2018A7PS0027H\n# Abhishek Bapna - 2018A7PS0184H\n# Ashish Verma - 2018A7PS0009H\n\n# code starts here\nimport json\nimport platform\nimport subprocess\nimport sys\n\nfrom lexer import lexer\n\n# list of safe symbols for error recovery\nsafe_symbols = [';', '}']\n\n\n# update safe stack to newest safe state\ndef update_safe(stack):\n return stack.copy()\n\n\n# bring original stack to safe state\ndef stack_safe_state(safe_stack):\n return safe_stack.copy()\n\n\n# bring token in required format as in parse table\ndef parse_format(lines):\n tokens = []\n\n for line in lines:\n lin_num, token, lexeme = line.split('')\n token = token.strip()\n lexeme = lexeme.strip('\\n')\n lexeme = lexeme.strip()\n if token == 'integer_literal':\n token = 'INT_LIT'\n if token == 'floating_literal':\n token = 'FLT_LIT'\n if token == 'identifier':\n token = 'ID'\n if token == 'string 
literal':\n token = 'STR_LIT'\n if token == 'operator':\n token = lexeme\n if token == 'delimiter':\n token = lexeme\n if token == 'keyword':\n token = lexeme\n if token == 'stop':\n token = lexeme\n if token == 'Error':\n pass\n tokens.append([lin_num, token, lexeme])\n\n return tokens\n\n\n# look up parse table or goto table\ndef parse_lookup(parse_table, token, number):\n return parse_table[number].get(token, 'Error')\n\n\n# look up reduce table\ndef reduce_lookup(reduce_table, number):\n rule = reduce_table[number]\n lines = rule.split('->')\n return lines[0].strip(), lines[1].strip()\n\n\n# parsing process\ndef parse(fname=None):\n\n # get input file\n if fname is None:\n filename = f'Testcases/{sys.argv[1]}'\n else:\n filename = fname\n\n # get parse table , goto table and reduce table\n with open(\"Tables/Action Table.json\", 'r', encoding='utf8') as f1:\n parse_table = json.load(f1)\n with open(\"Tables/Goto Table.json\", 'r', encoding='utf8') as f2:\n goto_table = json.load(f2)\n with open(\"Tables/Rules.json\", 'r', encoding='utf8') as f3:\n reduce_table = json.load(f3)\n\n # input file lexical analysis , open input and output parser files\n parse_outf = open(\n f'{filename.split(\".\")[0]}_output_parse.txt', \"w\", encoding='utf8')\n lexer(filename)\n inputfname = f'{filename.split(\".\")[0]}_input_parse.txt'\n\n # code to just bring input into required format - lines will contain final (line, token, lexeme) tuple\n with open(inputfname, 'r', encoding='utf8') as f:\n lines = f.readlines()\n\n if lines != []:\n last = lines[-1].split('')\n last = [x.strip() for x in last]\n if last != []:\n lines.append(f'{last[0]} stop $')\n else:\n lines.append('0 stop $')\n\n lines = parse_format(lines)\n\n # stack - has stack contents\n # safe_stack - maintains the last safe state and all symbols inside it\n # tokens - have all tokens - terminals in order of insertion into stack\n # count - keeps a check on which token we currently are looking at\n # steps - is the number of times we go trhough the loop\n\n stack = ['0']\n safe_stack = ['0']\n tokens = []\n count = 0\n steps = 0\n\n # get parse_table, reduce_table and goto_table\n while True and count < len(lines):\n lin_num, token, lexeme = lines[count]\n update_safe_stack = False\n print(f'{steps} : {stack}', file=parse_outf)\n steps += 1\n\n if token in safe_symbols:\n update_safe_stack = True\n\n # handle lexical error\n if token == 'Error':\n stack = stack_safe_state(safe_stack)\n count += 1\n if token == '$':\n print(f'\\033[93mPARSING FAIL ::: FOUND EOF\\033[0m')\n break\n\n print(\n f'\\033[35mLexical Error Line number {lin_num} :::: Unidentified character {lexeme} found\\033[0m')\n continue\n\n # look up parse table for next character\n number = stack[-1]\n next_op = parse_lookup(parse_table, token, number)\n\n # shift action\n if next_op[0] == 's':\n stack.append(token)\n tokens.append(token)\n stack.append(next_op[1:])\n if update_safe_stack:\n safe_stack = update_safe(stack)\n count += 1\n continue\n\n # reduce action\n elif next_op[0] == 'r':\n non_terminal, condense = reduce_lookup(\n reduce_table, next_op[1:])\n condense = condense.split()\n condense.reverse()\n\n # to pop out elements from the stack\n flag = True\n condense = [i.strip() for i in condense if i]\n\n if condense[0] != 'ϵ':\n for i in condense:\n if flag:\n stack.pop()\n if stack[-1] == i:\n stack.pop()\n flag = True\n else:\n flag = False\n\n # now look up goto table and find the next rule\n num = stack[-1]\n new_num = parse_lookup(goto_table, non_terminal, 
num)\n\n if new_num == 'Error':\n stack = stack_safe_state(safe_stack)\n count += 1\n if token == '$':\n print(f'\\033[93mPARSING FAIL ::: FOUND EOF\\033[0m')\n break\n expected = []\n for keys in goto_table[num].keys():\n expected.append(keys)\n print(\n f'\\033[0;31mSyntax Error:: Line number {lin_num}, Found {lexeme} Expected one among {expected}, Please check again\\033[0m')\n\n else:\n stack.append(non_terminal)\n stack.append(new_num)\n\n elif next_op == 'acc':\n stack.clear()\n stack.append('0')\n stack.append(next_op)\n print(f'{steps} : {stack}', file=parse_outf)\n parse_outf.close()\n print(\"\\033[32mPARSING SUCCESS - accept state reached\\033[0m\")\n print(\n f\"{filename.split('.')[0]}_output_parse.txt\".replace('/', '\\\\'))\n if platform.system() == \"Windows\":\n subprocess.run(\n [\"notepad\", f\"{filename.split('.')[0]}_output_parse.txt\".replace('/', '\\\\')])\n elif platform.system() == \"Linux\":\n subprocess.run(\n [\"subl\", f\"{filename.split('.')[0]}_output_parse.txt\"])\n break\n\n else:\n expected = []\n for keys in parse_table[number].keys():\n if keys == '$':\n continue\n expected.append(keys)\n print(\n f'\\033[0;31mSyntax Error Line number {lin_num} :::: Found {lexeme} Expected one among {expected} \\033[0m')\n stack = stack_safe_state(safe_stack)\n count += 1\n if token == '$':\n print(f'\\033[93mPARSING FAIL ::: FOUND EOF\\033[0m')\n break\n\n\nif __name__ == '__main__':\n parse()\n","repo_name":"LuciFR1809/VyPr-Compilers","sub_path":"Assignment2/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5673733248","text":"with open('test.txt', 'r') as f:\n # input = \"\"\n input = []\n for line in f:\n input.append(line.strip(\"\\n\"))\n\n# def findMin(i,j, input):\n# num = input[i][j]\n# if i == 0 and j == 0:\n# if num < input[i+1][j] and num < input[i][j+1]:\n# return num\n# elif i == 0:\n# if num < input[i][j+1] and num < input[i][j-1] and num < input[i+1][j]:\n# return num\n# elif j == 0:\n# if num < input[i][j+1] and num < input[i+1][j] and num < input[i][j-1]:\n# return num\n# else:\n# if num < input[i+1][j] and num < input[i-1][j] and num < input[i][j-1] and num < input[i][j+1]:\n# return num\n# return -1\n\ndef getSurr(i,j,input):\n surr = []\n\n if 0<=i+1= num:\n isMin = False\n if isMin:\n # print(i,j)\n mins.append([i,j])\n\ndef getBasin(i,j,input):\n bList = []\n for xi, xj in getSurrInd(i,j,input):\n # print(input[xi][xj])\n if input[xi][xj] != 9:\n bList.append([xi,xj])\n return bList\n\n\n\nbasin = []\nn=0\n# for i,j in mins:\nbasin=(getBasin(0,0,input))\nfor _ in range(100):\n for fi,fj in basin:\n print(fi,fj,getBasin(fi,fj,input))\n # if x not in basin: basin.append(x)\n # basin[n].append(getBasin(fi,fj,input))\n # [res.append(x) for x in test_list if x not in res]\nprint(basin)","repo_name":"kjunuh/Advent-of-Code","sub_path":"2021/day09/day9 pt2.py","file_name":"day9 pt2.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13749045936","text":"\"\"\"\n有环链表的定义:在链表中某个节点的next元素指向在它前面出现过的节点,则表明该链表存在环路\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n # 返回环路的入口结点\n def detectCycle(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return None\n\n fast, slow = head, head\n # 快指针的合法性判断很重要, 快指针合法时,慢一定合法\n while fast and fast.next: # .next 
两次，就在这里限定 fast 和 fast.next\n            fast = fast.next.next\n            slow = slow.next\n            if fast == slow:\n                break\n\n        if fast != slow:\n            return None\n        else:\n            # x = (n-1) * C + (C - p)\n            # 左: fast 从 head 走;\n            # 右: slow 从其原来位置出发,走过 C-p 和 n-1 个 C 后 与 x 相遇\n            fast = head\n            while fast != slow:\n                fast = fast.next\n                slow = slow.next\n            return fast\n","repo_name":"Shuai-Xie/LeetCode","sub_path":"case/linked_list/环路检测.py","file_name":"环路检测.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3768911963","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\n\nclass SimpleSequenceModel(nn.Module):\n    def __init__(self, feature_dim, hidden_dim, num_actions):\n        super(SimpleSequenceModel, self).__init__()\n        self.lstm = nn.LSTM(input_size=feature_dim, hidden_size=hidden_dim, batch_first=True)\n        self.fc = nn.Linear(hidden_dim, num_actions)\n\n    def forward(self, x):\n        # x shape: (batch, seq_len, feature_dim)\n        lstm_out, _ = self.lstm(x)\n        # lstm_out shape: (batch, seq_len, hidden_dim)\n\n        # We take the output of the last time step\n        last_time_step = lstm_out[:, -1, :]\n        # last_time_step shape: (batch, hidden_dim)\n\n        out = self.fc(last_time_step)\n        # out shape: (batch, num_actions)\n        return out\n\ndef generate_simple_dataset(size=500, seq_len=10, num_features=1):\n    data = np.zeros((size, seq_len, num_features))\n    labels = np.zeros(size, dtype=int) # Assuming three classes: 0 (BUY), 1 (SELL), 2 (HOLD)\n\n    for i in range(size):\n        pattern_type = np.random.choice(['increasing', 'decreasing', 'constant'])\n        if pattern_type == 'increasing':\n            data[i, :, 0] = np.linspace(0, 1, seq_len)\n            labels[i] = 0 # 'BUY'\n        elif pattern_type == 'decreasing':\n            data[i, :, 0] = np.linspace(1, 0, seq_len)\n            labels[i] = 1 # 'SELL'\n        else: # 'constant'\n            constant_value = np.random.uniform(0, 1)\n            data[i, :, 0] = constant_value\n            labels[i] = 2 # 'HOLD'\n\n    data_tensor = torch.tensor(data, dtype=torch.float32)\n    labels_tensor = torch.tensor(labels, dtype=torch.long)\n\n    return data_tensor, labels_tensor\n\n# Function to train the model\ndef train_model(model, data, labels, epochs=50, batch_size=32, learning_rate=0.001):\n    criterion = nn.CrossEntropyLoss()\n    optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n    for epoch in range(epochs):\n        total_loss = 0\n        model.train()\n\n        for i in range(0, len(data), batch_size):\n            inputs = data[i:i + batch_size]\n            target = labels[i:i + batch_size]\n\n            optimizer.zero_grad()\n            outputs = model(inputs)\n            loss = criterion(outputs, target)\n            loss.backward()\n            optimizer.step()\n\n            total_loss += loss.item()\n\n        print(f'Epoch {epoch+1}/{epochs}, Loss: {total_loss/len(data)}')\n\nif __name__ == '__main__':\n    features_dim = 3\n    seq_len = 10\n    data_tensor, labels_tensor = generate_simple_dataset(seq_len=seq_len, num_features=features_dim)\n\n    # Define the model\n    hidden_dim = 64\n    num_actions = 3\n    model = SimpleSequenceModel(features_dim, hidden_dim, num_actions)\n\n    # Train the model\n    train_model(model, data_tensor, labels_tensor)","repo_name":"jamesliu/nanoDPO","sub_path":"nanodpo/simple_sequence_model.py","file_name":"simple_sequence_model.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13450859432","text":"import numpy as np\n\nclass Naive_Bayes_Gaussian_Classifier():\n    def __init__(self,categorical,numeric,target):\n        self.categorical = categorical\n        self.numeric = numeric\n        
self.target = target\n self.model = {}\n self.yes = 0\n self.no = 0\n def train(self,df):\n n = len(df)\n y = len(df.loc[df[self.target] == 1])\n no = n-y;\n p_y = y/n\n p_n = 1-p_y\n self.yes = p_y\n self.no = p_n\n for i in self.categorical:\n l = df[i].unique()\n a = {}\n for j in l:\n k_y = len(df[i].loc[df[i]==j])\n p_y_n = len(df[(df[self.target]==1)&(df[i]==j)])\n p_n_n = len(df[(df[self.target]==0)&(df[i]==j)])\n p_f = k_y/n\n temp = [0]*2\n temp[0] = ((p_n_n/no)*p_n)/p_f\n temp[1] = ((p_y_n/y)*p_y)/p_f\n a[j] = temp\n self.model[i] = a\n for i in self.numeric:\n a = {}\n for j in range(0,2):\n temp = df[df[self.target] ==j]\n mean = temp[i].mean()\n std = temp[i].std()\n a[j] = {'mean' : mean,'std' :std}\n self.model[i] = a\n def find_val(self,val,feature,outcome):\n mean = self.model[feature][outcome]['mean']\n std = self.model[feature][outcome]['std']\n exponent = -((val-mean)*(val-mean))/(std*std)\n first_term = -np.log(std)\n second_term = -np.log(2*np.pi)/2\n return first_term+second_term+exponent\n \n\n def predict(self,df):\n preds = []\n probs = []\n for i in range(len(df)):\n p_yes = np.log(self.yes)\n p_no = np.log(self.no)\n for j in self.categorical:\n val = df.iloc[i][j]\n val1 = self.model[j][val][0]\n val2 = self.model[j][val][1]\n p_no += np.log(val1)\n p_yes += np.log(val2)\n\n for j in self.numeric:\n val = df.iloc[i][j]\n p_no+=self.find_val(val,j,0)\n p_yes+=self.find_val(val,j,1)\n if(p_no>p_yes):\n prob = np.e**(p_no)\n probs.append(prob)\n preds.append(0)\n else:\n prob = np.e**(p_yes)\n probs.append(prob)\n preds.append(1)\n return preds,probs\n","repo_name":"ArixCrest/ML_Implementations","sub_path":"naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10260885339","text":"import requests\nimport numpy as np\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\nimport argparse\nimport pandas as pd\nimport requests\nimport math\nfrom io import StringIO\nimport warnings\nimport logging\nimport pathlib\nimport shutil\nimport geopandas as gpd\nfrom glob import glob\n\n# download shapefiles: wget -P ../data/shapefiles/ https://www2.census.gov/geo/tiger/TIGER2020PL/LAYER/TABBLOCK/2020/tl_2020_13121_tabblock20.zip\n\n\ndef main(\n input_file,\n input_county_fips,\n output_file,\n):\n warnings.filterwarnings(\"ignore\")\n\n # Fix later\n # os.system(\n # \"wget -N ../data/shapefiles/ https://www2.census.gov/geo/tiger/TIGER2020PL/LAYER/TABBLOCK/2020/tl_2020_%s_tabblock20.zip\"\n # % input_county_fips\n # )\n shapefile_path = \"../data/shapefiles/tl_2020_%s_tabblock20.zip\" % input_county_fips\n assert os.path.isfile(shapefile_path)\n\n state_df = gpd.read_file(shapefile_path)\n state_df[\"county_id\"] = state_df[\"STATEFP20\"] + state_df[\"COUNTYFP20\"]\n\n lat_lon_df = pd.read_csv(input_file)\n cgdf = gpd.GeoDataFrame(\n lat_lon_df,\n geometry=gpd.points_from_xy(lat_lon_df.longitude, lat_lon_df.latitude),\n )\n # Spatial join the address with points and the jeffesron county blocks with polygons\n\n joined_county_df = state_df.sjoin(cgdf, how=\"left\", predicate=\"intersects\")\n print(joined_county_df)\n joined_county_df = joined_county_df[\n [\"address\", \"GEOID20\", \"longitude\", \"latitude\", \"geometry\"]\n ]\n joined_county_df = joined_county_df.rename(columns={\"GEOID20\": \"geoid20\"})\n joined_county_df.to_csv(output_file, index=False)\n logging.info(\"[%s] File saved to: %s\" % (os.path.isfile(output_file), 
output_file))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Given a csv and you are running in the code repository, download the shape file and attempt an outerjoin spatially and return the results\"\n )\n\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n help=\"The input csv with lat long columns to sptial join\",\n required=True,\n )\n parser.add_argument(\n \"-c\",\n \"--input_county_fips\",\n type=str,\n help=\"The input county fips to do the spatial joining\",\n required=True,\n )\n parser.add_argument(\n \"-o\",\n \"--output_file\",\n type=str,\n help=\"The output csv where matches are found\",\n required=True,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Show debugging outputs\",\n action=argparse.BooleanOptionalAction,\n )\n parser.add_argument(\n \"-f\",\n \"--force\",\n action=argparse.BooleanOptionalAction,\n help=\"Whether or not to override the output file\",\n required=False,\n default=False,\n )\n\n args = parser.parse_args()\n log_level = logging.INFO\n if args.verbose:\n log_level = logging.DEBUG\n\n logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=log_level)\n\n assert os.path.isfile(args.input_file), \"input file is invalid\"\n\n test_df = pd.read_csv(args.input_file)\n\n assert \"latitude\" in test_df.columns\n assert \"longitude\" in test_df.columns\n\n if not args.force:\n assert not os.path.isfile(args.output_file), \"output file is invalid\"\n\n main(\n args.input_file,\n args.input_county_fips,\n args.output_file,\n )\n","repo_name":"uva-bi-sdad/national_address_database","sub_path":"code/spatial_join.py","file_name":"spatial_join.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24825010597","text":"\nimport math\n\nn = int(input(\"Plz enter your number:\"))\nfactorial = 0\ncounter = 1\nwhile factorial < n:\n factorial = math.factorial(counter)\n counter += 1\n\n\nif factorial == n:\n print(\"This is a factorial number\")\nelse:\n print(\"this is not a factorial number\")\n","repo_name":"Ancksunamun/New-python-course-autumn","sub_path":"04.Assignment(Aydineradat)/04.Factorial_finder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3246435853","text":"__author__ = 'Francholi'\n__author__ = 'Francholi'\n\n# panda imports\nfrom direct.showbase.DirectObject import DirectObject\n\nfrom Elements.Element.Element import *\nfrom Utils.Logger import Logger\n\n\nclass EventLogger(Element):\n \"\"\"\n \"\"\"\n def __init__(self, **kwargs):\n super(EventLogger, self).__init__(**kwargs)\n self.listener = DirectObject()\n\n # registering some events by hand\n self.listener.accept('crossHair',self.logEvent)\n\n uniqueFileName = self.config.logfile +\"_\"+ self.config.world.participantId + \".log\"\n self.eventLog = Logger(self.baseTime, uniqueFileName, 'w')\n #self.eventLog.startLog()\n self.eventLog.logEvent('Event logger started\\n')\n taskMgr.add( self.updateHooks, 'updateHooks' )\n\n self.registeredEvents = messenger.getEvents()\n for e in self.registeredEvents:\n self.listener.accept(e, self.logEvent, [e])\n\n self.hideElement()\n\n def logEvent(self, event=None, args=[]):\n if event == 'mouse1':\n args = base.mouseWatcherNode.getMouse()\n self.eventLog.logEvent(\"%s;%s\\n\"%(event,args))\n\n def enterState(self):\n # super class enterState\n 
Element.enterState(self)\n\n def exitState(self):\n # super class exitState\n Element.exitState(self)\n taskMgr.remove( 'updateHooks' )\n self.eventLog.logEvent('Event logger stopped\\n')\n self.eventLog.closeLog()\n\n def updateHooks(self, task):\n # run every 100 ms\n task.delayTime = 0.1\n newEvents = [x for x in messenger.getEvents() if x not in self.registeredEvents]\n for x in newEvents:\n #print \"NEW EVENT\"\n self.listener.accept(x, self.logEvent, [x])\n self.registeredEvents.append(x)\n return task.cont\n","repo_name":"Soetfisk/experimental_framework","sub_path":"Elements/EventLogger/EventLogger.py","file_name":"EventLogger.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27309107342","text":"import os\nimport gettext\nimport babel\nfrom collections import OrderedDict\n\nbabel_cfg_default = \"\"\"\n# format see https://babel.pocoo.org/en/latest/messages.html\n\n[python: **.py]\n\n[jinja2: **.html]\nextensions=jinja2.ext.i18n\n\n\"\"\"\n\nlocales_cfg_default = \"\"\"\nlocales=[\"zh\", \"en\"]\n\"\"\"\n\nroot_dir = os.path.abspath(os.path.dirname(__file__))\nlocale = \"en\"\n\ntr = lambda x:x\n\ndef _(text):\n return tr(text)\n\ndef set_locale(locale_in):\n global locale, tr, root_dir\n print(\"-- set locale to\", locale_in)\n locale = locale_in\n locales_path = os.path.join(root_dir, 'locales')\n if not os.path.exists(locales_path): # for pyinstaller pack\n locales_path = os.path.join(os.path.dirname(root_dir), 'locales')\n # check translate binary file\n mo_path = os.path.join(locales_path, \"en\", \"LC_MESSAGES\", \"messages.mo\")\n if not os.path.exists(mo_path):\n main(\"finish\")\n lang = gettext.translation('messages', localedir=locales_path, languages=[locale])\n tr = lang.gettext\n\ndef get_languages(locales):\n languages = OrderedDict()\n for locale in locales:\n obj = babel.Locale.parse(locale)\n languages[locale] = obj.language_name + (\" \" + obj.script_name if obj.script_name else \"\")\n return languages\n\ndef extract(src_path, config_file_path, out_path):\n from babel.messages.frontend import extract_messages\n cmdinst = extract_messages()\n cmdinst.initialize_options()\n cmdinst.mapping_file = config_file_path\n cmdinst.output_file = out_path\n cmdinst.input_paths = src_path\n cmdinst.omit_header = True\n cmdinst.header_comment = \"#\"\n try:\n cmdinst.ensure_finalized()\n cmdinst.run()\n except Exception as err:\n raise err\n\ndef init(template_path, out_dir, locale, domain=\"messages\"):\n from babel.messages.frontend import init_catalog\n cmdinst = init_catalog()\n cmdinst.initialize_options()\n cmdinst.input_file = template_path\n cmdinst.output_dir = out_dir\n cmdinst.locale = locale\n cmdinst.domain = domain\n try:\n cmdinst.ensure_finalized()\n cmdinst.run()\n except Exception as err:\n raise err\n\ndef update(template_path, out_dir, locale, domain=\"messages\"):\n from babel.messages.frontend import update_catalog\n cmdinst = update_catalog()\n cmdinst.initialize_options()\n cmdinst.input_file = template_path\n cmdinst.output_dir = out_dir\n cmdinst.omit_header = True\n cmdinst.locale = locale\n cmdinst.domain = domain\n try:\n cmdinst.ensure_finalized()\n cmdinst.run()\n except Exception as err:\n raise err\n\ndef compile(translate_dir, locale, domain=\"messages\"):\n from babel.messages.frontend import compile_catalog\n cmdinst = compile_catalog()\n cmdinst.initialize_options()\n cmdinst.directory = translate_dir\n cmdinst.locale = locale\n 
cmdinst.domain = domain\n try:\n cmdinst.ensure_finalized()\n cmdinst.run()\n except Exception as err:\n raise err\n\n\ndef rm_po_meta(po_path):\n with open(po_path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n with open(po_path, \"w\", encoding=\"utf-8\") as f:\n start_line = 0\n start = False\n for i, line in enumerate(lines):\n if line.startswith(\"msgid\"):\n start = True\n continue\n if start and line.startswith('#'):\n start_line = i\n break\n lines = lines[start_line:]\n lines.insert(0, '#\\nmsgid \"\"\\nmsgstr \"\"\\n\\n')\n f.writelines(lines)\n\n\ndef main(cmd, root_dir, cfg_path=None, locales_path=None, locales=None, rm_meta=False):\n ret = 0\n root_dir = os.path.abspath(root_dir)\n if not os.path.exists(root_dir):\n print(\"path {} not exists\".format(root_dir))\n return -1\n cfg_path_final = cfg_path if cfg_path else os.path.join(root_dir, \"i18n_babel.cfg\")\n locales_path_final = locales_path if locales_path else os.path.join(root_dir, \"i18n_locales.cfg\")\n if not os.path.exists(cfg_path_final):\n print(\"cfg path {} not exists\".format(cfg_path_final))\n return -1\n if not os.path.exists(locales_path_final):\n print(\"locales path {} not exists\".format(locales_path_final))\n return -1\n # read locales config(locales variable in locales_path_final file)\n if not locales:\n locales = []\n with open(locales_path_final, \"r\", encoding=\"utf-8\") as f:\n g = {}\n exec(f.read(), g)\n locales = g[\"locales\"]\n\n cwd = os.getcwd()\n os.chdir(root_dir)\n if cmd == \"prepare\":\n print(\"-- translate locales: {}\".format(locales))\n print(\"-- extract keys from files\")\n if not os.path.exists(\"locales\"):\n os.makedirs(\"locales\")\n # os.system(\"pybabel extract -F babel.cfg -o locales/messages.pot ./\")\n extract(\"./\", cfg_path_final, \"locales/messages.pot\")\n print(\"-- extract keys from files done\")\n for locale in locales:\n print(\"-- generate {} po files from pot files\".format(locale))\n if os.path.exists('locales/{}/LC_MESSAGES/messages.po'.format(locale)):\n print(\"-- file already exits, only update\")\n # \"pybabel update -i locales/messages.pot -d locales -l {}\".format(locale)\n update(\"locales/messages.pot\", \"locales\", locale)\n else:\n print(\"-- file not exits, now create\")\n # \"pybabel init -i locales/messages.pot -d locales -l {}\".format(locale)\n init(\"locales/messages.pot\", \"locales\", locale)\n # remove meta info from header first msgid to charactor \"#\"\n if rm_meta:\n rm_po_meta(\"locales/{}/LC_MESSAGES/messages.po\".format(locale))\n print(\"-- generate {} po files done\".format(locale))\n elif cmd == \"finish\":\n print(\"-- translate locales: {}\".format(locales))\n for locale in locales:\n print(\"-- generate {} mo file from po files\".format(locale))\n # \"pybabel compile -d locales -l {}\".format(locale)\n compile(\"locales\", locale)\n print(\"-- generate mo files done\")\n elif cmd == \"all\":\n ret = main(\"prepare\", root_dir, cfg_path, locales_path, locales=locales, rm_meta=rm_meta)\n if ret == 0:\n ret = main(\"finish\", root_dir, cfg_path, locales_path, locales=locales, rm_meta=rm_meta)\n if ret != 0:\n print(\"finish failed\")\n else:\n print(\"prepare failed\")\n os.chdir(cwd)\n return ret\n\n\ndef cli_main():\n import argparse\n parser = argparse.ArgumentParser(\"tranlate tool\")\n parser.add_argument(\"-p\", \"--path\", default=\"\", help=\"path to the root translation directory, locales dir will be created in this path\")\n parser.add_argument(\"-c\", \"--cfg\", default=\"\", help=\"path to the babel 
config file, by default will create i18n_babel.cfg in the root path\")\n parser.add_argument(\"-l\", \"--locales\", default=\"\", help=\"path to the locales config file, by default will create i18n_locales.cfg in the root path\")\n parser.add_argument(\"--rm_meta\", action=\"store_true\", help=\"remove meta info in the po file\")\n parser.add_argument(\"cmd\", type=str, choices=[\"prepare\", \"finish\", \"all\"], default=\"all\")\n args = parser.parse_args()\n main(args.cmd, args.path, args.cfg, args.locales, rm_meta = args.rm_meta)\n\nif __name__ == \"__main__\":\n cli_main()\n","repo_name":"teedoc/teedoc","sub_path":"teedoc/layout_i18n.py","file_name":"layout_i18n.py","file_ext":"py","file_size_in_byte":7400,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"53"} +{"seq_id":"43242227452","text":"import numpy as np\nimport scipy.linalg\n__all__ = ['cauchy_ldl', 'cauchy_hermitian_svd']\n\ndef cauchy_ldl(mu):\n\tr\"\"\" Compute LDL* factorization of Cauchy matrix\n\n\tGiven a Hermitian Cauchy matrix specified by parameters :math:`\\\\boldsymbol{\\mu}\\in \\mathbb{C}^n`\n\twhere \n\n\t.. math::\n\n\t\t\\mathbf{M}(\\\\boldsymbol{\\mu})\n\t\t\t= \\\\begin{bmatrix}\n\t\t\t\t(\\mu_1 + \\\\bar{\\mu}_1)^{-1} & \\ldots &(\\mu_1 + \\\\bar{\\mu}_n)^{-1} \\\\\\\\\n\t\t\t\t\\\\vdots & & \\\\vdots \\\\\\\\\n\t\t\t\t(\\mu_n + \\\\bar{\\mu}_1)^{-1} & \\ldots & (\\mu_n + \\\\bar{\\mu}_n)^{-1} \n\t\t\t\\end{bmatrix}\n\n\tCompute the LDL* factorization \n\n\t.. math::\n\n\t\t\\mathbf{M}= \\mathbf{P} \\mathbf{L} \\mathbf{D} \\mathbf{L}^* \\mathbf{P}^*\n\n\twhere :math:`\\mathbf{L}` is a lower triangular matrix,\n\t:math:`\\mathbf{D}` is a diagonal matrix,\n\tand :math:`\\mathbf{P}` is a permutation matrix given by\n\tthe permutation vector :math:`\\mathbf{p}`\n\t\n\t.. math::\n\t\t\t\n\t\t\\mathbf{P} = \\mathbf{I}_{\\cdot, p}.\n\n\tTo compute this permutation matrix, \n\n\t.. 
code::\n\n\t\tP = np.eye(n)[:,p]\n\t\n\n\n\tParameters\n\t----------\n\tmu: np.ndarray (n,)\n\t\tParameters for Cauchy matrix\n\n\tReturns\n\t-------\n\tL: np.ndarray (n,n)\n\t\tLower triangular matrix factor\n\tD: np.ndarray (n,)\n\t\tentries in diagonal weighting matrix\n\tp: np.ndarray (n,)\n\t\tpermutation vector\n\t\"\"\"\n\tn = len(mu)\n\tmu = np.copy(mu)\n\ts = 1./(mu + mu.conj())\n\t# Permutation vector\n\tp = np.arange(n)\n\tfor k in range(n):\n\t\tjhat = k+np.argmax(np.abs(s[k:]))\n\t\tmu[[k,jhat]] = mu[[jhat,k]]\n\t\ts[[k,jhat]] = s[[jhat,k]]\n\t\tp[[k,jhat]] = p[[jhat,k]]\n\t\t\n\t\t# Update diagonal entries\n\t\ts[k+1:] = s[k+1:]*(mu[k+1:] - mu[k])*(mu[k+1:] - mu[k]).conj() / \\\n\t\t\t\t( (mu[k] + mu[k+1:].conj())*(mu[k+1:] + mu[k].conj()) )\n\n\t# Now compute LDL factorization of this permuted data\n\tg = np.ones( (n,), dtype = mu.dtype)\n\td = np.zeros((n,))\n\tL = np.zeros((n,n), dtype = mu.dtype)\n\tfor k in range(n-1):\n\t\td[k] = 2*mu[k].real #=(mu[k] + mu[k].conj())\n\t\tL[k:,k] = g[k:] / (mu[k:] + mu[k].conj())\n\t\tg[k+1:] = g[k+1:] * (mu[k+1:] - mu[k])/(mu[k+1:] + mu[k].conj())\n\n\td[-1] = 1./(2*mu[-1].real) #1./(mu[-1] + mu[-1].conj())\n\tL[-1,-1] = g[-1]\n\treturn L, d, p\n\n\ndef cauchy_hermitian_svd(mu, L = None, d = None, p = None):\n\tr\"\"\" Computes the singular value decomposition of a Hermitian Cauchy matrix\n\t\"\"\"\n\n\tn = len(mu)\n\tmu = np.array(mu)\n\n\tif (L is None) or (d is None) or (p is None):\n\t\tL, d, p = cauchy_ldl(mu)\n\n\tM = 1./(np.tile(mu.reshape(n,1), (1,n)) + np.tile(mu.conj().reshape(1,n), (n,1)))\n\t\n\t# Change to match notation in Dem00, Alg. 3 (end)\n\tP = np.eye(len(mu))[p]\n\tD = np.diag(d)\n\tX = L\n\tYH = L.conj().T\n\n\tM2 = P.T.dot(X.dot(D.dot(YH).dot(P)))\n\n\t# STEP 1: compte X*D*Pinv = Q*R\n\t[Q,R,p1] = scipy.linalg.qr(X.dot(D), pivoting = True, mode = 'economic')\n\n\t\n\t# STEP 2: W = R*P*Y'\n\t# We pivot the rows \n\tW = np.dot(R, YH[p1,:])\n\t\n\t# STEP 3: compute svd of W\n\t[Ubar,s,VH] = np.linalg.svd(W, full_matrices = False, compute_uv = True)\n\n\t# STEP 4: U = Q*Ubar\n\tU = np.dot(Q, Ubar)\n\n\tU = P.T.dot(U)\n\tVH = VH.dot(P)\n\n\treturn U, s, VH\n\n","repo_name":"jeffrey-hokanson/sysmor","sub_path":"sysmor/cauchy.py","file_name":"cauchy.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"2759311862","text":"from keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, Flatten\n\n#one-hot encode target column\ny_train_clusters = to_categorical(y_train_clusters)\n\n#create model\nmodel = Sequential()\n\n#add model layers\nmodel.add(Conv2D(64, kernel_size=3, activation=\"relu\", input_shape=(768,1024,1)))\nmodel.add(Conv2D(32, kernel_size=3, activation=\"relu\"))\nmodel.add(Flatten())\nmodel.add(Dense(3, activation=\"softmax\"))\n\n#compile model using accuracy to measure model performance\nmodel.compile(optimizer='adam', loss='categorical_crossentropy')\n\n#train the model\nmodel.fit(x_train_gray[:3], y_train_clusters[:3], validation_data=(x_train_gray[:3], y_train_clusters[:3]), epochs=3)\n\n\n#predict first 3 images in the test set\nmodel.predict(x_train_gray[:3])\ny_train_clusters","repo_name":"patriciamv/eDO_datathon","sub_path":"code/03_train_keras_IMAGE.py","file_name":"03_train_keras_IMAGE.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"13725871583","text":"# 학습과 테스트에 사용할 데이터셋을 만드는데 쓰이는 메소드들이 저장된 모듈\n\n# 제작일 : 2022.01.09\n# 제작자 : 김민규(minkyu4506@gmail.com)\n\nfrom tqdm import tqdm\nfrom PIL import Image\nimport json\nimport os\n\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset\n\nclass Scalp_Health_Dataset(Dataset) :\n def __init__(self, image_path_list, vals_list) : # 용량을 고려해 이미지는 경로만 받는걸로\n self.image_path_list = image_path_list\n self.vals_list = vals_list\n def __len__(self) : \n return len(self.image_path_list)\n\n def __getitem__(self, index) : # 한 개의 데이터 가져오는 함수\n # 224 X 224로 전처리\n to_tensor = transforms.ToTensor()\n img = to_tensor(Image.open(self.image_path_list[index]).convert('RGB'))\n img.resize_(3, 224, 224)\n img = torch.divide(img, 255.0) # 텐서로 변경 후 이미지 리사이징하고 각 채널을 0~1 사이의 값으로 만들어버림\n \n label = self.vals_list[index]\n \n return img, label\n\n\n# dataset_path : root_path + '/Train'이나 root_path + '/Test'을 받음\n# (root_path : 데이터셋이 저장된 경로) \ndef make_dataset(dataset_path, category) :\n \n \n image_group_folder_path = dataset_path + '/Image'\n label_group_folder_path = dataset_path + '/Label'\n \n ori_label_folder_list = os.listdir(label_group_folder_path) # '[라벨]피지과다_3.중증' 등 폴더명 알기\n \n label_folder_list = []\n \n for i in range(len(ori_label_folder_list)) :\n if ori_label_folder_list[i] != '.DS_Store' : # '.DS_Store'가 생성되었을 수 있으니 폴더 목록에서 제외\n label_folder_list.append(ori_label_folder_list[i])\n \n image_path_list = []\n vals_list = []\n class_str_list = []\n \n desc_str = category + \"_make_dataset\"\n \n for i in tqdm(range(len(label_folder_list)), desc = desc_str) :\n \n label_folder_path = label_group_folder_path + \"/\" + label_folder_list[i]\n \n # label_folder_list에서 '라벨'을 '원천'으로 만 바꿔도 image파일들이 들어있는 폴더명으로 만들 수 있다\n image_folder_path = image_group_folder_path + \"/\" + label_folder_list[i].replace('라벨', '원천')\n \n json_list = os.listdir(label_folder_path) # json파일 목록 담기\n\n for j in range(len(json_list)) : \n json_file_path = label_folder_path + '/' + json_list[j]\n\n with open(json_file_path, \"r\", encoding=\"utf8\") as f: \n contents = f.read() # string 타입 \n json_content = json.loads(contents) # 딕셔너리로 저장\n\n image_file_name = json_content['image_file_name'] # 라벨 데이터에 이미지 파일의 이름이 들어있다\n \n image_file_path = image_folder_path + \"/\" + image_file_name\n\n # val1 : 미세각질, val2 : 피지과다, val3 : 모낭사이홍반, val4 : 모낭홍반/농포, val5 : 비듬, val6 : 탈모\n # 모든 val은 0,1,2,3 중 하나의 값을 가지고 있다\n vals_true = []\n vals_true.append(int(json_content['value_1']))\n vals_true.append(int(json_content['value_2']))\n vals_true.append(int(json_content['value_3']))\n vals_true.append(int(json_content['value_4']))\n vals_true.append(int(json_content['value_5']))\n vals_true.append(int(json_content['value_6']))\n\n vals_true = torch.Tensor(vals_true).type(torch.float32)\n\n image_path_list.append(image_file_path)\n vals_list.append(vals_true/3.0)\n \n return image_path_list, vals_list\n # image_path_list : 파일 경로가 저장된 리스트\n # vals_list : val1 ~ val6이 들어있는 Tensor 리스트\n \n# 하나의 모발 이미지가 여러 증상을 가진 경우가 있다.\n# 하나의 이미지가 [A증상 중증, B증상 경증] 등 여러 증상에 대한 중증도를 나타내게끔 라벨 데이터를 만들어주는 기능도 한다\n# 즉, 데이터 전처리\ndef make_unique_dataset(image_path_list, vals_list) : \n unique_image_path_list = []\n unique_vals_list = []\n \n for i in tqdm(range(len(image_path_list)), desc = \"make unique dataset\" ) : \n file_name = image_path_list[i].split('/')[-1] # 이미지 파일 이름\n \n # 만들고 있던 unique list의 안에 같은 파일이름을 가진게 없는지 확인\n is_sameFilename_here = False\n for j in range(len(unique_image_path_list)) :\n if 
unique_image_path_list[j].split('/')[-1] == file_name : # 중복된 파일이 있으면\n is_sameFilename_here = True # 중복 처리\n \n # 동일한 파일이 없으면 unique리스트에 추가\n if is_sameFilename_here == False :\n unique_image_path_list.append(image_path_list[i])\n unique_vals_list.append(vals_list[i])\n \n return unique_image_path_list, unique_vals_list\n\n\ndef get_dataset(root_path) : \n \n Train_image_path_list, Train_vals_list = make_dataset(root_path + '/Train', \"Train\")\n Train_image_path_list, Train_vals_list = make_unique_dataset(Train_image_path_list, Train_vals_list)\n \n \n Test_image_path_list, Test_vals_list = make_dataset(root_path + '/Test', \"Test\")\n Test_image_path_list, Test_vals_list = make_unique_dataset(Test_image_path_list, Test_vals_list)\n \n len_train_ds = int(len(Train_image_path_list) * 0.8) # 학습에 사용할 데이터 개수\n \n Valid_image_path_list = Train_image_path_list[len_train_ds:]\n Train_image_path_list = Train_image_path_list[:len_train_ds]\n \n Valid_vals_list = Train_vals_list[len_train_ds:]\n Train_vals_list = Train_vals_list[:len_train_ds]\n \n Train_Scalp_Health_Dataset = Scalp_Health_Dataset(Train_image_path_list, Train_vals_list)\n Valid_Scalp_Health_Dataset = Scalp_Health_Dataset(Valid_image_path_list, Valid_vals_list)\n Test_Scalp_Health_Dataset = Scalp_Health_Dataset(Test_image_path_list, Test_vals_list)\n\n return Train_Scalp_Health_Dataset, Valid_Scalp_Health_Dataset, Test_Scalp_Health_Dataset\n \n ","repo_name":"CUAI-CAU/OhMyHead","sub_path":"model/scalp_dataset.py","file_name":"scalp_dataset.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33080131126","text":"#porcess the file\ndef process_file(filename):\n\ttry:\n\t\tf = open(filename, 'r')\n\t\ttransaction_ls = f.readlines()\n\t\tf.close()\n\t\ti = 0\n\t\twhile i < len(transaction_ls):\n\t\t\ttransaction_ls[i].strip('\\n')\n\t\t\ttransaction_ls[i] = float(transaction_ls[i])\n\t\t\ti += 1\n\t\tf.close()\n\texcept FileNotFoundError as Error:\n\t\traise ValueError('Error: String does not represent a valid file!')\n\t\t\n\tincome = []\n\texpenses = []\n\ti = 0 \n\twhile i < len(transaction_ls):\n\t\tif i % 2 == 0:\n\t\t\tincome.append(transaction_ls[i])\n\t\telse:\n\t\t\texpenses.append(transaction_ls[i])\n\t\ti += 1\n\tregular_b = (income, expenses)\n\treturn regular_b\n\n\n# print the regular transaction\ndef regular(tup, weekdays):\n\ti = 0\n\tprint('Regular Transactions:')\n\twhile i < 7:\n\t\tprint('{}: +${:.2f} -${:.2f}'.format(weekdays[i], tup[0][i], tup[1][i]))\n\t\ti += 1\n\n\t\t\n# instruction\ndef help():\n\tprint('''The available commands are:\n\"transaction\": Record a new income or expense\n\"next\": Move on to the next day\n\"status\": Show a summary of how you're doing today\n\"regular\": Show a summary of your regular transactions\n\"help\": Show this help message\n\"quit\": Quit the program''')\n\n\t\n# quit program\ndef terminate():\n\tprint('Bye!')\n\texit()","repo_name":"JaydenYL/Practice","sub_path":"INFO1110challenge/Assignment1/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26965185359","text":"from turtle import Turtle\n\n\nclass Wall(Turtle):\n def __init__(self):\n super().__init__()\n self.penup()\n self.hideturtle()\n self.pencolor(\"#CDFCF6\")\n self.goto(-300, -300)\n self.pendown()\n self.pensize(10)\n self.create_wall()\n\n def create_wall(self):\n for 
wall in range(4):\n self.forward(600)\n self.left(90)\n\n\n\n\n\n\n\n\n","repo_name":"chuthimai/demo","sub_path":"Snack_game/wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35073725033","text":"import binascii\nimport zlib\n\nmax_crc = 0xffffffff\n\ndef inverse_crc(data):\n crc = binascii.crc32(data) & max_crc\n invcrc = max_crc - crc\n return invcrc.to_bytes(4, 'little')\n\n# data = b\"01yhhaljfanbajhdhjaeuy1y4qitruhfhajald\"\n# crc_calc = b\"\\x60\\x1b\\x9e\\xce\"\n# data += crc_calc\ndata = b\"hello world\"\ncrc = binascii.crc32(data) & max_crc\nprint(\"CRC:\\t\\t%X\" %crc)\n\ncrc = binascii.crc32(b\"hello \") & max_crc\ncrc = binascii.crc32(b\"world\", crc) & max_crc\nprint(\"CRC:\\t\\t%X\" %crc)\n\n# crc = zlib.crc32(data) & max_crc\n# print(\"CRC:\\t\\t%X\" %crc)\n\ncrc = 0x0\nfor i in range(len(data)):\n print(\"%X\" %crc)\n crc = binascii.crc32(data[i:i+1], crc) & max_crc\n# for i in range(len(data)-1, -1, -1):\n# crc = binascii.crc32(data[i:i+1], crc) & max_crc\nprint(\"CRC:\\t\\t%X\" %crc)\n\n# inv_crc = inverse_crc(data)\n# print(\"Inv CRC:\\t%s\" %binascii.b2a_hex(inv_crc))\n","repo_name":"nen9mA0/MyHoloCubic","sub_path":"main/modules/module_test/crc_tst.py","file_name":"crc_tst.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2647838260","text":"import os\nimport sys\nimport random\nimport math\nimport numpy as np\nimport skimage.io\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport mysql.connector\nfrom mysql.connector import Error\nimport cv2\nimport time\n\n\n\nclass CafeImageAnalysis:\n\n\n\n\tglobal_image = 0\n\tdef captureImage(self):\n\t\tnumber = 0\n\t\tcamera = cv2.VideoCapture(0)\n\t\tfor i in range(1):\n\t\t\treturn_value, image = camera.read()\n\t\t\tcv2.imwrite('xxxcap'+str(time.time())+'.png', image)\n\t\t#cv2.imshow(\"test\",image)\n\t\tdel(camera)\n\t\treturn image\n\n\t\n\n\tdef videoPreview():\n\t cv2.namedWindow(\"preview\")\n\t vc = cv2.VideoCapture(0)\n\n\t if vc.isOpened():\n\t rval, frame = vc.read()\n\t else:\n\t rval = False\n\t while rval:\n\t cv2.imshow(\"preview\", frame)\n\t rval, frame = vc.read()\n\t key = cv2.waitKey(20)\n\t if key == 27: # exit on ESC\n\t break\n\t vc.release() \n\t cv2.destroyWindow(\"preview\") \n\n\n\t#==========================================================================\n\n\n\tdef insertVariablesIntoTable(self,person_count,commentString):\n\t try:\n\t connection = mysql.connector.connect(host='localhost', database='people', user='root', password='')\n\t if connection.is_connected():\n\t db_info = connection.get_server_info()\n\t print(\"Connected to MySQL Server version\", db_info)\n\t \n\t mySql_insert_query = \"\"\"INSERT INTO Cafe_Count (Persons,Comments) VALUES(%s,%s)\"\"\"\n\t record_values = (person_count,commentString)\n\t cursor = connection.cursor()\n\t \n\t #global connection timeout arguments\n\t global_connect_timeout = 'SET GLOBAL connect_timeout=180'\n\t global_wait_timeout = 'SET GLOBAL connect_timeout=180'\n\t global_interactive_timeout = 'SET GLOBAL connect_timeout=180'\n\n\t cursor.execute(global_connect_timeout)\n\t cursor.execute(global_wait_timeout)\n\t cursor.execute(global_interactive_timeout)\n\t \n\n\t #connection.commit()\n\n\t cursor.execute(\"select database();\")\n\t record = cursor.fetchone()\n\t print(\"You are connected to database: \", 
record)\n\n\t cursor.execute(mySql_insert_query,record_values)\n\t connection.commit()\n\t print(cursor.rowcount, \"Record inserted Successfully!\")\n\t cursor.close()\n\t except Error as e:\n\t print(\"Error While connecting to MySQL: \", e)\n\t finally:\n\t if connection.is_connected():\n\t connection.close()\n\t print(\"MySQL connection is closed\") \n\n\t# #==========================================================================\n\n\tdef analyzePicture(self):\n\n\t\t# Root directory of the project\n\t\tROOT_DIR = os.path.abspath(\"../\")\n\n\t\t# Import Mask RCNN\n\t\tsys.path.append(ROOT_DIR) # To find local version of the library\n\t\tfrom mrcnn import utils\n\t\timport mrcnn.model as modellib\n\t\tfrom mrcnn import visualize\n\t\t# Import COCO config\n\t\tsys.path.append(os.path.join(ROOT_DIR, \"samples/coco/\")) # To find local version\n\t\timport coco\n\n\t\t#%matplotlib inline \n\n\t\t# Directory to save logs and trained model\n\t\tMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n\t\t# Local path to trained weights file\n\t\tCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"samples/mask_rcnn_coco.h5\")\n\t\tprint(COCO_MODEL_PATH)\n\t\t# Download COCO trained weights from Releases if needed\n\t\tif not os.path.exists(COCO_MODEL_PATH):\n\t\t utils.download_trained_weights(COCO_MODEL_PATH)\n\n\t\t# Directory of images to run detection on\n\t\tIMAGE_DIR = os.path.join(ROOT_DIR, \"images\")\n\n\t\t# Directory of images to run detection on\n\t\t#IMAGE_DIR = os.path.join(ROOT_DIR, \"peoples\")\n\n\t\tclass InferenceConfig(coco.CocoConfig):\n\t\t # Set batch size to 1 since we'll be running inference on\n\t\t # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n\t\t GPU_COUNT = 1\n\t\t IMAGES_PER_GPU = 1\n\t\t #BATCH_SIZE = 1\n\n\t\tconfig = InferenceConfig()\n\t\tconfig.display()\n\n\t\t# Create model object in inference mode.\n\t\tmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n\t\t# Load weights trained on MS-COCO\n\t\tmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\n\n\t\t# COCO Class names\n\t\t# Index of the class in the list is its ID. 
For example, to get ID of\n\t\t# the teddy bear class, use: class_names.index('teddy bear')\n\t\tclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n\t\t 'bus', 'train', 'truck', 'boat', 'traffic light',\n\t\t 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n\t\t 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n\t\t 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n\t\t 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n\t\t 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n\t\t 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n\t\t 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n\t\t 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n\t\t 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n\t\t 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n\t\t 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n\t\t 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n\t\t 'teddy bear', 'hair drier', 'toothbrush']\n\n\t\t\n\t\n\t\t\n\t\tcount = 0\n\t\t#Load a random image from the images folder\n\n\t\n\n\t\tfor i in range(1):\n\t\t print(i)\n\t\t file_names = next(os.walk(IMAGE_DIR))[2]\n\t\t image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))\n\t\t #image = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[i]))\n\t\t \n\t\t \n\t\t #image = skimage.io.imread('edge2.jpg')\n\n\t\t # Run detection\n\t\t results = model.detect([image], verbose=1)\n\n\t\t # Visualize results\n\t\t r = results[0]\n\t\t #count = 0\n\n\t\t print(r['class_ids'])\n\t\t for i in r['class_ids']:\n\t\t \tif i == 1:\n\t\t \t\tcount = count+1\n\t\t print(count)\n\t\t #visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],class_names, r['scores'])\n\t\t visualize.display_instances(image, r['rois'], r['masks'],r['class_ids'],class_names,r['scores'],title=\"Number of People:\"+str(count))\n\n\n\t\treturn count\t \n\n\t# insertVariablesIntoTable(count,commentString)\n\n\tdef globalRepeatFunction(self):\n\t\tstarttime = time.time()\n\t\tdefaultComment = \"Defaut Comment!\"\n\t\twhile True:\n\t\t\tcafeObject = CafeImageAnalysis()\n\t\t\tcurrentImage = cafeObject.captureImage() \n\t\t\tcurrentCount = cafeObject.analyzePicture(currentImage)\n\t\t\tcafeObject.insertVariablesIntoTable(currentCount,defaultComment)\n\t\t\tprint(\"captured!\")\n\t\t\ttime.sleep(30.0-((time.time()-starttime)%30.0))\n\n\nif __name__ == \"__main__\":\n\tgreenCafe = CafeImageAnalysis()\n\t#capturedImage = greenCafe.captureImage()\n\tgreenCafe.analyzePicture()\t\n\t#greenCafe.globalRepeatFunction()","repo_name":"sherkhan91/Smart_Cafe","sub_path":"maskrcnn_demo.py","file_name":"maskrcnn_demo.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38732910574","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom collections import defaultdict\n\nfrom pysblgnt import morphgnt_rows\n\nfrom accent import strip_length\nfrom greek_inflexion import GreekInflexion\nfrom morphgnt_utils import bcv_tuple, convert_parse, key_to_part\n\n\nargparser = argparse.ArgumentParser(\n description=\"generate starting point for new lexicon\")\n\nargparser.add_argument(\n \"books\", metavar=\"BOOK_NUMBER\", type=int, nargs=\"+\",\n help=\"a book (Matt = 1)\")\n\nargparser.add_argument(\n \"--lexicon\", dest=\"lexicon\",\n default=\"STEM_DATA/morphgnt_lexicon.yaml\",\n help=\"path to initial stem 
lexicon file \"\n \"(defaults to morphgnt_lexicon.yaml)\")\n\nargparser.add_argument(\n \"--stemming\", dest=\"stemming\",\n default=\"stemming.yaml\",\n help=\"path to stemming rules file \"\n \"(defaults to stemming.yaml)\")\n\nargs = argparser.parse_args()\n\n\nginflexion = GreekInflexion(\n args.stemming,\n args.lexicon,\n strip_length=True\n)\n\n\nSTEM_GUESSES = defaultdict(lambda: defaultdict(set))\n\nfor book_num in args.books:\n for row in morphgnt_rows(book_num):\n b, c, v = bcv_tuple(row[\"bcv\"])\n if row[\"ccat-pos\"] == \"V-\":\n lemma = row[\"lemma\"]\n key = convert_parse(row[\"ccat-parse\"])\n form = row[\"norm\"]\n\n tags = set([\n \"final-nu-aai.3s\",\n \"oida-yai3p-variant\",\n \"no-final-nu-yai.3s\",\n \"late-pluperfect-singulars\",\n \"sigma-loss-pmd.2s\",\n \"HGrk\",\n ])\n\n c = form.count(\"/\") + 1\n stem = ginflexion.find_stems(lemma, key, tags)\n generated = ginflexion.generate(lemma, key, tags)\n\n if stem:\n stem_guess = None\n else:\n stem_guess = [\n stem for key, stem in\n ginflexion.possible_stems(form, \"^\" + key + \"$\")]\n\n if [strip_length(w) for w in sorted(generated)] == \\\n [strip_length(w) for w in sorted(form.split(\"/\"))]:\n correct = \"✓\"\n else:\n correct = \"✕\"\n if correct == \"✕\":\n if stem_guess:\n STEM_GUESSES[lemma][key_to_part(key)].add(\n frozenset(stem_guess))\n\n\nfor lemma, parts in sorted(STEM_GUESSES.items()):\n print()\n print(\"{}:\".format(lemma))\n print(\" stems:\".format(lemma))\n for part, stem_sets in sorted(parts.items()):\n stem = set.intersection(*(set(s) for s in stem_sets))\n if len(stem) == 0:\n print(\" {}: {} # @0\".format(part, stem_sets))\n elif len(stem) == 1:\n print(\" {}: {} # @1\".format(part, stem.pop()))\n else:\n print(\" {}: {} # @m\".format(part, stem))\n","repo_name":"jtauber/greek-inflexion","sub_path":"generate_morphgnt_lexicon.py","file_name":"generate_morphgnt_lexicon.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"53"} +{"seq_id":"22546682822","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\n\"\"\"\r\n File name: book_type.py\r\n Function Des: ...\r\n ~~~~~~~~~~\r\n \r\n author: Jerry \r\n \r\n\"\"\"\r\nfrom flask.ext.restful import fields\r\n\r\nbook_type_fields = {\r\n 'id': fields.String,\r\n 'name': fields.String,\r\n}\r\n\r\n# for get /types\r\nbook_types_fields = {\r\n 'books_types': fields.List(fields.Nested(book_type_fields))\r\n}\r\n","repo_name":"skyduy/RESTfulAPI","sub_path":"RESTfulApi/utils/fields/book_type.py","file_name":"book_type.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"53"} +{"seq_id":"14055445063","text":"# -*- coding: cp1252 -*-\n#|--------------------------------------------------|\n#| LIBRERIAS |\n#|--------------------------------------------------|\nfrom tkinter import *\nfrom tkinter import messagebox\nimport time\nimport fix_yahoo_finance as yf\n\nimport quandl\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom math import *\nfrom decimal import *\n\n#|--------------------------------------------------|\n#| FUNCIONES |\n#|--------------------------------------------------|\ndef prom(list):\n suma = 0\n for i in list:\n suma += i\n return (suma/len(list))\n\ndef sigma(s):\n cteT = sqrt(1/252)\n return(round(s/cteT,6))\n\ndef functionS(data):\n list = []\n \n i = 0\n for x in data:\n if i == 0:\n list.append(0)\n else:\n 
list.append(log(data[i]/data[i-1]))\n i += 1\n \n sumatoria = 0\n promedio = prom(list)\n\n for i in list:\n sumatoria += (i-promedio)**2\n\n return((1/(len(list)-1))*sumatoria)\n\n\ndef procesar():\n aapl = yf.download(Codigo.get(), start=Fecha_Inicio.get(), end=Fecha_Final.get())\n aapl.to_csv('fb_ohlc.csv')\n limpieza = (str(aapl).split(\"\\n\"))\n DataFinal = []\n for linea in limpieza[1:]:\n datos = linea.split()\n if len(datos) > 5:\n DataFinal.append([datos[0],datos[4]])\n\n if len(DataFinal) == 0:\n messagebox.showwarning(\"Advertencia\",\"Informacion invalida vuelva a intentar\")\n return\n else:\n messagebox.showinfo(\"Ok\",\"Datos Listos y almacenados\")\n test = aapl['Close'][-60:-1]\n test2 = aapl['Close'][-120:-60]\n if Time_maduracion.get() > 3:\n print (test,test2)\n volatividad = sigma(functionS(test)+functionS(test2))\n messagebox.showinfo(\"ok\",\"La Volatividad es de: \"+ str(volatividad))\n etiqueta_Volatividad = Label(ventana, text='La Volatividad es de: '+str(volatividad))\n etiqueta_Volatividad.grid(row=15, column=3)\n \n else:\n print (test)\n volatividad =sigma(functionS(test))\n messagebox.showinfo(\"ok\",\"la Volatividad es de: \"+ str(volatividad))\n etiqueta_Volatividad = Label(ventana, text='La Volatividad es de: '+str(volatividad))\n etiqueta_Volatividad.grid(row=15, column=3)\n \n \n \n#def guardar(data):\n# archivo = open(str(Codigo.get())+\".txt\", \"w\")\n# archivo.write(str(data))\n# archivo.close()\n# return (\"Guardado\")\n\t\n#|--------------------------------------------------|\n#| Instancia de la clase Tk |\n#|--------------------------------------------------|\n\nventana = Tk()\nventana.title('Valoración de Opciones sobre Acciones')\n\n#|--------------------------------------------------|\n#| VARIABLES |\n#|--------------------------------------------------|\nCodigo = StringVar()\nFecha_Inicio = StringVar()\nFecha_Final = StringVar()\nTasa_interes = IntVar()\nactividad = StringVar()\nTime_maduracion = IntVar()\nPrecio_ejecucion = IntVar()\nVolatividad = IntVar()\n#inicio\nactividad.set(\"Americana\")\nFecha_Inicio.set(str(time.strftime(\"20%y-%m-%d\")))\nFecha_Final.set(str(time.strftime(\"20%y-%m-%d\")))\n\n#|--------------------------------------------------|\n#| Generación de widgets | \n#|--------------------------------------------------|\n\n#Codigo\netiqueta_Codigo = Label(ventana, text='Codigo:')\nentrada_Codigo = Entry(ventana, textvariable=Codigo)\netiqueta_Codigo.grid(row=1, column=1)\nentrada_Codigo.grid(row=1, column=2)\n\n\n#Fecha Inicio\netiqueta_Fecha_Inicio = Label(ventana, text='Fecha Inicio: ')\nentrada_Fecha_Inicio = Entry(ventana, textvariable=Fecha_Inicio)\netiqueta_Fecha_Inicio.grid(row=4, column=1)\nentrada_Fecha_Inicio.grid(row=4, column=2)\n\n#Fecha Final\netiqueta_Fecha_Final = Label(ventana, text='Fecha Final: ')\nentrada_Fecha_Final = Entry(ventana, textvariable=Fecha_Final)\netiqueta_Fecha_Final.grid(row=4, column=4)\nentrada_Fecha_Final.grid(row=4, column=5)\n\n#Tasa interes\netiqueta_Tasa_interes = Label(ventana, text='Tasa interes: ')\nentrada_Tasa_interes = Entry(ventana, textvariable=Tasa_interes)\netiqueta_Tasa_interes.grid(row=6, column=1)\nentrada_Tasa_interes.grid(row=6, column=2)\n\n#Tiempo de maduracion \netiqueta_Time_maduracion = Label(ventana, text='Tiempo de maduracion: ')\nentrada_Time_maduracion = Entry(ventana, textvariable=Time_maduracion)\netiqueta_Time_maduracion.grid(row=6, column=4)\nentrada_Time_maduracion.grid(row=6, column=5)\n\n#Precio de ejecucion \netiqueta_Precio_ejecucion = Label(ventana, 
text='Precio de ejecucion : ')\nentrada_Precio_ejecucion = Entry(ventana, textvariable=Precio_ejecucion)\netiqueta_Precio_ejecucion.grid(row=10, column=1)\nentrada_Precio_ejecucion.grid(row=10, column=2)\n\n#OPCIONES\netiqueta_actividad = Label(ventana, text='OPCIONES: ')\nentrada_actividad = OptionMenu(ventana, actividad, \"Americana\", \"Europea\", \"Monte-Carlos\")\netiqueta_actividad.grid(row=10, column=4)\nentrada_actividad.grid(row=10, column=5)\n\n#boton\nboton = Button(ventana, text='Procesar',font=('Governor',10),background='#01a8a6',foreground='White',command=procesar)\nboton.grid(row=12, column=3)\n\n#ejecución de ventana\nventana.mainloop()\n","repo_name":"Projas14/ISW_Softwarrior-2018-1","sub_path":"Prototipo I/interfaz-final.py","file_name":"interfaz-final.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73078281769","text":"from django.db import models\n\n\nclass Specialization(models.Model):\n title = models.CharField(\"Специализация\", max_length=120, default=\"\",)\n\n def __str__(self):\n return self.title\n\n\nclass Specialist(models.Model):\n first_name = models.CharField(\"Имя\", max_length=120, default=\"\",)\n last_name = models.CharField(\"Фамилия\", max_length=120, default=\"\",)\n short_bio = models.CharField(\"Описание\", max_length=200, default=\"\",)\n long_bio = models.TextField(\"Полное описание\", blank=True, default=\"\",)\n telegram = models.CharField(\"Telegram\", max_length=120, default=\"\",)\n github = models.CharField(\"Github\", max_length=120, default=\"\", blank=True,)\n photo = models.ImageField(\n default=\"default-profile.png\", upload_to=\"profile-pics\", verbose_name=\"Фото\",\n )\n specializations = models.ManyToManyField(\n \"Specialization\", verbose_name=\"Специализации\"\n )\n\n def __str__(self):\n return self.first_name\n","repo_name":"chitcomhub/chitweb","sub_path":"chitweb/specialists/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"9353155530","text":"import os\nimport sys\nimport argparse\nimport socket\nimport random\nimport torch\nimport subprocess\nimport re\nimport pickle\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom psutil import process_iter\nfrom sklearn import tree\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\n\n\n\n# ---------------- Misc -------------------\nclass emptyobject():\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n def __str__(self):\n return str(self.__dict__)\n\nclass arguments_info:\n def __init__(self):\n self.host = \"localhost\"\n self.port = \"2000\"\n self.sync = False\n self.debug = 0\n self.spectator = True\n self.record = \"\"\n self.timeout = \"30.0\"\n self.challenge_mode = True\n self.routes = None\n self.scenarios = \"leaderboard/data/all_towns_traffic_scenarios_public.json\"\n self.repetitions = 1\n self.agent = \"scenario_runner/team_code/image_agent.py\"\n self.agent_config = \"models/epoch=24.ckpt\"\n self.track = \"SENSORS\"\n self.resume = False\n self.checkpoint = \"\"\n self.weather_index = 19\n self.save_folder = \"carla_lbc/collected_data_customized\"\n self.deviations_folder = \"\"\n self.background_vehicles = False\n self.save_action_based_measurements = 0\n self.changing_weather = False\n self.record_every_n_step = 2000\n\ndef parse_fuzzing_arguments():\n # the default is for carla+lbc 
stack\n default_objective_weights = np.array([-1., 1., 1., 0., 0., 0., 0., 0., 0., 0.])\n default_objectives = np.array([0., 20., 1., 7., 7., 0., 0., 0., 0., 0.])\n default_check_unique_coeff = [0, 0.1, 0.5]\n\n\n parser = argparse.ArgumentParser()\n\n # general\n parser.add_argument(\"-r\", \"--route_type\", type=str, default='town05_right_0')\n parser.add_argument(\"-c\", \"--scenario_type\", type=str, default='default')\n parser.add_argument(\"-m\", \"--ego_car_model\", type=str, default='lbc')\n parser.add_argument('-a','--algorithm_name', type=str, default='nsga2')\n\n parser.add_argument('-p','--ports', nargs='+', type=int, default=[2003], help='TCP port(s) to listen to (default: 2003)')\n # parser.add_argument(\"-s\", \"--scheduler_port\", type=int, default=8785)\n # parser.add_argument(\"-d\", \"--dashboard_address\", type=int, default=8786)\n\n parser.add_argument('--simulator', type=str, default='carla')\n\n parser.add_argument('--random_seed', type=int, default=0)\n\n\n # carla specific\n parser.add_argument(\"--has_display\", type=str, default='0')\n parser.add_argument(\"--debug\", type=int, default=1, help=\"whether using the debug mode: planned paths will be visualized.\")\n parser.add_argument('--correct_spawn_locations_after_run', type=int, default=0)\n parser.add_argument(\"--terminate_on_collision\", type=int, default=1, help=\"whether stopping the simulation when a collision happens.\")\n\n\n # carla_op specific\n parser.add_argument('--carla_path', type=str, default=\"../carla_0911_rss/CarlaUE4.sh\")\n\n # no_simulation specific\n parser.add_argument('--no_simulation_data_path', type=str, default=None)\n parser.add_argument('--objective_labels', type=str, nargs='+', default=[])\n\n\n\n # logistic\n parser.add_argument(\"--root_folder\", type=str, default='carla_lbc/run_results')\n parser.add_argument(\"--parent_folder\", type=str, default='') # will be automatically created\n parser.add_argument(\"--mean_objectives_across_generations_path\", type=str, default='') # will be automatically created\n parser.add_argument(\"--episode_max_time\", type=int, default=60)\n parser.add_argument('--record_every_n_step', type=int, default=2000)\n parser.add_argument('--gpus', type=str, default='0,1')\n\n\n # algorithm related\n parser.add_argument(\"--n_gen\", type=int, default=10, help='the number of generations to run.')\n parser.add_argument(\"--pop_size\", type=int, default=50, help='population size at each generation.')\n parser.add_argument(\"--has_run_num\", type=int, default=1000, help='the total number of simulations to run before the algorithm ends.')\n parser.add_argument(\"--survival_multiplier\", type=int, default=1)\n parser.add_argument(\"--n_offsprings\", type=int, default=300)\n parser.add_argument('--sample_multiplier', type=int, default=200)\n parser.add_argument('--mating_max_iterations', type=int, default=200)\n parser.add_argument('--only_run_unique_cases', type=int, default=1)\n parser.add_argument('--consider_interested_bugs', type=int, default=1)\n\n parser.add_argument(\"--outer_iterations\", type=int, default=3)\n parser.add_argument('--objective_weights', nargs='+', type=float, default=default_objective_weights, help='the weights corresponding to each objective when estimating the fitness function.')\n parser.add_argument('--default_objectives', nargs='+', type=float, default=default_objectives)\n parser.add_argument(\"--standardize_objective\", type=int, default=1)\n parser.add_argument(\"--normalize_objective\", type=int, default=1)\n 
parser.add_argument('--traj_dist_metric', type=str, default='nearest')\n\n # used only when algorithm_name == 'grid'\n parser.add_argument('--grid_dict_name', type=str, default='grid_dict_one_ped_town07')\n parser.add_argument('--grid_start_index', type=int, default=0)\n\n # used only when simulator == 'no_simulation_function'\n parser.add_argument('--synthetic_function', type=str, default='')\n\n # used only when algorithm_name == 'random_local_sphere'\n parser.add_argument('--chosen_labels', nargs='+', type=str, default=[])\n\n\n parser.add_argument('--check_unique_coeff', nargs='+', type=float, default=default_check_unique_coeff, help='the thresholds (norm, th_2, th_1) used to count unique bugs. Currently, norm is always set to 0. For a given type of traffic violation (collision or out-of-road), two violations caused by specific scenarios x and y are unique if at least th1 of the total number of changeable fields are different between the two. For a continuous field, the corresponding normalized values should be distinguishable by at least th2. (See section 4.1 in our paper for more details.)')\n parser.add_argument('--use_single_objective', type=int, default=1)\n parser.add_argument('--rank_mode', type=str, default='none')\n parser.add_argument('--ranking_model', type=str, default='nn_pytorch')\n parser.add_argument('--initial_fit_th', type=int, default=100, help='minimum number of instances needed to train a DNN.')\n parser.add_argument('--min_bug_num_to_fit_dnn', type=int, default=10, help='minimum number of bug instances needed to train a DNN.')\n\n parser.add_argument('--pgd_eps', type=float, default=1.01)\n parser.add_argument('--adv_conf_th', type=float, default=-4)\n parser.add_argument('--attack_stop_conf', type=float, default=0.9)\n parser.add_argument('--use_single_nn', type=int, default=1)\n\n parser.add_argument('--warm_up_path', type=str, default=None)\n parser.add_argument('--warm_up_len', type=int, default=-1)\n parser.add_argument('--regression_nn_use_running_data', type=int, default=1)\n\n parser.add_argument('--sample_avoid_ego_position', type=int, default=0)\n\n\n parser.add_argument('--uncertainty', type=str, default='')\n parser.add_argument('--model_type', type=str, default='one_output')\n\n\n parser.add_argument('--termination_condition', type=str, default='generations')\n parser.add_argument('--max_running_time', type=int, default=3600*24)\n\n parser.add_argument('--emcmc', type=int, default=0)\n parser.add_argument('--use_unique_bugs', type=int, default=1)\n parser.add_argument('--finish_after_has_run', type=int, default=1)\n\n fuzzing_arguments = parser.parse_args()\n\n\n os.environ['HAS_DISPLAY'] = fuzzing_arguments.has_display\n os.environ['CUDA_VISIBLE_DEVICES'] = fuzzing_arguments.gpus\n fuzzing_arguments.objective_weights = np.array(fuzzing_arguments.objective_weights)\n # ['BNN', 'one_output']\n # BALD and BatchBALD only support BNN\n if fuzzing_arguments.uncertainty.split('_')[0] in ['BALD', 'BatchBALD']:\n fuzzing_arguments.model_type = 'BNN'\n\n if 'un' in fuzzing_arguments.algorithm_name:\n fuzzing_arguments.use_unique_bugs = 1\n else:\n fuzzing_arguments.use_unique_bugs = 0\n\n if fuzzing_arguments.algorithm_name in ['nsga2-emcmc', 'nsga2-un-emcmc']:\n fuzzing_arguments.emcmc = 1\n else:\n fuzzing_arguments.emcmc = 0\n\n return fuzzing_arguments\n\n\n\ndef make_hierarchical_dir(folder_names):\n cur_folder_name = \"\"\n for i in range(len(folder_names)):\n cur_folder_name += folder_names[i]\n if not os.path.exists(cur_folder_name):\n 
os.mkdir(cur_folder_name)\n cur_folder_name += \"/\"\n return cur_folder_name\n\ndef is_port_in_use(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex((\"localhost\", int(port))) == 0\n\ndef port_to_gpu(port):\n n = torch.cuda.device_count()\n # n = 2\n gpu = port % n\n\n return gpu\n\n# TBD: separate the two exit handlers\ndef exit_handler(ports):\n # carla\n for port in ports:\n while is_port_in_use(port):\n try:\n subprocess.run(\"kill -9 $(lsof -t -i :\" + str(port) + \")\", shell=True)\n # subprocess.run(\"sudo kill $(lsof -t -i :\" + str(port) + \")\", shell=True)\n print(\"-\" * 20, \"kill server at port\", port)\n except:\n continue\n # svl\n import psutil\n PROC_NAME = \"mainboard\"\n for proc in psutil.process_iter():\n # check whether the process to kill name matches\n if proc.name() == PROC_NAME:\n proc.kill()\n # subprocess.run(\"sudo kill -9 \" + str(proc.pid), shell=True)\n\n\ndef get_sorted_subfolders(parent_folder, folder_type='all'):\n if 'rerun_bugs' in os.listdir(parent_folder):\n bug_folder = os.path.join(parent_folder, \"rerun_bugs\")\n non_bug_folder = os.path.join(parent_folder, \"rerun_non_bugs\")\n else:\n bug_folder = os.path.join(parent_folder, \"bugs\")\n non_bug_folder = os.path.join(parent_folder, \"non_bugs\")\n\n if folder_type == 'all':\n sub_folders = [\n os.path.join(bug_folder, sub_name) for sub_name in os.listdir(bug_folder)\n ] + [\n os.path.join(non_bug_folder, sub_name)\n for sub_name in os.listdir(non_bug_folder)\n ]\n elif folder_type == 'bugs':\n sub_folders = [\n os.path.join(bug_folder, sub_name) for sub_name in os.listdir(bug_folder)\n ]\n elif folder_type == 'non_bugs':\n sub_folders = [\n os.path.join(non_bug_folder, sub_name) for sub_name in os.listdir(non_bug_folder)\n ]\n else:\n raise\n\n ind_sub_folder_list = []\n for sub_folder in sub_folders:\n if os.path.isdir(sub_folder):\n ind = int(re.search(\".*bugs/([0-9]*)\", sub_folder).group(1))\n ind_sub_folder_list.append((ind, sub_folder))\n # print(sub_folder)\n ind_sub_folder_list_sorted = sorted(ind_sub_folder_list)\n subfolders = [filename for i, filename in ind_sub_folder_list_sorted]\n # print('len(subfolders)', len(subfolders))\n return subfolders\n\ndef load_data(subfolders):\n data_list = []\n is_bug_list = []\n\n objectives_list = []\n mask, labels, cur_info = None, None, None\n for sub_folder in subfolders:\n if os.path.isdir(sub_folder):\n pickle_filename = os.path.join(sub_folder, \"cur_info.pickle\")\n\n with open(pickle_filename, \"rb\") as f_in:\n cur_info = pickle.load(f_in)\n\n data, objectives, is_bug, mask, labels = cur_info[\"x\"], cur_info[\"objectives\"], int(cur_info[\"is_bug\"]), cur_info[\"mask\"], cur_info[\"labels\"]\n # hack: backward compatibility that removes the port info in x\n if data.shape[0] == len(labels) + 1:\n data = data[:-1]\n\n data_list.append(data)\n\n is_bug_list.append(is_bug)\n objectives_list.append(objectives)\n\n return data_list, np.array(is_bug_list), np.array(objectives_list), mask, labels, cur_info\n\n\ndef get_picklename(parent_folder):\n pickle_folder = parent_folder + \"/bugs/\"\n if not os.path.isdir(pickle_folder):\n pickle_folder = parent_folder + \"/0/bugs/\"\n i = 1\n while i < len(os.listdir(pickle_folder)):\n if os.path.isdir(pickle_folder + str(i)):\n pickle_folder = pickle_folder + str(i) + \"/cur_info.pickle\"\n break\n i += 1\n return pickle_folder\n\n\ndef set_general_seed(seed=0):\n os.environ['PYTHONHASHSEED'] = str(seed)\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n\n 
random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n # torch.use_deterministic_algorithms(True)\n torch.backends.cudnn.benchmark = False\n # torch.backends.cudnn.deterministic = True\n # torch.backends.cudnn.enabled = False\n\ndef rand_real(rng, low, high):\n return rng.random() * (high - low) + low\n\n# ---------------- Misc -------------------\n\n\n\n# ---------------- Uniqueness -------------------\ndef is_distinct_vectorized(cur_X, prev_X, mask, xl, xu, p, c, th, verbose=True):\n if len(cur_X) == 0:\n return []\n cur_X = np.array(cur_X)\n prev_X = np.array(prev_X)\n eps = 1e-10\n remaining_inds = np.arange(cur_X.shape[0])\n\n mask = np.array(mask)\n xl = np.array(xl)\n xu = np.array(xu)\n\n n = len(mask)\n\n variant_fields = (xu - xl) > eps\n variant_fields_num = np.sum(variant_fields)\n th_num = np.max([np.round(th * variant_fields_num), 1])\n\n mask = mask[variant_fields]\n int_inds = mask == \"int\"\n real_inds = mask == \"real\"\n xl = xl[variant_fields]\n xu = xu[variant_fields]\n xl = np.concatenate([np.zeros(np.sum(int_inds)), xl[real_inds]])\n xu = np.concatenate([0.99*np.ones(np.sum(int_inds)), xu[real_inds]])\n\n # hack: backward compatibility with previous run data\n # if cur_X.shape[1] == n-1:\n # cur_X = np.concatenate([cur_X, np.zeros((cur_X.shape[0], 1))], axis=1)\n\n cur_X = cur_X[:, variant_fields]\n cur_X = np.concatenate([cur_X[:, int_inds], cur_X[:, real_inds]], axis=1) / (np.abs(xu - xl) + eps)\n\n if len(prev_X) > 0:\n prev_X = prev_X[:, variant_fields]\n prev_X = np.concatenate([prev_X[:, int_inds], prev_X[:, real_inds]], axis=1) / (np.abs(xu - xl) + eps)\n\n diff_raw = np.abs(np.expand_dims(cur_X, axis=1) - np.expand_dims(prev_X, axis=0))\n diff = np.ones(diff_raw.shape) * (diff_raw > c)\n diff_norm = np.linalg.norm(diff, p, axis=2)\n equal = diff_norm < th_num\n remaining_inds = np.mean(equal, axis=1) == 0\n remaining_inds = np.arange(cur_X.shape[0])[remaining_inds]\n\n # print('remaining_inds', remaining_inds, np.arange(cur_X.shape[0])[remaining_inds], cur_X[np.arange(cur_X.shape[0])[remaining_inds]])\n if verbose:\n print('prev X filtering:',cur_X.shape[0], '->', len(remaining_inds))\n\n if len(remaining_inds) == 0:\n return []\n\n cur_X_remaining = cur_X[remaining_inds]\n print('len(cur_X_remaining)', len(cur_X_remaining))\n unique_inds = []\n for i in range(len(cur_X_remaining)-1):\n diff_raw = np.abs(np.expand_dims(cur_X_remaining[i], axis=0) - cur_X_remaining[i+1:])\n diff = np.ones(diff_raw.shape) * (diff_raw > c)\n diff_norm = np.linalg.norm(diff, p, axis=1)\n equal = diff_norm < th_num\n if np.mean(equal) == 0:\n unique_inds.append(i)\n\n unique_inds.append(len(cur_X_remaining)-1)\n\n if verbose:\n print('cur X filtering:',cur_X_remaining.shape[0], '->', len(unique_inds))\n\n if len(unique_inds) == 0:\n return []\n remaining_inds = remaining_inds[np.array(unique_inds)]\n\n\n return remaining_inds\n\ndef eliminate_repetitive_vectorized(cur_X, mask, xl, xu, p, c, th, verbose=True):\n cur_X = np.array(cur_X)\n eps = 1e-8\n verbose = False\n remaining_inds = np.arange(cur_X.shape[0])\n if len(cur_X) == 0:\n return remaining_inds\n else:\n mask = np.array(mask)\n xl = np.array(xl)\n xu = np.array(xu)\n\n variant_fields = (xu - xl) > eps\n variant_fields_num = np.sum(variant_fields)\n th_num = np.max([np.round(th * variant_fields_num), 1])\n\n mask = mask[variant_fields]\n xl = xl[variant_fields]\n xu = xu[variant_fields]\n\n cur_X = cur_X[:, variant_fields]\n\n int_inds = mask == \"int\"\n real_inds = mask == \"real\"\n\n xl = 
np.concatenate([np.zeros(np.sum(int_inds)), xl[real_inds]])\n xu = np.concatenate([0.99*np.ones(np.sum(int_inds)), xu[real_inds]])\n\n cur_X = np.concatenate([cur_X[:, int_inds], cur_X[:, real_inds]], axis=1) / (np.abs(xu - xl) + eps)\n\n\n unique_inds = []\n for i in range(len(cur_X)-1):\n diff_raw = np.abs(np.expand_dims(cur_X[i], axis=0) - cur_X[i+1:])\n diff = np.ones(diff_raw.shape) * (diff_raw > c)\n diff_norm = np.linalg.norm(diff, p, axis=1)\n equal = diff_norm < th_num\n if np.mean(equal) == 0:\n unique_inds.append(i)\n\n if len(unique_inds) == 0:\n return []\n remaining_inds = np.array(unique_inds)\n if verbose:\n print('cur X filtering:',cur_X.shape[0], '->', len(remaining_inds))\n\n return remaining_inds\n# ---------------- Uniqueness -------------------\n\nfrom sklearn.preprocessing import MinMaxScaler\n\n# ---------------- Bug, Objective -------------------\ndef get_F(current_objectives, all_objectives, objective_weights, use_single_objective, standardize=False, normalize=False):\n # standardize current objectives using all objectives so far\n all_objectives = np.stack(all_objectives)\n current_objectives = np.stack(current_objectives).astype(np.float64)\n\n # standardize objectives\n if standardize:\n standardizer = StandardScaler()\n standardizer.fit(all_objectives)\n all_objectives_std = standardizer.transform(all_objectives)\n current_objectives_std = standardizer.transform(current_objectives)\n else:\n all_objectives_std = all_objectives\n current_objectives_std = current_objectives\n\n # normalize objectives\n if normalize:\n normalizer = MinMaxScaler()\n normalizer.fit(all_objectives_std)\n current_objectives_norm = normalizer.transform(current_objectives_std)\n else:\n current_objectives_norm = current_objectives_std\n\n # print('current_objectives')\n # print(current_objectives)\n # print('current_objectives_norm')\n # print(current_objectives_norm)\n\n current_Fs = current_objectives_norm * objective_weights\n\n if use_single_objective:\n current_F = np.expand_dims(np.sum(current_Fs, axis=1), axis=1)\n else:\n current_F = np.row_stack(current_Fs)\n return current_F\n# ---------------- Bug, Objective -------------------\n\n\n\n# ---------------- NN -------------------\n# dependent on description labels\ndef encode_fields(x, labels, labels_to_encode, keywords_dict):\n x = np.array(x).astype(np.float)\n encode_fields = []\n inds_to_encode = []\n\n for label in labels_to_encode:\n for k, v in keywords_dict.items():\n if k in label:\n ind = labels.index(label)\n inds_to_encode.append(ind)\n\n encode_fields.append(v)\n break\n inds_non_encode = list(set(range(x.shape[1])) - set(inds_to_encode))\n\n # handle the case when no integer fields exist\n if len(labels_to_encode) == 0:\n from sklearn.preprocessing import FunctionTransformer\n enc = FunctionTransformer()\n return x, enc, inds_to_encode, inds_non_encode, encode_fields\n\n\n\n enc = OneHotEncoder(handle_unknown=\"ignore\", sparse=False)\n\n embed_dims = int(np.sum(encode_fields))\n embed_fields_num = len(encode_fields)\n data_for_fit_encode = np.zeros((embed_dims, embed_fields_num))\n counter = 0\n for i, encode_field in enumerate(encode_fields):\n for j in range(encode_field):\n data_for_fit_encode[counter, i] = j\n counter += 1\n enc.fit(data_for_fit_encode)\n\n embed = np.array(x[:, inds_to_encode].astype(np.int))\n embed = enc.transform(embed)\n\n x = np.concatenate([embed, x[:, inds_non_encode]], axis=1).astype(np.float)\n\n return x, enc, inds_to_encode, inds_non_encode, encode_fields\n\n# dependent on 
description labels\ndef get_labels_to_encode(labels, keywords_for_encode):\n labels_to_encode = []\n for label in labels:\n for keyword in keywords_for_encode:\n if keyword in label:\n labels_to_encode.append(label)\n return labels_to_encode\n\ndef max_one_hot_op(images, encode_fields):\n m = np.sum(encode_fields)\n one_hotezed_images_embed = np.zeros([images.shape[0], m])\n s = 0\n for field_len in encode_fields:\n max_inds = np.argmax(images[:, s : s + field_len], axis=1)\n one_hotezed_images_embed[np.arange(images.shape[0]), s + max_inds] = 1\n s += field_len\n images[:, :m] = one_hotezed_images_embed\n\ndef customized_fit(X_train, standardize, one_hot_fields_len, partial=True):\n # print('\\n'*2, 'customized_fit X_train.shape', X_train.shape, '\\n'*2)\n if partial:\n standardize.fit(X_train[:, one_hot_fields_len:])\n else:\n standardize.fit(X_train)\n\ndef customized_standardize(X, standardize, m, partial=True, scale_only=False):\n # print(X[:, :m].shape, standardize.transform(X[:, m:]).shape)\n if partial:\n if scale_only:\n res_non_encode = X[:, m:] * standardize.scale_\n else:\n res_non_encode = standardize.transform(X[:, m:])\n res = np.concatenate([X[:, :m], standardize.transform(X[:, m:])], axis=1)\n else:\n if scale_only:\n res = X * standardize.scale_\n else:\n res = standardize.transform(X)\n return res\n\ndef customized_inverse_standardize(X, standardize, m, partial=True, scale_only=False):\n if partial:\n if scale_only:\n res_non_encode = X[:, m:] * standardize.scale_\n else:\n res_non_encode = standardize.inverse_transform(X[:, m:])\n res = np.concatenate([X[:, :m], res_non_encode], axis=1)\n else:\n if scale_only:\n res = X * standardize.scale_\n else:\n res = standardize.inverse_transform(X)\n return res\n\ndef decode_fields(x, enc, inds_to_encode, inds_non_encode, encode_fields, adv=False):\n n = x.shape[0]\n m = len(inds_to_encode) + len(inds_non_encode)\n embed_dims = int(np.sum(encode_fields))\n if embed_dims > 0:\n embed = x[:, :embed_dims]\n kept = x[:, embed_dims:]\n\n if adv:\n one_hot_embed = np.zeros(embed.shape)\n s = 0\n for field_len in encode_fields:\n max_inds = np.argmax(x[:, s : s + field_len], axis=1)\n one_hot_embed[np.arange(x.shape[0]), s + max_inds] = 1\n s += field_len\n embed = one_hot_embed\n\n x_encoded = enc.inverse_transform(embed)\n # print('encode_fields', encode_fields)\n # print('embed', embed[0], x_encoded[0])\n x_decoded = np.zeros([n, m])\n x_decoded[:, inds_non_encode] = kept\n x_decoded[:, inds_to_encode] = x_encoded\n\n return x_decoded\n else:\n return x\n\ndef remove_fields_not_changing(x, embed_dims=0, xl=[], xu=[]):\n eps = 1e-8\n if len(xl) > 0:\n cond = xu - xl > eps\n else:\n cond = np.std(x, axis=0) > eps\n kept_fields = np.where(cond)[0]\n if embed_dims > 0:\n kept_fields = list(set(kept_fields).union(set(range(embed_dims))))\n\n removed_fields = list(set(range(x.shape[1])) - set(kept_fields))\n x_removed = x[:, removed_fields]\n x = x[:, kept_fields]\n return x, x_removed, kept_fields, removed_fields\n\ndef recover_fields_not_changing(x, x_removed, kept_fields, removed_fields):\n n = x.shape[0]\n m = len(kept_fields) + len(removed_fields)\n\n # this is True usually when adv is used\n if x_removed.shape[0] > 0 and x_removed.shape[0] != n:\n x_removed = np.array([x_removed[0] for _ in range(n)])\n x_recovered = np.zeros([n, m])\n x_recovered[:, kept_fields] = x\n x_recovered[:, removed_fields] = x_removed\n\n return x_recovered\n\ndef process_X(\n initial_X,\n labels,\n xl_ori,\n xu_ori,\n cutoff,\n cutoff_end,\n partial,\n 
unique_bugs_len,\n keywords_dict,\n standardize_prev=None,\n):\n keywords_for_encode = list(keywords_dict.keys())\n\n labels_to_encode = get_labels_to_encode(labels, keywords_for_encode)\n X, enc, inds_to_encode, inds_non_encode, encoded_fields = encode_fields(\n initial_X, labels, labels_to_encode, keywords_dict\n )\n one_hot_fields_len = np.sum(encoded_fields).astype('int')\n\n xl, xu = encode_bounds(\n xl_ori, xu_ori, inds_to_encode, inds_non_encode, encoded_fields\n )\n\n labels_non_encode = np.array(labels)[inds_non_encode]\n # print(np.array(X).shape)\n X, X_removed, kept_fields, removed_fields = remove_fields_not_changing(\n X, one_hot_fields_len, xl=xl, xu=xu\n )\n # print(np.array(X).shape)\n\n param_for_recover_and_decode = (\n X_removed,\n kept_fields,\n removed_fields,\n enc,\n inds_to_encode,\n inds_non_encode,\n encoded_fields,\n xl_ori,\n xu_ori,\n unique_bugs_len,\n )\n\n xl = xl[kept_fields]\n xu = xu[kept_fields]\n\n kept_fields_non_encode = np.array(kept_fields) - one_hot_fields_len\n kept_fields_non_encode = kept_fields_non_encode[kept_fields_non_encode >= 0]\n labels_used = labels_non_encode[kept_fields_non_encode]\n\n X_train, X_test = X[:cutoff], X[cutoff:cutoff_end]\n # print('X_train.shape, X_test.shape', X_train.shape, X_test.shape, one_hot_fields_len)\n if standardize_prev:\n standardize = standardize_prev\n else:\n standardize = StandardScaler()\n customized_fit(X_train, standardize, one_hot_fields_len, partial)\n X_train = customized_standardize(X_train, standardize, one_hot_fields_len, partial)\n if len(X_test) > 0:\n X_test = customized_standardize(X_test, standardize, one_hot_fields_len, partial)\n xl = customized_standardize(\n np.array([xl]), standardize, one_hot_fields_len, partial\n )[0]\n xu = customized_standardize(\n np.array([xu]), standardize, one_hot_fields_len, partial\n )[0]\n\n return (\n X_train,\n X_test,\n xl,\n xu,\n labels_used,\n standardize,\n one_hot_fields_len,\n param_for_recover_and_decode,\n )\n\n\ndef inverse_process_X(\n initial_test_x_adv_list,\n standardize,\n one_hot_fields_len,\n partial,\n X_removed,\n kept_fields,\n removed_fields,\n enc,\n inds_to_encode,\n inds_non_encode,\n encoded_fields,\n):\n test_x_adv_list = customized_inverse_standardize(\n initial_test_x_adv_list, standardize, one_hot_fields_len, partial\n )\n X = recover_fields_not_changing(\n test_x_adv_list, X_removed, kept_fields, removed_fields\n )\n X_final_test = decode_fields(\n X, enc, inds_to_encode, inds_non_encode, encoded_fields, adv=True\n )\n return X_final_test\n# ---------------- NN -------------------\n\n\n\n# ---------------- ADV -------------------\ndef if_violate_constraints_vectorized(X, customized_constraints, labels, ego_start_position=None, verbose=False):\n labels_to_id = {label: i for i, label in enumerate(labels)}\n\n keywords = [\"coefficients\", \"labels\", \"value\"]\n extra_keywords = [\"power\"]\n\n if_violate = False\n violated_constraints = []\n involved_labels = set()\n\n X = np.array(X)\n remaining_inds = np.arange(X.shape[0])\n\n for i, constraint in enumerate(customized_constraints):\n for k in keywords:\n assert k in constraint\n assert len(constraint[\"coefficients\"]) == len(constraint[\"labels\"])\n\n ids = np.array([labels_to_id[label] for label in constraint[\"labels\"]])\n\n\n # x_ids = [x[id] for id in ids]\n if \"powers\" in constraint:\n powers = np.array(constraint[\"powers\"])\n else:\n powers = np.array([1 for _ in range(len(ids))])\n\n coeff = np.array(constraint[\"coefficients\"])\n\n if_violate_current = (\n 
np.sum(coeff * np.power(X[remaining_inds[:, None], ids], powers), axis=1) > constraint[\"value\"]\n )\n remaining_inds = remaining_inds[if_violate_current==0]\n\n # beta: eliminate NPC vehicles having generation collision with the ego car\n # TBD: consider customized_center_transforms, customizable NPC vehicle size\n # also only consider OP for now\n print('remaining_inds before', len(remaining_inds))\n tmp_remaining_inds = remaining_inds.copy()\n if ego_start_position:\n j = 0\n ego_x, ego_y, ego_yaw = ego_start_position\n ego_w = 0.93\n vehicle_w_j = 0.93\n ego_l = 2.35\n vehicle_l_j = 2.35\n dw = ego_w + vehicle_w_j\n dl = ego_l + vehicle_l_j\n while 'vehicle_x_'+str(j) in labels:\n remaining_inds_i = remaining_inds.copy()\n\n x_ind = labels.index('vehicle_x_'+str(j))\n y_ind = labels.index('vehicle_y_'+str(j))\n\n vehicle_x_j = X[remaining_inds_i, x_ind]\n vehicle_y_j = X[remaining_inds_i, y_ind]\n\n dx_rel = vehicle_x_j\n dy_rel = vehicle_y_j\n\n\n x_far_inds = remaining_inds_i[np.abs(dx_rel) > dw]\n x_close_inds = remaining_inds_i[np.abs(dx_rel) <= dw]\n\n y_far_inds = x_close_inds[np.abs(dy_rel[x_close_inds]) > dl]\n\n remaining_inds_i = np.concatenate([x_far_inds, y_far_inds])\n tmp_remaining_inds = np.intersect1d(tmp_remaining_inds, remaining_inds_i)\n j += 1\n remaining_inds = tmp_remaining_inds\n\n\n if verbose:\n print('constraints filtering', len(X), '->', len(remaining_inds))\n\n return remaining_inds\n\ndef rotate_via_numpy(xy, radians):\n \"\"\"Use numpy to build a rotation matrix and take the dot product.\"\"\"\n x, y = xy\n c, s = np.cos(radians), np.sin(radians)\n j = np.array([[c, -s], [s, c]])\n m = np.dot(j, np.array([x, y]))\n\n return m[0], m[1]\n\ndef if_violate_constraints(x, customized_constraints, labels, verbose=False):\n labels_to_id = {label: i for i, label in enumerate(labels)}\n\n keywords = [\"coefficients\", \"labels\", \"value\"]\n extra_keywords = [\"power\"]\n\n if_violate = False\n violated_constraints = []\n involved_labels = set()\n\n for i, constraint in enumerate(customized_constraints):\n for k in keywords:\n assert k in constraint\n assert len(constraint[\"coefficients\"]) == len(constraint[\"labels\"])\n\n ids = [labels_to_id[label] for label in constraint[\"labels\"]]\n x_ids = [x[id] for id in ids]\n if \"powers\" in constraint:\n powers = np.array(constraint[\"powers\"])\n else:\n powers = np.array([1 for _ in range(len(ids))])\n\n coeff = np.array(constraint[\"coefficients\"])\n features = np.array(x_ids)\n\n if_violate_current = (\n np.sum(coeff * np.power(features, powers)) > constraint[\"value\"]\n )\n if if_violate_current:\n if_violate = True\n violated_constraints.append(constraint)\n involved_labels = involved_labels.union(set(constraint[\"labels\"]))\n if verbose:\n print(\"\\n\" * 1, \"violate_constraints!!!!\", \"\\n\" * 1)\n print(\n coeff,\n features,\n powers,\n np.sum(coeff * np.power(features, powers)),\n constraint[\"value\"],\n constraint[\"labels\"],\n )\n\n return if_violate, [violated_constraints, involved_labels]\n\ndef encode_bounds(xl, xu, inds_to_encode, inds_non_encode, encode_fields):\n m1 = int(np.sum(encode_fields))\n if m1 > 0:\n xl_embed, xu_embed = np.zeros(m1), np.ones(m1)\n\n xl_new = np.concatenate([xl_embed, xl[inds_non_encode]])\n xu_new = np.concatenate([xu_embed, xu[inds_non_encode]])\n\n return xl_new, xu_new\n else:\n return xl, xu\n# ---------------- ADV -------------------\n\n\n\n# ---------------- NSGA2-DT -------------------\n# check if x is in critical regions of the tree\ndef 
is_critical_region(x, estimator, critical_unique_leaves):\n leave_id = estimator.apply(x.reshape(1, -1))[0]\n print(leave_id, critical_unique_leaves)\n return leave_id in critical_unique_leaves\n\ndef filter_critical_regions(X, y):\n print(\"\\n\" * 20)\n print(\"+\" * 100, \"filter_critical_regions\", \"+\" * 100)\n\n min_samples_split = np.max([int(0.1 * X.shape[0]), 2])\n # estimator = tree.DecisionTreeClassifier(min_samples_split=min_samples_split, min_impurity_decrease=0.01, random_state=0)\n estimator = tree.DecisionTreeClassifier(\n min_samples_split=min_samples_split,\n min_impurity_decrease=0.0001,\n random_state=0,\n )\n print(X.shape, y.shape)\n # print(X, y)\n estimator = estimator.fit(X, y)\n\n leave_ids = estimator.apply(X)\n print(\"leave_ids\", leave_ids)\n\n unique_leave_ids = np.unique(leave_ids)\n unique_leaves_bug_num = np.zeros(unique_leave_ids.shape[0])\n unique_leaves_normal_num = np.zeros(unique_leave_ids.shape[0])\n\n for j, unique_leave_id in enumerate(unique_leave_ids):\n for i, leave_id in enumerate(leave_ids):\n if leave_id == unique_leave_id:\n if y[i] == 0:\n unique_leaves_normal_num[j] += 1\n else:\n unique_leaves_bug_num[j] += 1\n\n for i, unique_leave_i in enumerate(unique_leave_ids):\n print(\n \"unique_leaves\",\n unique_leave_i,\n unique_leaves_bug_num[i],\n unique_leaves_normal_num[i],\n )\n\n critical_unique_leaves = unique_leave_ids[\n unique_leaves_bug_num >= unique_leaves_normal_num\n ]\n\n print(\"critical_unique_leaves\", critical_unique_leaves)\n\n inds = np.array([leave_id in critical_unique_leaves for leave_id in leave_ids])\n print(\"\\n\" * 20)\n\n return estimator, inds, critical_unique_leaves\n# ---------------- NSGA2-DT -------------------\n\n\n\n# ---------------- NSGA2-SM -------------------\ndef pretrain_regression_nets(initial_X, initial_objectives_list, objective_weights, xl_ori, xu_ori, labels, customized_constraints, cutoff, cutoff_end, keywords_dict, choose_weight_inds):\n\n # we are not using it so set it to 0 for placeholding\n unique_bugs_len = 0\n partial = True\n\n print(np.array(initial_X).shape, cutoff, cutoff_end)\n (\n X_train,\n X_test,\n xl,\n xu,\n labels_used,\n standardize,\n one_hot_fields_len,\n param_for_recover_and_decode,\n ) = process_X(\n initial_X, labels, xl_ori, xu_ori, cutoff, cutoff_end, partial, unique_bugs_len, keywords_dict\n )\n\n (\n X_removed,\n kept_fields,\n removed_fields,\n enc,\n inds_to_encode,\n inds_non_encode,\n encoded_fields,\n _,\n _,\n unique_bugs_len,\n ) = param_for_recover_and_decode\n\n weight_inds = choose_weight_inds(objective_weights)\n\n\n from pgd_attack import train_regression_net\n chosen_weights = objective_weights[weight_inds]\n clfs = []\n confs = []\n for weight_ind in weight_inds:\n y_i = np.array([obj[weight_ind] for obj in initial_objectives_list])\n y_train_i, y_test_i = y_i[:cutoff], y_i[cutoff:cutoff_end]\n\n clf_i, conf_i = train_regression_net(\n X_train, y_train_i, X_test, y_test_i, batch_train=200, return_test_err=True\n )\n clfs.append(clf_i)\n confs.append(conf_i)\n\n confs = np.array(confs)*chosen_weights\n return clfs, confs, chosen_weights, standardize\n# ---------------- NSGA2-SM -------------------\n\n\n# ---------------- AVFuzzer -------------------\ndef choose_farthest_offs(tmp_off_candidates_X, all_pop_run_X, pop_size):\n from sklearn.preprocessing import Normalizer\n Normalizer\n # transformer = Normalizer().fit(tmp_off_candidates_X)\n # tmp_off_candidates_X = transformer.transform(tmp_off_candidates_X)\n # all_pop_run_X = 
transformer.transform(all_pop_run_X)\n\n # mean = np.mean(tmp_off_candidates_X, axis=0)\n # std = np.std(tmp_off_candidates_X, axis=0)\n # tmp_off_candidates_X = (tmp_off_candidates_X-mean)/std\n # all_pop_run_X = (all_pop_run_X-mean)/std\n\n dis = tmp_off_candidates_X[:, np.newaxis,:] - all_pop_run_X\n # print('\\n'*5, 'choose_farthest_offs')\n # print('tmp_off_candidates_X', tmp_off_candidates_X)\n # print('all_pop_run_X', all_pop_run_X)\n # print('dis', dis)\n # print('\\n'*5)\n dis_sum = np.mean(np.mean(np.abs(dis), axis=2), axis=1)\n chosen_inds = np.argsort(dis_sum)[-pop_size:]\n # with open('tmp_log.txt', 'a') as f_out:\n # f_out.write('shapes: '+str(np.shape(tmp_off_candidates_X[:, np.newaxis,:]))+','+str(np.shape(all_pop_run_X))+str(np.shape(dis))+str(np.shape(dis_sum))+str(dis_sum)+'\\n\\n'+str(dis_sum[chosen_inds])+'\\n')\n return chosen_inds\n# ---------------- AVFuzzer -------------------\n\n\n# ---------------- acquisition related -------------------\n# TBD: greedily add point\ndef calculate_rep_d(clf, X_train, X_test):\n X_train_embed = clf.extract_embed(X_train)\n X_test_embed = clf.extract_embed(X_test)\n X_combined_embed = np.concatenate([X_train_embed, X_test_embed])\n\n d_list = []\n for x_test_embed in X_test_embed:\n d = np.linalg.norm(X_combined_embed - x_test_embed, axis=1)\n # sorted_d = np.sort(d)\n # d_list.append(sorted_d[1])\n d_list.append(d)\n return np.array(d_list)\n\ndef select_batch_max_d_greedy(d_list, train_test_cutoff, batch_size):\n consider_inds = np.arange(train_test_cutoff)\n remaining_inds = np.arange(len(d_list))\n chosen_inds = []\n\n print('d_list.shape', d_list.shape)\n print('remaining_inds.shape', remaining_inds.shape)\n print('consider_inds.shape', consider_inds.shape)\n for i in range(batch_size):\n # print(i)\n # print('d_list[np.ix_(remaining_inds, consider_inds)].shape', d_list[np.ix_(remaining_inds, consider_inds)].shape)\n min_d_list = np.min(d_list[np.ix_(remaining_inds, consider_inds)], axis=1)\n # print('min_d_list', min_d_list.shape, min_d_list)\n remaining_inds_top_ind = np.argmax(min_d_list)\n chosen_ind = remaining_inds[remaining_inds_top_ind]\n\n # print('chosen_ind', chosen_ind)\n consider_inds = np.append(consider_inds, chosen_ind)\n # print('remaining_inds before', remaining_inds)\n # print('remaining_inds_top_ind', remaining_inds_top_ind)\n remaining_inds = np.delete(remaining_inds, remaining_inds_top_ind)\n # print('remaining_inds after', remaining_inds)\n chosen_inds.append(chosen_ind)\n return chosen_inds\n\ndef torch_subset(pool_data):\n return torch.utils.data.Subset(pool_data, np.arange(len(pool_data)))\n\n\n# ---------------- acquisition related -------------------\n\n# ---------------- metric related -------------------\ndef angle_distance_adjustment(values, angle_max):\n diff_forward = values\n diff_backward = angle_max-values\n diff_both = np.stack([diff_forward, diff_backward], axis=2)\n return np.min(diff_both, axis=2)\n\n\ndef get_pairwise_distances(X_query, X_query_2, angle_features, scales):\n diff_raw = np.abs(np.expand_dims(X_query, axis=1) - np.expand_dims(X_query_2, axis=0))\n for angle_feature in angle_features:\n diff_raw[:, :, angle_feature] = angle_distance_adjustment(diff_raw[:, :, angle_feature], 360)\n for i, scale in enumerate(scales):\n diff_raw[:, :, angle_feature] /= scale\n diff_norm = np.sum(np.abs(diff_raw), axis=2)\n return diff_norm\n\ndef get_boundary_perc_and_avg_boundary_dist(X_query, y_query, angle_features, scales):\n diff_norm = get_pairwise_distances(X_query, X_query, 
angle_features, scales)\n\n # precision\n total_count = diff_norm.shape[0]\n boundary_count = 0\n boundary_inds_list = []\n for i in range(total_count):\n inds = np.where(diff_norm[i] <= 1)[0]\n for j in inds:\n if j != i and y_query[i] != y_query[j]:\n boundary_inds_list.append(i)\n\n boundary_perc = len(boundary_inds_list)/total_count\n\n # coverage\n dist_list = []\n for i in range(len(boundary_inds_list)):\n for j in range(len(boundary_inds_list)):\n if i != j:\n dist_list.append(diff_norm[i, j])\n print('total_count', total_count)\n print('len(boundary_inds_list):', len(boundary_inds_list))\n print('len(dist_list):', len(dist_list))\n avg_boundary_dist = np.mean(dist_list)\n\n return boundary_perc, avg_boundary_dist\n\ndef nndv(X_train, y_train, X_test, pop_size, angle_features, scales):\n diff_norm = get_pairwise_distances(X_test, X_train, angle_features, scales)\n\n score_variance = np.zeros(diff_norm.shape[0])\n avg_neighbor_dist = np.zeros(diff_norm.shape[0])\n\n for i in range(diff_norm.shape[0]):\n neighbor_inds = np.argsort(diff_norm[i])[:10]\n neighbor_values = np.sort(diff_norm[i])[:10]\n\n\n score_variance[i] = np.std(y_train[neighbor_inds])\n avg_neighbor_dist[i] = np.mean(neighbor_values)\n print(X_test.shape)\n print(np.min(score_variance), np.max(score_variance), np.mean(score_variance), np.median(score_variance))\n print(np.min(avg_neighbor_dist), np.max(avg_neighbor_dist), np.mean(avg_neighbor_dist), np.median(avg_neighbor_dist))\n w = 1.0\n scores = score_variance * w + avg_neighbor_dist * (1-w)\n inds = np.argsort(scores)[-pop_size:]\n\n return inds\n# ---------------- metric related -------------------\n\n\ndef get_job_results(tmp_run_info_list, x_sublist, objectives_sublist_non_traj, trajectory_vector_sublist, x_list, objectives_list, trajectory_vector_list, traj_dist_metric=None):\n\n job_results = objectives_sublist_non_traj\n\n x_list.extend(x_sublist)\n objectives_list.extend(job_results)\n trajectory_vector_list.extend(trajectory_vector_sublist)\n\n\n return job_results, x_list, objectives_list, trajectory_vector_list\n\n\nif __name__ == \"__main__\":\n print('ok')\n","repo_name":"AIasd/ADFuzz","sub_path":"customized_utils.py","file_name":"customized_utils.py","file_ext":"py","file_size_in_byte":42193,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"36635590102","text":"# Pick the secret number from a range for the possible numbers (i.e. 0-10, 0-100, 100-200)\n# Provide the player X number of guesses, based on range of numbers. \n# First player to score at least 3 points is declared the winner. \n # Player guesses the number. \n # Compare guess to secret number. \n # If the guess is correct what should happen?\n # Award the player a point. \n # Print a 'win' message on the screen. \n # If the guess is incorrect, what should happen? \n # Indicate if guess is high/low compared to secret number. \n # If the player does not guess correctly before hitting limit, what happens?\n # Award a point to the CPU. \n # Print a loss message. \n\n\n# Guess a Number, Ryan Kelley, v0.0 \nimport random \n\n# DECLARATIONS \nsecretNumber = -1 # Range: 0 -- 20 \nplayerName = \"\" # empty string \nplayerScore = 0 \ncpuScore = 0 \nnumGuesses = 0 \nplayerGuess = -1 \n\nprint(\"\"\"\n +==============================+\n | |\n | Guess the Number |\n | by |\n | Ryan K. 
|\n | 2023 |\n +==============================+ \n \"\"\")\n\nplayerName = input(\"What should I call you?\\nType your name and press enter.\\n\")\n# VERIFY INPUT WHENEVER POSSIBLE! \nprint(f\"You want me to call you {playerName}. Is that correct?\")\nisCorrect = input(\"Please type yes if correct, no if not correct.\\n\")\nif isCorrect == \"yes\": \n print(f\"Ok {playerName}, let's continue!\")\nelse: \n playerName = input(\"What should I call you?\\nType your name and press enter.\\n\") \n\n# PLAYER GUESS \nprint(\"You need to guess a number from 0 to 20. You have four guesses!\\n\")\n \nwhile playerScore != 3 and cpuScore != 3: \n #pass Tells Python to skip this block without giving an error. \n secretNumber = random.randint(0, 20) # INCLUSIVE\n #print(secretNumber)\n print(f\"Player Score: {playerScore}\\nCPU Score: {cpuScore}\\n\")\n numGuesses = 0\n for guesses in range(4): \n print(f\"You have {4 - numGuesses} guesses left this round!\\n\") \n playerGuess = int(input(\"Think of your number, type it in and then push ENTER.\\n\"))\n # int() converts whatever is input into an INTEGER \n print(f\"You have picked {playerGuess}. Let's see if it is a match!\\n\")\n if playerGuess == secretNumber: \n playerScore += 1 \n print(\"A winner is you! It's a match!\\n\")\n break # immediately exit a loop! \n else: \n if playerGuess < secretNumber: \n print(\"Your guess is too low!\\n\")\n else: \n print(\"Your guess is too high!\\n\")\n numGuesses += 1 \n if playerGuess != secretNumber: \n cpuScore += 1\n print(\"The CPU was able to trick you and win this round!\\n\")\n\nif playerScore >= 3: \n print(\"You have won three rounds, so you win the game!\\n\")\nelse: \n print(\"Git gud scrub, the CPU was able to smash you!\\n\")\n\n","repo_name":"RyanK-TFATF/ajhs_game_programming_2022-23","sub_path":"01_StudentCode/3A/00b_numberGuess/numberGuess.py","file_name":"numberGuess.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"70900262249","text":"import torch\nimport numpy as np\nimport cv2\nimport subprocess\nimport ffmpeg\nimport os\nfrom matplotlib import pyplot as plt\nimport sys\n\nclass utils:\n @classmethod\n def display_pics(cls, pics, shape=[2,4], color=None):\n fig = plt.figure(figsize=(12,8))\n for i in range(len(pics)):\n plt.subplot(shape[0],shape[1],i+1)\n if color is not None:\n plt.imshow(pics[i], color)\n else:\n plt.imshow(pics[i])\n plt.show()\n\n @classmethod\n def draw_square(cls, point, img, bgr=False):\n for i in range(-4,5):\n for j in range(-4,5):\n cur_point_y,cur_point_x = round(point[1])+j,round(point[0])+i\n if cls.in_bounds(img.shape, [cur_point_x,cur_point_y]):\n if bgr:\n img[cur_point_y,cur_point_x,:] = [0.,0.,1.]\n else:\n img[cur_point_y,cur_point_x] = 1.\n return img\n\n @classmethod\n def in_bounds(cls, image_shape, point):\n if point[0] >= 0 and point[0] < image_shape[1] and point[1] >= 0 and point[1] < image_shape[0]:\n return True\n return False\n \n @classmethod\n def draw_squares(cls, points, img, bgr=False):\n points = cls.to_numpy(points)\n points = points.reshape(-1,2)\n for point in points:\n img = cls.draw_square(point, img, bgr)\n return img\n \n @classmethod\n def get_images_random(cls, num_images, data_dir, out_dir):\n one_liner = \"ffmpeg -i in.mp4 -vf select='between(n\\,x\\,y)' -vsync 0 image%d.png\"\n command_args = one_liner.split(' ')\n trial_ids = os.listdir(data_dir)\n cls.process_dir_list(trial_ids)\n trial_paths = list(map(lambda x: 
os.path.join(data_dir, x), trial_ids))\n for i in range(len(trial_paths)):\n trial_id = trial_ids[i]\n videos = os.listdir(trial_paths[i])\n cls.process_dir_list(videos)\n video_paths = list(map(lambda x: os.path.join(trial_paths[i], x), videos))\n video_num_frames = []\n for j in range(len(videos)):\n metadata = ffmpeg.probe(video_paths[j])\n for stream in metadata['streams']:\n if stream['codec_type'] == 'video':\n video_num_frames.append(stream['nb_frames'])\n min_frames = video_num_frames[0]\n for j in range(1, len(videos)):\n min_frames = min(min_frames,video_num_frames[j])\n frames = np.random.rand(num_images+2)*float(min_frames)\n frames_list = frames.tolist()\n indices = list(map(lambda x: round(x), frames_list))\n for idx in indices[1:-1]:\n cur_command_args = command_args.copy()\n cur_dir = os.path.join(out_dir, trial_id+str(idx))\n if not os.path.exists(cur_dir):\n os.mkdir(cur_dir)\n for j in range(len(videos)):\n cur_command_args = command_args.copy()\n cur_command_args[2] = video_paths[j]\n out_path = os.path.join(cur_dir, videos[j].split('.')[0] + \"-\" + cur_command_args[-1])\n cur_command_args[-1] = out_path\n if os.path.exists(out_path):\n os.remove(out_path)\n cur_command_args[4] = \"select='between(n\\,\"+str(idx)+\"\\,\"+str(idx)+\")\"\n subprocess.run(cur_command_args, stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)\n \n @classmethod\n def get_images_even(cls, num_images, data_dir, out_dir):\n one_liner = \"ffmpeg -i in.mp4 -vf select='between(n\\,x\\,y)' -vsync 0 image%d.png\"\n command_args = one_liner.split(' ')\n trial_ids = os.listdir(data_dir)\n cls.process_dir_list(trial_ids)\n trial_paths = list(map(lambda x: os.path.join(data_dir, x), trial_ids))\n for i in range(len(trial_paths)):\n trial_id = trial_ids[i]\n videos = os.listdir(trial_paths[i])\n cls.process_dir_list(videos)\n video_paths = list(map(lambda x: os.path.join(trial_paths[i], x), videos))\n video_num_frames = []\n for j in range(len(videos)):\n metadata = ffmpeg.probe(video_paths[j])\n for stream in metadata['streams']:\n if stream['codec_type'] == 'video':\n video_num_frames.append(stream['nb_frames'])\n min_frames = video_num_frames[0]\n for j in range(1, len(videos)):\n min_frames = min(min_frames,video_num_frames[j])\n frames = np.linspace(0.,float(min_frames),num_images+2)\n frames_list = frames.tolist()\n indices = list(map(lambda x: round(x), frames_list))\n for idx in indices[1:-1]:\n cur_command_args = command_args.copy()\n cur_dir = os.path.join(out_dir, trial_id+str(idx))\n if not os.path.exists(cur_dir):\n os.mkdir(cur_dir)\n for j in range(len(videos)):\n cur_command_args = command_args.copy()\n cur_command_args[2] = video_paths[j]\n out_path = os.path.join(cur_dir, videos[j].split('.')[0] + \"-\" + cur_command_args[-1])\n cur_command_args[-1] = out_path\n if os.path.exists(out_path):\n os.remove(out_path)\n cur_command_args[4] = \"select='between(n\\,\"+str(idx)+\"\\,\"+str(idx)+\")\"\n subprocess.run(cur_command_args, stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)\n\n @classmethod\n def get_n_images_per_video(cls, num_images, data_dir, out_dir, method=\"even\"):\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n if method == \"even\":\n cls.get_images_even(num_images, data_dir, out_dir)\n elif method == \"random\":\n cls.get_images_random(num_images, data_dir, out_dir)\n else:\n print(\"Invalid Method Keyword\")\n sys.exit(-1)\n \n @classmethod\n def process_dir_list(cls, input_list):\n 
input_list.sort()\n if input_list[0] == \".DS_Store\":\n del input_list[0]\n if input_list[-1] == \".DS_Store\":\n del input_list[-1]\n\n @classmethod\n def check_shape(cls, tensor, shape):\n tensor_size = tensor.size()\n for i in range(len(shape)):\n assert(tensor_size[i] == shape[i])\n\n @classmethod\n def check_type(cls, tensor, dtype):\n assert(tensor.dtype == dtype)\n\n @classmethod\n def check_tensor(cls, tensor, shape=None, dtype=None):\n if shape is not None:\n cls.check_shape(tensor, shape)\n if dtype is not None:\n cls.check_type(tensor, dtype)\n\n @classmethod\n def get_base_imgs(cls, cam_names, base_img_dir):\n base_images = []\n for cam_name in cam_names:\n base_img_path = base_img_dir+\"camera_\"+cam_name+\"_base_img.png\"\n base_img = cv2.imread(base_img_path)\n base_img = cv2.cvtColor(base_img, cv2.COLOR_BGR2GRAY)\n base_img = base_img.astype(\"float32\",copy=False)\n base_img /= np.max(base_img)\n base_images.append(base_img)\n return base_images\n\n @classmethod\n def to_numpy(cls, thing):\n if isinstance(thing, np.ndarray):\n return thing\n elif torch.is_tensor(thing):\n return thing.detach().cpu().numpy()\n elif isinstance(thing, list):\n return np.array(thing)\n else:\n raise TypeError(\"Please pass a list, tensor, or ndarray.\")","repo_name":"patrickdwyer33/Tracking_3d","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13737497761","text":"# 5. Escreva um program que leia dois números e que pergunte qual operação o usuário deseja realizar. Esse programa deve aceitar como respostas a soma(+), a subtração(-), a multiplicação (*) e a divisão (/). Ao final, o programa deve exibir o resultado da operação escolhida.\n\nnumber1 = float(input('Digite um valor: '))\nnumber2 = float(input('Digite outro valor: '))\n\noperation = (input('Digite a operação desejada'))\n\nif(operation == '+'):\n sum = (number1+number2)\n print(f'O resultado da soma é:{sum:.1f}')\nelif(operation == '-'):\n subtraction = (number1-number2)\n print(f'O resultado da subtração é:{subtraction:.1f}')\nelif(operation == '*'):\n multiplication = (number1*number2)\n print(f'O resultado da multiplicão é:{multiplication:.1f}')\nelif(operation == '/'):\n division = (number1/number2)\n print(f'O resultado da divisão é:{division:.1f}')\nelse:\n print('Opção não localizada!')\n","repo_name":"Elisandro-Santolin/Experian-Serasa-Python","sub_path":"s02Exe/exercise2.5.py","file_name":"exercise2.5.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"910001424","text":"\"\"\"\r\nThis script constitutes the evaluation of an $M/G/N_{K}$ Queue Model at the Base Station [Custom Implementation]\r\n\r\nAuthor: Bharath Keshavamurthy \r\nOrganization: School of Electrical, Computer and Energy Engineering, Arizona State University, Tempe, AZ.\r\n School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN.\r\nCopyright (c) 2021. 
All Rights Reserved.\r\n\"\"\"\r\n\r\n# The imports\r\nimport os\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # TensorFlow logging level\r\n\r\nimport time\r\nimport warnings\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom threading import Lock\r\nfrom concurrent.futures import ThreadPoolExecutor\r\n\r\n\"\"\"\r\nGlobal settings\r\n\"\"\"\r\n# Filter user warnings\r\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\r\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\n# Numpy seed\r\nnp.random.seed(10191)\r\n# Tensorflow seed\r\ntf.random.set_seed(10191)\r\n\r\n\"\"\"\r\nCONFIGURATIONS-I: NUMBER OF ORTHOGONAL CHANNELS AT THE BS ($N_{K}$) | NUMBER OF GN REQUESTS ($N_{GN}$)\r\n\"\"\"\r\ndata_payload_lengths = [0.1e6, 1e6, 5e6]\r\nrequest_arrival_rates = {0.1e6: 10, 1e6: 1.3333e-2, 5e6: 6.6666e-3}\r\nnumber_of_channels, number_of_requests, comm_delay_min, comm_delay_max, sleep_seconds = 5, 100, 1.0, 4.0, 0.01\r\n\r\n\"\"\"\r\nGlobal resources\r\n\"\"\"\r\nqueue = tf.Variable(tf.zeros(shape=(number_of_requests,), dtype=tf.int8), dtype=tf.int8)\r\nwait_times = tf.Variable(tf.zeros(shape=(number_of_requests,), dtype=tf.float64), dtype=tf.float64)\r\nlock, server_pool = Lock(), tf.Variable(tf.zeros(shape=(number_of_channels,), dtype=tf.int8), dtype=tf.int8)\r\nservice_times = tf.Variable(tf.random.uniform(shape=(number_of_requests,), dtype=tf.float64, minval=comm_delay_min,\r\n maxval=comm_delay_max), dtype=tf.float64)\r\n\r\n\r\ndef service_request(request_number, available_servers):\r\n \"\"\"\r\n Service the GN request: Consume | Occupy a server | Sleep for the service period | Un-occupy the server\r\n\r\n :param request_number: The GN request index for a global post-processing assignment of its service delay\r\n :param available_servers: A tensor of available servers at this point in the emulation\r\n\r\n :return: The server index (>0 | SERVED) or (-1 | NOT SERVED)\r\n \"\"\"\r\n server = available_servers[0, 0].numpy() if tf.not_equal(tf.size(available_servers), 0) else -1\r\n # A server is available\r\n if server != -1:\r\n tf.compat.v1.assign(queue[request_number], 1, use_locking=True)\r\n tf.compat.v1.assign(server_pool[server], 1, use_locking=True)\r\n time.sleep(service_times[request_number].numpy())\r\n tf.compat.v1.assign(server_pool[server], 0, use_locking=True)\r\n return server\r\n\r\n\r\n# The life of a GN communication request\r\ndef life_of_a_request(start_time, request_number, call_number):\r\n \"\"\"\r\n A thread-safe, executor-handled routine that emulates the life of a GN request\r\n\r\n :param start_time: The time at which the processing of this GN request started\r\n :param request_number: The GN request index for a global post-processing assignment of its service delay\r\n :param call_number: This function call's place in the overall process hierarchy of the GN request under analysis\r\n\r\n :return: A boolean indicating whether the request has been served\r\n \"\"\"\r\n served = False\r\n while not served:\r\n with lock:\r\n available_servers = tf.where(tf.equal(server_pool, 0))\r\n queue_go_ahead = tf.equal(queue[request_number - 1], 1) if request_number > 0 and call_number == 0 else True\r\n # FIFO: Ready to Pop!\r\n if queue_go_ahead:\r\n # All servers are busy\r\n if service_request(request_number, available_servers) == -1:\r\n print(f'Call Number = {call_number} | Waiting {request_number}')\r\n time.sleep(sleep_seconds)\r\n served = life_of_a_request(start_time, request_number, call_number + 1)\r\n # Request served\r\n else:\r\n 
print(f'Call Number = {call_number} | Served {request_number}!')\r\n tf.compat.v1.assign(wait_times[request_number], (time.time_ns() - start_time) / 1e9, use_locking=True)\r\n served = True\r\n # FIFO: Waiting for the request in front of me to Pop!\r\n else:\r\n time.sleep(sleep_seconds)\r\n continue\r\n return served\r\n # The request's life ends here...:-(\r\n\r\n\r\n# The memory-less arrival process of GN communication requests\r\ndef simulate_poisson_arrivals(payload_length):\r\n \"\"\"\r\n Simulate the Poisson arrival process of the active communication requests originating from the GNs in the cell\r\n\r\n :param: The data payload length input which serves as a key into the \"arrival_rates\" dictionary in order to select\r\n a payload-appropriate arrival rate\r\n\r\n :return: A list of GN active communication request arrival times\r\n \"\"\"\r\n request_number, arrival_rate, arrival_times = 0, request_arrival_rates[payload_length], []\r\n arrival_time = (-np.log(1 - np.random.random_sample())) / arrival_rate\r\n while request_number < number_of_requests:\r\n request_number += 1\r\n arrival_times.append(arrival_time)\r\n arrival_time += (-np.log(1 - np.random.random_sample())) / arrival_rate\r\n return arrival_times\r\n\r\n\r\n# Run Trigger\r\nif __name__ == '__main__':\r\n print('[INFO] BSQueueModel main: Starting the evaluation of the M/G/N Queueing System at the Base Station...')\r\n data_payload_length = data_payload_lengths[0] # 0.1 Mb data payload example\r\n request_arrival_times = simulate_poisson_arrivals(data_payload_length)\r\n with ThreadPoolExecutor(max_workers=8) as executor:\r\n for index, request_arrival_time in enumerate(request_arrival_times):\r\n print(f'Root Call | Arrived {index}!')\r\n executor.submit(life_of_a_request, time.time_ns(), index, 0)\r\n time.sleep(request_arrival_times[index + 1] - request_arrival_time\r\n if index < number_of_requests - 1 else 0.0)\r\n print(f'[INFO] BSQueueModel main: The wait delays associated with the {number_of_channels} servers at the '\r\n f'Base Station for {number_of_requests} GN requests of data payload size {data_payload_length} are '\r\n f'{wait_times.numpy()}')\r\n print(f'[INFO] BSQueueModel main: The total service delays associated with the {number_of_channels} servers at the '\r\n f'Base Station for {number_of_requests} GN requests of data payload size {data_payload_length} are '\r\n f'{tf.add(service_times, wait_times).numpy()}')\r\n# The evaluations of the ($M/G/N_{K}$) Queueing Model a the Base Station end here.\r\n","repo_name":"bharathkeshavamurthy/MAESTRO-X","sub_path":"src/archive/utilities/BSQueueModel.py","file_name":"BSQueueModel.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"38425230958","text":"import csv\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\n\n\nfilename = '1.csv'\nwith open(filename) as f:\n reader = csv.reader(f)\n\n dates, highs = [], []\n for row in reader:\n current_date = datetime.strptime(row[0], \"%Y-%m-%d\")\n dates.append(current_date)\n high = int(row[1])\n highs.append(high)\n\n\nfig = plt.figure(dpi=128, figsize=(10, 6))\nplt.plot(dates, highs, c='red')\n# 设置图形的格式\nplt.title(\"Daily high temperatures, July 2014\")\n\nplt.xlabel('', fontsize=16)\nplt.ylabel(\"Temperature (F)\", fontsize=16)\nfig.autofmt_xdate()\nplt.tick_params(axis='both', which='major', 
labelsize=16)\n\nplt.show()\n","repo_name":"singi2016cn/python-start-programming","sub_path":"16/1/highs_lows.py","file_name":"highs_lows.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37217482870","text":"\"\"\"Tests for `words` module.\"\"\"\n\nfrom typing import List, Optional\n\nimport pandas as pd\nimport pytest\n\nfrom stimpool import WordPool\n\n\ndef test_get_default_pool() -> None:\n \"\"\"Test words._get_default_pool.\"\"\"\n\n shape_exp = (55457,)\n word_pool = WordPool()\n word_pool_default = word_pool._get_default_pool()\n shape_obs = word_pool_default.shape\n\n assert shape_obs == shape_exp\n\n\n@pytest.mark.parametrize(\n (\"word_original\", \"word_expected\"),\n [\n # none\n (\"perro\", \"perro\"),\n # upper\n (\"PErrO\", \"perro\"),\n # space\n (\" perro \", \"perro\"),\n # mixed\n (\" PErrO\", \"perro\"),\n ],\n)\ndef test_normalize_word(word_original: str, word_expected: str) -> None:\n \"\"\"Test the _normalize_word with different cases.\"\"\"\n\n word_pool = WordPool()\n word_observed = word_pool._normalize_word(word_original)\n\n assert word_observed == word_expected\n\n\n@pytest.mark.parametrize(\n (\"word\", \"expected\"),\n [\n # valid\n (\"perro\", False),\n # edge case; this is the expected behavior\n # blank spaces are handled by other methods\n (\"carro \", False),\n # invalid\n (\"canción\", True),\n (\"así\", True),\n (\"güiro\", True),\n (\"ñame\", True),\n ],\n)\ndef test_check_accented_characters(word: str, expected: bool) -> None:\n \"\"\"Test the _check_accented_characters with different cases.\"\"\"\n\n word_pool = WordPool()\n obs = word_pool._check_accented_characters(word)\n\n assert obs == expected\n\n\n@pytest.mark.parametrize(\n (\"words\", \"exp\", \"how\"),\n [\n ([\"yes\", \"no\", \"yes\", \"no\"], pd.Series([\"yes\", \"yes\"], dtype=\"object\"), \"keep\"),\n ([\"yes\", \"no\", \"yes\", \"no\"], pd.Series([\"no\", \"no\"], dtype=\"object\"), \"remove\"),\n ],\n)\ndef test_get_words_meeting_criteria(words: List[str], exp: pd.Series, how: str) -> None:\n \"\"\"Test _get_words_meeting_criteria with different cases.\"\"\"\n word_pool = WordPool(words)\n obs: pd.Series = word_pool._get_words_meeting_criteria(\n func_checks_criteria=lambda x: \"yes\" == x,\n how=how,\n )\n\n obs = obs.reset_index(drop=True)\n assert obs.equals(exp)\n\n\n@pytest.mark.parametrize(\n (\"word\", \"min_len\", \"max_len\", \"exp\"),\n [\n # only min\n (\"perro\", 1, None, True),\n (\"perro\", 6, None, False),\n # only max\n (\"perro\", None, 6, True),\n (\"perro\", None, 1, False),\n # None; special case\n (\"perro\", None, None, True),\n (\"perro\", None, None, True),\n # both\n (\"perro\", 1, 5, True),\n (\"perro\", 1, 4, False),\n ],\n)\ndef test_check_word_length(word: str, min_len: int, max_len: int, exp: bool) -> None:\n \"\"\"Test the _check_word_length with different cases.\"\"\"\n\n word_pool = WordPool()\n obs = word_pool._check_word_length(word, min_len, max_len)\n\n assert obs == exp\n\n\n@pytest.mark.parametrize(\n (\"words\", \"min_len\", \"max_len\", \"exp\"),\n [\n # only min\n # all meet criteria\n (\n [\"al\", \"gato\", \"cabeza\", \"periódico\"],\n 1,\n None,\n pd.Series([\"al\", \"gato\", \"cabeza\", \"periódico\"], dtype=\"object\"),\n ),\n # only min\n # some meet criteria\n (\n [\"al\", \"gato\", \"cabeza\", \"periódico\"],\n 6,\n None,\n pd.Series([\"cabeza\", \"periódico\"], dtype=\"object\"),\n ),\n # only min\n # none meet criteria\n (\n 
[\"al\", \"gato\", \"cabeza\", \"periódico\"],\n 15,\n None,\n pd.Series([], dtype=\"object\"),\n ),\n # only max\n # all meet criteria\n (\n [\"al\", \"gato\", \"cabeza\", \"periódico\"],\n None,\n 15,\n pd.Series([\"al\", \"gato\", \"cabeza\", \"periódico\"], dtype=\"object\"),\n ),\n # only max\n # some meet criteria\n (\n [\"al\", \"gato\", \"cabeza\", \"periódico\"],\n None,\n 5,\n pd.Series([\"al\", \"gato\"], dtype=\"object\"),\n ),\n # only max\n # none meet criteria\n ([\"al\", \"gato\", \"cabeza\", \"periódico\"], None, 1, pd.Series([], dtype=\"object\")),\n # only min y max\n # all meet criteria\n (\n [\"al\", \"gato\", \"cabeza\", \"periódico\"],\n 0,\n 15,\n pd.Series([\"al\", \"gato\", \"cabeza\", \"periódico\"], dtype=\"object\"),\n ),\n # only min y max\n # some meet criteria\n (\n [\"al\", \"gato\", \"cabeza\", \"periódico\"],\n 2,\n 5,\n pd.Series([\"al\", \"gato\"], dtype=\"object\"),\n ),\n # only min y max\n # none meet criteria\n ([\"al\", \"gato\", \"cabeza\", \"periódico\"], 3, 1, pd.Series([], dtype=\"object\")),\n ],\n)\ndef test_select_words_of_length(\n words: List[str], min_len: Optional[int], max_len: Optional[int], exp: pd.Series\n) -> None:\n \"\"\"Test the _select_words_of_length with different cases.\"\"\"\n\n word_pool = WordPool(words)\n word_pool.select_words_of_length(min_len, max_len)\n obs: pd.Series = word_pool._pool_cleaned\n\n obs = obs.reset_index(drop=True)\n exp = exp.reset_index(drop=True)\n obs.equals(exp)\n\n\ndef test_select_words_of_length_exception() -> None:\n \"\"\"Test that _select_words_of_length raises exception when appropriate.\n\n It is appropriate to raise an exception if no min or max length is specified.\n \"\"\"\n\n word_pool = WordPool()\n with pytest.raises(ValueError):\n word_pool.select_words_of_length()\n\n\n@pytest.mark.parametrize(\n (\"word\", \"exp\"),\n [\n # with\n (\"acantio/S\", \"acantio\"),\n (\"acantonamiento/hS\", \"acantonamiento\"),\n (\"acantarar/RED\", \"acantarar\"),\n # without\n (\"acaso\", \"acaso\"),\n (\"accidentalmente\", \"accidentalmente\"),\n ],\n)\ndef test_remove_conjugation_suffix_from_word(word: str, exp: str) -> None:\n \"\"\"Test the _remove_conjugation_suffix_from_word with different cases.\"\"\"\n\n word_pool = WordPool(word)\n obs = word_pool._remove_conjugation_suffix_from_word(word)\n\n assert obs == exp\n\n\n@pytest.mark.parametrize(\n (\"words\", \"exp\"),\n [\n # none\n (\n [\"accidentalmente\", \"úsenos\", \"óigame\"],\n [\"accidentalmente\", \"úsenos\", \"óigame\"],\n ),\n # all\n (\n [\"accidentar/RED\", \"accidentario/GS\", \"accidente/S\"],\n [\"accidentar\", \"accidentario\", \"accidente\"],\n ),\n # mixed\n (\n [\n \"accidentalmente\",\n \"úsenos\",\n \"óigame\",\n \"accidentar/RED\",\n \"accidentario/GS\",\n \"accidente/S\",\n ],\n [\n \"accidentalmente\",\n \"úsenos\",\n \"óigame\",\n \"accidentar\",\n \"accidentario\",\n \"accidente\",\n ],\n ),\n ],\n)\ndef test_clean_conjugation_suffixes(words: List[str], exp: List[Optional[str]]) -> None:\n \"\"\"Test the _clean_conjugation_suffixes with different cases.\"\"\"\n\n exp: pd.Series = pd.Series(exp, dtype=\"object\") # type: ignore\n exp = exp.reset_index(drop=True) # type: ignore\n word_pool = WordPool(words)\n words = pd.Series(words, dtype=\"object\")\n obs = word_pool._clean_conjugation_suffixes(words)\n obs = obs.reset_index(drop=True)\n\n assert obs.equals(exp)\n\n\n@pytest.mark.parametrize(\"words\", [[\"al\", \"gato\", \"cabeza\", \"periódico\", \"ratón\"]])\ndef test_sample_pool_is_reproducible(words: List[str]) -> 
None:\n \"\"\"Test that sample_pool is reproducible.\"\"\"\n\n word_pool1 = WordPool(words)\n word_pool1.sample_pool(n=3)\n\n word_pool2 = WordPool(words)\n word_pool2.sample_pool(n=3)\n\n word_pool1.words.equals(word_pool2.words)\n","repo_name":"mario-bermonti/stimpool","sub_path":"tests/test_words.py","file_name":"test_words.py","file_ext":"py","file_size_in_byte":7783,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"27194871112","text":"import os\nfrom box.exceptions import BoxValueError\nimport yaml\nimport json\nimport joblib\nfrom src import logger\nfrom ensure import ensure_annotations\nfrom box import ConfigBox\nfrom pathlib import Path\nfrom typing import Any\nimport base64\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n@ensure_annotations\ndef read_yaml(path_to_yaml:Path)->ConfigBox:\n \"\"\"reads yaml file\n \n Args:path_to_yaml (str): path to yaml file\n \n Raises: \n ValueError: if yaml file is empty\n e: empty file\n \n Returns:\n\n ConfigBox: configuration box \n \"\"\"\n try: \n with open(path_to_yaml) as yaml_file:\n content =yaml.safe_load(yaml_file)\n logger.info(f\"yaml file{path_to_yaml}loaded Successfully\")\n return ConfigBox(content)\n \n except BoxValueError:\n raise ValueError(f\"yaml file{path_to_yaml} is empty\")\n except Exception as e:\n raise e \n\n@ensure_annotations\ndef create_directories(path_to_directories: list , verbose=True):\n \"\"\"creates directories\n \n Args: \n path_to_directories(list): list of path of directories\n \n \"\"\"\n for path in path_to_directories:\n os.makedirs(path, exist_ok=True)\n if verbose:\n logger.info(f'created directory at:{path}')\n\n@ensure_annotations\ndef display(display_list : list):\n plt.figure(figsize=(15, 15))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))\n plt.axis('off')\n plt.show()\n\ndef create_mask(pred_mask):\n pred_mask = tf.argmax(pred_mask, axis=-1)\n pred_mask = pred_mask[..., tf.newaxis]\n return pred_mask[0]\n\ndef show_predictions(unet, dataset=None, num=1):\n \"\"\"\n Displays the first image of each of the num batches\n \"\"\"\n\n for image, mask in dataset.take(num):\n pred_mask = unet.predict(image)\n display([image[0], mask[0], create_mask(pred_mask)])\n","repo_name":"rsmeghana8/Semantic_Segmentation_for_self_driving_cars","sub_path":"src/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35589021496","text":"def solution(n, times):\n left = 1 # 가장 최소 시간\n right = max(times)*n # 가장 오래걸리는 심사관으로만 심사한 시간\n \n # 더 많은 인원을 심사할 수 있다면 현재 탐색한 시간보다 아래쪽에서 다시 찾고, \n # 심사해야 하는 인원수보다 현재 심사 가능한 인원수가 더 작다면 위쪽에서 다시 찾는다.\n\n while left < right:\n people = 0 # 심사받을수있는 사람 수\n \n mid = (left+right)//2\n \n for i in times:\n people += mid//i # 중간값으로 심사가능한 사람 세기\n \n if people >= n: # n명 이상 심사가능한 경우\n right = mid # 심사시간 더 짧게 가능한지 확인\n \n else: # n명 불가능하면\n left = mid+1 # 심사시간 늘리기\n \n return left","repo_name":"agilestar8/coding-test-","sub_path":"프로그래머스 lv3/입국심사.py","file_name":"입국심사.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74866854569","text":"import numpy\nimport tensorflow as tf\nimport gzip\nimport 
argparse\nimport os\nimport sys\nimport time\n\nfrom six.moves import urllib\nfrom six.moves import xrange\n\nSOURCE_URL = \"http://yann.lecun.com/exdb/mnist/\"\nWORK_DIRECTORY = \"../Data/mnist\"\nIMAGE_SIZE = 28\nNUM_CHANNELS = 1\nPIXEL_DEPTH = 255\nNUM_LABELS = 10\nVALIDATION_SIZE = 5000 # Size of the validation set.\nSEED = 66478 # Set to None for random seed.\nBATCH_SIZE = 64\nNUM_EPOCHS = 10\nEVAL_BATCH_SIZE = 64\nEVAL_FREQUENCY = 100 # Number of steps between evaluations.\n\nFLAGS = None\n\ndef maybe_download(filename):\n \"\"\"Download the data from Yann's website, unless it's already here.\"\"\"\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath\n\n\ndef extract_data(filename, num_images):\n \"\"\"Extract the images into a 4D tensor [image index, y, x, channels].\n\n Values are rescaled from [0, 255] down to [-0.5, 0.5].\n \"\"\"\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data\n\n\ndef extract_labels(filename, num_images):\n \"\"\"Extract the labels into a vector of int64 label IDs.\"\"\"\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels\n\n\ndef fake_data(num_images):\n \"\"\"Generate a fake dataset that matches the dimensions of MNIST.\"\"\"\n data = numpy.ndarray(\n shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),\n dtype=numpy.float32)\n labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)\n for image in xrange(num_images):\n label = image % 2\n data[image, :, :, 0] = label - 0.5\n labels[image] = label\n return data, labels\n\ndef one_hot(labels):\n one_hot_labels = numpy.zeros((labels.shape[0], NUM_LABELS))\n for i in range(len(labels)):\n one_hot_labels[i][labels[i]] = 1\n return one_hot_labels\n\ndef get_data():\n if FLAGS.self_test:\n print('Running self-test.')\n train_data, train_labels = fake_data(256)\n validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)\n test_data, test_labels = fake_data(EVAL_BATCH_SIZE)\n num_epochs = 1\n else:\n # Get the data.\n train_data_filename = maybe_download('train-images-idx3-ubyte.gz')\n train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')\n test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')\n test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')\n\n # Extract it into numpy arrays.\n train_data = extract_data(train_data_filename, 60000)\n train_labels = extract_labels(train_labels_filename, 60000)\n test_data = extract_data(test_data_filename, 10000)\n test_labels = extract_labels(test_labels_filename, 10000)\n\n train_labels = one_hot(train_labels)\n test_labels = one_hot(test_labels)\n # Generate a validation set.\n validation_data = train_data[:VALIDATION_SIZE, ...]\n validation_labels = train_labels[:VALIDATION_SIZE]\n train_data = 
train_data[VALIDATION_SIZE:, ...]\n train_labels = train_labels[VALIDATION_SIZE:]\n num_epochs = NUM_EPOCHS\n train_size = train_labels.shape[0]\n test_size = test_labels.shape[0]\n return train_data, train_labels, test_data, test_labels, validation_data, validation_labels\n # print(train_data.shape, train_labels.shape)\n # print(validation_data.shape, validation_labels.shape)\n # print(test_data.shape, test_labels.shape)\n # print(train_size, test_size)\n # print(train_data[0])\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--use_fp16',\n default=False,\n help='Use half floats instead of full floats if True.',\n action='store_true')\nparser.add_argument(\n '--self_test',\n default=False,\n action='store_true',\n help='True if running a self test.')\n\nFLAGS, unparsed = parser.parse_known_args()\n# get_data()\n\n","repo_name":"govg/acass","sub_path":"data/mnist/mnist_input.py","file_name":"mnist_input.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72704813608","text":"# zhangshulin\n# 2018-3-21\n# e-mail: zhangslwork@yeah.net\n\n\nVOCABS_SIZE = 5000\nLSTM_NA = 128\nMAX_LEN = 30\nDEV_TEST_SIZE = 4000\nWEIGHTS = './weights.h5'\n\n\nfrom couplets_utils import load_datasets, load_sample_datasets\nfrom model import create_train_model\nfrom keras.optimizers import Adam \nimport numpy as np\nimport os\nimport logging\nimport argparse\n\n\ndef train(epochs=1, learning_rate=0.01, batch_size=64, keep_prob=1, resume=True, sample=False, mode='train'):\n logging.info('loading datasets')\n\n if sample:\n dataset = load_sample_datasets(VOCABS_SIZE, MAX_LEN, batch_size, 1000, LSTM_NA)\n generator = dataset['sample_gen']\n else:\n datasets = load_datasets(VOCABS_SIZE, MAX_LEN, DEV_TEST_SIZE, batch_size, LSTM_NA)\n if mode == 'train':\n generator = datasets['train_gen']\n elif mode == 'evaluate':\n generator = datasets['dev_gen']\n else:\n generator = datasets['test_gen']\n\n logging.info('begin creating model.')\n model = create_train_model(VOCABS_SIZE, LSTM_NA, MAX_LEN, keep_prob)\n\n adam = Adam(lr=learning_rate)\n model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n logging.info('model creating complete')\n\n if os.path.exists(WEIGHTS) and resume:\n logging.info('begin load weight')\n model.load_weights(WEIGHTS)\n logging.info('weight load complete')\n\n if mode == 'train':\n logging.info('begin training')\n model.fit_generator(generator, epochs=epochs)\n logging.info('begin weight')\n model.save_weights(WEIGHTS)\n logging.info('end saving weight')\n logging.info('training end')\n elif mode == 'evaluate':\n logging.info('begin evaluate')\n evaluation = model.evaluate_generator(generator)\n accuracy = sum(evaluation[-VOCABS_SIZE:]) / VOCABS_SIZE \n print('total loss: {}, average accuray: {}'.format(evaluation[0], accuracy))\n logging.info('evaluating end')\n else:\n logging.info('begin test')\n evaluation = model.evaluate_generator(generator)\n accuracy = sum(evaluation[-VOCABS_SIZE:]) / VOCABS_SIZE \n print('total loss: {}, average accuray: {}'.format(evaluation[0], accuracy))\n logging.info('testing end')\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--lr', default=0.01, type=float, help='learning rate')\n parser.add_argument('--batch', default=64, type=int, help='mini batch size')\n parser.add_argument('--epochs', default=1, type=int, help='epochs')\n 
parser.add_argument('--keep_prob', default=1, type=float)\n parser.add_argument('--no_resume', default=True, action='store_false', dest='resume')\n parser.add_argument('--sample', default=False, action='store_true')\n parser.add_argument('--evaluate', default=False, action='store_true')\n parser.add_argument('--test', default=False, action='store_true')\n \n args = parser.parse_args()\n \n if args.evaluate == True:\n mode = 'evaluate'\n elif args.test == True:\n mode = 'test'\n else:\n mode = 'train'\n\n train(\n epochs=args.epochs, \n learning_rate=args.lr, \n batch_size=args.batch, \n keep_prob=args.keep_prob,\n resume=args.resume, \n sample=args.sample,\n mode=mode\n )\n\n","repo_name":"Shulin-Zhang/Use_Tensorflow_Write_Couplets","sub_path":"keras_imp_char/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"74452268007","text":"\"\"\"\nThis is dirwalk.py,\nMatt's very first python code of significance.\n\"\"\"\n\n# TODO: don't catalog sockets or fifos (see finddup for more weird sys files)\n\nimport os\nimport os.path\nimport re\nimport stat\nimport sys\nimport threading\n\n# global flag to show we did/didn't last output a \\n to stderr\n# SYS_STDERR_CR = True\nLAST_STDERR_STRLEN = 0\nWATCHDOG_TIMER = False\nCURRENT_ROOT = \"\"\nCURRENT_FILE = \"\"\n\n# x[1] is integer: sort decreasing\n# then if equal,\n# x[0] is string: sort increasing\ndef byitemvalalpha(x):\n \"\"\"\n given the tuples from a dict (key,item) pairs\n sort based first on item, and if both items are the same\n sort by key\n \"\"\"\n return \"%016d\" % (1e15 - x[1]) + str(x[0])\n\n\nclass StderrPrinter(object):\n r\"\"\"Prints to stderr especially for use with \\r and same-line updates\n\n Keeps track of whether an extra \\n is needed before printing string,\n especially in cases where the previous print string didn't have\n one and this print string doesn't start with \\r\n\n Allows for easily printing error messages (regular print) amongst\n same-line updates (starting with \\r and with no finishing \\n).\n \"\"\"\n\n def __init__(self):\n self.need_cr = False\n\n def print(self, text, **prkwargs):\n \"\"\"Print to stderr, automatically knowing if we need a CR beforehand.\"\"\"\n if text.startswith(\"\\r\"):\n self.need_cr = False\n # we need_cr if last print specifically didn't have a \\n,\n # and this one doesn't start with \\r\n # Most likely last one was a progress display and this one is an\n # error or warning.\n # Instead of printing on the end of the line after the progress\n # line, it \\n to the next line.\n # [It could just as easily be a \\r to erase the previous line.]\n if self.need_cr:\n print(\"\", file=sys.stderr)\n\n print(text, file=sys.stderr, **prkwargs)\n\n self.need_cr = prkwargs.get(\"end\", \"\\n\") == \"\" and not text.endswith(\"\\n\")\n\n\n# Global\nmyerr = StderrPrinter()\n\n\n# get size on disk (blocks*block_size) via lstat, but if we can't,\n# get size of file data\n# TODO: add specific exceptions to except\ndef getfilesize(fullfilename):\n try:\n statinfo = os.lstat(fullfilename)\n try:\n # use blocks if possible\n # st_blocks is in units of 512-byte blocks\n size = statinfo.st_blocks * 512\n except KeyboardInterrupt:\n # actually stop if ctrl-c\n raise\n except AttributeError:\n # Windows has no st_blocks\n # if not available, use stupid python getsize\n size = os.path.getsize(fullfilename)\n except KeyboardInterrupt:\n # actually stop if ctrl-c\n raise\n 
except:\n size = 0\n myerr.print(\"Can't read \" + fullfilename)\n myerr.print(\"(\" + sys.exc_info()[0].__name__ + \")\")\n return size\n\n\n# convert to string with units\n# use k=1024 for binary (e.g. kB)\n# use k=1000 for non-binary kW\ndef size2eng(size, k=1024):\n if size > k ** 5:\n sizestr = \"%.1fP\" % (float(size) / k ** 5)\n elif size > k ** 4:\n sizestr = \"%.1fT\" % (float(size) / k ** 4)\n elif size > k ** 3:\n sizestr = \"%.1fG\" % (float(size) / k ** 3)\n elif size > k ** 2:\n sizestr = \"%.1fM\" % (float(size) / k ** 2)\n elif size > k:\n sizestr = \"%.1fk\" % (float(size) / k)\n else:\n sizestr = \"%.1g\" % (float(size))\n return sizestr\n\n\ndef eng2size(numstr):\n if numstr.endswith((\"k\", \"K\")):\n num = int(numstr[:-1]) * 1024\n elif numstr.endswith((\"m\", \"M\")):\n num = int(numstr[:-1]) * 1024 * 1024\n elif numstr.endswith((\"g\", \"G\")):\n num = int(numstr[:-1]) * 1024 * 1024 * 1024\n elif numstr.endswith((\"t\", \"T\")):\n num = int(numstr[:-1]) * 1024 * 1024 * 1024 * 1024\n elif numstr.endswith((\"p\", \"P\")):\n num = int(numstr[:-1]) * 1024 * 1024 * 1024 * 1024 * 1024\n else:\n num = int(numstr)\n return num\n\n\n# delete all keys in sizedict that resolve to a number less than filter_val\n# can operate on sizedict because it is a reference\ndef filter_thresh(sizedict, filter_val):\n filtered_keys = []\n for key in sizedict.keys():\n if sizedict[key] < filter_val:\n filtered_keys.append(key)\n for key in filtered_keys:\n sizedict.pop(key)\n\n\ndef bad_filetype(fullfilename):\n returnval = False\n try:\n # don't follow symlinks, just treat them like a regular file\n this_filestat = os.stat(fullfilename, follow_symlinks=False)\n except:\n myerr.print(\"Can't stat: \" + fullfilename)\n return True\n\n if stat.S_ISFIFO(this_filestat.st_mode):\n # skip FIFOs\n returnval = True\n if stat.S_ISSOCK(this_filestat.st_mode):\n # skip sockets\n returnval = True\n\n return returnval\n\n\n# TODO: print which dir we were hung on before exiting\ndef watchdog_timeout():\n myerr.print(\"Timeout due to hung file I/O\")\n myerr.print(\"Current dir: %s\" % (CURRENT_ROOT))\n myerr.print(\"Current file: %s\" % (CURRENT_FILE))\n # os._exit better than sys.exit because it forces all threads to die now\n os._exit(1)\n\n\ndef index_dir(treeroot, exclude_path):\n # use global so we can cancel thread for keyboard interrupt in __main__\n global WATCHDOG_TIMER\n global CURRENT_ROOT\n global CURRENT_FILE\n\n # init main dictionary\n sizedict = {}\n filesdone = 0\n # timeout for 1000 files processed in seconds\n watchdog_timeout_sec = 20.0\n\n if exclude_path:\n exclude_path = re.escape(exclude_path)\n\n # watchdog timer that unless canceled will raise Exception after sec\n # to guard against file system hangs\n WATCHDOG_TIMER = threading.Timer(watchdog_timeout_sec, watchdog_timeout)\n WATCHDOG_TIMER.start()\n\n for (root, dirs, files) in os.walk(treeroot):\n # for debugging on hang\n CURRENT_ROOT = root\n\n # add in directories to list of files in this dir\n if exclude_path and re.search(exclude_path, root):\n myerr.print(\"skipping root \" + root)\n continue\n # remove anything matching exclude from dirs, will prevent\n # os.walk from searching there! 
(slice or del)\n if exclude_path:\n for thisdir in dirs:\n if re.search(exclude_path, os.path.join(root, thisdir)):\n myerr.print(\"excluding: \" + root + os.sep + thisdir)\n dirs.remove(thisdir)\n # let's not index remote mounts (MacOS only...)\n # TODO: check for mount points and skip those\n if root == os.sep and \"Volumes\" in dirs:\n myerr.print(\"excluding: \" + os.sep + \"Volumes\")\n dirs.remove(\"Volumes\")\n\n # Presumably we add dirs so we can get size of actual dir descriptor\n files.extend(dirs)\n\n for filename in files:\n # for debugging on hang\n CURRENT_FILE = filename\n\n # full path to filename\n fullfilename = os.path.join(root, filename)\n\n if bad_filetype(fullfilename):\n myerr.print(\"Bad filetype: \" + fullfilename)\n continue\n\n if exclude_path and re.search(exclude_path, fullfilename):\n myerr.print(\"skipping file \" + fullfilename)\n continue\n\n size = getfilesize(fullfilename)\n\n # add this file or dir's size to itself and every parent dir\n # in sizedict, so dirs include total size of files below\n while len(fullfilename) >= len(treeroot):\n sizedict[fullfilename] = sizedict.get(fullfilename, 0) + size\n # if-else is a hack, because os.path.split('/')\n # returns ('/',''), making an infinite loop\n if fullfilename == os.sep:\n fullfilename = \"\"\n else:\n (fullfilename, _) = os.path.split(fullfilename)\n\n filesdone += 1\n if filesdone % 1000 == 0:\n myerr.print(\n \"\\r\" + str(filesdone) + \" files processed.\", end=\"\", flush=True\n )\n # reset watchdog timer\n WATCHDOG_TIMER.cancel()\n # threads can only be started once, so re-instance\n WATCHDOG_TIMER = threading.Timer(watchdog_timeout_sec, watchdog_timeout)\n WATCHDOG_TIMER.start()\n\n # we're done, stop watchdog timer\n WATCHDOG_TIMER.cancel()\n\n # now add in size of root dir\n sizedict[treeroot] = sizedict.get(treeroot, 0) + getfilesize(treeroot)\n filesdone += 1\n\n # report final tally of files\n myerr.print(\"\\r\" + str(filesdone) + \" files processed.\")\n print(str(filesdone) + \" files processed.\")\n\n return sizedict\n\n\n# vim: sts=4 et sw=4\n","repo_name":"itsayellow/durank","sub_path":"src/durank/durank.py","file_name":"durank.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27036631866","text":"#!/usr/bin/python3\n\nimport requests\nimport lxml.html as lh\nfrom lxml import etree\n#import pandas as pd\n\nif __name__ == \"__main__\":\n default_stock_site = \"http://finance.daum.net/item/quote.daum?code=\"\n default_tickers = [ \"068270\" ]\n\n for ticker in default_tickers:\n page = requests.get(default_stock_site + ticker)\n doc = lh.fromstring(page.content)\n\n element = doc.xpath(\"//div[@id='price5StepBody']/parent::*\")\n print(element.text)\n","repo_name":"silverfox516/play","sub_path":"python/stock2.py","file_name":"stock2.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70791034408","text":"import ezdxf\n\nLAYER_NAME = 'Lines'\n\n# line weight im mm times 100, e.g. 
0.13mm = 13\n# minimum line weight 13\n# maximum line width 200\nWEIGHTS = [13, 18, 20, 25, 35, 50, 70, 100, 200, -1, -3]\n\n\ndef lines_with_lineweight(msp, x1, x2):\n for index, lineweight in enumerate(WEIGHTS):\n y = index * 10\n msp.add_line(\n (x1, y),\n (x2, y),\n dxfattribs={\n 'layer': LAYER_NAME,\n 'lineweight': lineweight,\n },\n )\n\n\ndef lines_with_default_weight(msp, x1, x2):\n for index in range(len(WEIGHTS)):\n y = index * 10\n msp.add_line(\n (x1, y),\n (x2, y),\n dxfattribs={'layer': LAYER_NAME},\n )\n\n\ndoc = ezdxf.new('R2004')\nmsp = doc.modelspace()\nlines_layer = doc.layers.new(LAYER_NAME)\n# set default line width as enum\nlines_layer.dxf.lineweight = 35\n\nlines_with_lineweight(msp, x1=0, x2=100)\nlines_with_default_weight(msp, x1=150, x2=250)\n\ndoc.saveas(\"using_lineweight.dxf\")\n","repo_name":"DatacloudIntl/dc_ezdxf","sub_path":"examples/using_lineweight.py","file_name":"using_lineweight.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23080559046","text":"import db_localhost as db\n\n\ndef get_max_id():\n id_set = set()\n for line in db.QueryBySQL('select id from rest_merge.rest_unid'):\n id_set.add(int(line['id'][1:]))\n return 'r' + str(max(id_set))\n\nif __name__ == '__main__':\n print(get_max_id())","repo_name":"20113261/p_m","sub_path":"rest_merge/get_max_id.py","file_name":"get_max_id.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70312219688","text":"import os\nimport hashlib\nimport requests\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom allauth.socialaccount.models import SocialAccount\nfrom allauth.account.models import EmailAddress\nfrom imagekit.models import ProcessedImageField, ImageSpecField\nfrom imagekit.processors import ResizeToFit\nfrom django.utils import timezone\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import receiver\nfrom allauth.socialaccount.signals import social_account_added\n\ndef delete_image(path):\n \"\"\"\n Delete the image at the specified path.\n\n Parameters:\n - path (str): The path to the image file.\n \"\"\"\n if os.path.isfile(path):\n os.remove(path)\n\nclass UploadImage(models.Model):\n \"\"\"\n Represents an uploaded image.\n \"\"\"\n image = ProcessedImageField(\n upload_to='profile/%Y/%m/%d',\n processors=[ResizeToFit(800, 600, False)],\n format='JPEG',\n options={'quality': 60}\n )\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n edited = models.SmallIntegerField(default=0)\n created_on = models.DateTimeField(default=timezone.now)\n modified_on = models.DateTimeField(auto_now=True)\n thumbnail = ImageSpecField(\n source='image',\n processors=[ResizeToFit(120, 120, False)],\n format='JPEG',\n options={'quality': 100}\n )\n\n class Meta:\n \"\"\"\n Meta class for specifying model options.\n \"\"\"\n ordering = ('-modified_on',)\n\n@receiver(pre_delete, sender=UploadImage)\ndef pre_delete_image_and_thumbnail(sender, instance, **kwargs):\n \"\"\"\n Signal handler to delete image files and thumbnails on pre_delete.\n\n Parameters:\n - sender: The model class.\n - instance: The instance being deleted.\n \"\"\"\n if instance.image:\n delete_image(instance.image.path)\n if instance.thumbnail:\n delete_image(instance.thumbnail.path)\n\nclass UserProfile(models.Model):\n \"\"\"\n Represents a user profile.\n \"\"\"\n user = 
models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')\n avatar_url = models.URLField(blank=True, null=True)\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the user profile.\n\n Returns:\n - str: The string representation.\n \"\"\"\n return f\"{self.user.username}'s profile\"\n\n class Meta:\n db_table = 'user_profile'\n\n def is_email_verified(self):\n \"\"\"\n Check if the user's email is verified.\n\n Returns:\n - bool: True if the email is verified, False otherwise.\n \"\"\"\n email_addresses = EmailAddress.objects.filter(email=self.user.email)\n return email_addresses.exists() and email_addresses[0].verified\n\n def get_profile_image_url(self):\n \"\"\"\n Get the URL of the user's profile image.\n\n Returns:\n - str: The profile image URL.\n \"\"\"\n if self.avatar_url:\n return self.avatar_url\n\n social_account = SocialAccount.objects.filter(user=self.user).first()\n\n if social_account:\n if social_account.provider == 'google':\n return social_account.extra_data.get('picture', '')\n elif social_account.provider == 'facebook':\n # Fetch Facebook profile picture using the Graph API\n access_token = social_account.socialtoken_set.first().token\n fb_response = requests.get(\n f'https://graph.facebook.com/v12.0/{social_account.uid}/picture',\n params={'access_token': access_token, 'type': 'large'}\n )\n if fb_response.status_code == 200:\n return fb_response.url\n\n return f\"http://www.gravatar.com/avatar/{hashlib.md5(self.user.email.encode('utf-8')).hexdigest()}?s=40\"\n\n User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])\n \n @receiver(social_account_added)\n def update_user_avatar_and_populate_profile(sender, request, sociallogin, **kwargs):\n user = sociallogin.user\n\n if sociallogin.is_existing or not user.profile.avatar_url:\n account = sociallogin.account\n if account.provider == 'google':\n user.profile.avatar_url = account.extra_data.get('picture', '')\n elif account.provider == 'facebook':\n access_token = account.socialtoken_set.first().token\n fb_response = requests.get(\n f'https://graph.facebook.com/v12.0/{account.uid}/picture',\n params={'access_token': access_token, 'type': 'large'}\n )\n print(f\"Facebook API Response: {fb_response.text}\") # Add this line for debugging\n\n if fb_response.status_code == 200:\n user.profile.avatar_url = fb_response.url\n \n user.profile.save()","repo_name":"Craigryy/PyIMAGE","sub_path":"pillycam/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2961780049","text":"from dataclasses import dataclass, field\nfrom typing import Any, List\n\nfrom habitat import registry, Measure, EmbodiedTask\nfrom habitat.config.default_structured_configs import MeasurementConfig\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import DictConfig\n\n\n@registry.register_measure\nclass SumReward(Measure):\n \"\"\"\n Sums various reward measures.\n \"\"\"\n\n cls_uuid: str = \"sum_reward\"\n\n def __init__(\n self, config: \"DictConfig\", *args: Any, **kwargs: Any\n ):\n self._config = config\n self._reward_terms = config.reward_terms\n self._reward_coefficients = [float(i) for i in config.reward_coefficients]\n super().__init__()\n\n def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return self.cls_uuid\n\n def reset_metric(self, episode, task, *args: Any, **kwargs: Any):\n task.measurements.check_measure_dependencies(\n 
self.uuid, self._reward_terms\n )\n self.update_metric(episode=episode, task=task, *args, **kwargs) # type: ignore\n\n def update_metric(\n self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any\n ):\n self._metric = 0\n for term, coefficient in zip(self._reward_terms, self._reward_coefficients):\n self._metric += coefficient * task.measurements.measures[term].get_metric()\n\n\n@dataclass\nclass SumRewardMeasurementConfig(MeasurementConfig):\n type: str = SumReward.__name__\n reward_terms: List[str] = field(\n # available options are \"disk\" and \"tensorboard\"\n default_factory=list\n )\n reward_coefficients: List[str] = field(\n # available options are \"disk\" and \"tensorboard\"\n default_factory=list\n )\n\n\ncs = ConfigStore.instance()\ncs.store(\n package=f\"habitat.task.measurements.{SumReward.cls_uuid}\",\n group=\"habitat/task/measurements\",\n name=f\"{SumReward.cls_uuid}\",\n node=SumRewardMeasurementConfig,\n)\n","repo_name":"naokiyokoyama/ovon","sub_path":"ovon/measurements/sum_reward.py","file_name":"sum_reward.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37051652443","text":"#!/usr/bin/env python\n# @Time : 2019/3/24 19:07 \n__author__ = 'Boaz'\n\nimport unittest\nfrom survey import AnoymousSurvey\n\n\nclass TestAnonymousSurvey(unittest.TestCase):\n def setUp(self):\n '''\n 创建一个调查对象和一组答案,供使用的测试方法使用\n :return:\n '''\n question = \"What language did you first learn to speak?\"\n self.my_survey = AnoymousSurvey(question)\n self.responses = ['English', 'Spanish', 'Madarin']\n\n def test_store_single_response(self):\n \"\"\" 测试单个答案会被妥善地保存\"\"\"\n # question = \"What language did you first learn to speak?\"\n # my_survey = AnoymousSurvey(question)\n # my_survey.store_response('English')\n\n\n self.my_survey.store_response(self.responses[0])\n self.assertIn(self.responses[0], self.my_survey.responses)\n\n def test_store_three_response(self):\n \"\"\" 测试多个答案会被妥善地保存\"\"\"\n # question = \"What language did you first learn to speak?\"\n # my_survey = AnoymousSurvey(question)\n # responses = ['English', 'Spanish', 'Mandarin']\n\n for response in self.responses:\n self.my_survey.store_response(response)\n\n for response in self.responses:\n self.assertIn(response,self.my_survey.responses)\n\n","repo_name":"davidzhu1989/python-magic","sub_path":"Python-basic/测试代码/11-2测试类/test_survey.py","file_name":"test_survey.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8080505730","text":"#CS175L\r\n#Bryan Kahl\r\n#03/02/2023\r\n\r\ndef main():\r\n numberfile = open('numbers.txt', 'r')\r\n\r\n content = numberfile.readlines()\r\n\r\n\r\n\r\n total = 0\r\n\r\n\r\n\r\n n = 0\r\n for number in content:\r\n n += 1\r\n print(\"I read in\", n, \" number(s) Current number is: \",str(n), end= '' )\r\n print(' number(s) Current number is:' , float(number), end='')\r\n total += float(number)\r\n print(' Total is: ',total)\r\n\r\n average = total/n\r\n print(\"Average is: \",average)\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n","repo_name":"bryankahl/CS175L-01","sub_path":"AverageFromInput.py","file_name":"AverageFromInput.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17260469709","text":"import random\n\nimport torch\nimport torch.optim as optim\n\nfrom torch import distributed\nfrom 
torch.optim.optimizer import Optimizer,required\n\nclass SADDOPT(Optimizer):\n def __init__(self,params,model,lr=required,args=None):\n if lr is not required and lr<0.0:\n raise ValueError(\"Invalid learning rate\")\n\n if args.type=='ForceRandom':\n self.FR=True\n else:\n self.FR=False\n\n defaults=dict(lr=lr)\n super(SADDOPT,self).__init__(params,defaults)\n self.model=model\n self.B=args.B\n\n self.iter_cnt=0\n self.epoch=-1\n self.rank=args.rank\n self.node_num=args.world_size\n\n self.topo=args.topo\n self.weight=[0.0]*self.node_num\n self.WM=torch.Tensor([[0.0]*self.node_num]*self.node_num).cuda()\n self.aux_var=torch.Tensor([1.0]).cuda()\n self.aux_vec=torch.Tensor([0.0]*self.node_num).cuda()\n self.rece_indx=[0]*self.node_num\n\n if self.rank>self.node_num:\n raise ValueError(\"Rank more than world size\")\n \n self.communication_size=0\n print('Gradient Tracking')\n\n def reset_communication_size(self):\n self.communication_size=0\n\n def add_communication_size(self,each_send_size):\n tem = 0\n for i in range(self.node_num):\n if i==self.rank:\n continue\n if self.WM[i][self.rank]!=0.0:\n tem = tem + 1\n self.communication_size += each_send_size * tem\n\n def get_acc_communication_size(self):\n return self.communication_size\n\n def __setstate__(self,state):\n super(AVEGT,self).__setstate__(state)\n\n def ave_weight(self):\n self.weight=[0.0]*self.node_num\n\n if self.FR and self.iter_cnt % self.B != 0:\n for i in range(self.node_num):\n if self.topo[i][self.rank]==0:\n continue\n if random.random()', ' ').replace('', ' ').replace(\"\\n\", \" \").strip()\n cleaned_txt = ' '.join(cleaned_txt.split())\n cleaned_txt = ' '.join(nltk.word_tokenize(cleaned_txt))\n cleaned_txt = cleaned_txt.strip()\n idx = cleaned_txt.find(' \\'')\n while idx != -1:\n cleaned_txt = cleaned_txt[:idx] + cleaned_txt[idx + 1:]\n idx = cleaned_txt.find(' \\'')\n\n return cleaned_txt if cleaned_txt else '[ PAD ] .'\n elif isinstance(text, list):\n cleaned_txts = []\n for txt in text:\n if isinstance(txt, str):\n cleaned_txt = txt.replace('', ' ').replace('', ' ').replace(\"\\n\", \" \").strip()\n cleaned_txt = ' '.join(cleaned_txt.split())\n cleaned_txt = ' '.join(nltk.word_tokenize(cleaned_txt))\n cleaned_txt = cleaned_txt.strip()\n idx = cleaned_txt.find(' \\'')\n while idx != -1:\n cleaned_txt = cleaned_txt[:idx] + cleaned_txt[idx + 1:]\n idx = cleaned_txt.find(' \\'')\n cleaned_txts.append(cleaned_txt if cleaned_txt else '[ PAD ] .')\n else:\n print('Warning: find nonstr, convert to empty str')\n cleaned_txts.append('[ PAD ] .')\n return cleaned_txts\n else:\n print('Warning: find nonstr, convert to empty str')\n return '[ PAD ] .'\n\n\ndef convert_qasper_context(input_file, stop_spans, existing_papers = None, start_idx = -1):\n with open(input_file, 'r', encoding='utf-8') as f:\n input_data = json.load(f)\n paper_id_to_paras = {}\n paper_ids = []\n debug_cnt = 0\n paper_start_idx = 0\n for paper_id, article in tqdm(input_data.items(), total=len(input_data), desc='read data...'):\n paper_start_idx += 1\n if start_idx > paper_start_idx - 1:\n continue\n if existing_papers is not None and paper_id in existing_papers:\n continue\n paper_ids.append(paper_id)\n sections = []\n for section_info in article['full_text']:\n section_name = text_clean(section_info['section_name']) if section_info['section_name'] is not None else '[ PAD ] .'\n if ':::' not in section_name:\n stop_spans.add(section_name)\n paras = []\n for p in text_clean(section_info['paragraphs']):\n paras.append(p)\n sections.append({\n 
'section_name': section_name,\n 'paragraphs': paras\n })\n paper_id_to_paras[paper_id] = sections\n debug_cnt += 1\n if debug_cnt == 100:\n break\n return paper_id_to_paras, paper_ids\n","repo_name":"JerrryNie/Unsupervised-Long-Document-QA","sub_path":"code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38877992969","text":"import nmap\r\nimport cv2\r\nimport face_recognition\r\nimport datetime\r\nimport os\r\nimport time as tm\r\nimport threading\r\nimport tkinter as tk\r\nimport socket\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nips = pd.read_csv(\"CAMS.csv\", sep=',')['IPv4 Address']\r\n\r\nprint(ips)\r\n\r\nKEY = \"gugaLima8*\"\r\nSTART_DATE = datetime.datetime(2022, 1, 1)\r\nEND_DATE = datetime.datetime(2023, 3, 1)\r\nLAST_FACES = []\r\n\r\nkey = input(\"Insira sua chave de acesso: \")\r\nif key != KEY:\r\n print(\"Chave inválida. O aplicativo será encerrado.\")\r\n exit()\r\n\r\ncurrent_date = datetime.datetime.now()\r\nif current_date < START_DATE or current_date > END_DATE:\r\n print(\"A chave não é válida para a data atual. O aplicativo será encerrado.\")\r\n exit()\r\n\r\n\r\ndef scan_network():\r\n nm = nmap.PortScanner()\r\n nm.scan(hosts='192.168.18.118', arguments='-p 554')\r\n hosts = []\r\n for host in nm.all_hosts():\r\n if nm[host].has_tcp(80):\r\n hosts.append(host)\r\n print(\"Resultado da varredura da rede: \", hosts)\r\n return hosts\r\n\r\n\r\ndef capture_faces(ip, folder):\r\n cap = cv2.VideoCapture(f\"rtsp://admin:{KEY}@{ip}:554/Streaming/Channels/101/\")\r\n while True:\r\n ret, frame = cap.read()\r\n\r\n try:\r\n small_frame = cv2.resize(frame, None, fx=0.25, fy=0.25,)\r\n except:\r\n pass\r\n\r\n if not ret:\r\n break\r\n\r\n cv2.imshow(\"Camera \" + str(ip), small_frame)\r\n\r\n face_locations = face_recognition.face_locations(small_frame)\r\n face_encondings = face_recognition.face_encodings(small_frame, face_locations)\r\n\r\n print(len(LAST_FACES))\r\n for face_enconding in face_encondings:\r\n match = face_recognition.compare_faces(LAST_FACES, face_enconding)\r\n\r\n if True not in match:\r\n LAST_FACES.append(face_enconding)\r\n now = datetime.datetime.now()\r\n date_str = now.strftime(\"%Y-%m-%d\")\r\n time_str = now.strftime(\"%H-%M-%S\")\r\n if not os.path.exists(folder):\r\n os.makedirs(folder)\r\n filename = folder + \"/\" + ip + \"_\" + date_str + \"_\" + time_str + \".jpg\"\r\n cv2.imwrite(filename, frame)\r\n\r\n # Label the results\r\n for top, right, bottom, left in face_locations:\r\n # Draw a box around the face\r\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\r\n break\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n\r\ndef start_detection():\r\n # verificando se o caminho é válido\r\n while True:\r\n folder = input(\"Insira o caminho da pasta onde as imagens devem ser salvas: \")\r\n if os.path.exists(folder):\r\n break\r\n print(\"O caminho informado não é válido.\")\r\n cameras_online = len(ips)\r\n print(cameras_online)\r\n # camera_summary_label.config(text=\"Câmeras online: \" + str(cameras_online))\r\n # t = threading.Thread(target=display_cameras, args=(ips, 10, 8))\r\n # t.start()\r\n for host in ips:\r\n capture_faces(host, folder)\r\n\r\n\r\n# root = tk.Tk()\r\n# image = 
tk.PhotoImage(file=\"image.png\")\r\n# image_label = tk.Label(root, image=image)\r\n# image_label.pack()\r\n# start_button = tk.Button(root, text=\"Iniciar Detecção\", command=start_detection)\r\n# camera_summary_label = tk.Label(root, text=\"Câmeras online: 0\")\r\n# camera_summary_label.pack()\r\n# start_button.pack()\r\n# root.mainloop()\r\n\r\nstart_detection()\r\n","repo_name":"Guilherme-Joviniano/capturing-face-rtsp-cams-v1","sub_path":"mainWKN.py","file_name":"mainWKN.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39591886652","text":"#!/usr/bin/env python3\n\nimport fileinput\nfrom collections import defaultdict\n\n\ndef read_input() -> list[str]:\n return [str(line.strip()) for line in fileinput.input(\"input.txt\")]\n\n\ndef main() -> None:\n in_lst = read_input()\n lst = [line.split(\" | \")[1] for line in in_lst]\n\n easy_map = defaultdict(lambda: 0)\n easy_map.update({\n 2: 1,\n 3: 1,\n 4: 1,\n 7: 1,\n })\n\n solution = sum([sum(map(lambda x: easy_map[len(x)], [\n w for w in line.split(\" \")])) for line in lst])\n\n print(solution)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"StephanBischoff-Digle/adventofcode","sub_path":"2021/08/08.1/proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2238283138","text":"#!/usr/bin/env python3\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install as _install\nfrom setuptools.command.sdist import sdist as _sdist\n\n# To use a consistent encoding\nfrom codecs import open\nimport os\nimport io\nimport re\nimport sys\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n# Stolen from pip\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as fp:\n return fp.read()\n\n\n# Stolen from pip\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ndef _run_build_cache(dir):\n from subprocess import check_call\n\n # This is run inside the install staging directory (that had no .pyc files)\n # We don't want to generate any.\n # https://github.com/eliben/pycparser/pull/135\n check_call(\n [sys.executable, \"-B\", \"_build_cache.py\"],\n cwd=os.path.join(dir, \"osaca\", \"data\"),\n )\n\n\nclass install(_install):\n def run(self):\n _install.run(self)\n self.execute(\n _run_build_cache,\n (self.install_lib,),\n msg=\"Build ISA and architecture cache\",\n )\n\n\nclass sdist(_sdist):\n def make_release_tree(self, basedir, files):\n _sdist.make_release_tree(self, basedir, files)\n self.execute(_run_build_cache, (basedir,), msg=\"Build ISA and architecture cache\")\n\n\n# Get the long description from the README file\nwith open(os.path.join(here, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"osaca\",\n # Version should comply with PEP440. 
For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/distributing.html\n version=find_version(\"osaca\", \"__init__.py\"),\n description=\"Open Source Architecture Code Analyzer\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n # The project's main homepage\n url=\"https://github.com/RRZE-HPC/OSACA\",\n # Author details\n author=\"Jan Laukemann\",\n author_email=\"jan.laukemann@fau.de\",\n # Choose your license\n license=\"AGPLv3\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n \"Development Status :: 4 - Beta\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development\",\n \"Topic :: Utilities\",\n # Pick your license as you wish (should match \"license\" above)\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate wheter you support Python2, Python 3 or both.\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n # What doesd your project relate to?\n keywords=\"hpc performance benchmark analysis architecture\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests\"]),\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=[\"networkx\", \"pyparsing>=2.3.1\", \"ruamel.yaml>=0.15.71\"],\n python_requires=\">=3.6\",\n # List additional groups of dependencies here (e.g. development\n # dependencies). You can install these using the following syntax,\n # for example:\n # $ pip install -e .[dev,test]\n # extras_require={\n # 'dev': ['check-manifest'],\n # 'test': ['coverage'],\n # },\n # If there are data files included in your packages that need to be\n # installed, specify them here. If using Python 2.6 or less, then these\n # have to be included in MANIFEST.in as well.\n include_package_data=True,\n # Although 'package_data' is the preferred approach, in some case you may\n # need to place data files outside of your packages. See:\n # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa\n # In this case, 'data_file' will be installed into '/my_data'\n # data_files=[('my_data', ['data/data_file'])],\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. 
Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n entry_points={\"console_scripts\": [\"osaca=osaca.osaca:main\"]},\n # Overwriting install and sdist to enforce cache distribution with package\n cmdclass={\"install\": install, \"sdist\": sdist},\n)\n","repo_name":"RRZE-HPC/OSACA","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"53"} +{"seq_id":"40098185039","text":"\"\"\"\n@author: Maziar Raissi\n\"\"\"\nfrom collections import defaultdict, OrderedDict\nfrom multiprocessing import cpu_count\nfrom typing import Dict, Union, List\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom lib.DifferentialEquations.DifferentialEquation import DifferentialEquation, APPLY_OPERATOR_TO_U, Condition\nfrom lib.DifferentialEquations.Operators import Id\nfrom lib.IntelligentModels.BaseModelFlow import BaseModelFlow\n\nWEIGHT_PROPORTION_EQUAL = \"equal\"\n\n\nclass PinnFlow:\n def __init__(self, model: BaseModelFlow, differential_equation: DifferentialEquation,\n max_samplings: int = 1, n_iters_per_sampling: int = 5000, loss_metric: str = 'l2',\n actualize_weights=False,\n weight_proportion: Union[\n str, Dict[str, Union[int, float]], List[Dict[str, Union[int, float]]]] = WEIGHT_PROPORTION_EQUAL,\n initialize=True):\n\n self.differential_equation = differential_equation\n\n # -------- weight_proportion ---------\n if weight_proportion == WEIGHT_PROPORTION_EQUAL:\n self.weight_proportion = [{k: 1.0 for k in self.differential_equation.condition_names}]\n elif isinstance(weight_proportion, Dict):\n self.weight_proportion = [weight_proportion] * max_samplings\n else:\n self.weight_proportion = weight_proportion\n\n for i, wp in enumerate(self.weight_proportion):\n self.check_dict_eq_diff_compatibility(wp)\n proportion_sum = sum(wp.values())\n self.weight_proportion[i] = {k: v / proportion_sum for k, v in wp.items()}\n\n # -------- differential equation ---------\n u_condition = Condition(\n operator=Id(),\n function=lambda *domain: 0,\n n_train=1,\n sampling_strategy=[(var_name, np.random.uniform) for var_name, _ in\n self.differential_equation.domain_limits],\n apply_operator_to=APPLY_OPERATOR_TO_U\n )\n self.differential_equation.conditions.update({\"u\": u_condition})\n for wp in self.weight_proportion:\n wp[\"u\"] = 0\n\n self.weight_proportion_per_iter = self.weight_proportion\n\n # -------- optimization ---------\n self.max_samplings = len(self.weight_proportion)\n self.n_iters_per_sampling = n_iters_per_sampling\n self.actualize_weights = actualize_weights\n\n self.train_loss = []\n self.valid_loss = []\n if loss_metric.lower() == 'l2':\n self.loss_metric = tf.square\n elif loss_metric.lower() == 'l1':\n self.loss_metric = tf.abs\n elif loss_metric.lower() == 'max':\n self.loss_metric = tf.reduce_max\n elif loss_metric.lower() == \"id\":\n self.loss_metric = lambda x: x\n else:\n raise Exception(\"loss_metric should be one of 'l2' or 'l1'\")\n\n # -------- Initialize NNs ---------\n self.model = model\n if initialize:\n self.model.initialize(differential_equation.input_dim, differential_equation.output_dim)\n\n # tf placeholders and graph\n config = tf.ConfigProto(\n allow_soft_placement=False,\n log_device_placement=False,\n intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1,\n device_count={'CPU': 1}\n )\n self.sess = tf.Session(config=config)\n # self.sess = 
tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n # log_device_placement=True))\n\n self.tf_dict = defaultdict(OrderedDict)\n self.pred = {k: None for k in self.differential_equation.condition_names}\n self.optimizer = None\n\n # ------------- properties and util functions ------------\n def free_tf_session(self):\n self.sess.close()\n\n def check_dict_eq_diff_compatibility(self, dictionary):\n assert set(list(dictionary.keys())) == set(self.differential_equation.condition_names), \\\n \"dictionary keys of {} should coincide with those on the conditions in the differential equation {}\".format(\n dictionary.keys(), self.differential_equation.condition_names)\n\n @staticmethod\n def correct_np_shape(single_var: np.ndarray):\n return np.reshape(single_var, (-1, 1))\n\n def conditions_iterator(self):\n for condition_name, condition in self.differential_equation.conditions.items():\n if condition.n_train > 0:\n yield condition, condition_name\n\n def create_tf_dict(self):\n for condition, condition_name in self.conditions_iterator():\n for var_name in condition.generate_var_names(condition_name):\n self.tf_dict[condition_name][var_name] = tf.placeholder(self.model.float_precision, shape=[None, 1],\n name=var_name)\n\n def create_np_dict(self, train=True):\n np_dict = defaultdict(dict)\n for condition, condition_name in self.conditions_iterator():\n for var_name, values in zip(condition.generate_var_names(condition_name), condition.generate_values(train)):\n np_dict[condition_name][var_name] = self.correct_np_shape(values)\n return np_dict\n\n def define_single_losses_functions(self):\n losses = {}\n for condition_name, condition in self.differential_equation.conditions.items():\n # if condition.n_train > 0:\n self.pred[condition_name], tf_true_values = \\\n self.differential_equation.get_condition_associated_tf_model(\n condition_name,\n self.model,\n self.tf_dict\n )\n\n losses.update({condition_name: tf.reduce_mean(\n self.loss_metric(\n self.pred[condition_name] - tf_true_values)\n )})\n return losses\n\n # ------------- fit functions ------------\n def fit(self):\n best_parameters = self.model.parameters\n\n # -------- conditions + tf variables --------\n self.create_tf_dict()\n np_dict_valid = self.create_np_dict(train=False)\n for i, wp_per_iter in enumerate(self.weight_proportion):\n # -------- conditions + values --------\n np_dict_train = self.create_np_dict()\n loss = sum(\n [wp_per_iter[condition_name] * single_loss for condition_name, single_loss in\n self.define_single_losses_functions().items()])\n\n # -------- define optimizer -------- #\n self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(\n loss,\n method='L-BFGS-B',\n options={\n 'maxiter': self.n_iters_per_sampling,\n 'maxfun': self.n_iters_per_sampling,\n 'maxcor': 50,\n 'maxls': 50,\n 'ftol': 1.0 * np.finfo(float).eps\n }\n )\n\n # -------- start optimization -------- #\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n feed_dict = {self.tf_dict[condition][var_name]: np_dict_train[condition][var_name]\n for condition in np_dict_train.keys() for var_name in np_dict_train[condition]}\n\n def loss_callback(loss_val):\n self.train_loss.append(loss_val)\n print('\\rLoss: {}'.format(loss_val), end=\"\")\n\n self.optimizer.minimize(\n self.sess,\n feed_dict=feed_dict,\n fetches=[loss],\n loss_callback=loss_callback\n )\n\n # -------- validation next iteration --------\n if self.actualize_weights:\n # only change weights if there are many samplings to be done and only one weight set is given.\n 
valid_condition_error = {}\n for condition_name, single_loss in self.define_single_losses_functions().items():\n feed_dict = {self.tf_dict[condition_name][var_name]: np_dict_valid[condition_name][var_name]\n for var_name in np_dict_valid[condition_name]}\n valid_condition_error.update({condition_name: self.sess.run(\n feed_dict=feed_dict,\n fetches=[single_loss],\n )})\n valid_condition_error = {cond: (np.array(w) if self.weight_proportion[cond] > 0 else 0) for cond, w in\n valid_condition_error.items()}\n wp_next = {cond: w / np.sqrt(sum(list(valid_condition_error.values()))) for cond, w in\n valid_condition_error.items()}\n self.weight_proportion_per_iter.append(wp_next)\n if i < self.max_samplings:\n self.weight_proportion[i + 1] = wp_next\n\n # -------- validation --------\n feed_dict = {self.tf_dict[condition][var_name]: np_dict_valid[condition][var_name]\n for condition in np_dict_valid.keys() for var_name in np_dict_valid[condition]}\n self.valid_loss.append(\n self.sess.run(\n feed_dict=feed_dict,\n fetches=[loss],\n )\n )\n\n if np.argmin(self.valid_loss) == len(self.valid_loss) - 1:\n best_parameters = self.model.parameters\n else:\n self.model.parameters = best_parameters\n return self\n\n # --------------- prediction functions ---------------\n def predict(self, domain: np.ndarray, which=\"u\"):\n # assert which in [\"u\"] + self.differential_equation.condition_names, \"if should be one of 'u' or conditions\"\n assert which in self.differential_equation.condition_names, \"if should be one of conditions\"\n\n # condition_name = which\n # if which in [\"u\"]:\n # for cond_name, condition in self.differential_equation.conditions.items():\n # if condition.apply_operator_to == APPLY_OPERATOR_TO_U and self.weight_proportion[cond_name] > 0:\n # condition_name = cond_name\n # break\n condition_name = which\n\n tf_domain, _ = self.differential_equation.get_tf_domain_and_values(condition_name, self.tf_dict)\n return self.sess.run(\n self.pred[condition_name],\n {tf_domain_var: self.correct_np_shape(np_domain_var)\n for tf_domain_var, np_domain_var in zip(tf_domain, domain.T)}\n )\n","repo_name":"agussomacal/ConDiPINN","sub_path":"src/lib/PINN_models/PinnFlow.py","file_name":"PinnFlow.py","file_ext":"py","file_size_in_byte":10643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70065541928","text":"from tkinter import *\nimport os\nimport tkinter.messagebox\n\ndef folder_init():\n input = popupFolder(root)\n root.wait_window(input.top)\n os.system(\"mkdir \"+input.value+\" && cd \"+input.value)\n\ndef rom_init():\n input = popupRom(root)\n root.wait_window(input.top)\n\ndef cm_init():\n input = popupCm(root)\n root.wait_window(input.top)\n\ndef fix_dependencies():\n input = popupPack(root)\n root.wait_window(input.top)\ndef get_repo():\n os.system(\"mkdir ~/bin\")\n os.system(\"curl http://commondatastorage.googleapis.com/git-repo-downloads/repo > ~/bin/repo && chmod a+x ~/bin/repo\")\n\ndef get_java():\n os.system(\"sudo apt-add-repository ppa:webupd8team/java && sudo apt-get update && sudo apt-get install oracle-java7-installer\")\n\ndef get_packs():\n os.system(\"sudo apt-get install git-core gnupg flex bison gperf build-essential \\\n zip curl zlib1g-dev gcc-multilib g++-multilib libc6-dev-i386 \\\n lib32ncurses5-dev x11proto-core-dev libx11-dev lib32z-dev ccache \\\n libgl1-mesa-dev libxml2-utils xsltproc unzip lzop\")\n\ndef get_all():\n os.system(\"curl http://commondatastorage.googleapis.com/git-repo-downloads/repo > 
~/bin/repo && chmod a+x ~/bin/repo\")\n os.system(\"sudo apt-get install git-core gnupg flex bison gperf build-essential \\\n zip curl zlib1g-dev gcc-multilib g++-multilib libc6-dev-i386 \\\n lib32ncurses5-dev x11proto-core-dev libx11-dev lib32z-dev ccache \\\n libgl1-mesa-dev libxml2-utils xsltproc unzip lzop\")\n os.system(\"sudo apt-add-repository ppa:webupd8team/java && sudo apt-get update && sudo apt-get install oracle-java7-installer\")\ndef sync():\n os.system(\"repo sync\")\n\ndef build():\n prompt = tkinter.messagebox.askquestion(\"Prompt\", \"Please ensure that you have selected ROM \\n and synced sources before proceeding\")\n if prompt == \"yes\":\n input = popupBuild(root)\n root.wait_window(input.top)\n build_actual()\n\ndef build_actual():\n os.system(\"source build/envsetup.sh && brunch \"+device)\n\n\nclass popupBuild(object):\n def __init__(self,master):\n top=self.top=Toplevel(master)\n self.l=Label(top,text=\"Enter codename of device to begin build\")\n self.l.pack()\n device_name = \"\"\n self.b1=Entry(top,textvariable=device_name,width=30)\n self.b1.pack()\n self.b2=Button(top,text=\"OK\",command=build_actual)\n self.b2.pack()\n global device\n device = device_name\n\nclass popupPack(object):\n def __init__(self,master):\n top=self.top=Toplevel(master)\n self.l=Label(top,text=\"Select what all do you need to Install... \\n If in doubt, click All\")\n self.l.pack()\n self.b1=Button(top,text=\"repo\",command=get_repo)\n self.b1.pack()\n self.b2=Button(top,text=\"Java\",command=get_java)\n self.b2.pack()\n self.b3=Button(top,text=\"Other dependencies...\",command=get_packs)\n self.b3.pack()\n self.b4=Button(top,text=\"Get All\",command=get_all)\n self.b4.pack()\n\n def cleanup(self):\n self.value=self.e.get()\n self.top.destroy()\n\nclass popupCm(object):\n def __init__(self,master):\n top=self.top=Toplevel(master)\n self.l=Label(top,text=\"Select the branch you want to sync...\")\n self.l.pack()\n self.b1=Button(top,text=\"CM13.0 - MarshMallow\",command=os.system(\"repo init -u git://github.com/CyanogenMod/android.git -b cm-13.0\"))\n self.b1.pack()\n self.b2=Button(top,text=\"CM12.1 - Lollipop 5.1\",command=os.system(\"repo init -u git://github.com/CyanogenMod/android.git -b cm-12.1\"))\n self.b2.pack()\n self.b3=Button(top,text=\"CM12.0 - Lollipop 5.0\",command=os.system(\"repo init -u git://github.com/CyanogenMod/android.git -b cm-12.0\"))\n self.b3.pack()\n self.b4=Button(top,text=\"CM11 - KitKat\",command=os.system(\"repo init -u git://github.com/CyanogenMod/android.git -b cm-11.0\"))\n self.b4.pack()\n\n def cleanup(self):\n self.value=self.e.get()\n self.top.destroy()\n\n\nclass popupRom(object):\n def __init__(self,master):\n top=self.top=Toplevel(master)\n self.l=Label(top,text=\"Select the ROM you want to sync...\")\n self.l.pack()\n self.b1=Button(top,text=\"CyanogenMod\",command=cm_init)\n self.b1.pack()\n\n def cleanup(self):\n self.value=self.e.get()\n self.top.destroy()\n\n\nclass popupFolder(object):\n def __init__(self,master):\n top=self.top=Toplevel(master)\n self.l=Label(top,text=\"Enter the folder name where you want to see your sources...\")\n self.l.pack()\n self.e=Entry(top)\n self.e.pack()\n self.b=Button(top,text='Ok',command=self.cleanup)\n self.b.pack()\n def cleanup(self):\n self.value=self.e.get()\n self.top.destroy()\n\nclass mainWindow(object):\n def __init__(self,master):\n self.master=master\n self.l=Label(master,text=\"Android Auto Compiler v\"+version+\"\\n\"+\"By @MSF-Jarvis\"+\"\\n\"+\"Original Concept by 
@AndroGeek974\",bg=\"lightblue\")\n self.l.pack(fill=\"x\")\n self.b=Button(master,text=\"Install required packages and applications\",command=fix_dependencies)\n self.b.pack()\n self.b1=Button(master,text=\"Enter Folder name for sources\",command=folder_init)\n self.b1.pack()\n self.b2=Button(master,text=\"Select ROM to sync sources\",command=rom_init)\n self.b2.pack()\n self.b3=Button(master,text=\"Sync the sources\",command=sync)\n self.b3.pack()\n self.b4=Button(master,text=\"Build ROM\",command=build)\n self.b4.pack()\n\n\nif __name__ == \"__main__\":\n version=\"1.0_25112015\"\n root=Tk()\n root.geometry(\"400x440\")\n root.title(\"AAC-Redux v\"+version)\n m=mainWindow(root)\n root.mainloop()\n","repo_name":"DamienBoyer974/AAC-Redux","sub_path":"AAC-Redux.py","file_name":"AAC-Redux.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38804962224","text":"import oauth2\nimport urllib\nimport urllib2\nimport json\n\n\nclass Twitter:\n def __init__(self, ckey, cscr, akey, ascr, url):\n self.url = url\n self.consumer = oauth2.Consumer(key=ckey, secret=cscr)\n self.token = oauth2.Token(key=akey, secret=ascr)\n self.client = oauth2.Client(self.consumer, self.token)\n\n def req(self, text=None):\n post_body = urllib.urlencode({\"status\": text})\n try:\n resp, content = self.client.request(self.url, method=\"POST\",\n body=post_body)\n except:\n pass\n else:\n error_json = json.loads(content).get(\"errors\")\n if not error_json:\n return {'status': 'succeed', 'name': 'Twitter'}\n\n return {\n 'status': 'failed',\n 'name': \"Twitter\",\n }\n\n\nclass Facebook:\n def __init__(self, access_token, url):\n self.url = url\n self.post_body_dic = {\"access_token\": access_token}\n\n def req(self, text=None):\n self.post_body_dic[\"message\"] = text\n post_body = urllib.urlencode(self.post_body_dic)\n req = urllib2.Request(self.url, post_body)\n try:\n resp = urllib2.urlopen(req)\n except:\n pass\n else:\n error_json = json.loads(resp.read()).get(\"error\")\n if not error_json:\n return {'status': 'succeed', 'name': 'Facebook'}\n\n return {\n 'status': 'failed',\n 'name': 'Facebook',\n }\n","repo_name":"cubarco/post-github-commits","sub_path":"app/socialplts.py","file_name":"socialplts.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8828325774","text":"import mimetypes\nfrom uuid import uuid4\n\nimport boto3\n\n# This assumes you have localstack running with the default configuration\nLOCAL_URL = \"http://localhost:4566/\"\n\n\nclass Context:\n def __init__(self, use_local=False, prefix=\"\"):\n \"\"\"prefix if set must end with a trailing /\n use_local if true assumes localstack is running on localhost.\n \"\"\"\n self.use_local = use_local\n\n if not use_local:\n self.client = boto3.client(\"s3\")\n else:\n self.client = boto3.client(\"s3\", endpoint_url=LOCAL_URL)\n\n if prefix is not None and prefix != \"\":\n if prefix[-1] != \"/\":\n raise AttributeError(\"invalid prefix: '{}' prefix must end with a trailing /\".format(prefix))\n\n self.prefix = prefix\n\n\nclass DocumentData:\n def __init__(self, content_type, buffer, document_id=None):\n self.document_id = document_id if document_id else uuid4().hex\n self.content_type = content_type\n self.contents = buffer\n\n def extension(self) -> str:\n if self.content_type == \"application/pdf\":\n return \"pdf\"\n\n if self.content_type == 
\"application/msword\":\n return \"doc\"\n\n if self.content_type == \"application/text\":\n return \"txt\"\n\n extension = mimetypes.guess_extension(self.content_type)\n if extension:\n extension = extension.strip('.')\n return extension\n\n return \"unknown\"\n\n\nclass DocumentApi:\n \"\"\"\n The DocumentData API, ensures that all content is encrypted.\n It assumes that one key will be used for the duration of a session and is initialized with one.\n \"\"\"\n\n def __init__(self, destination, kms_key):\n self.destination = destination\n self.kms_key = kms_key\n\n if self.destination is None or self.destination == \"\":\n raise AttributeError(\"invalid destination: '{}' must be an S3 bucket\".format(destination))\n\n if self.kms_key is None or self.kms_key == \"\":\n raise AttributeError(\"invalid key: '{}' must be a valid KMS key id\".format(kms_key))\n\n @staticmethod\n def _key(prefix: str, document: DocumentData) -> str:\n \"\"\"\n Store things on storage using hashed directories to keep operator sanity when hunting for a particular doc\n \"\"\"\n dir1 = document.document_id[0:2]\n dir2 = document.document_id[2:4]\n return \"{}{}/{}/{}.{}\".format(prefix, dir1, dir2, document.document_id, document.extension())\n\n def upload(self, context: Context, document: DocumentData) -> str:\n \"\"\"Since the destination is set when the API is instanciated we only return\n the path from that destination with this key\"\"\"\n key = self._key(context.prefix, document)\n context.client.upload_fileobj(document.contents,\n self.destination,\n key,\n ExtraArgs={'ServerSideEncryption': 'aws:kms',\n 'SSEKMSKeyId': self.kms_key,\n \"ContentType\": document.content_type})\n return key\n\n def get_head_info(self, context: Context, key: str):\n return context.client.head_object(Bucket=self.destination, Key=key)\n\n def get_fileobj(self, context: Context, key: str, buffer):\n \"\"\"Writes the contents of the file at key to buffer\"\"\"\n context.client.download_fileobj(self.destination, key, buffer)\n\n def get_obj(self, context: Context, key: str):\n \"\"\"Writes the contents of the file at key to buffer\"\"\"\n return context.client.get_object(Bucket=self.destination, Key=key)\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/libs/sidecar_blocks/document_store/document_api.py","file_name":"document_api.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22580646797","text":"import tkinter as tk\n\n\ndef clicked():\n label.config(text=input.get())\n\n\n# creating a window\nwindow = tk.Tk()\n# adding a title\nwindow.title('Windows and Labels')\n# changing minimum size\nwindow.minsize(width=500, height=300)\n# changing padding around window\nwindow.config(padx=20, pady=20)\n\n# creating a label\nlabel = tk.Label(text='A new label.', font=('Arial', 24, 'bold'))\n# label.pack() # automatically centers label on screen\n# label.place(x=20, y=0) # for precise positoning\nlabel.grid(column=0, row=0)\n# adding padding\nlabel.config(padx=10, pady=20)\n\n# changing text in a label(component)\n# label['text'] = 'Changed text'\n# OR\n# label.config(text='Changed text config')\n\n# creating a button\nbutton = tk.Button(text='Click Me', background='black', foreground='white', command=clicked)\n# button.pack()\nbutton.grid(column=1, row=1)\n\n# creating a button\nbutton1 = tk.Button(text='Click Me', background='black', foreground='white', command=clicked)\n# 
button.pack()\nbutton1.grid(column=2, row=0)\n\n\n# entry component(Input)\ninput = tk.Entry(width=20)\n# input.pack()\ninput.grid(column=3, row=4)\n# getting the text entered in the input field\nvalue = input.get()\nprint(value)\n\n\n\n\nwindow.mainloop()","repo_name":"joboy-dev/Python-Projects","sub_path":"Intermediate/tkinter/creating_windows_and_labels.py","file_name":"creating_windows_and_labels.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13769255962","text":"# 1. Представлен список чисел. Необходимо вывести элементы исходного списка,\n# значения которых больше предыдущего элемента. Use comprehension.\nfrom random import sample\n\n\ndef get_list_forward_more(n):\n first_list = sample(range(n*2), n)\n print(first_list)\n res_list = [first_list[i] for i in range(1,len(first_list)) if first_list[i] > first_list[i-1]]\n return res_list\n\nabc = int(input('введите длину списка '))\nprint(get_list_forward_more(abc))","repo_name":"GadisovTmr/pythonStudy","sub_path":"All/practic/dz6/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26532719443","text":"\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom conv_package import convertors\n\ndef popup():\n response = messagebox.showerror(\"Wrong input\",\"Your input is not correct, please insert a valid value\")\n\n \ndef button_click(number):\n current = entry.get()\n entry.delete(0,END)\n entry.insert(0,str(current) + str(number))\n\n \n \ndef button_clear():\n entry.delete(0,END)\n entry.insert(0,'')\n \ndef cancel():\n to_cancel = entry.get()\n entry.delete(0,END)\n splitted = []\n for letter in to_cancel:\n splitted.append(letter)\n splitted.pop()\n new = \"\".join(splitted)\n entry.insert(0,new)\n\ndef ask_confirmation():\n return messagebox.askyesno(\"Confirm your choice\",\"Please confirm your choice\")\n\ndef clicked(value):\n global conv_type\n conv_type = value\n response = ask_confirmation()\n if response == True:\n inserting()\n \n\ndef returning():\n value = entry.get()\n top.destroy()\n if conv_type == 'binary':\n convertors.binary_to_decimal(value)\n else:\n convertors.decimal_to_binary(value)\n\ndef inserting():\n global entry\n global top\n top = Toplevel()\n entry = Entry(top, width = 35,borderwidth = 5)\n entry.insert(0,\"Insert here the IP address\")\n \n button_r = Button(top, text = \"Send\", padx = 40, pady = 20, command = returning)\n button1 = Button(top, text = 1, padx = 40, pady=20,command = lambda: button_click(1))\n button2 = Button(top, text = 2, padx = 40, pady=20,command = lambda: button_click(2))\n button3 = Button(top, text = 3, padx = 40, pady=20,command = lambda: button_click(3))\n button4 = Button(top, text = 4, padx = 40, pady=20,command = lambda: button_click(4))\n button5 = Button(top, text = 5, padx = 40, pady=20,command = lambda: button_click(5))\n button6 = Button(top, text = 6, padx = 40, pady=20,command = lambda: button_click(6))\n button7 = Button(top, text = 7, padx = 40, pady=20,command = lambda: button_click(7))\n button8 = Button(top, text = 8, padx = 40, pady=20,command = lambda: button_click(8))\n button9 = Button(top, text = 9, padx = 40, pady=20,command = lambda: button_click(9))\n button0 = Button(top, text = 0, padx = 40, pady=20,command = lambda: button_click(0))\n buttondot = Button(top, text = '.', padx = 40, pady=20,command = lambda: 
button_click('.'))\n buttonCancel = Button(top, text = \"Cancel\", padx = 40, pady = 20, command = cancel)\n buttonClear = Button(top, text = \"Clear All\", padx = 70, pady = 20, command = button_clear)\n #Put the buttons on the screen\n entry.grid(row = 0, column = 0, columnspan = 3)\n buttonCancel.grid(row = 4, column = 3)\n button_r.grid(row = 0, column = 3)\n button1.grid(row = 3,column = 0)\n button2.grid(row = 3, column =1)\n button3.grid(row = 3, column =2)\n button4.grid(row = 2 , column =0)\n button5.grid(row = 2, column =1)\n button6.grid(row = 2, column =2)\n button7.grid(row = 1 , column =0)\n button8.grid(row = 1, column =1)\n button9.grid(row = 1 , column =2)\n button0.grid(row = 4, column =0)\n buttonClear.grid(row= 4, column = 1, columnspan = 2)\n buttondot.grid(row = 2, column = 3) ","repo_name":"DaniloPierpaoli/DecimalBinaryConveterAPP","sub_path":"conv_package/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16207146370","text":"# Created by FoxSinOfGreed1729 \n# Many thanks to zaid sabih and udemy.com\nimport scapy.all as scapy\nimport subprocess\n\n\ndef get_mac(ip):\n arp_req = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_broadcast = broadcast / arp_req\n answered, unanswered = scapy.srp(arp_broadcast, timeout=2, verbose=False)\n print(answered[0][1].hwsrc)\n return answered[0][1].hwsrc\n\n\ndef sniffer(interface):\n scapy.sniff(iface=interface, store=False, prn=analysis)\n # iface stands for the interface we'd like to listen\n # store=False tells program not to store packet info in memory so that it doesn't put too much load\n # prn allows us to call a callback function\n # i.e. it will call a function each time it intercepts a packet\n # if we want to put a filter, theres another field - filters=''\n\n\ndef analysis(packet):\n if packet.haslayer(scapy.ARP) and packet[scapy.ARP].op == 2:\n # op = 2 means it is arp response\n try:\n packet_mac = get_mac(packet[scapy.ARP].psrc)\n response_mac = get_mac(packet[scapy.ARP].hwsrc)\n # we're extracting mac address from 2 different methods.\n # first from the arp packet\n # next from get mac i.e. 
arp broadcast\n if packet_mac != response_mac:\n print(\"[+] ARP Spoof Detected\")\n except IndexError:\n pass\n\n\ndef intro():\n while True:\n print(\"\\n1> Run Ifconfig to find out interfaces\")\n print(\"2> Enter Interface and start detection\")\n choice = int(input(\"3> Exit\\n\"))\n if choice == 1:\n subprocess.call('ifconfig')\n if choice == 2:\n interface = input('Enter Interface\\n')\n sniffer(interface)\n if choice == 3:\n exit(1)\n\n\nintro()\n","repo_name":"DigvijayBhosale1729/ArpSpoofDetector","sub_path":"arpspoofdetec.py","file_name":"arpspoofdetec.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28897353177","text":"# https://leetcode.com/problems/as-far-from-land-as-possible/\n# https://leetcode.com/problems/as-far-from-land-as-possible/solutions/422691/7ms-dp-solution-with-example-beats-100/\n\nimport collections\n\n\n# do BFS from all lands at a time, TC:O(N^2), SC:O(N^2)\ndef maxDistance(grid: List[List[int]]) -> int:\n def bfs(queue):\n # visited = set()\n dist = -1\n while queue:\n r, c, dist = queue.popleft()\n for ro, co in ((1, 0), (0, 1), (-1, 0), (0, -1)):\n rn, cn = r + ro, c + co\n if not 0 <= rn < n or not 0 <= cn < m or grid[rn][cn] == 1:\n continue\n grid[rn][cn] = 1\n queue.append((rn, cn, dist + 1))\n return dist\n\n n = len(grid)\n queue = collections.deque([])\n for i in range(n):\n for j in range(n):\n if grid[i][j] == 1:\n queue.append((i, j, 0))\n return bfs(queue) if len(queue) != n * n else -1\n\n# dp, TC:O(N^2), SC:O(N^2)\ndef maxDistance2(grid: List[List[int]]) -> int:\n\n n = len(grid)\n dp = [[0] * n for _ in range(n)]\n land_cell = 0\n for i in range(n):\n for j in range(n):\n if grid[i][j] == 1:\n dp[i][j] = 1\n land_cell += 1\n continue\n dp[i][j] = min(dp[i][j - 1] if j > 0 else float('inf'),\n dp[i - 1][j] if i > 0 else float('inf')) + 1 # left and top\n\n for i in range(n - 1, -1, -1):\n for j in range(n - 1, -1, -1):\n dist = min(dp[i][j + 1] if j < n - 1 else float('inf'),\n dp[i + 1][j] if i < n - 1 else float('inf')) + 1 # right and down\n dp[i][j] = min(dp[i][j], dist)\n res = max(c for row in dp for c in row) - 1\n\n return -1 if land_cell == 0 or land_cell == n * n else res\n\n# dp and store in-place, TC:O(N^2), SC:O(1)\ndef maxDistance2(grid: List[List[int]]) -> int:\n\n n = len(grid)\n land_cell = 0\n for i in range(n):\n for j in range(n):\n if grid[i][j] == 1:\n land_cell += 1\n continue\n grid[i][j] = min(grid[i][j - 1] if j > 0 else float('inf'),\n grid[i - 1][j] if i > 0 else float('inf')) + 1 # left and top\n\n for i in range(n - 1, -1, -1):\n for j in range(n - 1, -1, -1):\n dist = min(grid[i][j + 1] if j < n - 1 else float('inf'),\n grid[i + 1][j] if i < n - 1 else float('inf')) + 1 # right and down\n grid[i][j] = min(grid[i][j], dist)\n res = max(c for row in grid for c in row) - 1\n\n return -1 if land_cell == 0 or land_cell == n * n else res","repo_name":"ychanc2104/LeetCode","sub_path":"As Far from Land as Possible.py","file_name":"As Far from Land as Possible.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22838819549","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 24 00:03:33 2020\r\n\r\nTRIANGLE FIELD\r\n\r\n@author: David\r\n\"\"\"\r\n# function It is a triangle?\r\ndef isaTriangle(a,b,c):\r\n if (a+b) > c:\r\n if (b+c) > a:\r\n if (a+c) > b: \r\n return True\r\n else:\r\n return 
False\r\n else:\r\n return False\r\n else:\r\n return False\r\n# ************************************************\r\n\r\ndef areaTriangle(a,b,c):\r\n s = (a+b+c)/2 #Heron's formula\r\n area = (s*(s-a)*(s-b)*(s-c))**0.5\r\n return area\r\n\r\n# main program:\r\na = float(input(\"Enter the first triangle side\\t\"))\r\nb = float(input(\"Enter the second triangle side\\t\"))\r\nc = float(input(\"Enter the third triangle side\\t\"))\r\n\r\nif isaTriangle(a,b,c):\r\n print(\"Congratulations! It is a Triangle\")\r\nelse:\r\n print(\"Sorry, It won't be a Triangle\")\r\n\r\nif isaTriangle(a,b,c): \r\n print(\"The area of the triangle is\",areaTriangle(a,b,c),\"U^2\")\r\n","repo_name":"JDavid121/Script-Curso-Cisco-Python","sub_path":"138 triangle field.py","file_name":"138 triangle field.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27396263278","text":"import os\nimport pika\nimport sys\nimport json\nimport numpy as np\n\ndef main():\n connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n channel = connection.channel()\n\n channel.queue_declare(queue='baka')\n\n def callback(ch, method, properties, body):\n # print(\" [x] baka >~< Received\", json.loads(body))\n array = eval(json.loads(body)[\"detections\"])\n print(\"\\n\\n\\n\", np.asarray(array[0]), np.asarray(array[0]).shape)\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n channel.basic_consume(queue='baka', on_message_callback=callback)\n\n channel.basic_qos(prefetch_count=1)\n\n print(' [*] Waiting for messages. To exit press CTRL+C')\n channel.start_consuming()\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)","repo_name":"Razakai/rabbitMQ-testing","sub_path":"receiveFromBaka.py","file_name":"receiveFromBaka.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26899595085","text":"import json\nimport os\nimport shutil\nimport time\nimport numpy as np\nfrom pathlib import Path\n\nfrom flow_sdk.client import FlowClient\nfrom prettytable import PrettyTable, ORGMODE\nfrom fate_test.flow_test.flow_process import get_dict_from_file, serving_connect\n\n\nclass TestModel(object):\n def __init__(self, data_base_dir, server_url, component_name, namespace):\n self.conf_path = None\n self.dsl_path = None\n self.job_id = None\n self.model_id = None\n self.model_version = None\n self.guest_party_id = None\n self.host_party_id = None\n self.arbiter_party_id = None\n self.output_path = None\n self.cache_directory = None\n\n self.data_base_dir = data_base_dir\n self.component_name = component_name\n self.client = FlowClient(server_url.split(':')[0], server_url.split(':')[1].split('/')[0],\n server_url.split(':')[1].split('/')[1])\n self.request_api_info_path = f'./logs/{namespace}/sdk_exception.log'\n os.makedirs(os.path.dirname(self.request_api_info_path), exist_ok=True)\n\n def error_log(self, retmsg):\n if retmsg is None:\n return os.path.abspath(self.request_api_info_path)\n with open(self.request_api_info_path, \"a\") as f:\n f.write(retmsg)\n\n def submit_job(self, stop=True):\n try:\n stdout = self.client.job.submit(config_data=get_dict_from_file(self.conf_path),\n dsl_data=get_dict_from_file(self.dsl_path))\n if stdout.get('retcode'):\n self.error_log('job submit: {}'.format(stdout.get('retmsg')) + 
'\\n')\n self.job_id = stdout.get(\"jobId\")\n self.model_id = stdout.get(\"data\").get(\"model_info\").get(\"model_id\")\n self.model_version = stdout.get(\"data\").get(\"model_info\").get(\"model_version\")\n if stop:\n return\n return self.query_status()\n except Exception:\n return\n\n def job_dsl_generate(self):\n train_dsl = {\"components\": {\"data_transform_0\": {\"module\": \"DataTransform\", \"input\": {\"data\": {\"data\": []}},\n \"output\": {\"data\": [\"train\"], \"model\": [\"data_transform\"]}}}}\n train_dsl_path = self.cache_directory + 'generate_dsl_file.json'\n with open(train_dsl_path, 'w') as fp:\n json.dump(train_dsl, fp)\n try:\n stdout = self.client.job.generate_dsl(train_dsl=get_dict_from_file(train_dsl_path),\n cpn=['data_transform_0'])\n if stdout.get('retcode'):\n self.error_log('job dsl generate: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get('data')['components']['data_transform_0']['input']['model'][\n 0] == 'pipeline.data_transform_0.data_transform':\n return stdout.get('retcode')\n except Exception:\n return\n\n def job_api(self, command):\n if command == 'stop':\n self.submit_job()\n time.sleep(5)\n try:\n stdout = self.client.job.stop(job_id=self.job_id)\n if stdout.get('retcode'):\n self.error_log('job stop: {}'.format(stdout.get('retmsg')) + '\\n')\n if self.query_job() == \"canceled\":\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'list/job':\n try:\n stdout = self.client.job.list(limit=3)\n if stdout.get('retcode'):\n self.error_log('job list: {}'.format(stdout.get('retmsg')) + '\\n')\n if len(stdout.get('data', {}).get('jobs', [])) == 3:\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'view':\n try:\n stdout = self.client.job.view(job_id=self.job_id, role=\"guest\")\n if stdout.get('retcode'):\n self.error_log('job view: {}'.format(stdout.get('retmsg')) + '\\n')\n if len(stdout.get(\"data\")) == len(list(get_dict_from_file(self.dsl_path)['components'].keys())) - 1:\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'log':\n log_file_dir = os.path.join(self.output_path, 'job_{}_log'.format(self.job_id))\n try:\n stdout = self.client.job.log(job_id=self.job_id, output_path=log_file_dir)\n if stdout.get('retcode'):\n self.error_log('job log: {}'.format(stdout.get('retmsg')) + '\\n')\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'clean/queue':\n try:\n stdout = self.client.queue.clean()\n if stdout.get('retcode'):\n self.error_log('clean queue: {}'.format(stdout.get('retmsg')) + '\\n')\n if not self.query_job(queue=True):\n return stdout.get('retcode')\n except Exception:\n return\n\n def query_job(self, job_id=None, queue=False):\n if job_id is None:\n job_id = self.job_id\n time.sleep(1)\n try:\n if not queue:\n stdout = self.client.job.query(job_id=job_id)\n if not stdout.get('retcode'):\n return stdout.get(\"data\")[0].get(\"f_status\")\n else:\n self.error_log('query job: {}'.format(stdout.get('retmsg')) + '\\n')\n else:\n stdout = self.client.job.query(job_id=job_id, status='waiting')\n if not stdout.get('retcode'):\n return len(stdout.get(\"data\"))\n except Exception:\n return\n\n def job_config(self, max_iter):\n try:\n stdout = self.client.job.config(job_id=self.job_id, role=\"guest\", party_id=self.guest_party_id[0],\n output_path=self.output_path)\n if stdout.get('retcode'):\n self.error_log('job config: {}'.format(stdout.get('retmsg')) + '\\n')\n job_conf_path = stdout.get('directory') + 
'/runtime_conf.json'\n job_conf = get_dict_from_file(job_conf_path)\n if max_iter == job_conf['component_parameters']['common'][self.component_name]['max_iter']:\n return stdout.get('retcode')\n\n except Exception:\n return\n\n def query_task(self):\n try:\n stdout = self.client.task.query(job_id=self.job_id, role=\"guest\", party_id=self.guest_party_id[0],\n component_name=self.component_name)\n if stdout.get('retcode'):\n self.error_log('task query: {}'.format(stdout.get('retmsg')) + '\\n')\n status = stdout.get(\"data\")[0].get(\"f_status\")\n if status == \"success\":\n return stdout.get('retcode')\n except Exception:\n return\n\n def list_task(self):\n try:\n stdout = self.client.task.list(limit=3)\n if stdout.get('retcode'):\n self.error_log('list task: {}'.format(stdout.get('retmsg')) + '\\n')\n if len(stdout.get('data', {}).get('tasks', [])) == 3:\n return stdout.get('retcode')\n except Exception:\n return\n\n def component_api(self, command, max_iter=None):\n component_output_path = os.path.join(self.output_path, 'job_{}_output_data'.format(self.job_id))\n if command == 'output/data':\n try:\n stdout = self.client.component.output_data(job_id=self.job_id, role=\"guest\",\n party_id=self.guest_party_id[0],\n component_name=self.component_name,\n output_path=component_output_path)\n if stdout.get('retcode'):\n self.error_log('component output data: {}'.format(stdout.get('retmsg')) + '\\n')\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'output/data/table':\n try:\n stdout = self.client.component.output_data_table(job_id=self.job_id, role=\"guest\",\n party_id=self.guest_party_id[0],\n component_name=self.component_name)\n if stdout.get('retcode'):\n self.error_log('component output data table: {}'.format(stdout.get('retmsg')) + '\\n')\n table = {'table_name': stdout.get(\"data\")[0].get(\"table_name\"),\n 'namespace': stdout.get(\"data\")[0].get(\"namespace\")}\n if not self.table_api('table_info', table):\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'output/model':\n try:\n stdout = self.client.component.output_model(job_id=self.job_id, role=\"guest\",\n party_id=self.guest_party_id[0],\n component_name=self.component_name)\n if stdout.get('retcode'):\n self.error_log('component output model: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get(\"data\"):\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'parameters':\n try:\n stdout = self.client.component.parameters(job_id=self.job_id, role=\"guest\",\n party_id=self.guest_party_id[0],\n component_name=self.component_name)\n if stdout.get('retcode'):\n self.error_log('component parameters: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get('data', {}).get('ComponentParam', {}).get('max_iter', {}) == max_iter:\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'summary':\n try:\n stdout = self.client.component.get_summary(job_id=self.job_id, role=\"guest\",\n party_id=self.guest_party_id[0],\n component_name=self.component_name)\n if stdout.get('retcode'):\n self.error_log('component summary download: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get(\"data\"):\n summary_file = self.output_path + '{}_summary.json'.format(self.job_id)\n with open(summary_file, 'w') as fp:\n json.dump(stdout.get(\"data\"), fp)\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'metrics':\n try:\n stdout = self.client.component.metrics(job_id=self.job_id, role=\"guest\",\n 
party_id=self.guest_party_id[0],\n component_name='evaluation_0')\n if stdout.get('retcode'):\n self.error_log('component metrics: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get(\"data\"):\n metrics_file = self.output_path + '{}_metrics.json'.format(self.job_id)\n with open(metrics_file, 'w') as fp:\n json.dump(stdout.get(\"data\"), fp)\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'metric/all':\n try:\n stdout = self.client.component.metric_all(job_id=self.job_id, role=\"guest\",\n party_id=self.guest_party_id[0],\n component_name='evaluation_0')\n if stdout.get('retcode'):\n self.error_log('component metric all: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get(\"data\"):\n metric_all_file = self.output_path + '{}_metric_all.json'.format(self.job_id)\n with open(metric_all_file, 'w') as fp:\n json.dump(stdout.get(\"data\"), fp)\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'metric/delete':\n try:\n stdout = self.client.component.metric_delete(job_id=self.job_id, date=str(time.strftime(\"%Y%m%d\")))\n if stdout.get('retcode'):\n self.error_log('component metric delete: {}'.format(stdout.get('retmsg')) + '\\n')\n metric = self.client.component.metrics(job_id=self.job_id, role=\"guest\",\n party_id=self.guest_party_id[0],\n component_name='evaluation_0')\n if not metric.get('data'):\n return stdout.get('retcode')\n except Exception:\n return\n\n def component_list(self):\n try:\n stdout = self.client.component.list(job_id=self.job_id)\n if stdout.get('retcode'):\n self.error_log('component list: {}'.format(stdout.get('retmsg')) + '\\n')\n dsl_json = get_dict_from_file(self.dsl_path)\n if len(stdout.get('data')['components']) == len(list(dsl_json['components'].keys())):\n return stdout.get('retcode')\n except Exception:\n raise\n\n def table_api(self, command, table_name):\n if command == 'table/info':\n try:\n stdout = self.client.table.info(table_name=table_name['table_name'], namespace=table_name['namespace'])\n if stdout.get('retcode'):\n self.error_log('table info: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get('data')['namespace'] == table_name['namespace'] and \\\n stdout.get('data')['table_name'] == table_name['table_name']:\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'table/delete':\n try:\n stdout = self.client.table.delete(table_name=table_name['table_name'],\n namespace=table_name['namespace'])\n\n if stdout.get('retcode'):\n self.error_log('table delete: {}'.format(stdout.get('retmsg')) + '\\n')\n stdout = self.client.table.delete(table_name=table_name['table_name'],\n namespace=table_name['namespace'])\n if stdout.get('retcode'):\n return 0\n except Exception:\n return\n\n def data_upload(self, upload_path, table_index=None):\n upload_file = get_dict_from_file(upload_path)\n upload_file['file'] = str(self.data_base_dir.joinpath(upload_file['file']).resolve())\n upload_file['drop'] = 1\n upload_file['use_local_data'] = 0\n if table_index is not None:\n upload_file['table_name'] = f'{upload_file[\"file\"]}_{table_index}'\n # upload_path = self.cache_directory + 'upload_file.json'\n # with open(upload_path, 'w') as fp:\n # json.dump(upload_file, fp)\n try:\n stdout = self.client.data.upload(config_data=upload_file, drop=1)\n if stdout.get('retcode'):\n self.error_log('data upload: {}'.format(stdout.get('retmsg')) + '\\n')\n return self.query_status(stdout.get(\"jobId\"))\n except Exception:\n return\n\n def data_download(self, table_name):\n 
download_config = {\n \"table_name\": table_name['table_name'],\n \"namespace\": table_name['namespace'],\n \"output_path\": 'download.csv',\n }\n try:\n stdout = self.client.data.download(config_data=download_config)\n if stdout.get('retcode'):\n self.error_log('data download: {}'.format(stdout.get('retmsg')) + '\\n')\n return self.query_status(stdout.get(\"jobId\"))\n except Exception:\n return\n\n def data_upload_history(self, conf_file):\n self.data_upload(conf_file, table_index=1)\n try:\n stdout = self.client.data.upload_history(limit=2)\n if stdout.get('retcode'):\n self.error_log('data upload history: {}'.format(stdout.get('retmsg')) + '\\n')\n if len(stdout.get('data')) == 2:\n return stdout.get('retcode')\n except Exception:\n return\n\n def tag_api(self, command, tag_name=None, new_tag_name=None):\n if command == 'tag/query':\n try:\n stdout = self.client.tag.query(tag_name=tag_name)\n if stdout.get('retcode'):\n self.error_log('tag query: {}'.format(stdout.get('retmsg')) + '\\n')\n if not stdout.get('retcode'):\n return stdout.get('data')['tags'][0]['name']\n except Exception:\n return\n\n elif command == 'tag/create':\n try:\n stdout = self.client.tag.create(tag_name=tag_name)\n self.error_log('tag create: {}'.format(stdout.get('retmsg')) + '\\n')\n if self.tag_api('tag/query', tag_name=tag_name) == tag_name:\n return 0\n except Exception:\n return\n\n elif command == 'tag/delete':\n try:\n stdout = self.client.tag.delete(tag_name=tag_name)\n if stdout.get('retcode'):\n self.error_log('tag delete: {}'.format(stdout.get('retmsg')) + '\\n')\n if not self.tag_api('tag/query', tag_name=tag_name):\n return 0\n except Exception:\n return\n\n elif command == 'tag/update':\n try:\n stdout = self.client.tag.update(tag_name=tag_name, new_tag_name=new_tag_name)\n self.error_log('tag update: {}'.format(stdout.get('retmsg')) + '\\n')\n if self.tag_api('tag/query', tag_name=new_tag_name) == new_tag_name:\n return 0\n except Exception:\n return\n\n elif command == 'tag/list':\n try:\n stdout = self.client.tag.list(limit=1)\n if stdout.get('retcode'):\n self.error_log('tag list: {}'.format(stdout.get('retmsg')) + '\\n')\n if len(stdout.get('data')['tags']) == 1:\n return stdout.get('retcode')\n except Exception:\n return\n\n def model_api(self, command, remove_path=None, model_path=None, tag_name=None, homo_deploy_path=None,\n homo_deploy_kube_config_path=None, remove=False, model_load_conf=None, servings=None):\n if model_load_conf is not None:\n model_load_conf[\"job_parameters\"].update({\"model_id\": self.model_id,\n \"model_version\": self.model_version})\n\n if command == 'model/load':\n try:\n stdout = self.client.model.load(config_data=model_load_conf)\n if stdout.get('retcode'):\n self.error_log('model load: {}'.format(stdout.get('retmsg')) + '\\n')\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/bind':\n service_id = \"\".join([str(i) for i in np.random.randint(9, size=8)])\n model_load_conf.update({\"service_id\": service_id, \"servings\": [servings]})\n try:\n stdout = self.client.model.bind(config_data=model_load_conf)\n if stdout.get('retcode'):\n self.error_log('model bind: {}'.format(stdout.get('retmsg')) + '\\n')\n else:\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/import':\n config_data = {\n \"model_id\": self.model_id,\n \"model_version\": self.model_version,\n \"role\": \"guest\",\n \"party_id\": self.guest_party_id[0],\n \"file\": model_path,\n \"force_update\": 1,\n }\n\n try:\n remove_path 
= Path(remove_path + self.model_version)\n if os.path.isdir(remove_path):\n shutil.rmtree(remove_path)\n stdout = self.client.model.import_model(config_data=config_data)\n if not stdout.get('retcode') and os.path.isdir(remove_path):\n return 0\n else:\n self.error_log('model import: {}'.format(stdout.get('retmsg')) + '\\n')\n except Exception:\n return\n\n elif command == 'model/export':\n config_data = {\n \"model_id\": self.model_id,\n \"model_version\": self.model_version,\n \"role\": \"guest\",\n \"party_id\": self.guest_party_id[0],\n \"output_path\": self.output_path\n }\n # config_file_path = self.cache_directory + 'model_export.json'\n # with open(config_file_path, 'w') as fp:\n # json.dump(config_data, fp)\n stdout = self.client.model.export_model(config_data=config_data)\n if stdout.get('retcode'):\n self.error_log('model export: {}'.format(stdout.get('retmsg')) + '\\n')\n else:\n export_model_path = stdout.get('file')\n return stdout.get('retcode'), export_model_path\n\n elif command == 'model/migrate':\n config_data = {\n \"job_parameters\": {\n \"federated_mode\": \"MULTIPLE\"\n },\n \"migrate_initiator\": {\n \"role\": \"guest\",\n \"party_id\": self.guest_party_id[0]\n },\n \"role\": {\n \"guest\": self.guest_party_id,\n \"arbiter\": self.arbiter_party_id,\n \"host\": self.host_party_id\n },\n \"migrate_role\": {\n \"guest\": self.guest_party_id,\n \"arbiter\": self.arbiter_party_id,\n \"host\": self.host_party_id\n },\n \"execute_party\": {\n \"guest\": self.guest_party_id,\n \"arbiter\": self.arbiter_party_id,\n \"host\": self.host_party_id\n },\n \"model_id\": self.model_id,\n \"model_version\": self.model_version,\n \"unify_model_version\": self.job_id + '_01'\n }\n # config_file_path = self.cache_directory + 'model_migrate.json'\n # with open(config_file_path, 'w') as fp:\n # json.dump(config_data, fp)\n try:\n stdout = self.client.model.migrate(config_data=config_data)\n if stdout.get('retcode'):\n self.error_log('model migrate: {}'.format(stdout.get('retmsg')) + '\\n')\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/homo/convert':\n config_data = {\n \"model_id\": self.model_id,\n \"model_version\": self.model_version,\n \"role\": \"guest\",\n \"party_id\": self.guest_party_id[0],\n }\n config_file_path = self.cache_directory + 'model_homo_convert.json'\n with open(config_file_path, 'w') as fp:\n json.dump(config_data, fp)\n try:\n stdout = self.client.model.homo_convert(conf_path=config_file_path)\n if stdout.get('retcode'):\n self.error_log('model homo convert: {}'.format(stdout.get('retmsg')) + '\\n')\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/homo/deploy':\n job_data = {\n \"model_id\": self.model_id,\n \"model_version\": self.model_version,\n \"role\": \"guest\",\n \"party_id\": self.guest_party_id[0],\n \"component_name\": self.component_name\n }\n config_data = get_dict_from_file(homo_deploy_path)\n config_data.update(job_data)\n if homo_deploy_kube_config_path:\n config_data['deployment_parameters']['config_file'] = homo_deploy_kube_config_path\n config_file_path = self.cache_directory + 'model_homo_deploy.json'\n with open(config_file_path, 'w') as fp:\n json.dump(config_data, fp)\n try:\n stdout = self.client.model.homo_deploy(conf_path=config_file_path)\n if stdout.get('retcode'):\n self.error_log('model homo deploy: {}'.format(stdout.get('retmsg')) + '\\n')\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model_tag/model':\n try:\n stdout = 
self.client.model.tag_model(job_id=self.job_id, tag_name=tag_name, remove=remove)\n if stdout.get('retcode'):\n self.error_log('model tag model: {}'.format(stdout.get('retmsg')) + '\\n')\n return self.model_api('model_tag/list', tag_name=tag_name, remove=True)\n except Exception:\n return\n\n elif command == 'model_tag/list':\n try:\n stdout = self.client.model.tag_list(job_id=self.job_id)\n if stdout.get('retcode'):\n self.error_log('model tag retrieve: {}'.format(stdout.get('retmsg')) + '\\n')\n if remove and len(stdout.get('data').get('tags')) == 0:\n return stdout.get('retcode')\n if stdout.get('data').get('tags')[0].get('name') == tag_name:\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/deploy':\n try:\n stdout = self.client.model.deploy(model_id=self.model_id, model_version=self.model_version)\n if stdout.get('retcode'):\n self.error_log('model deploy: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get('data')['model_id'] == self.model_id and\\\n stdout.get('data')['model_version'] != self.model_version:\n self.model_id = stdout.get('data')['model_id']\n self.model_version = stdout.get('data')['model_version']\n self.job_id = stdout.get('data')['model_version']\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/conf':\n try:\n stdout = self.client.model.get_predict_conf(model_id=self.model_id, model_version=self.model_version)\n if stdout.get('retcode'):\n self.error_log('model conf: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get('data'):\n if stdout.get('data')['job_parameters']['common']['model_id'] == self.model_id \\\n and stdout.get('data')['job_parameters']['common']['model_version'] == \\\n self.model_version and stdout.get('data')['initiator']['party_id'] == \\\n self.guest_party_id[0] and stdout.get('data')['initiator']['role'] == 'guest':\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/dsl':\n try:\n stdout = self.client.model.get_predict_dsl(model_id=self.model_id, model_version=self.model_version)\n if stdout.get('retcode'):\n self.error_log('model dsl: {}'.format(stdout.get('retmsg')) + '\\n')\n model_dsl_cpn = list(stdout.get('data')['components'].keys())\n train_dsl_cpn = list(get_dict_from_file(self.dsl_path)['components'].keys())\n if len([k for k in model_dsl_cpn if k in train_dsl_cpn]) == len(train_dsl_cpn):\n return stdout.get('retcode')\n except Exception:\n return\n\n elif command == 'model/query':\n try:\n stdout = self.client.model.get_model_info(model_id=self.model_id, model_version=self.model_version,\n role=\"guest\", party_id=self.guest_party_id[0])\n if stdout.get('retcode'):\n self.error_log('model query: {}'.format(stdout.get('retmsg')) + '\\n')\n if stdout.get('data')[0].get('f_model_id') == self.model_id and \\\n stdout.get('data')[0].get('f_model_version') == self.model_version and \\\n stdout.get('data')[0].get('f_role') == \"guest\" and \\\n stdout.get('data')[0].get('f_party_id') == str(self.guest_party_id[0]):\n return stdout.get('retcode')\n except Exception:\n return\n\n def query_status(self, job_id=None):\n while True:\n time.sleep(5)\n status = self.query_job(job_id=job_id)\n if status and status in [\"waiting\", \"running\", \"success\"]:\n if status and status == \"success\":\n return 0\n else:\n return\n\n def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path, component_name):\n config = get_dict_from_file(path)\n config[\"initiator\"][\"party_id\"] = guest_party_id[0]\n 
config[\"role\"][\"guest\"] = guest_party_id\n config[\"role\"][\"host\"] = host_party_id\n if \"arbiter\" in config[\"role\"]:\n config[\"role\"][\"arbiter\"] = arbiter_party_id\n self.guest_party_id = guest_party_id\n self.host_party_id = host_party_id\n self.arbiter_party_id = arbiter_party_id\n conf_file_path = self.cache_directory + 'conf_file.json'\n with open(conf_file_path, 'w') as fp:\n json.dump(config, fp)\n self.conf_path = conf_file_path\n return config['component_parameters']['common'][component_name]['max_iter']\n\n\ndef judging_state(retcode):\n if not retcode and retcode is not None:\n return 'success'\n else:\n return 'failed'\n\n\ndef run_test_api(config_json, namespace):\n output_path = './output/flow_test_data/'\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n test_api = TestModel(config_json['data_base_dir'], config_json['server_url'].split('//')[1],\n config_json['component_name'], namespace)\n test_api.dsl_path = config_json['train_dsl_path']\n test_api.cache_directory = config_json['cache_directory']\n test_api.output_path = str(os.path.abspath(output_path)) + '/'\n conf_path = config_json['train_conf_path']\n guest_party_id = config_json['guest_party_id']\n host_party_id = config_json['host_party_id']\n arbiter_party_id = config_json['arbiter_party_id']\n upload_file_path = config_json['upload_file_path']\n model_file_path = config_json['model_file_path']\n conf_file = get_dict_from_file(upload_file_path)\n serving_connect_bool = serving_connect(config_json['serving_setting'])\n remove_path = str(config_json['data_base_dir']).split(\"python\")[\n 0] + '/fateflow/model_local_cache/guest#{}#arbiter-{}#guest-{}#host-{}#model/'.format(\n guest_party_id[0], arbiter_party_id[0], guest_party_id[0], host_party_id[0])\n max_iter = test_api.set_config(guest_party_id, host_party_id, arbiter_party_id, conf_path,\n config_json['component_name'])\n\n data = PrettyTable()\n data.set_style(ORGMODE)\n data.field_names = ['data api name', 'status']\n data.add_row(['data upload', judging_state(test_api.data_upload(upload_file_path))])\n data.add_row(['data download', judging_state(test_api.data_download(conf_file))])\n data.add_row(\n ['data upload history', judging_state(test_api.data_upload_history(upload_file_path))])\n print(data.get_string(title=\"data api\"))\n\n table = PrettyTable()\n table.set_style(ORGMODE)\n table.field_names = ['table api name', 'status']\n table.add_row(['table info', judging_state(test_api.table_api('table/info', conf_file))])\n table.add_row(['delete table', judging_state(test_api.table_api('table/delete', conf_file))])\n print(table.get_string(title=\"table api\"))\n\n job = PrettyTable()\n job.set_style(ORGMODE)\n job.field_names = ['job api name', 'status']\n job.add_row(['job stop', judging_state(test_api.job_api('stop'))])\n job.add_row(['job submit', judging_state(test_api.submit_job(stop=False))])\n job.add_row(['job query', judging_state(False if test_api.query_job() == \"success\" else True)])\n job.add_row(['job view', judging_state(test_api.job_api('view'))])\n job.add_row(['job list', judging_state(test_api.job_api('list/job'))])\n job.add_row(['job config', judging_state(test_api.job_config(max_iter=max_iter))])\n job.add_row(['job log', judging_state(test_api.job_api('log'))])\n job.add_row(['job dsl generate', judging_state(test_api.job_dsl_generate())])\n print(job.get_string(title=\"job api\"))\n\n task = PrettyTable()\n task.set_style(ORGMODE)\n task.field_names = ['task api name', 'status']\n task.add_row(['task list', 
judging_state(test_api.list_task())])\n task.add_row(['task query', judging_state(test_api.query_task())])\n print(task.get_string(title=\"task api\"))\n\n tag = PrettyTable()\n tag.set_style(ORGMODE)\n tag.field_names = ['tag api name', 'status']\n tag.add_row(['create tag', judging_state(test_api.tag_api('tag/create', 'create_job_tag'))])\n tag.add_row(['update tag', judging_state(test_api.tag_api('tag/update', 'create_job_tag', 'update_job_tag'))])\n tag.add_row(['list tag', judging_state(test_api.tag_api('tag/list'))])\n tag.add_row(\n ['query tag', judging_state(not test_api.tag_api('tag/query', 'update_job_tag') == 'update_job_tag')])\n tag.add_row(['delete tag', judging_state(test_api.tag_api('tag/delete', 'update_job_tag'))])\n print(tag.get_string(title=\"tag api\"))\n\n component = PrettyTable()\n component.set_style(ORGMODE)\n component.field_names = ['component api name', 'status']\n component.add_row(['output data', judging_state(test_api.component_api('output/data'))])\n component.add_row(['output table', judging_state(test_api.component_api('output/data/table'))])\n component.add_row(['output model', judging_state(test_api.component_api('output/model'))])\n component.add_row(['component parameters', judging_state(test_api.component_api('parameters', max_iter=max_iter))])\n component.add_row(['component summary', judging_state(test_api.component_api('summary'))])\n component.add_row(['component list', judging_state(test_api.component_list())])\n component.add_row(['metrics', judging_state(test_api.component_api('metrics'))])\n component.add_row(['metrics all', judging_state(test_api.component_api('metric/all'))])\n\n model = PrettyTable()\n model.set_style(ORGMODE)\n model.field_names = ['model api name', 'status']\n if config_json.get('component_is_homo'):\n homo_deploy_path = config_json.get('homo_deploy_path')\n homo_deploy_kube_config_path = config_json.get('homo_deploy_kube_config_path')\n model.add_row(['model homo convert', judging_state(test_api.model_api('model/homo/convert'))])\n model.add_row(['model homo deploy',\n judging_state(test_api.model_api('model/homo/deploy',\n homo_deploy_path=homo_deploy_path,\n homo_deploy_kube_config_path=homo_deploy_kube_config_path))])\n if not config_json.get('component_is_homo') and serving_connect_bool:\n model_load_conf = get_dict_from_file(model_file_path)\n model_load_conf[\"initiator\"][\"party_id\"] = guest_party_id\n model_load_conf[\"role\"].update(\n {\"guest\": [guest_party_id], \"host\": [host_party_id], \"arbiter\": [arbiter_party_id]})\n model.add_row(['model load', judging_state(test_api.model_api('model/load', model_load_conf=model_load_conf))])\n model.add_row(['model bind', judging_state(test_api.model_api('model/bind', model_load_conf=model_load_conf,\n servings=config_json['serving_setting']))])\n status, model_path = test_api.model_api('model/export')\n model.add_row(['model export', judging_state(status)])\n model.add_row(['model import', (judging_state(\n test_api.model_api('model/import', remove_path=remove_path, model_path=model_path)))])\n model.add_row(['tag model', judging_state(test_api.model_api('model_tag/model', tag_name='model_tag_create'))])\n model.add_row(['tag list', judging_state(test_api.model_api('model_tag/list', tag_name='model_tag_create'))])\n model.add_row(\n ['tag remove', judging_state(test_api.model_api('model_tag/model', tag_name='model_tag_create', remove=True))])\n if serving_connect_bool:\n model.add_row(\n ['model migrate', judging_state(test_api.model_api('model/migrate'))])\n 
model.add_row(['model query', judging_state(test_api.model_api('model/query'))])\n if not config_json.get('component_is_homo') and serving_connect_bool:\n model.add_row(['model deploy', judging_state(test_api.model_api('model/deploy'))])\n model.add_row(['model conf', judging_state(test_api.model_api('model/conf'))])\n model.add_row(['model dsl', judging_state(test_api.model_api('model/dsl'))])\n print(model.get_string(title=\"model api\"))\n component.add_row(['metrics delete', judging_state(test_api.component_api('metric/delete'))])\n print(component.get_string(title=\"component api\"))\n\n queue = PrettyTable()\n queue.set_style(ORGMODE)\n queue.field_names = ['api name', 'status']\n test_api.submit_job()\n test_api.submit_job()\n test_api.submit_job()\n queue.add_row(['clean/queue', judging_state(test_api.job_api('clean/queue'))])\n print(queue.get_string(title=\"queue job\"))\n print('Please check the error content: {}'.format(test_api.error_log(None)))\n","repo_name":"FederatedAI/FATE","sub_path":"python/fate_test/fate_test/flow_test/flow_sdk_api.py","file_name":"flow_sdk_api.py","file_ext":"py","file_size_in_byte":39201,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"7577637175","text":"import exceptions\nimport os\nimport sys\n\nimport json\nfrom twisted.internet import defer\n\nfrom ooni import errors\nfrom ooni.settings import config\nfrom ooni.tests import is_internet_connected\nfrom ooni.tests.bases import ConfigTestCase\nfrom ooni.ui.cli import createDeck\nfrom ooni.ui.cli import runWithDirector, setupGlobalOptions\nfrom ooni.ui.cli import setupAnnotations, setupCollector\nfrom ooni.utils.net import hasRawSocketPermission\n\n\ndef verify_header(header):\n assert 'input_hashes' in header.keys()\n assert 'options' in header.keys()\n assert 'probe_asn' in header.keys()\n assert 'probe_cc' in header.keys()\n assert 'probe_ip' in header.keys()\n assert 'software_name' in header.keys()\n assert 'software_version' in header.keys()\n assert 'test_name' in header.keys()\n assert 'test_version' in header.keys()\n\n\ndef verify_entry(entry):\n assert 'input' in entry\n\nconfig_includepcap = \"\"\"\nbasic:\n logfile: ~/.ooni/ooniprobe.log\nprivacy:\n includeip: false\n includeasn: true\n includecountry: true\n includepcap: true\nreports:\n pcap: null\n collector: null\nadvanced:\n debug: false\n interface: auto\n start_tor: false\n measurement_timeout: 60\n measurement_retries: 2\n measurement_concurrency: 10\n reporting_timeout: 80\n reporting_retries: 3\n reporting_concurrency: 15\n data_dir: %s\n oonid_api_port: 8042\ntor:\n socks_port: 9050\n\"\"\" % config.data_directory\n\n\nclass TestRunDirector(ConfigTestCase):\n timeout = 420\n\n def setUp(self):\n super(TestRunDirector, self).setUp()\n if not is_internet_connected():\n self.skipTest(\"You must be connected to the internet to run this test\")\n\n self.filenames = ['example-input.txt']\n with open('example-input.txt', 'w+') as f:\n f.write('http://torproject.org/\\n')\n f.write('http://bridges.torproject.org/\\n')\n f.write('http://blog.torproject.org/\\n')\n\n def tearDown(self):\n super(TestRunDirector, self).tearDown()\n for filename in self.filenames:\n if os.path.exists(filename):\n os.remove(filename)\n self.filenames = []\n\n @defer.inlineCallbacks\n def run_helper(self, test_name, nettest_args, verify_function, ooni_args=()):\n output_file = os.path.abspath('test_report.njson')\n self.filenames.append(output_file)\n oldargv = sys.argv\n sys.argv = ['']\n 
sys.argv.extend(ooni_args)\n sys.argv.extend(['-n', '-o', output_file, test_name])\n sys.argv.extend(nettest_args)\n global_options = setupGlobalOptions(False, False, False)\n\n config.tor.socks_port = 9050\n config.advanced.start_tor = False\n config.tor.control_port = None\n config.advanced.debug = True\n\n yield runWithDirector(global_options,\n create_input_store=False)\n with open(output_file) as f:\n entries = map(json.loads, f)\n first_entry = entries[0]\n try:\n test_keys = entries[0]['test_keys']\n except StopIteration:\n raise Exception(\"Missing entry in report\")\n verify_header(first_entry)\n verify_entry(first_entry)\n verify_function(test_keys)\n sys.argv = oldargv\n\n @defer.inlineCallbacks\n def test_http_requests(self):\n self.skipTest(\"XXX This integration test fails non deterministically\")\n def verify_function(entry):\n assert 'body_length_match' in entry\n assert 'body_proportion' in entry\n assert 'control_failure' in entry\n assert 'experiment_failure' in entry\n assert 'factor' in entry\n assert 'headers_diff' in entry\n assert 'headers_match' in entry\n yield self.run_helper('blocking/http_requests',\n ['-u', 'http://torproject.org/'],\n verify_function)\n\n @defer.inlineCallbacks\n def test_http_requests_with_file(self):\n self.skipTest(\"XXX This integration test fails non deterministically\")\n def verify_function(entry):\n assert 'body_length_match' in entry\n assert 'body_proportion' in entry\n assert 'control_failure' in entry\n assert 'experiment_failure' in entry\n assert 'factor' in entry\n assert 'headers_diff' in entry\n assert 'headers_match' in entry\n\n yield self.run_helper('blocking/http_requests',\n ['-f', 'example-input.txt'],\n verify_function)\n\n @defer.inlineCallbacks\n def test_dnsconsistency(self):\n def verify_function(entry):\n assert 'queries' in entry\n assert 'control_resolver' in entry\n assert 'errors' in entry\n assert 'inconsistent' in entry\n assert 'failures' in entry\n assert 'successful' in entry\n assert len(entry['inconsistent']) == 0\n\n yield self.run_helper('blocking/dns_consistency',\n ['-b', '8.8.8.8:53',\n '-t', '8.8.8.8',\n '-f', 'example-input.txt'],\n verify_function)\n\n @defer.inlineCallbacks\n def test_http_header_field_manipulation(self):\n self.skipTest(\"This test requires a property configured backend\")\n def verify_function(entry):\n assert 'agent' in entry\n assert 'requests' in entry\n assert 'socksproxy' in entry\n assert 'errors' in entry\n\n yield self.run_helper('manipulation/http_header_field_manipulation',\n ['-b', 'http://4.15.35.157:80'],\n verify_function)\n\n @defer.inlineCallbacks\n def test_sniffing_activated(self):\n if not hasRawSocketPermission():\n self.skipTest(\"You must run this test as root or have the \"\n \"capabilities cap_net_admin,cap_net_raw+eip\")\n self.skipTest(\"Not properly set packet capture?\")\n filename = os.path.abspath('test_report.pcap')\n self.filenames.append(filename)\n conf_file = os.path.abspath('fake_config.conf')\n with open(conf_file, 'w') as cfg:\n cfg.writelines(config_includepcap)\n self.filenames.append(conf_file)\n\n def verify_function(_):\n assert os.path.exists(filename)\n self.assertGreater(os.stat(filename).st_size, 0)\n yield self.run_helper('blocking/http_requests',\n ['-f', 'example-input.txt'],\n verify_function, ooni_args=['-f', conf_file])\n config.scapyFactory.connectionLost('')\n\n\nclass TestOoniCli(ConfigTestCase):\n def test_create_deck_not_found(self):\n global_options = {\n 'no-yamloo': True,\n 'subargs': [],\n 'annotations': {},\n 
'collector': None,\n 'bouncer': None,\n 'no-collector': False,\n 'test_file': 'invalid/path',\n 'testdeck': None\n }\n exc = None\n try:\n createDeck(global_options)\n except exceptions.SystemExit as exc:\n exc = exc\n self.assertIsNotNone(exc)\n self.assertEqual(exc.code, 3)\n\n def test_setup_annotations(self):\n global_options = {\n \"annotations\": \"key1:value1,key2:value2\"\n }\n annotations = setupAnnotations(global_options)\n self.assertEqual(annotations,\n {'key1': 'value1', 'key2': 'value2'})\n\n def test_setup_collector(self):\n collector1 = 'https://example.com'\n collector2 = 'httpo://thirteenchars321.onion'\n global_options = {\n 'collector': collector1\n }\n try:\n collector_client = setupCollector(global_options, collector2)\n self.assertEqual(collector_client.settings['address'], collector1)\n self.assertEqual(collector_client.settings['type'], 'https')\n except errors.CollectorUnsupported:\n # Older versions of twisted will raise this. We could be more\n # strict and do a check for older twisted versions in here.\n pass\n","repo_name":"ooni/probe-legacy","sub_path":"ooni/tests/test_oonicli.py","file_name":"test_oonicli.py","file_ext":"py","file_size_in_byte":8277,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"21653492569","text":"##\n## Short script for comparing the files in the receiving directories\n##\n\nimport os\nimport pandas as pd\n\ndef load_files(num = 1000):\n rec_dir = os.listdir('../receiving')\n suff = \"stable\"\n rec_stable_dir = os.listdir(f'../receiving_{suff}')\n rec_common = list(set(rec_dir) & set(rec_stable_dir))\n print(set(rec_dir).symmetric_difference(rec_stable_dir))\n num_iter = 0\n for rec in rec_common:\n if num_iter <= num:\n num_iter += 1\n df_rec = pd.read_csv(f'../receiving/{rec}').set_index('geo_id')\n df_stable = pd.read_csv(f'../receiving_{suff}/{rec}').set_index('geo_id')\n df_join = df_rec.join(df_stable, rsuffix='_stable' )\n yield rec, df_join\n\ndef main():\n load_iter = load_files()\n for rec, df in load_iter:\n if ('msa' in rec) and False:\n msa_ds = (df['val'] - df['val_stable']).sum()\n print(f'{msa_ds} value diff')\n if df.eval('abs(val - val_stable)').sum() > 0.01:\n print(f'Printing {rec} difference')\n df_diff = df[df.eval('val != val_stable')]\n print(df_diff.shape)\n df_diff.to_csv(f'rec_diffs/diff_{rec}.csv')\n # assert \"county_confirmed_7dav_incidence_num\" not in rec, f\"{rec}!!!\"\n #input('w')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alexcoda/covidcast-indicators","sub_path":"jhu/tests/compare_receiving.py","file_name":"compare_receiving.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"9155515851","text":"import sqlite3\nconnection=sqlite3.connect(\"company.db\")\n\n\n# connection.execute('''CREATE TABLE EMPLOYEE(\n# ID INTEGER PRIMARY KEY AUTOINCREMENT,\n# NAME TEXT,\n# DESIGNATION TEXT,\n# SALARY INTEGER,\n# COMPANY_NAME TEXT,\n# MOBILE INT);\n# ''')\n# print(\"Table created successfully Ramya!!!!!\")\n\n\ngetName=input(\"Enter name: \")\ngetDes=input(\"Enter designation: \")\ngetSalary=input(\"Enter the salary\")\ngetCompanyname=input(\"Enter companyname\")\ngetMobile=input(\"Enter mobileNumber\")\n\nconnection.execute(\"INSERT INTO EMPLOYEE(NAME,DESIGNATION,SALARY,COMPANY_NAME,MOBILE)\\\n 
VALUES('\"+getName+\"','\"+getDes+\"',\"+getSalary+\",'\"+getCompanyname+\"',\"+getMobile+\")\")\nconnection.commit()\nconnection.close()\nprint(\"success!\")","repo_name":"Ramyalakshmi-tech/employeedatabase","sub_path":"employeedatabase.py","file_name":"employeedatabase.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5651661692","text":"import errno\nimport logging\nimport os\nimport shutil\nimport stat\nimport sys\n\nfrom theano import config\nfrom theano.gof.cmodule import get_lib_extension\nfrom theano.gof.compilelock import get_lock, release_lock\nfrom theano.sandbox import cuda\nfrom theano.sandbox.cuda import nvcc_compiler\n\nfrom .shared_code import this_dir\n\nimport pylearn2.sandbox.cuda_convnet.pthreads\nfrom pylearn2.sandbox.cuda_convnet import check_cuda\n\n_logger_name = 'pylearn2.sandbox.cuda_convnet.convnet_compile'\n_logger = logging.getLogger(_logger_name)\n#_logger.addHandler(logging.StreamHandler())\n#_logger.setLevel(logging.DEBUG)\n\n_logger.debug('importing')\n\n\ncuda_convnet_loc = os.path.join(config.compiledir, 'cuda_convnet')\n# In partial dependency order: the last ones depend on the first ones\ncuda_convnet_file_sources = ('nvmatrix_kernels.cu', 'nvmatrix.cu',\n 'conv_util.cu', 'filter_acts.cu', 'img_acts.cu',\n 'weight_acts.cu')\ncuda_convnet_so = os.path.join(cuda_convnet_loc,\n 'cuda_convnet.' + get_lib_extension())\nlibcuda_convnet_so = os.path.join(cuda_convnet_loc,\n 'libcuda_convnet.' + get_lib_extension())\n\n\ndef convnet_available():\n check_cuda(check_enabled=False)\n\n # If already compiled, OK\n if convnet_available.compiled:\n _logger.debug('already compiled')\n return True\n\n # If there was an error, do not try again\n if convnet_available.compile_error:\n _logger.debug('error last time')\n return False\n\n # Else, we need CUDA\n if not cuda.cuda_available:\n convnet_available.compile_error = True\n _logger.debug('cuda unavailable')\n return False\n\n # Try to actually compile\n success = convnet_compile()\n if success:\n convnet_available.compiled = True\n else:\n convnet_available.compile_error = False\n _logger.debug('compilation success: %s', success)\n\n return convnet_available.compiled\n\n# Initialize variables in convnet_available\nconvnet_available.compiled = False\nconvnet_available.compile_error = False\n\n\ndef should_recompile():\n \"\"\"\n Returns True if the .so files are not present or outdated.\n \"\"\"\n # The following list is in alphabetical order.\n source_files = (\n 'conv_util.cu',\n 'conv_util.cuh',\n 'cudaconv2.cuh',\n 'filter_acts.cu',\n 'img_acts.cu',\n 'nvmatrix.cu',\n 'nvmatrix.cuh',\n 'nvmatrix_kernels.cu',\n 'nvmatrix_kernels.cuh',\n 'nvmatrix_operators.cuh',\n 'weight_acts.cu')\n stat_times = [\n os.stat(os.path.join(this_dir, source_file))[stat.ST_MTIME]\n for source_file in source_files]\n date = max(stat_times)\n _logger.debug('max date: %f', date)\n\n if (not os.path.exists(cuda_convnet_so) or\n date >= os.stat(cuda_convnet_so)[stat.ST_MTIME]):\n return True\n\n return False\n\n\ndef symlink_ok():\n \"\"\"\n Check if an existing library exists and can be read.\n \"\"\"\n try:\n open(libcuda_convnet_so).close()\n return True\n except IOError:\n return False\n\n\ndef convnet_compile():\n # Compile .cu files in cuda_convnet\n _logger.debug('nvcc_compiler.rpath_defaults: %s',\n str(nvcc_compiler.rpath_defaults))\n import time\n t1 = time.time()\n if should_recompile():\n _logger.debug('should recompile')\n\n 
# Concatenate all .cu files into one big mod.cu\n code = []\n for source_file in cuda_convnet_file_sources:\n code.append(open(os.path.join(this_dir, source_file)).read())\n code = '\\n'.join(code)\n\n get_lock()\n try:\n # Check if the compilation has already been done by another process\n # while we were waiting for the lock\n if should_recompile():\n _logger.debug('recompiling')\n\n try:\n compiler = nvcc_compiler.NVCC_compiler()\n args = compiler.compile_args()\n\n # compiler.compile_args() can execute a\n # compilation This currently will remove empty\n # directory in the compile dir. So we must make\n # destination directory after calling it.\n if not os.path.exists(cuda_convnet_loc):\n os.makedirs(cuda_convnet_loc)\n compiler.compile_str('cuda_convnet',\n code,\n location = cuda_convnet_loc,\n include_dirs = [this_dir, config.pthreads.inc_dir] if config.pthreads.inc_dir else [this_dir],\n lib_dirs = nvcc_compiler.rpath_defaults + [cuda_convnet_loc] + ([config.pthreads.lib_dir] if config.pthreads.lib_dir else []),\n libs = ['cublas', config.pthreads.lib] if config.pthreads.lib else ['cublas'],\n preargs = ['-O3'] + args,\n py_module=False)\n except Exception as e:\n _logger.error(\"Failed to compile %s %s: %s\",\n os.path.join(cuda_convnet_loc, 'mod.cu'), cuda_convnet_file_sources, str(e))\n return False\n else:\n _logger.debug('already compiled by another process')\n\n finally:\n release_lock()\n else:\n _logger.debug('not recompiling')\n\n # If necessary, create a symlink called libcuda_convnet.so\n if not symlink_ok():\n if sys.platform == \"win32\":\n # The Python `os` module does not support symlinks on win32.\n shutil.copyfile(cuda_convnet_so, libcuda_convnet_so)\n else:\n try:\n os.symlink(cuda_convnet_so, libcuda_convnet_so)\n except OSError as e:\n # This may happen for instance when running multiple\n # concurrent jobs, if two of them try to create the\n # symlink simultaneously.\n # If that happens, we verify that the existing symlink is\n # indeed working.\n if (getattr(e, 'errno', None) != errno.EEXIST\n or not symlink_ok()):\n raise\n\n # Raise an error if libcuda_convnet_so is still not available\n open(libcuda_convnet_so).close()\n\n # Add cuda_convnet to the list of places that are hard-coded into\n # compiled modules' runtime library search list.\n nvcc_compiler.add_standard_rpath(cuda_convnet_loc)\n\n t2 = time.time()\n _logger.debug('successfully imported. 
Compiled in %fs', t2 - t1)\n\n return True\n","repo_name":"lisa-lab/pylearn2","sub_path":"pylearn2/sandbox/cuda_convnet/convnet_compile.py","file_name":"convnet_compile.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","stars":2743,"dataset":"github-code","pt":"53"} +{"seq_id":"22554572692","text":"import os, sys, json, logging\nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\ndef make_logger(log):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n \n # formatter\n file_formatter = logging.Formatter(\"%(asctime)s [%(levelname)s:%(lineno)d] -- %(message)s\")\n # file_handler\n file_handler = logging.FileHandler(log, mode='w')\n file_handler.setFormatter(file_formatter)\n file_handler.setLevel(logging.INFO)\n # logger.add\n logger.addHandler(file_handler)\n \n return logger\n\ndef readfiles(dir, Ext):\n file_dict = defaultdict(list)\n\n for root, dirs, files in os.walk(dir):\n for file in files:\n filename, ext = os.path.splitext(file)\n ext = ext.lower()\n if ext == Ext:\n file_path = os.path.join(root, file)\n \n file_dict[filename] = file_path\n return file_dict\n\ndef makeOutputPath(file_path, file_dir, output_dir, Ext):\n root, file = os.path.split(file_path)\n filename, ext = os.path.splitext(file)\n relpath = os.path.relpath(file_path, file_dir)\n mid_dir = os.path.split(relpath)[0]\n output_path = os.path.join(output_dir, mid_dir, f\"{filename}.{Ext}\")\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n\n return output_path \n\ndef readJson(path):\n with open(path, 'r', encoding='utf-8') as f:\n data = json.load(f)\n return data\n\ndef make_csv(_list, col, output_path):\n df = pd.DataFrame(_list, columns=col)\n df.to_csv(output_path, index=False)\n \ndef get_true_value(values):\n for value in values:\n if value['selected'] == True:\n val = value['value']\n return val\n\nif __name__ == \"__main__\":\n _, input_dir = sys.argv\n \n folder_front_list = [os.path.join(input_dir, dirs, '04-1_Lane_grouping', '1_Front') for dirs in os.listdir(input_dir)]\n folder_side_list = [os.path.join(input_dir, dirs, '04-1_Lane_grouping', '2_Side') for dirs in os.listdir(input_dir)]\n \n file_dict = defaultdict(lambda : defaultdict(list))\n for folder in tqdm(folder_front_list, desc='gather file'):\n seq = folder.split('\\\\')[-3]\n file_dict[seq]['1_Front'] = [(os.path.join(folder, file)) for file in os.listdir(folder)]\n\n for folder in tqdm(folder_side_list, desc='gather file'):\n seq = folder.split('\\\\')[-3]\n file_dict[seq]['2_Side'] = [(os.path.join(folder, file)) for file in os.listdir(folder)]\n \n\n list2df = []\n for seq, json_path_dict in tqdm(file_dict.items(), desc='create csv'):\n info_dict = {\"0_Unknown\":0, \"1_None\":0, \"2_Solid\":0, \"3_Dashed\":0, \"4_Double_Solid\":0, \"5_Double_Dashed\":0, \"6_1st-Dashed&2nd-Solid\":0, \"7_1st-Solid&2nd-Dashed\":0}\n\n for direc, json_path_list in json_path_dict.items():\n for json_path in json_path_list:\n data = readJson(json_path)\n if len(data['objects']) == 2:\n for obj in data['objects']:\n for att in obj['attributes']:\n val = get_true_value(att['values'])\n info_dict[val] += 1\n elif len(data['objects']) == 1:\n info_dict['1_None'] += 1\n for obj in data['objects']:\n for att in obj['attributes']:\n val = get_true_value(att['values'])\n elif len(data['objects']) == 0:\n info_dict['1_None'] += 2\n \n list2df.append([seq, direc, info_dict[\"0_Unknown\"], info_dict[\"1_None\"], info_dict[\"2_Solid\"], info_dict[\"3_Dashed\"], 
info_dict[\"4_Double_Solid\"], info_dict[\"5_Double_Dashed\"], info_dict[\"6_1st-Dashed&2nd-Solid\"], info_dict[\"7_1st-Solid&2nd-Dashed\"]])\n \n make_csv(list2df, [\"sequence\", 'View', \"0_Unknown\", \"1_None\", \"2_Solid\", \"3_Dashed\", \"4_Double_Solid\", \"5_Double_Dashed\", \"6_1st-Dashed&2nd-Solid\", \"7_1st-Solid&2nd-Dashed\"], f'./{os.path.split(input_dir)[-1]}.csv')\n\n","repo_name":"tkdalsrb123/Alchera","sub_path":"12/1222_hdp_json_count_nld/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17519893378","text":"import itertools\ndef solution(relation):\n row = len(relation)\n col = len(relation[0])\n \n combi = []\n for i in range(1,col+1):\n combi.extend(itertools.combinations(range(col),i))\n unique = []\n for i in combi:\n tmp = [tuple([item[key] for key in i]) for item in relation]\n tmp = []\n for item in relation:\n t = ()\n for key in i:\n t = t + (item[key],)\n tmp.append(t) \n if len(set(tmp)) == row:\n put = True\n for x in unique:\n if set(x).issubset(set(i)):\n put = False\n break\n if put:\n unique.append(i) \n return len(unique) ","repo_name":"Ansai-2000/baekjoon","sub_path":"프로그래머스/lv2/42890. 후보키/후보키.py","file_name":"후보키.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40996916796","text":"# This is a guess the number game.\nimport random\n\ndef start_game():\n print(\"Welcome to Numbers Guessing Game!!!\")\n solution = random.randint(1,10)\n \n Number_Of_Guess = 5\n Attempts = 0\n got_answer = False\n while Attempts < Number_Of_Guess:\n print(\"Take a guess at a number between 1 and 10:\")\n try:\n guess = int(input())\n if (guess < 1 or guess > 10):\n print(\"Must enter nubmer between 1 and 10\")\n continue\n except ValueError:\n print(\"Must enter a whole number:\")\n continue\n \n Attempts = Attempts+1\n if guess < solution:\n print(\"try guessing higher\")\n elif guess > solution:\n print(\"try guessing lower\")\n else:\n print(f'Good job. It took {Attempts} attempts to guess {solution}!')\n got_answer=True\n break\n \n if (got_answer==False):\n print(f'Sorry, you guessed the max times {Attempts}. Try playing again')\n \ndef end_game():\n print(\"Game ends here.\")\n \nif __name__ == \"__main__\":\n start_game()\n end_game()\n","repo_name":"gina-cyber/Project1","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3807599437","text":"\"\"\"\nScript to suggest alignments to entities that have a given type using reconciliation service\n\nUsage:\n python suggestAlignments.py --input_file --base_type --limit --output_file [--api_uri --minimum_score --reconciliation_type --log_file ]\n\nArguments:\n --input_file: TTL input file\n --base_type: type for entities to retrieve from input TTL\n --reconciliation_type: type for entities to retrieve from reconciliation service\n --limit: Limit for number of results that are returned by reconciliation service\n --output_file: TTL output file\n --api_uri: URL of reconciliation API. Defaults to https://lobid.org/gnd/reconcile/. \n --minimum_score: Minimum score to consider for results. Defaults to 0.\n --log_file: Log file (optional): path to log file. 
Defaults to None.\n\"\"\"\n\nfrom rdflib import Graph\nimport argparse\nimport pandas as pd\nimport requests\nfrom itertools import islice\nfrom string import Template\nimport uuid\nfrom tqdm import tqdm\nimport json\nimport os\nimport datetime\nimport logging\n\nESCAPE_DICT = {'\"': r'\\\"'}\n\nNAMESPACES = \"\"\"\nPREFIX loc: \nPREFIX skkg: \nPREFIX mods: \nPREFIX xsd: \nPREFIX skos: \nPREFIX rdfs: \nPREFIX crmdig: \nPREFIX wd: \nPREFIX rso: \nPREFIX aat: \nPREFIX oai: \nPREFIX xml: \nPREFIX dcterms: \nPREFIX sikart: \nPREFIX gnd: \nPREFIX crm: \nPREFIX mets: \nPREFIX resource: \nPREFIX viaf: \nPREFIX ulan: \nPREFIX la: \nPREFIX rdf: \nPREFIX wikidata: \nPREFIX dc: \nPREFIX frbroo: \n\"\"\"\n\nNAMESPACES4TTL_HARDCODED = \"\"\"\n@prefix classification: .\n@prefix crm: .\n@prefix crmdig: .\n@prefix gnd: .\n@prefix rdf: .\n@prefix rdfs: .\n@prefix skos: .\n@prefix xsd: .\n\"\"\"\n\nONE_CLASSIFICATION = Template(\"\"\"\nclassification:$class_id a crm:E13_Attribute_Assignment ;\n crm:P33_used_specific_technique <$technique_uri> ;\n crm:P140_assigned_attribute_to <$base_uri> ;\n crm:P141_assigned gnd:$aligned_id ;\n crm:P177_assigned_property_of_type $same_as skos:$match_type ;\n rdf:value \"$score\"^^xsd:float .\n\ngnd:$aligned_id a crm:E55_Type ;\n rdfs:label \"$aligned_label\" .\n\"\"\")\n\n\nGENERAL_CLASSIFICATION_STATEMENT = Template(\"\"\"\nclassification:$class_id a crm:E13_Attribute_Assignment ;\n crm:P16_used_specific_object $all_uris ;\n crm:P33_used_specific_technique <$technique_uri> ;\n crm:P4_has_time_span ;\n rdf:value \"$minimum_score\"^^xsd:float ;\n rdfs:label \"Alignment Suggestions\" .\n \n $matched_uris_statement\n\n a crm:E52_Time-Span ;\n crm:P82_at_some_time_within \"$current_time\"^^xsd:dateTime .\n\"\"\")\n\nESCAPE_TRANSLATION_TABLE = str.maketrans({\"\\\"\": r\"\\\"\",})\n\n\nMINIMUM_SCORE_NULL = 0\nLOBID_API_URL ='https://lobid.org/gnd/reconcile/'\nAUTHORITY_RESOURCE = 'AuthorityResource'\nMATCHED_INPUT_URIS = []\nALL_INPUT_URIS = []\nTECHNIQUE_URI = 'https://github.com/swiss-art-research-net/skkg-pipeline/blob/45-create-python-script-to-suggest-alignments/scripts/suggestAlignments.py'\n\ndef query_ttl(input_file, query):\n #parse input file\n g = Graph()\n g.parse(input_file)\n #retrieve needed entities and labels from input file\n res = g.query(NAMESPACES + query)\n return res\n\ndef chunks(data, SIZE=10):\n \"\"\"\n function to chunk a dictionary into multiple parts of a given size\n Args:\n data - dictionary\n SIZE - size of each chunk\n Returns:\n Iterator on the chunked dictionary\n \"\"\"\n it = iter(data)\n for i in range(0, len(data), SIZE):\n yield {k:data[k] for k in islice(it, SIZE)}\n\ndef parse_reconciliation_response(q_response, q_number, q2entities_dict, min_score=0):\n \"\"\"\n function to parse the one element in the response from the reconciliation endpoint and format it into a ttl classification\n Args:\n q_response - one element from the reconciliation query response\n q_number - query number in format \"qn\" where n is the number of the query\n q2entities_dict - a dictionary such that keys are the query numbers and values are dictionaries containing 'uri' which\n correspond to uris of elements of interest and 'label' which contain the labels used for reconciliation\n min_score - minimum score to consider for results\n \"\"\"\n ret = ''\n matched_gnd_nb = 0\n for result in q_response:\n if float(result['score']) >= min_score:\n matched_gnd_nb = matched_gnd_nb + 1\n #get SHA1 UUID bases on base uri first and then use resulting UUID to get SHA1 
UUID based on ID of aligned entity\n base_uri_uuid = uuid.uuid5(uuid.NAMESPACE_URL, q2entities_dict[q_number]['uri'])\n class_id = uuid.uuid5(base_uri_uuid, result['id'])\n ret = ret + ONE_CLASSIFICATION.substitute(class_id=str(class_id),\n base_uri=q2entities_dict[q_number]['uri'],\n aligned_id=result['id'],\n technique_uri = TECHNIQUE_URI,\n match_type='exactMatch' if result['match'] else 'closeMatch',\n score=result['score'],\n aligned_label=result['name'].translate(str.maketrans(ESCAPE_DICT)),\n same_as = 'crmdig:L54_is_same-as,' if result['match'] else '')\n if matched_gnd_nb > 0:\n MATCHED_INPUT_URIS.append(q2entities_dict[q_number]['uri'])#append input URI\n return ret\n\ndef parse_all_responses(reconciliation_whole_response, q2entities_dict, min_score=0):\n \"\"\"\n function to parse the whole reconciliation query response into a ttl string\n Args:\n reconciliation_whole_response - the whole response from the reconciliation endpoint\n q2entities_dict - a dictionary such that keys are the query numbers and values are dictionaries containing 'uri' which\n correspond to uris of elements of interest and 'label' which contain the labels used for reconciliation\n min_score - minimum score to consider for results\n Returns:\n string with ttl formatted classifications\n \"\"\"\n return ''.join([ parse_reconciliation_response(q_response = values['result'], q_number = key, q2entities_dict = q2entities_dict, min_score=min_score) for (key, values) in reconciliation_whole_response.items() ])\n\ndef main(input_file, base_type, reconciliation_type, limit, output_file, api_uri, log_file=None, minimum_score=0, namespaces4ttl=NAMESPACES4TTL_HARDCODED):\n \n #retrieve needed entities and labels from input file\n base_query = \"\"\"\n SELECT * WHERE {{\n ?entity a <{0}> .\n ?entity rdfs:label ?label .\n }}\n \"\"\".format(base_type)\n base_res = query_ttl(input_file, base_query)\n\n # Initialise log\n if log_file is not None:\n logging.basicConfig(filename=log_file, level=logging.INFO)\n else:\n logging.basicConfig(level=logging.ERROR)\n logging.info('Running at: {0}'.format(datetime.datetime.now()))\n logging.info('Input file: {0}'.format(input_file))\n \n if list(base_res) == []:\n print('There are no entitites of type {0} in input file.'.format(base_type))\n return\n #get entities that are already classified\n if os.path.isfile(output_file):\n \n existing_classifications_query_1 = \"\"\"\n SELECT DISTINCT ?base_uri WHERE {\n ?classification a crm:E13_Attribute_Assignment ;\n crm:P140_assigned_attribute_to ?base_uri ;\n crm:P141_assigned ?gnd ;\n crm:P177_assigned_property_of_type ?sameness ;\n rdf:value ?score .\n\n ?gnd a crm:E55_Type ;\n rdfs:label ?label .\n }\n \"\"\"\n existing_classifications_query_2 = \"\"\"\n SELECT DISTINCT ?base_uri WHERE {\n ?classification a crm:E13_Attribute_Assignment ;\n crm:P16_used_specific_object ?base_uri .\n }\n \"\"\"\n existing_classifications_res_1 = query_ttl(output_file, existing_classifications_query_1)\n existing_classifications_res_2 = query_ttl(output_file, existing_classifications_query_2)\n existing_entities_list = []\n for row in existing_classifications_res_1:\n existing_entities_list.append(str(row.base_uri))\n for row in existing_classifications_res_2:\n existing_entities_list.append(str(row.base_uri))\n\n base_res_dict = {}\n i = 1\n for row in base_res:\n if not str(row.entity) in existing_entities_list:\n ALL_INPUT_URIS.append(str(row.entity))\n base_res_dict.update({'q'+str(i): {'uri': str(row.entity), 'label': str(row.label)}})\n i = i + 1\n 
else:\n base_res_dict = {}\n i = 1\n for row in base_res:\n ALL_INPUT_URIS.append(str(row.entity))\n base_res_dict.update({'q'+str(i): {'uri': str(row.entity), 'label': str(row.label)}})\n i = i + 1\n\n if base_res_dict == {}:\n print('All entities are present in output file! To query for non classified input entities, please remove the general classification statement. To query for all input entities, please specify another output file.')\n return\n \n #prepare reconciliation queries \n queries = {key : {\"query\": values['label'].translate(ESCAPE_TRANSLATION_TABLE), \"limit\": limit, \"type\": reconciliation_type } for (key, values) in base_res_dict.items()}\n \n #make requests and put in dictionary\n response_dict = {}\n print('Reconciliation queries in progress...')\n current_date_time = datetime.datetime.now()\n pbar = tqdm(total = len(queries))\n for chunk in chunks(queries):\n response = requests.get(api_uri, params={'queries': json.dumps(chunk) })\n if response.status_code == 200:\n response_dict.update(response.json())\n pbar.update(len(chunk))\n else:\n # Log error\n logging.error('Error returned for queries: {0}'.format(json.dumps(chunk)))\n\n pbar.close()\n \n #format response into ttl\n ttl = parse_all_responses(reconciliation_whole_response = response_dict, q2entities_dict = base_res_dict, min_score=minimum_score)\n \n #save ttl\n if os.path.isfile(output_file):\n with open(output_file, 'a') as f:\n f.write(ttl) \n else:\n with open(output_file, 'w') as f:\n f.write(namespaces4ttl + ttl)\n \n with open(output_file, 'a') as f:\n general_classification_base_uri_uuid = uuid.uuid5(uuid.NAMESPACE_URL, TECHNIQUE_URI)\n general_classification_uuid = uuid.uuid5(general_classification_base_uri_uuid, current_date_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\"))\n if len(MATCHED_INPUT_URIS) > 0:\n matched_uris_statement = 'classification:{0} crm:P140_assigned_attribute_to {1} .'.format(general_classification_uuid, ', '.join(['<' + internal_entity + '>' for internal_entity in MATCHED_INPUT_URIS]))\n else:\n matched_uris_statement = ''\n all_uris = ', '.join(['<' + internal_entity + '>' for internal_entity in ALL_INPUT_URIS])\n concluding_classification = GENERAL_CLASSIFICATION_STATEMENT.substitute(\n all_uris = all_uris,\n class_id = general_classification_uuid,\n current_time = current_date_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n technique_uri = TECHNIQUE_URI,\n minimum_score = minimum_score,\n matched_uris_statement = matched_uris_statement)\n f.write(concluding_classification)\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n \n parser = argparse.ArgumentParser(description = 'Produce alignments to SKKG entities using reconciliation service')\n parser.add_argument('--input_file', required= True, help='TTL input file with SKKG entities')\n parser.add_argument('--base_type', required= True, help='Base type of entities to retrieve')\n parser.add_argument('--limit', required= True, help='Limit of results when querying reconciliation service')\n parser.add_argument('--output_file', required= True, help='TTL output file')\n parser.add_argument('--reconciliation_type', required= False, help='GND type of entities to retrieve')\n parser.add_argument('--api_uri', required= False, help='URL of reconciliation API. Defaults to https://lobid.org/gnd/reconcile/.')\n parser.add_argument('--minimum_score', required= False, help='Minimum score to consider for results. Defaults to 0.')\n parser.add_argument('--log_file', required= False, help='Path to log file. 
Defaults to None.')\n \n args = parser.parse_args()\n \n if args.reconciliation_type is None:\n reconciliation_type = AUTHORITY_RESOURCE\n else:\n reconciliation_type = args.reconciliation_type\n \n if args.api_uri is None:\n api_uri = LOBID_API_URL\n else:\n api_uri = args.api_uri\n\n if args.minimum_score is None:\n minimum_score = MINIMUM_SCORE_NULL\n else:\n minimum_score = float(args.minimum_score)\n \n main(input_file = args.input_file, base_type= args.base_type, reconciliation_type = reconciliation_type, limit = args.limit, output_file = args.output_file, api_uri = api_uri, log_file=args.log_file, minimum_score=minimum_score, namespaces4ttl=NAMESPACES4TTL_HARDCODED)","repo_name":"swiss-art-research-net/skkg-pipeline","sub_path":"scripts/suggestAlignments.py","file_name":"suggestAlignments.py","file_ext":"py","file_size_in_byte":14791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40399502659","text":"# -*- coding: utf-8 -*-\nimport os\nfrom ncc import (\n __CACHE_DIR__,\n __BPE_DIR__, __LIBS_DIR__,\n LOGGER,\n)\nfrom ncc_dataset.codexglue.code_to_code import (\n DATASET_DIR\n)\n\nDATASET_NAME = \"translation\"\nDATASET_DIR = os.path.join(DATASET_DIR, DATASET_NAME)\n\nRAW_DIR = os.path.join(DATASET_DIR, 'raw')\nATTRIBUTES_DIR = os.path.join(DATASET_DIR, 'attributes')\nBPE_DIR = __BPE_DIR__\nLIBS_DIR = __LIBS_DIR__\n\nLANGUAGES = ['java', 'csharp']\nMODES = ['train', 'valid', 'test']\n\n# sbt modality\nMAX_SUB_TOKEN_LEN = 5 # we only consider the first 5 sub-tokens from tokenizer\nMAX_TOKEN_LEN = 256\n\nOP_FILES = os.path.join(os.path.dirname(__file__), 'parser/operators.json')\n\n__all__ = (\n DATASET_NAME,\n RAW_DIR, ATTRIBUTES_DIR,\n BPE_DIR, LIBS_DIR,\n LANGUAGES, MODES,\n LOGGER,\n\n MAX_SUB_TOKEN_LEN,\n)\n","repo_name":"CGCL-codes/naturalcc","sub_path":"ncc_dataset/codexglue/code_to_code/translation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"} +{"seq_id":"9370611002","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n#\r\n# Complete the 'solve' function below.\r\n#\r\n# The function is expected to return an INTEGER.\r\n# The function accepts INTEGER x as parameter.\r\n#\r\n\r\ndef solve(x):\r\n total_cnt_digits=len(str(x))\r\n b=True\r\n nF=0\r\n while b:\r\n for cnt_frs in range(1,total_cnt_digits+1):\r\n cnt_zrs = total_cnt_digits - cnt_frs\r\n strF = cnt_frs*'4' + cnt_zrs*'0'\r\n nF=int(strF)\r\n \r\n if nF%x==0 and nF/x>0:\r\n b=False\r\n break\r\n \r\n if not b:\r\n break\r\n total_cnt_digits+=1\r\n\r\n nF=0\r\n nZ=0\r\n for i in range(len(strF)):\r\n if strF[i]=='4':\r\n nF+=1\r\n else:\r\n nZ+=1\r\n \r\n return 2*nF+nZ\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n t = int(input().strip())\r\n\r\n for t_itr in range(t):\r\n x = int(input().strip())\r\n\r\n result = solve(x)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()\r\n","repo_name":"osadly/HackerRank","sub_path":"Math/Resume Working on/Number Theory_Easy.py","file_name":"Number Theory_Easy.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15589896387","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.pipeline import 
Pipeline, make_pipeline, FeatureUnion\nfrom sklearn.compose import ColumnTransformer, make_column_transformer, make_column_selector\nfrom sklearn.impute import SimpleImputer, KNNImputer\nfrom sklearn.preprocessing import RobustScaler, OneHotEncoder, OrdinalEncoder\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.linear_model import Ridge, Lasso, LinearRegression\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import VotingRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import StackingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nimport numpy as np\nimport pandas as pd\nfrom use_case_tmz.ml_logic.data_preparation import all_data_for_one_site\n\n\ndef preprocess_features(X: pd.DataFrame):\n\n # Category column cleaned\n Cat_final = ['news and media','science and education','community and society','e-commerce and shopping','jobs and career','arts and entertainment','games',\n 'finance','lifestyle','heavy industry and engineering','computers electronics and technology','food and drink','law and government','pets and animals','adult',\n 'sports','gambling','business and consumer services','reference materials','travel and tourism','home and garden','hobbies and leisure','health','vehicles']\n\n def clean_category(cat):\n if cat == \"nan\":\n return np.NAN\n else:\n cat1= cat.replace(\"_\", \" \")\n cat2= cat1.split('>')\n cat3= cat2[0].split('/')\n cat4= cat3[0].replace(\"&\", \"and\").strip()\n if cat4 not in Cat_final:\n return np.NAN\n else:\n return cat4\n\n X['Category'] = X['Category'].apply(lambda x: clean_category(str(x)))\n\n\n # Determine geo if one of top 20 or need to be put in others category\n geo_final = ['FR',\n 'JP',\n 'IT',\n 'BR',\n 'ES',\n 'DE',\n 'US',\n 'RU',\n 'MX',\n 'IN',\n 'AR',\n 'CO',\n 'UA',\n 'GB',\n 'ID',\n 'PE',\n 'GR',\n 'CL',\n 'KZ',\n 'NG']\n def clean_top_geo(geo):\n if geo == \"nan\":\n return 'Other'\n else:\n if geo not in geo_final:\n return 'Other'\n else:\n return geo\n\n X['geo']=X['geo'].apply(lambda x: clean_top_geo(str(x)))\n\n\n\n # def clean_format(format_pub):\n # if format_pub == \"nan\":\n # return 0\n # else:\n # return 1\n\n # for column in ['_1', '_2', '_3', '_4', '_5', '_6', '_11','_15', '_16', '_19', '_20', '_24', '_27', '_28', '_30', '_31', '_34','_38', '_39', '_43', '_44', '_46'] :\n # X[f\"{column}_\"]=X[column].apply(lambda x: clean_format(str(x)))\n\n # Preprocess KPIs from Similarweb\n X = X.drop(columns='Paid_Referrals')\n X['Social'] = X['Social'].fillna(0)\n X['Mail'] = X['Mail'].fillna(0)\n X['Referrals'] = X['Referrals'].fillna(0)\n X['Direct'] = X['Direct'].fillna(0)\n X['Search'] = X['Search'].fillna(0)\n\n # y = X['ca_journalier']\n # X = X.drop(columns=['site_url','site_category_similar','Category','site_id','site_url','_1', '_2', '_3', '_4', '_5', '_6', '_11','_15', '_16', '_19', '_20', '_24', '_27', '_28', '_30', '_31', '_34','_38', '_39', '_43', '_44', '_46'],axis=1)\n\n # # Preprocess numerical columns\n # num_cols = X.select_dtypes(include=[\"int64\", \"float64\"]).columns.to_list()\n\n # for i in ['_1_', '_2_', '_3_', '_4_', '_5_', '_6_', '_11_','_15_', '_16_', '_19_', '_20_', '_24_', '_27_', '_28_', '_30_', '_31_', '_34_','_38_', 
'_39_', '_43_', '_44_', '_46_'] :\n # num_cols.remove(i)\n\n # # Preprocess category columns\n # cat_cols = X.select_dtypes(exclude=[\"int64\", \"float64\"]).columns.to_list()\n\n # # feat_numerical = sorted(X.select_dtypes(include=[\"int64\", \"float64\"]).columns)\n\n # # Numerical pipeline excluding formats\n # num_pipeline = Pipeline(steps=[\n # ('impute', SimpleImputer(strategy='mean')),\n # ('scale',StandardScaler())\n # ])\n\n\n # # feat_nominal = sorted(list(set(X.columns) - set(feat_numerical)))\n\n # # Categorical pipeline\n # cat_pipeline = Pipeline(steps=[\n # ('impute', SimpleImputer(strategy='most_frequent')),\n # ('one-hot',OneHotEncoder(handle_unknown='ignore', sparse=False))\n # ])\n\n # # Joining both pipelines\n # col_trans = ColumnTransformer(transformers=[\n # ('num_pipeline',num_pipeline,num_cols),\n # ('cat_pipeline',cat_pipeline,cat_cols)\n # ],\n # remainder='passthrough',\n # n_jobs=-1)\n\n # X_preproc = pd.DataFrame(col_trans.transform(X),columns=col_trans.get_feature_names_out())\n\n X = X.drop(columns='site_url')\n X = X.rename(columns={'Category': 'New_Site_Categ',\n 'blocklist_value': 'site_blocklist',\n 'geo': 'top_geo_code_country',\n 'nb_formats': 'nb_dis_format_d_mean'})\n\n X = X[['lighthouse_score', 'LCP', 'FID', 'CLS', 'FCP', 'INP', 'TTFB', 'Social',\n 'Mail', 'Referrals', 'Search', 'Direct', 'BounceRate', 'PagePerVisit',\n 'EstimatedMonthlyVisits', 'site_blocklist', 'top_geo_code_country',\n 'nb_dis_format_d_mean', 'New_Site_Categ', '_1_', '_2_', '_3_', '_4_',\n '_5_', '_6_', '_11_', '_15_', '_16_', '_19_', '_20_', '_24_', '_27_',\n '_28_', '_30_', '_31_', '_34_', '_38_', '_39_', '_43_', '_44_', '_46_']]\n\n return X\n\n\n\n\n\n\nif __name__ == '__main__':\n df_test = all_data_for_one_site('https://www.coursfrancaisfacile.com', 1, 'US', _19_=1, _46_=1)\n print(preprocess_features(df_test).columns)\n","repo_name":"Simoneytizer/use_case_tmz","sub_path":"use_case_tmz/ml_logic/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17625171845","text":"import tkinter\nfrom tkinter import filedialog\nfrom tkinter import *\nimport os, shutil, pathlib, fnmatch\nimport time\nimport sqlite3\n\n\n\n\nclass ParentWindow(Frame):\n def __init__ (self, master):\n Frame.__init__(self)\n\n self.master = master\n self.master.resizable(width=False, height=False)\n self.master.geometry('{}x{}'.format(550, 165))\n self.master.title('Check files')\n self.master.config(bg='lightgray')\n\n\n self.varBrowse1 = StringVar()\n self.varBrowse2 = StringVar()\n\n\n self.txtFName = Entry(self.master,text=self.varBrowse1, font=(\"Helvetica\", 12), fg='black', bg='white',width=40)\n self.txtFName.grid(row=0,column=1,columnspan=12, sticky=W+E+N+S,padx=(0,30), pady=(35,0))\n\n self.txtLName = Entry(self.master,text=self.varBrowse2, font=(\"Helvetica\", 12), fg='black', bg='white',width=40)\n self.txtLName.grid(row=1,column=1,columnspan=12, sticky=W+E+N+S, padx=(0,30), pady=(10,0))\n\n\n self.btnBrowse = Button(self.master, text=\"Browse...\", width=15, height=1, command=self.picSrc_dir)\n self.btnBrowse.grid(row=0,column=0,padx=(30,30), pady=(35,0), sticky=NW)\n\n self.btnBrowse = Button(self.master, text=\"Browse...\", width=15, height=1, command=self.dest_dir)\n self.btnBrowse.grid(row=1,column=0,padx=(30,30), pady=(10,0), sticky=NW)\n\n self.btnF_Check = Button(self.master, text=\"Check for files...\", width=15, height=2, 
command=self.moveFiles)\n self.btnF_Check.grid(row=2,column=0,padx=(30,30), pady=(10,0), sticky=NW)\n\n self.btnClose = Button(self.master, text=\"Close Program\", width=15, height=2, command=self.cancel)\n self.btnClose.grid(row=2,column=11,padx=(0,6), pady=(10,0), sticky=SE)\n\n\n\n\n conn = sqlite3.connect('drill_final.db')\n\n with conn:\n cur = conn.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS tbl_fList\\\n (ID INTEGER PRIMARY KEY AUTOINCREMENT, \\\n col_fileN TEXT \\\n )\")\n \n\n\n\n \n \n def moveFiles(self):\n source=self.txtFName.get()\n dest=self.txtLName.get()\n sourcefiles=os.listdir(source)\n for file in sourcefiles:\n if fnmatch.fnmatch(file, '*.txt'):\n abPath = os.path.join(source,file)\n fTime = os.path.getmtime(abPath)\n local_time = time.ctime(fTime)\n print (file,local_time)\n shutil.move(abPath,dest)\n with conn:\n cur.execute(\"INSERT INTO tbl_fList(col_fileN) VALUES (?)\",(file,))\n conn.commit()\n \n \n \n \n\n\n def picSrc_dir(self):\n # Allow user to select a directory and store it in global var\n # called folder_path\n filename = filedialog.askdirectory()\n self.txtFName.delete(0,END)\n self.txtFName.insert(0,filename)\n \n\n def dest_dir(self):\n # Allow user to select a directory and store it in global var\n # called folder_path\n filename = filedialog.askdirectory()\n self.txtLName.delete(0,END)\n self.txtLName.insert(0,filename)\n\n\n \n \n \n\n def cancel(self):\n self.master.destroy()\n\n\n\nif __name__ == \"__main__\":\n root = Tk()\n App = ParentWindow(root)\n root.mainloop()\n \n\n \n\n#_____________________________________________________________-\n\n\"\"\"import sqlite3\n\nconn = sqlite3.connect('drill_1.db')\n\nwith conn:\n cur = conn.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS tbl_fList\\\n (ID INTEGER PRIMARY KEY AUTOINCREMENT, \\\n col_fileN TEXT \\\n )\")\n conn.commit()\n\n\n\nfileList = ('information.docx','Hello.txt','myImage.png', \\\n 'myMovie.mpg','World.txt','data.pdf','myPhoto.jpg')\nwith conn:\n for file in fileList:\n if file.endswith('.txt'):\n cur = conn.cursor()\n cur.execute(\"INSERT INTO tbl_fList(col_fileN) VALUES (?)\", \\\n (file,))\n print (file)\"\"\"\n\n\n\n\n\n\n\n\n","repo_name":"jlefler24/Python-Coding-Projects","sub_path":"Final_Drill.py","file_name":"Final_Drill.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13316362080","text":"def parse_bingo():\n with open(\"input.txt\", \"r\") as f:\n # get numbers\n numbers = [int(x) for x in f.readline().strip().split(',')]\n # read off extra line before boards\n f.readline()\n # read the boards, each one is a 2D array\n boards = []\n next_board = []\n for line in f:\n if line != '\\n':\n next_board.append([[int(x), False] for x in line.split()])\n else:\n boards.append(next_board)\n next_board = []\n return numbers, boards\n\n\ndef check_board(board, called_number):\n # mark number\n for line in board:\n for number in line:\n if number[0] == called_number:\n number[1] = True\n # check if board is a winner\n for i in range(5):\n vertical_winner = True\n for line in board:\n vertical_winner = vertical_winner & line[i][1]\n # check horizontal winner\n if all([number[1] for number in line]):\n return True\n if vertical_winner:\n return True\n return False\n\n\ndef get_board_score(board, called_number):\n total = 0\n for line in board:\n total += sum([number[0] if number[1] is False else 0 for number in line])\n return total * called_number\n\n\ndef 
find_winner():\n numbers, boards = parse_bingo()\n for number in numbers:\n for board in boards:\n if check_board(board, number):\n return get_board_score(board, number)\n return False\n\n\ndef find_last_winner():\n numbers, boards = parse_bingo()\n for number in numbers:\n winning_boards = []\n for board in boards:\n if check_board(board, number):\n winning_boards.append(board)\n last_score = get_board_score(board, number)\n for winning_board in winning_boards: boards.remove(winning_board)\n return last_score\n\n\nprint(find_winner())\nprint(find_last_winner())\n\n\n","repo_name":"n-parisi/advent-of-code-2021","sub_path":"pkg/04/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18323772913","text":"# -*- coding:utf-8 -*-\n# Filename: 24spread.py\n# Author: Lizi\n# Time: 2019/12/20 13:58\n\n# 计算港股24价差\n\nimport pysnooper\nfrom decimal import Decimal\n\nscala = 1000\n# LastPrice = 0\n# 将每个区间和最小价位差以列表的方式存储\nListPriceList = [(0.01, 0.25, 0.001), (0.25, 0.5, 0.005), (0.5, 10, 0.01),\n (10, 20, 0.02), (20, 100, 0.05), (100, 200, 0.1), (200, 500, 0.2),\n (500, 1000, 0.5), (1000, 2000, 1), (2000, 5000, 2), (5000, 9995, 5)]\n\n\n# 获取最新价所在区间\n# active = True\n# while active:\n# @pysnooper.snoop('log/file.log')\n@pysnooper.snoop('log/file.log', prefix='Spread:')\ndef Spread(lastPrice):\n lastPrice *= scala\n if lastPrice / scala < 0.01 or lastPrice / scala >= 9995:\n print(\"获取不到 \" + str(lastPrice / scala) + \" 所在区间\")\n for item in ListPriceList: # 遍历列表中的项\n if item[0] * scala <= lastPrice < item[1] * scala: # 判断最新价是否大于等于左边界,且小于右边界。通过item[0]、item[1]、item[2]的索引获取左右边界的值,以及最小变动差\n if lastPrice % (item[2] * scala) == 0.0: # 判断最新价是否能被最小价位整除\n position = ListPriceList.index(item) # 获取最新价所在的当前区间\n prePosition = 0 if position - 1 < 0 else position - 1 # 获取最新价前一个区间\n nextPosition = 10 if position + 1 > 10 else position + 1 # 获取最新价后一个区间\n print(\"最小变动价格为: \" + str(item[2]))\n return ListPriceList[prePosition], item, ListPriceList[nextPosition] # 返回三个区间的值,方便后面的函数调用\n else:\n print(\"最小变动价格为: \" + str(item[2]))\n print(\"余数为:\" + str(lastPrice % (item[2] * scala)))\n print(str(lastPrice / scala) + \" 不是最小变动价格 \" + str(item[2]) + \" 的整数倍 \")\n return None # last price is error,不是item区间的最小变动价的整数倍\n\n\n# 计算下限值\n# @pysnooper.snoop('log/file.log')\n@pysnooper.snoop('log/file.log', prefix='MinLimit:')\ndef MinLimit(lastprice, MinPriceValue1, MinPriceValue, leftvalue, rightvalue):\n CurrentPrice = 0\n if lastprice == 0.01 or lastprice - 24 * MinPriceValue <= 0.01:\n CurrentPrice = 0.01\n\n elif lastprice == leftvalue:\n CurrentPrice = Decimal(lastprice - 24 * MinPriceValue1).quantize(Decimal(\"0.0000\"))\n\n elif lastprice > leftvalue > lastprice - 24 * MinPriceValue:\n CurrentPrice = Decimal(leftvalue - (24 - (lastprice - leftvalue) / MinPriceValue) * MinPriceValue1).quantize(\n Decimal(\"0.0000\"))\n\n elif lastprice > leftvalue and leftvalue <= lastprice - 24 * MinPriceValue <= rightvalue:\n CurrentPrice = Decimal(lastprice - 24 * MinPriceValue).quantize(Decimal(\"0.0000\"))\n print(\"24价差下限值为: \" + str(CurrentPrice))\n\n\n# 计算24上限值\n# @pysnooper.snoop('log/file.log')\n@pysnooper.snoop('log/file.log', prefix='MaxLimit:')\ndef MaxLimit(lastprice, MinPriceValue, MinPriceValue2, rightvalue):\n CurrentPrice = 0\n if Decimal(lastprice + 24 * MinPriceValue).quantize(Decimal(\"0.0000\")) <= Decimal(rightvalue).quantize(\n Decimal(\"0.0000\")):\n CurrentPrice = Decimal(lastprice + 24 * 
MinPriceValue).quantize(Decimal(\"0.0000\"))\n\n elif Decimal(lastprice + 24 * MinPriceValue).quantize(Decimal(\"0.0000\")) > Decimal(rightvalue).quantize(\n Decimal(\"0.0000\")):\n CurrentPrice = Decimal(rightvalue + (24 - (rightvalue - lastprice) / MinPriceValue) * MinPriceValue2).quantize(\n Decimal(\"0.0000\"))\n print(\"24价差上限值为: \" + str(CurrentPrice))\n\n\nif __name__ == \"__main__\":\n active = True\n while active:\n LastPrice = float(input(\"请输入最新价LastPrice: \"))\n zone = Spread(LastPrice) # 调用Spread函数\n print(zone) # 将Spread函数的返回值打印出来\n if zone is not None:\n Minprice = MinLimit(LastPrice, zone[0][2], zone[1][2], zone[1][0],zone[1][1]) # 将Spread函数返回的值传给MinLimit函数的参数,通过zone[X][Y]获取对应区间,以及对应区间的左右边界值和最小价位差\n MaxPrice = MaxLimit(LastPrice, zone[1][2], zone[2][2], zone[1][1])\n if LastPrice == 0:\n break\n\n # if LastPrice == 0:\n # break\n","repo_name":"rage-vampire/Python","sub_path":"lizi_project/24spread/24spread.py","file_name":"24spread.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8140659619","text":"from setuptools import setup, find_packages\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'requirements.txt')) as f:\n REQUIRED = f.read().splitlines()\n\nwith open(os.path.join(here, 'test-requirements.txt')) as f:\n TEST_REQUIRED = f.read().splitlines()\n\nsetup(\n name='capitains_nautilus',\n version=\"2.0.0\",\n description='Resolver for Capitains Guidelines Repository',\n url='http://github.com/Capitains/nautilus',\n author='Thibault Clerice',\n author_email='leponteineptique@gmail.com',\n license='Mozilla Public License Version 2.0',\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n install_requires=REQUIRED,\n tests_require=TEST_REQUIRED,\n entry_points={\n 'console_scripts': ['capitains-nautilus=capitains_nautilus.cmd:cmd'],\n },\n include_package_data=True,\n test_suite=\"nose.collector\",\n zip_safe=False\n)\n","repo_name":"Capitains/Nautilus","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"2353942378","text":"\"\"\"Plots for annotating power spectrum fittings and models.\"\"\"\n\nimport numpy as np\n\nfrom fooof.core.utils import nearest_ind\nfrom fooof.core.errors import NoModelError\nfrom fooof.core.funcs import gaussian_function\nfrom fooof.core.modutils import safe_import, check_dependency\nfrom fooof.sim.gen import gen_aperiodic\nfrom fooof.plts.utils import check_ax\nfrom fooof.plts.spectra import plot_spectrum\nfrom fooof.plts.settings import PLT_FIGSIZES, PLT_COLORS\nfrom fooof.plts.style import check_n_style, style_spectrum_plot\nfrom fooof.analysis.periodic import get_band_peak_fm\nfrom fooof.utils.params import compute_knee_frequency, compute_fwhm\n\nplt = safe_import('.pyplot', 'matplotlib')\nmpatches = safe_import('.patches', 'matplotlib')\n\n###################################################################################################\n###################################################################################################\n\n@check_dependency(plt, 'matplotlib')\ndef plot_annotated_peak_search(fm, plot_style=style_spectrum_plot):\n \"\"\"Plot a series of plots illustrating the peak search from a flattened spectrum.\n\n Parameters\n ----------\n fm : FOOOF\n FOOOF object, with model fit, data and 
settings available.\n plot_style : callable, optional, default: style_spectrum_plot\n A function to call to apply styling & aesthetics to the plots.\n \"\"\"\n\n # Recalculate the initial aperiodic fit and flattened spectrum that\n # is the same as the one that is used in the peak fitting procedure\n flatspec = fm.power_spectrum - \\\n gen_aperiodic(fm.freqs, fm._robust_ap_fit(fm.freqs, fm.power_spectrum))\n\n # Calculate ylims of the plot that are scaled to the range of the data\n ylims = [min(flatspec) - 0.1 * np.abs(min(flatspec)), max(flatspec) + 0.1 * max(flatspec)]\n\n # Loop through the iterative search for each peak\n for ind in range(fm.n_peaks_ + 1):\n\n # This forces the creation of a new plotting axes per iteration\n ax = check_ax(None, PLT_FIGSIZES['spectral'])\n\n plot_spectrum(fm.freqs, flatspec, ax=ax, plot_style=None,\n label='Flattened Spectrum', color=PLT_COLORS['data'], linewidth=2.5)\n plot_spectrum(fm.freqs, [fm.peak_threshold * np.std(flatspec)]*len(fm.freqs),\n ax=ax, plot_style=None, label='Relative Threshold',\n color='orange', linewidth=2.5, linestyle='dashed')\n plot_spectrum(fm.freqs, [fm.min_peak_height]*len(fm.freqs),\n ax=ax, plot_style=None, label='Absolute Threshold',\n color='red', linewidth=2.5, linestyle='dashed')\n\n maxi = np.argmax(flatspec)\n ax.plot(fm.freqs[maxi], flatspec[maxi], '.',\n color=PLT_COLORS['periodic'], alpha=0.75, markersize=30)\n\n ax.set_ylim(ylims)\n ax.set_title('Iteration #' + str(ind+1), fontsize=16)\n\n if ind < fm.n_peaks_:\n\n gauss = gaussian_function(fm.freqs, *fm.gaussian_params_[ind, :])\n plot_spectrum(fm.freqs, gauss, ax=ax, plot_style=None,\n label='Gaussian Fit', color=PLT_COLORS['periodic'],\n linestyle=':', linewidth=3.0)\n\n flatspec = flatspec - gauss\n\n check_n_style(plot_style, ax, False, True)\n\n\n@check_dependency(plt, 'matplotlib')\ndef plot_annotated_model(fm, plt_log=False, annotate_peaks=True, annotate_aperiodic=True,\n ax=None, plot_style=style_spectrum_plot):\n \"\"\"Plot a an annotated power spectrum and model, from a FOOOF object.\n\n Parameters\n ----------\n fm : FOOOF\n FOOOF object, with model fit, data and settings available.\n plt_log : boolean, optional, default: False\n Whether to plot the frequency values in log10 spacing.\n ax : matplotlib.Axes, optional\n Figure axes upon which to plot.\n plot_style : callable, optional, default: style_spectrum_plot\n A function to call to apply styling & aesthetics to the plots.\n\n Raises\n ------\n NoModelError\n If there are no model results available to plot.\n \"\"\"\n\n # Check that model is available\n if not fm.has_model:\n raise NoModelError(\"No model is available to plot, can not proceed.\")\n\n # Settings\n fontsize = 15\n lw1 = 4.0\n lw2 = 3.0\n ms1 = 12\n\n # Create the baseline figure\n ax = check_ax(ax, PLT_FIGSIZES['spectral'])\n fm.plot(plot_peaks='dot-shade-width', plt_log=plt_log, ax=ax, plot_style=None,\n data_kwargs={'lw' : lw1, 'alpha' : 0.6},\n aperiodic_kwargs={'lw' : lw1, 'zorder' : 10},\n model_kwargs={'lw' : lw1, 'alpha' : 0.5},\n peak_kwargs={'dot' : {'color' : PLT_COLORS['periodic'], 'ms' : ms1, 'lw' : lw2},\n 'shade' : {'color' : PLT_COLORS['periodic']},\n 'width' : {'color' : PLT_COLORS['periodic'], 'alpha' : 0.75, 'lw' : lw2}})\n\n # Get freqs for plotting, and convert to log if needed\n freqs = fm.freqs if not plt_log else np.log10(fm.freqs)\n\n ## Buffers: for spacing things out on the plot (scaled by plot values)\n x_buff1 = max(freqs) * 0.1\n x_buff2 = max(freqs) * 0.25\n y_buff1 = 0.15 * np.ptp(ax.get_ylim())\n 
shrink = 0.1\n\n # There is a bug in annotations for some perpendicular lines, so add small offset\n # See: https://github.com/matplotlib/matplotlib/issues/12820. Fixed in 3.2.1.\n bug_buff = 0.000001\n\n if annotate_peaks:\n\n # Extract largest peak, to annotate, grabbing gaussian params\n gauss = get_band_peak_fm(fm, fm.freq_range, attribute='gaussian_params')\n\n peak_ctr, peak_hgt, peak_wid = gauss\n bw_freqs = [peak_ctr - 0.5 * compute_fwhm(peak_wid),\n peak_ctr + 0.5 * compute_fwhm(peak_wid)]\n\n if plt_log:\n peak_ctr = np.log10(peak_ctr)\n bw_freqs = np.log10(bw_freqs)\n\n peak_top = fm.power_spectrum[nearest_ind(freqs, peak_ctr)]\n\n # Annotate Peak CF\n ax.annotate('Center Frequency',\n xy=(peak_ctr, peak_top),\n xytext=(peak_ctr, peak_top+np.abs(0.6*peak_hgt)),\n verticalalignment='center',\n horizontalalignment='center',\n arrowprops=dict(facecolor=PLT_COLORS['periodic'], shrink=shrink),\n color=PLT_COLORS['periodic'], fontsize=fontsize)\n\n # Annotate Peak PW\n ax.annotate('Power',\n xy=(peak_ctr, peak_top-0.3*peak_hgt),\n xytext=(peak_ctr+x_buff1, peak_top-0.3*peak_hgt),\n verticalalignment='center',\n arrowprops=dict(facecolor=PLT_COLORS['periodic'], shrink=shrink),\n color=PLT_COLORS['periodic'], fontsize=fontsize)\n\n # Annotate Peak BW\n bw_buff = (peak_ctr - bw_freqs[0])/2\n ax.annotate('Bandwidth',\n xy=(peak_ctr-bw_buff+bug_buff, peak_top-(0.5*peak_hgt)),\n xytext=(peak_ctr-bw_buff, peak_top-(1.5*peak_hgt)),\n verticalalignment='center',\n horizontalalignment='right',\n arrowprops=dict(facecolor=PLT_COLORS['periodic'], shrink=shrink),\n color=PLT_COLORS['periodic'], fontsize=fontsize, zorder=20)\n\n if annotate_aperiodic:\n\n # Annotate Aperiodic Offset\n # Add a line to indicate offset, without adjusting plot limits below it\n ax.set_autoscaley_on(False)\n ax.plot([freqs[0], freqs[0]], [ax.get_ylim()[0], fm.fooofed_spectrum_[0]],\n color=PLT_COLORS['aperiodic'], linewidth=lw2, alpha=0.5)\n ax.annotate('Offset',\n xy=(freqs[0]+bug_buff, fm.power_spectrum[0]-y_buff1),\n xytext=(freqs[0]-x_buff1, fm.power_spectrum[0]-y_buff1),\n verticalalignment='center',\n horizontalalignment='center',\n arrowprops=dict(facecolor=PLT_COLORS['aperiodic'], shrink=shrink),\n color=PLT_COLORS['aperiodic'], fontsize=fontsize)\n\n # Annotate Aperiodic Knee\n if fm.aperiodic_mode == 'knee':\n\n # Find the knee frequency point to annotate\n knee_freq = compute_knee_frequency(fm.get_params('aperiodic', 'knee'),\n fm.get_params('aperiodic', 'exponent'))\n knee_freq = np.log10(knee_freq) if plt_log else knee_freq\n knee_pow = fm.power_spectrum[nearest_ind(freqs, knee_freq)]\n\n # Add a dot to the plot indicating the knee frequency\n ax.plot(knee_freq, knee_pow, 'o', color=PLT_COLORS['aperiodic'], ms=ms1*1.5, alpha=0.7)\n\n ax.annotate('Knee',\n xy=(knee_freq, knee_pow),\n xytext=(knee_freq-x_buff2, knee_pow-y_buff1),\n verticalalignment='center',\n arrowprops=dict(facecolor=PLT_COLORS['aperiodic'], shrink=shrink),\n color=PLT_COLORS['aperiodic'], fontsize=fontsize)\n\n # Annotate Aperiodic Exponent\n mid_ind = int(len(freqs)/2)\n ax.annotate('Exponent',\n xy=(freqs[mid_ind], fm.power_spectrum[mid_ind]),\n xytext=(freqs[mid_ind]-x_buff2, fm.power_spectrum[mid_ind]-y_buff1),\n verticalalignment='center',\n arrowprops=dict(facecolor=PLT_COLORS['aperiodic'], shrink=shrink),\n color=PLT_COLORS['aperiodic'], fontsize=fontsize)\n\n # Apply style to plot & tune grid styling\n check_n_style(plot_style, ax, plt_log, True)\n ax.grid(True, alpha=0.5)\n\n # Add labels to plot in the legend\n da_patch = 
mpatches.Patch(color=PLT_COLORS['data'], label='Original Data')\n ap_patch = mpatches.Patch(color=PLT_COLORS['aperiodic'], label='Aperiodic Parameters')\n pe_patch = mpatches.Patch(color=PLT_COLORS['periodic'], label='Peak Parameters')\n mo_patch = mpatches.Patch(color=PLT_COLORS['model'], label='Full Model')\n\n handles = [da_patch, ap_patch if annotate_aperiodic else None,\n pe_patch if annotate_peaks else None, mo_patch]\n handles = [el for el in handles if el is not None]\n\n ax.legend(handles=handles, handlelength=1, fontsize='x-large')\n","repo_name":"JohnGriffiths/eeg_notebooks_doc","sub_path":"fooof/plts/annotate.py","file_name":"annotate.py","file_ext":"py","file_size_in_byte":10271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35066435127","text":"# Author: Omkar Dixit\n# Email: omedxt@gmail.com\n\n'''\nGiven N activities with their start and finish times. Select the maximum number of activities that can be performed by a single person, assuming that a person can only work on a single activity at a time.\n'''\n\nfrom operator import itemgetter\n\nclass Solution:\n def getMaxNumberOfActivities(self, start, end):\n i = 0\n print(start[i], end[i])\n count = 1\n for j in range(len(start)):\n if start[j] >= end[i]:\n print(start[j], end[j])\n i = j\n count += 1\n return count\n\nif __name__=='__main__':\n timings = sorted([[1,2], [2,6], [3,4], [5,7], [8,9], [5,9]], key=itemgetter(1))\n sol = Solution()\n start = []\n end = []\n for item in timings:\n start.append(item[0])\n end.append(item[1])\n print(sol.getMaxNumberOfActivities(start, end), 'Activities')","repo_name":"dixitomkar1809/Coding-Python","sub_path":"GFG/Greedy/activitySelection.py","file_name":"activitySelection.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8547503220","text":"import os\nimport os.path\nimport random\nimport time\nfrom typing import Any, Callable, Dict, Optional, Tuple\n\nimport h5py\nimport numpy as np\nimport requests\nimport torch\nfrom torchvision.datasets.vision import VisionDataset\n\nfrom imagegym.utils.io import print_info, print_warning\n\n\nclass Shapes3D(VisionDataset):\n _H5_FILENAME = '3dshapes.h5'\n _DATA_FILENAME = 'data.npy'\n _DATA_FILENAME_SPLIT = lambda x: f'data_{x}.npy'\n _TARGETS_FILENAME = 'targets.npy'\n _TARGETS_FILENAME_SPLIT = lambda x: f'targets_{x}.npy'\n _FACTORS_IN_ORDER = ['floor_hue', 'wall_hue', 'object_hue', 'scale', 'shape',\n 'orientation']\n _NUM_VALUES_PER_FACTOR = {'floor_hue': 10, 'wall_hue': 10, 'object_hue': 10,\n 'scale': 8, 'shape': 4, 'orientation': 15}\n\n URL = 'https://storage.googleapis.com/3d-shapes/3dshapes.h5'\n\n def __init__(\n self,\n root: str,\n split: str = 'train',\n seed: int = 0,\n percentage: int = 100,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n load_from_numpy: bool = False,\n missing_perc:float =0.0,\n ) -> None:\n super(Shapes3D, self).__init__(root, transform=transform,\n target_transform=target_transform)\n assert split in ['train', 'val', 'test']\n self.split = split # training set or test set\n self.seed = seed\n self.percentage = percentage\n self.load_from_numpy = load_from_numpy\n if download:\n self._download()\n\n if not self._check_exists():\n raise RuntimeError('Dataset not found.')\n\n self.data, self.targets = self._load_data()\n self.missing_percent = missing_perc\n self.missing_data = 
True if missing_perc > 0 else False\n\n def _download(self):\n\n filename_h5 = os.path.join(self.root, Shapes3D._H5_FILENAME)\n print_info(f'Downloading the data from {Shapes3D.URL} and saving it in {filename_h5}')\n\n if os.path.exists(filename_h5):\n print_warning('Aborting download, the file is already in the folder!')\n print_info('Delete the file if you want to download it again')\n return\n try:\n response = requests.get(Shapes3D.URL)\n with open(filename_h5, \"wb\") as f:\n f.write(response.content)\n except:\n print_warning('Downloading data failed!')\n print_info(f'Download the data manually from {Shapes3D.URL} and put it in {self.root}')\n return\n\n def _filename_data(self, split=None):\n if split is None: split = self.split\n if self.percentage == 100:\n basename = Shapes3D._DATA_FILENAME_SPLIT(f\"{split}_{self.seed}\")\n else:\n basename = Shapes3D._DATA_FILENAME_SPLIT(f\"{split}_{self.seed}_{self.percentage}\")\n filename_data = os.path.join(self.root, basename)\n return filename_data\n\n def _filename_targets(self, split=None):\n if split is None: split = self.split\n if self.percentage == 100:\n basename = Shapes3D._TARGETS_FILENAME_SPLIT(f\"{split}_{self.seed}\")\n else:\n basename = Shapes3D._TARGETS_FILENAME_SPLIT(f\"{split}_{self.seed}_{self.percentage}\")\n filename_targets = os.path.join(self.root, basename)\n return filename_targets\n\n def _load_data(self):\n filename_data = self._filename_data()\n filename_targets = self._filename_targets()\n\n files_exist = all([os.path.exists(f) for f in [filename_data, filename_targets]])\n if files_exist and self.load_from_numpy:\n print_info(f'Loading data: {filename_data}')\n data = np.load(filename_data)\n print_info(f'Loading targets: {filename_targets}')\n targets = np.load(filename_targets)\n return data, targets\n else:\n filename_h5 = os.path.join(self.root, Shapes3D._H5_FILENAME)\n dataset = h5py.File(filename_h5, 'r')\n data_h5 = dataset['images'] # array shape [480000,64,64,3], uint8 in range(256)\n targets_h5 = dataset['labels'] # array shape [480000,6], float64\n\n if self.load_from_numpy:\n time_1 = time.time()\n print_info(f'Creating data: {filename_data}')\n data = data_h5[:]\n time_2 = time.time()\n print_info(f'Done creating data: {time_2 - time_1}')\n print_info(f'Creating targets: {filename_data}')\n targets = targets_h5[:]\n time_3 = time.time()\n print_info(f'Done creating targets: {time_3 - time_2}')\n print_info(f'Total time: {time_3 - time_1}')\n del dataset\n\n idx_ = list(range(data.shape[0]))\n random.seed(self.seed)\n random.shuffle(idx_)\n idx_ = torch.tensor(idx_)\n num_images = data.shape[0]\n split_sections = [int(num_images * p) for p in [0.8, 0.1, 0.1]]\n split_sections[-1] = num_images - sum(split_sections[:-1])\n idx_list_all = torch.split(idx_, split_size_or_sections=split_sections)\n\n splits = ['train', 'val', 'test']\n idx_list = []\n for i, split in enumerate(splits):\n if self.percentage < 100:\n num_ = int(len(idx_list_all[i]) * self.percentage / 100)\n idx_list_i = idx_list_all[i][:num_]\n else:\n idx_list_i = idx_list_all[i]\n idx_list.append(idx_list_i.tolist())\n\n for i, split in enumerate(splits):\n filename_data = self._filename_data(split=split)\n filename_targets = self._filename_targets(split=split)\n idx_list_i = idx_list[i]\n\n np.save(filename_data, data[idx_list_i])\n print_info(f'Saving data: {filename_data}')\n np.save(filename_targets, targets[idx_list_i])\n print_info(f'Saving targets: {filename_targets}')\n\n idx = idx_list[splits.index(self.split)]\n\n return 
data[idx], targets[idx]\n else:\n return data_h5, targets_h5\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.targets[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n \n return img, target\n\n def __len__(self) -> int:\n return len(self.data)\n\n @property\n def class_to_idx(self) -> Dict[str, int]:\n return {_class: i for i, _class in enumerate(Shapes3D._FACTORS_IN_ORDE)}\n\n def _check_exists(self) -> bool:\n return os.path.exists(os.path.join(self.root, Shapes3D._H5_FILENAME))\n\n def extra_repr(self) -> str:\n return \"Split: {}\".format(\"Train\" if self.train is True else \"Test\")\n\n\ndef compare_performance_h5_npy(root):\n for load_from_numpy in [True, False]:\n time_0 = time.time()\n dataset = Shapes3D(root=root, split='train', load_from_numpy=load_from_numpy)\n time_1 = time.time()\n print_info(f\"[load_from_numpy={load_from_numpy}] Shapes3D {time_1 - time_0}\")\n\n for i in range(100):\n batch = dataset.__getitem__(i)\n\n time_2 = time.time()\n print_info(f\"[load_from_numpy={load_from_numpy}] __getitem__ {time_2 - time_1}\")\n print('')\n","repo_name":"bkoyuncu/vamoh","sub_path":"imagegym/datasets/shapes3d.py","file_name":"shapes3d.py","file_ext":"py","file_size_in_byte":7999,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"30654466668","text":"import sys\nimport wave\nimport struct as st\nimport numpy as np\nimport scipy as sc\n\n# Example Input:\n# Amplitude = 1000\n# Frame rate = 48000\n# Number of frames = 50000\n# Number of channels = 1\n# Width = 2\n# Play time = 3\n# Audio name = cool\n\n# Get audio info\naudio_name = str(input(\"Enter target audio name: \")) + \".wav\"\naudio_length = int(input(\"Enter target audio length: \"))\n\n# Get sine wave info\namplitude = int(input(\"Enter amplitude: \"))\nfrequency = float(input(\"Enter frequency: \"))\nframe_rate = float(input(\"Enter frame rate: \"))\nwav_frames = int(input(\"Enter number of frames: \"))\nchannels = int(input(\"Enter number of channels: \"))\nwidth = int(input(\"Enter width: \"))\n\n# Generate sine wave data\nsine_wave = [np.sin(2 * np.pi * frequency * i / frame_rate)\n for i in range(wav_frames * audio_length)]\n\n# Create .wav audio file for the target sine wave\nwav_file = wave.open(audio_name, \"w\")\nwav_file.setparams((channels, width, int(\n frame_rate), wav_frames, \"NONE\", \"NONE\"))\n\n# Write the sine wave data into the audio file in hex\nfor i in sine_wave:\n wav_file.writeframes(st.pack(\"h\", int(i*amplitude)))\n\n# Print the result\nprint(\"------------[Gnereated]------------\")\nprint(\"Audio Name:\", audio_name)\nprint(\"Audio Length: \", audio_length)\nprint(\"-----------------------------------\")\n","repo_name":"BaderAlshaya/Steganography","sub_path":"Sine_Wave_Generator.py","file_name":"Sine_Wave_Generator.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27498924443","text":"import sys\nimport pygame\nimport random\n\nclass GameManager(object): #*args, **kwargs\n def __init__(self, **kwargs):\n # Initliizlies PyGame Window & Gameover Font\n 
pygame.display.init()\n pygame.font.init()\n\n # Defineing Color RGB Values for GUI elements\n self.white = (255, 255, 255)\n self.black = (0, 0,0 )\n self.red = (255, 0, 0)\n self.green = (0, 255, 0)\n self.blue = (0, 0, 255)\n self.brick = (144, 69, 53)\n self.sky_blue= (52, 152, 219)\n\n self.display_width = kwargs.pop('display_width')\n self.display_height = kwargs.pop('display_height')\n\n self.game_display = pygame.display.set_mode((self.display_width,self.display_height))\n self.game_display.fill(self.sky_blue)\n if 'caption' in kwargs: \n pygame.display.set_caption(caption)\n else:\n pygame.display.set_caption('Unicorn Squad')\n\n self.clock = pygame.time.Clock()\n if 'FPS' in kwargs: \n self.FPS = kwargs.pop('FPS')\n else:\n self.FPS = 10\n \n self.font = pygame.font.SysFont(None, 55)\n\n def game_did_quit(self):\n gameExit = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameExit = True\n return gameExit\n\n def add_icon(self,icon,location):\n self.game_display.blit(icon,location)\n\n def get_center(self):\n return (self.display_width/2, self.display_height/2)\n\n def did_collide(self,obj1, obj2):\n if obj1.x > obj2.x and obj1.x < obj2.x + obj2.image_width or obj1.x + obj1.image_width > obj2.x and obj1.x + obj1.image_width < obj2.x + obj2.image_width:\n if obj1.y > obj2.y and obj1.y < obj2.y + obj2.image_height:\n return True\n elif obj1.y + obj1.image_height > obj2.y and obj1.y + obj1.image_height < obj2.y + obj2.image_height:\n return True\n return False\n\n#display_width,display_height, caption='Unicorn Squad', FPS=10\n\nclass SnakeGame(GameManager):\n def __init__(self,**kwargs):\n super(SnakeGame,self).__init__(**kwargs)\n\n def is_game_over(self,snake):\n if snake.x >= self.display_width or snake.x < 0 or snake.y >= self.display_height or snake.y < 0:\n return True\n return False\n\n def fill_blue(self):\n self.game_display.fill(self.sky_blue)\n\n # Initliazes text object\n def text_objects(self, text, color):\n textSurface = self.font.render(text, True, color)\n return textSurface, textSurface.get_rect()\n\n # Defines display message with color variable\n def message_to_screen(self, msg, color, y_displace=0):\n textSurf, textRect = self.text_objects(msg,color)\n textRect.center = (self.display_width / 2), (self.display_height / 2) + y_displace\n self.game_display.blit(textSurf, textRect)\n\n def score_to_screen(self, msg, color):\n textSurf, textRect = self.text_objects(msg, color)\n textRect = (self.display_width - 50), (self.display_height - 50)\n self.game_display.blit(textSurf, textRect)\n\nclass Image(object):\n def __init__(self,**kwargs):\n self.start_pos = kwargs.pop('start_pos') #(x,y)\n self.icon = pygame.image.load(kwargs.pop('icon'))\n self.icon = pygame.transform.scale(self.icon, (kwargs.pop('image_width'), kwargs.pop('image_height')))\n self.x = self.start_pos[0]\n self.y = self.start_pos[1]\n if 'image_width' in kwargs: \n self.image_width = kwargs.pop('image_width')\n else:\n self.image_width = 50 \n if 'image_height' in kwargs: \n self.image_height = kwargs.pop('image_height')\n else:\n self.image_height = 50\n\n\n#start_pos=(0,0),icon='food.png', image_width=50, image_height=50\n\nclass Carrot(Image):\n def __init__(self,**kwargs):\n super(Carrot,self).__init__(**kwargs)\n \n def set_random_location(self,display_width,display_height):\n self.x = round(random.randrange(0, display_width-self.image_width))\n self.y = round(random.randrange(0, display_height-self.image_height))\n self.start_pos = (self.x,self.y)\n\nclass 
Rainbow(Image):\n def __init__(self, **kwargs):\n super(Rainbow,self).__init__(**kwargs)\n\nclass Snake(Image):\n def __init__(self,**kwargs):\n super(Snake,self).__init__(**kwargs)\n self.tail_list = []\n self.tail_size = 1\n self.direction = \"right\"\n self.delta_y = 0\n self.delta_x = 40\n\n def add_to_tail(self):\n gameOver = False\n snake_head = []\n snake_head.append(self.x)\n snake_head.append(self.y)\n\n self.tail_list.append(snake_head)\n\n if len(self.tail_list) > self.tail_size:\n del self.tail_list[0]\n\n for eachSegment in self.tail_list[:-1]:\n if eachSegment == snake_head:\n gameOver = True\n \n return gameOver\n\n def handle_tail(self):\n if self.direction == \"right\":\n head = self.icon\n\n if self.direction == \"left\":\n head = pygame.transform.flip(self.icon, 1, 0)\n\n if self.direction == \"up\":\n head = pygame.transform.rotate(self.icon, 90)\n\n if self.direction == \"down\":\n head = pygame.transform.rotate(self.icon, 270)\n \n return head\n \n\n def process_move(self):\n gameExit = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameExit = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n self.direction = \"left\"\n self.delta_x = -self.image_width\n self.delta_y = 0\n elif event.key == pygame.K_RIGHT:\n self.direction = \"right\"\n self.delta_x = self.image_width\n self.delta_y = 0\n elif event.key == pygame.K_UP:\n self.direction = \"up\"\n self.delta_y = -self.image_height\n self.delta_x = 0\n elif event.key == pygame.K_DOWN:\n self.direction = \"down\"\n self.delta_y = self.image_height\n self.delta_x = 0\n return gameExit\n\n def move(self):\n self.x += self.delta_x\n self.y += self.delta_y\n # self.delta_x = 0\n # self.delta_y = 0\n\nclass Player:\n def __init__(self):\n self.player_score = 0\n\n def set_score(self,score):\n self.player_score = score\n \n def increase_score(self):\n self.player_score += 1\n \n def decrease_score(self):\n self.player_score -= 1\n \n def reset_score(self):\n self.player_score = 0 \n\ndef run_game(display_width,display_height):\n gameExit = False # HItting the close button (closing Window)\n gameOver = False # Internal game over view (Hitting the walls or your own tail)\n game = SnakeGame(display_width=display_width,display_height=display_height)\n player = Player()\n\n snake = Snake(start_pos=game.get_center(),image_height=50,image_width=50, icon=\"unicorn.png\")\n\n carrot = Carrot(icon='food.png',image_height=50,image_width=50, start_pos=(0,0))\n carrot.set_random_location(display_width,display_height)\n\n while not gameExit:\n while gameOver:\n game.game_display.fill(game.white)\n game.message_to_screen(\"Game Over!\", game.red, -50)\n game.message_to_screen(\"Press C to play again, or Q to quit\", game.black, 50)\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameOver = False\n gameExit = True\n\n # Maps keboard strokes to close game and redo game\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n gameOver = False\n gameExit = True\n exit()\n if event.key == pygame.K_c:\n run_game(display_width,display_height)\n \n gameExit = snake.process_move()\n\n \n snake.move()\n\n gameOver = game.is_game_over(snake)\n \n\n #fill blue\n game.fill_blue()\n\n game.add_icon(carrot.icon,(carrot.x,carrot.y))\n \n gameOver2 = snake.add_to_tail()\n if not gameOver:\n gameOver = gameOver2\n head = snake.handle_tail() #drawing snake\n\n # defines movement of snake tail\n game.game_display.blit(head, (snake.tail_list[-1][0], 
snake.tail_list[-1][1]))\n\n # defines adding of rainbow image to user sprite\n for XnY in snake.tail_list[:-1]:\n rainbow = Rainbow(icon=\"rainbow.png\", image_height=50, image_width=50, start_pos=(0,0)).icon\n game.game_display.blit(rainbow,(XnY[0], XnY[1]))\n\n game.score_to_screen(str(player.player_score), game.white)\n\n pygame.display.update()\n\n if (game.did_collide(snake,carrot)):\n carrot.set_random_location(display_width,display_height)\n snake.tail_size += 1\n player.increase_score()\n \n game.clock.tick(game.FPS)\n\n\n pygame.display.quit(str())\n pygame.quit()\n\nrun_game(800,600)","repo_name":"acenario/PyGame","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32220840153","text":"import httplib2\nimport pytest\nfrom six.moves import urllib\nimport socket\nimport ssl\nimport tests\n\n\ndef test_get_via_https():\n # Test that we can handle HTTPS\n http = httplib2.Http(ca_certs=tests.CA_CERTS)\n with tests.server_const_http(tls=True) as uri:\n response, _ = http.request(uri, \"GET\")\n assert response.status == 200\n\n\ndef test_get_301_via_https():\n http = httplib2.Http(ca_certs=tests.CA_CERTS)\n glocation = [\"\"] # nonlocal kind of trick, maybe redundant\n\n def handler(request):\n if request.uri == \"/final\":\n return tests.http_response_bytes(body=b\"final\")\n return tests.http_response_bytes(status=\"301 goto\", headers={\"location\": glocation[0]})\n\n with tests.server_request(handler, request_count=2, tls=True) as uri:\n glocation[0] = urllib.parse.urljoin(uri, \"/final\")\n response, content = http.request(uri, \"GET\")\n assert response.status == 200\n assert content == b\"final\"\n assert response.previous.status == 301\n assert response.previous[\"location\"] == glocation[0]\n\n\ndef test_get_301_via_https_spec_violation_on_location():\n # Test that we follow redirects through HTTPS\n # even if they violate the spec by including\n # a relative Location: header instead of an absolute one.\n http = httplib2.Http(ca_certs=tests.CA_CERTS)\n\n def handler(request):\n if request.uri == \"/final\":\n return tests.http_response_bytes(body=b\"final\")\n return tests.http_response_bytes(status=\"301 goto\", headers={\"location\": \"/final\"})\n\n with tests.server_request(handler, request_count=2, tls=True) as uri:\n response, content = http.request(uri, \"GET\")\n assert response.status == 200\n assert content == b\"final\"\n assert response.previous.status == 301\n\n\ndef test_invalid_ca_certs_path():\n http = httplib2.Http(ca_certs=\"/nosuchfile\")\n with tests.server_const_http(request_count=0, tls=True) as uri:\n with tests.assert_raises(IOError):\n http.request(uri, \"GET\")\n\n\ndef test_not_trusted_ca():\n # Test that we get a SSLHandshakeError if we try to access\n # server using a CA cert file that doesn't contain server's CA.\n http = httplib2.Http(ca_certs=tests.CA_UNUSED_CERTS)\n with tests.server_const_http(tls=True) as uri:\n try:\n http.request(uri, \"GET\")\n assert False, \"expected CERTIFICATE_VERIFY_FAILED\"\n except ssl.SSLError as e:\n assert e.reason == \"CERTIFICATE_VERIFY_FAILED\"\n except httplib2.SSLHandshakeError: # Python2\n pass\n\n\n@pytest.mark.skipif(\n not hasattr(tests.ssl_context(), \"minimum_version\"),\n reason=\"ssl doesn't support TLS min/max\",\n)\ndef test_set_min_tls_version():\n # Test setting minimum TLS version\n # We expect failure on Python < 3.7 or OpenSSL < 1.1\n expect_success = 
hasattr(ssl.SSLContext(), 'minimum_version')\n try:\n http = httplib2.Http(tls_minimum_version=\"TLSv1_2\")\n http.request(tests.DUMMY_HTTPS_URL)\n except RuntimeError:\n assert not expect_success\n except socket.error:\n assert expect_success\n\n\n@pytest.mark.skipif(\n not hasattr(tests.ssl_context(), \"maximum_version\"),\n reason=\"ssl doesn't support TLS min/max\",\n)\ndef test_set_max_tls_version():\n # Test setting maximum TLS version\n # We expect RuntimeError on Python < 3.7 or OpenSSL < 1.1\n # We expect socket error otherwise\n expect_success = hasattr(ssl.SSLContext(), 'maximum_version')\n try:\n http = httplib2.Http(tls_maximum_version=\"TLSv1_2\")\n http.request(tests.DUMMY_HTTPS_URL)\n except RuntimeError:\n assert not expect_success\n except socket.error:\n assert expect_success\n\n\n@pytest.mark.skipif(\n not hasattr(tests.ssl_context(), \"minimum_version\"),\n reason=\"ssl doesn't support TLS min/max\",\n)\ndef test_min_tls_version():\n def setup_tls(context, server, skip_errors):\n skip_errors.append(\"WRONG_VERSION_NUMBER\")\n context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)\n context.load_cert_chain(tests.SERVER_CHAIN)\n return context.wrap_socket(server, server_side=True)\n\n http = httplib2.Http(ca_certs=tests.CA_CERTS, tls_minimum_version=\"TLSv1_2\")\n with tests.server_const_http(tls=setup_tls) as uri:\n try:\n http.request(uri)\n assert False, \"expected SSLError\"\n except ssl.SSLError as e:\n assert e.reason in (\"UNSUPPORTED_PROTOCOL\", \"VERSION_TOO_LOW\")\n\n\n@pytest.mark.skipif(\n not hasattr(tests.ssl_context(), \"maximum_version\"),\n reason=\"ssl doesn't support TLS min/max\",\n)\ndef test_max_tls_version():\n http = httplib2.Http(ca_certs=tests.CA_CERTS, tls_maximum_version=\"TLSv1\")\n with tests.server_const_http(tls=True) as uri:\n http.request(uri)\n _, tls_ver, _ = http.connections.popitem()[1].sock.cipher()\n assert tls_ver == \"TLSv1.0\"\n\n\ndef test_client_cert_verified():\n cert_log = []\n\n def setup_tls(context, server, skip_errors):\n context.load_verify_locations(cafile=tests.CA_CERTS)\n context.verify_mode = ssl.CERT_REQUIRED\n return context.wrap_socket(server, server_side=True)\n\n def handler(request):\n cert_log.append(request.client_sock.getpeercert())\n return tests.http_response_bytes()\n\n http = httplib2.Http(ca_certs=tests.CA_CERTS)\n with tests.server_request(handler, tls=setup_tls) as uri:\n uri_parsed = urllib.parse.urlparse(uri)\n http.add_certificate(tests.CLIENT_PEM, tests.CLIENT_PEM, uri_parsed.netloc)\n http.request(uri)\n\n assert len(cert_log) == 1\n # TODO extract serial from tests.CLIENT_PEM\n assert cert_log[0][\"serialNumber\"] == \"E2AA6A96D1BF1AEC\"\n\n\ndef test_client_cert_password_verified():\n cert_log = []\n\n def setup_tls(context, server, skip_errors):\n context.load_verify_locations(cafile=tests.CA_CERTS)\n context.verify_mode = ssl.CERT_REQUIRED\n return context.wrap_socket(server, server_side=True)\n\n def handler(request):\n cert_log.append(request.client_sock.getpeercert())\n return tests.http_response_bytes()\n\n http = httplib2.Http(ca_certs=tests.CA_CERTS)\n with tests.server_request(handler, tls=setup_tls) as uri:\n uri_parsed = urllib.parse.urlparse(uri)\n http.add_certificate(tests.CLIENT_ENCRYPTED_PEM, tests.CLIENT_ENCRYPTED_PEM,\n uri_parsed.netloc, password=\"12345\")\n http.request(uri)\n\n assert len(cert_log) == 1\n # TODO extract serial from tests.CLIENT_PEM\n assert cert_log[0][\"serialNumber\"] == \"E2AA6A96D1BF1AED\"\n\n\n@pytest.mark.skipif(\n not hasattr(tests.ssl_context(), 
\"set_servername_callback\"),\n reason=\"SSLContext.set_servername_callback is not available\",\n)\ndef test_sni_set_servername_callback():\n sni_log = []\n\n def setup_tls(context, server, skip_errors):\n context.set_servername_callback(lambda _sock, hostname, _context: sni_log.append(hostname))\n return context.wrap_socket(server, server_side=True)\n\n http = httplib2.Http(ca_certs=tests.CA_CERTS)\n with tests.server_const_http(tls=setup_tls) as uri:\n uri_parsed = urllib.parse.urlparse(uri)\n http.request(uri)\n assert sni_log == [uri_parsed.hostname]\n","repo_name":"iridium-browser/iridium-browser","sub_path":"third_party/depot_tools/external_bin/gsutil/gsutil_4.68/gsutil/third_party/httplib2/tests/test_https.py","file_name":"test_https.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"53"} +{"seq_id":"70894239847","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 9 14:40:40 2019\n\n@author: davide\n\"\"\"\n################### COMPLETE RECURRENT DYNAMICS #################################\n\nfrom functions import * \nimport Jlearner\nimport numpy as np\nimport os\n\n#PARAMETERS\nsimulation_name=\"s21\"\nn=20 #linear number of cells\nN=n*n\nksigma=0.1\nkcut=5*ksigma\na=1\nsparsity=0.1\nNuncorr=N\neta=0.00001\ntimesteps=1000\nt1=10 #number of step of input persistence, must divide timesteps\ns=1\nsaveJs=True\nnormalization=\"hardL2\" #choose betw: hardL2 -- softL2\n\nprint(\"Initializing...\")\n\nif not os.path.exists(simulation_name):\n os.makedirs(simulation_name)\n\nsave_parametersRC(simulation_name,N,ksigma,kcut,a,sparsity,Nuncorr,eta,timesteps,t1,s,normalization)\n\nNetwork=Jlearner.Jlearner(n)\nNetwork.build_gridA()\nNetwork.build_gridB(Nuncorr)\nnp.save(simulation_name+\"/gridA\",Network.gridA)\nnp.save(simulation_name+\"/gridB\",Network.gridB)\nNetwork.buildJA(ksigma,kcut)\nNetwork.buildJB(ksigma,kcut)\nnp.save(simulation_name+\"/JA\",Network.JA)\nnp.save(simulation_name+\"/JB\",Network.JB)\n\nif timesteps % t1 !=0:\n print(\"Invalid t1 value: please enter a number that divides the vairable timesteps\")\nprint(\"Initialization completed\")\n\nprint(\"Starting learning dynamics with recurrent connections\")\nmJA,mJB,mJAB=Network.RecurrentDynamics(s,eta,t1,timesteps,a,sparsity,ksigma,kcut,simulation_name,saveJs)\n \nnp.save(simulation_name+\"/mJA\",mJA)\nnp.save(simulation_name+\"/mJB\",mJB)\nnp.save(simulation_name+\"/mJAB\",mJAB)\n\nprint(\"End of simulation.\")\n","repo_name":"davidespalla/CAN_code","sub_path":"AttractorLearning/simulations/LearningDynamics/RecurrentLearning.py","file_name":"RecurrentLearning.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41824005715","text":"# @author: Nandan M\n# @time: 31/01/2021 08:02 PM \n\nimport os\nimport numpy as np\nimport pandas as pd\nimport librosa\n# to write audio file output \n# from scipy.io.wavfile import write\n\n# deterministic random generator\nnp.random.seed(2)\n\n\ndef load_audio_data(data_path, samples, seconds, sampling_rate, background):\n\t'''\n\tfunction to load data from each folder and as classnames\n\t'''\n\tlimit=samples # samples limit per class\n\tdata = []\n\tfiles = []\n\tlabels = []\n\tcategory = []\n\tpath = data_path # dataset path \n\tdirs = os.listdir(path)\n\tdirs.sort()\n\t# read all directory except background noise(.background) directory if false\n\tif background:\n\t\t# 
load n second length background noise audio and split to desired length \n\t\tbg_data = load_bg(path, sampling_rate)\n\t\tbg_chunks = split_bg(bg_data, seconds, sampling_rate)\n\t\tbg_chunks = bg_chunks[:limit]\n\t\tlabels.extend([0]*len(bg_chunks))\n\t\tdata.extend(bg_chunks.tolist())\n\t\tcategory.extend([\"background\"]*len(bg_chunks))\n\t\tfiles.extend(['N/A']*len(bg_chunks))\n\tt=0\n\tfor folder in dirs[1:]:\n\t\tc=0\n\t\tfor filename in os.listdir(path+folder):\n\t\t\tl = np.zeros(len(dirs))\n\t\t\tind = dirs.index(folder) \n\t\t\tif filename.endswith(\"wav\"):\n\t\t\t\tl[ind] = 1\n\t\t\t\tlabels.append(ind)\n\t\t\t\tcategory.append(folder)\n\t\t\t\twave, _ = librosa.load(path + folder + \"/\" + filename,\\\n\t\t\t\t\t\tmono=True,\\\n\t\t\t\t\t\tsr=sampling_rate)\n\t\t\t\tdata.append(wave)\n\t\t\t\tfiles.append(folder + \"/\" + filename)\n\t\t\t\tc+=1; t+=1\n\t\t\tif c==limit:\n\t\t\t\tbreak\n\t\tprint(folder,\":\", c)\n\tprint('Total original :', t)\n\treturn data, files, labels, category\n\ndef audio_length(max_length, data):\n\t'''\n\tlimit audio length to desired length\n\t'''\n\tif len(data)>max_length:\n\t\tdata = data[:max_length]\n\telse:\n\t\tdata = np.pad(data, (0, max(0, max_length - len(data))), \"constant\")\n\treturn data\n\n\ndef split_bg(bg, seconds, sampling_rate):\n\t'''\n\tsplit n length bg audio files into desired chunks \n\t'''\n\tmax_length = seconds * sampling_rate\n\tbg_split = []\n\tfor x in range(len(bg)):\n\t\tfor i in range(0, len(bg[x]), max_length):\n\t\t\tchunk = bg[x][i:i + max_length]\n\t\t\t# print(len(chunk))\n\t\t\tbg_split.append(chunk)\n\n\t\t\t# create folders manually and save audio chunks \n\t\t\t# write(\"./export/bg/audio\"+str(x)+str(i)+\".wav\", sr, chunk)\n\n\treturn np.array(bg_split)\n\n\ndef load_bg(path, sampling_rate):\n\t'''\n\tload background for augmentation\n\t'''\n\tbg = []\n\t# hardcoded background directory name\n\tfolder = '.background'\n\tfor filename in os.listdir(path+folder):\n\t\tif filename.endswith(\"wav\"):\n\t\t\twave, _ = librosa.load(path + folder + \"/\" + filename,\\\n\t\t\t\t\tmono=True,\\\n\t\t\t\t\tsr=sampling_rate)\n\t\t\tbg.append(wave)\n\tprint('\\nBackground :', len(bg))\n\treturn bg\n\ndef get_random_bg(bg, max_length):\n\t'''\n\tselect random background of desired length\n\t'''\n\tindex = np.random.randint(0,len(bg))\n\tbg_audio = bg[index]\n\ti = np.random.randint(0,bg_audio.shape[0]-max_length)\n\tbg_chunk = bg_audio[i:i + max_length]\n\treturn bg_chunk\n\n# Augmentation functions\ndef change_pitch(data, sampling_rate, pitch_factor):\n return librosa.effects.pitch_shift(data, sampling_rate, pitch_factor)\n\ndef change_speed(data, speed_factor):\n return librosa.effects.time_stretch(data, speed_factor)\n\ndef shift_audio(data, sampling_rate, shift_max, shift_direction):\n\tshift = np.random.randint(0,sampling_rate * shift_max)\n\tif shift_direction == 1: # right\n\t\tshift = -shift\n\n\taugmented_data = np.roll(data, shift)\n\t# Set to silence for heading/ tailing\n\tif shift > 0:\n\t\taugmented_data[:shift] = 0\n\telse:\n\t\taugmented_data[shift:] = 0\n\treturn augmented_data\n\n\ndef augment_audio(df_row, bg, sampling_rate, max_length):\n\t''' \n\taugment voice audio and overlap with background\n\t'''\n\tbg_audio = get_random_bg(bg, max_length)\n\taudio = np.array(df_row.audio)\n\t# audio.resize(bg_audio.shape)\n\n\t# shift time left or right\n\tshift_time = np.random.randint(1, 50)*0.01 # shift range 0.01 - 0.50 secs\n\tshift_direction = np.random.randint(0, 2) # 0: left, 1: right\n\taudio = 
shift_audio(audio, sampling_rate, shift_time, shift_direction)\n\t# using shift_direction for more random augmentation activation\n\tif shift_direction==1:\n\t\tpitch_factor = np.random.randint(-30, 30)*0.1 \n\t\taudio = change_pitch(audio, sampling_rate, pitch_factor)\n\telse:\n\t\tspeed_factor = np.random.randint(7, 13)*0.1 # If rate > 1, then the signal is sped up. If rate < 1, then the signal is slowed down.\n\t\taudio = change_speed(audio, speed_factor)\n\n\t# audio intensity for background and voice\n\tbg_vol = np.random.randint(10, 40)*0.01 # bg volume range 0.10 - 0.40\n\tvoice_vol = np.random.randint(60, 100)*0.01 # voice volume range 0.60 - 1.00\n\taudio = audio_length(max_length, audio)\n\taug_audio = np.array(np.add(bg_vol*bg_audio, voice_vol*audio))\n\n\treturn aug_audio\n\ndef audio_synthesis(path, sampling_rate, sample_limit, seconds, max_length, random_factor, background):\n\t'''\n\tLoad raw dataset and background\n\t'''\n\taudio_data, files, label, category = load_audio_data(path, sample_limit, seconds, sampling_rate, background)\n\tdata_dict = {'file':files,'audio':audio_data,'label':label,'category':category}\n\tdataset = pd.DataFrame(data_dict)\n\tclassname = dataset.category.unique()\n\tbg = load_bg(path, sampling_rate)\n\n\tn_list = []\n\tr = 0\n\n\tfor i in range(dataset.shape[0]):\n\t\trow = dataset.iloc[i]\n\t\t# random number of samples augmentation\n\t\trand_f = np.random.randint(2, random_factor) # higher the max limit more chances of augmented audio\n\t\tlim_audio = audio_length(max_length, row.audio)\n\t\tn_list.append([row.file,lim_audio,row.label,row.category])\n\t\t# write(\"./export/\"+str(row.category)+\"/audio\"+str(i)+\".wav\", sampling_rate, lim_audio)\n\t\tfor x in range(rand_f):\n\t\t\taug_audio = augment_audio(row, bg, sampling_rate, max_length)\n\t\t\tlim_audio = audio_length(max_length, aug_audio)\n\t\t\tn_list.append([row.file,lim_audio,row.label,row.category])\n\t\t\tr+=1\n\t\t\t# write(\"./export/\"+str(row.category)+\"/audio_\"+str(i)+str(x)+\".wav\", sampling_rate, lim_audio)\n\n\tprint(f\"Generated {r} augmented audio data\")\n\tprint(f\"Total samples: {len(n_list)}\")\n\tnew_data = pd.DataFrame(n_list, columns=list(dataset.columns))\n\treturn new_data\n\t\t\n\n\ndef audio2mfcc(audio, mfcc_max_length, MFCC_NUM, SAMPLING_RATE):\n\t'''\n\tconvert audio data to MFCC spectrogram\n\t'''\n\tmfcc = librosa.feature.mfcc(audio, n_mfcc=MFCC_NUM, sr=SAMPLING_RATE, hop_length=1024, htk=True)\n\tif (mfcc_max_length > mfcc.shape[1]):\n\t\tpad_width = mfcc_max_length - mfcc.shape[1]\n\t\tmfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')\n\telse:\n\t\tmfcc = mfcc[:, :mfcc_max_length]\n\t\n\treturn mfcc\n\ndef get_dataset(path, sampling_rate, mfcc_num, mfcc_max_length, seconds, sample_limit, random_factor, background=True):\n\t'''\n\tget X, Y and dataframe of dataset\n\t'''\n\tmax_length = sampling_rate * seconds\n\tds = audio_synthesis(path, sampling_rate, sample_limit, seconds, max_length, random_factor, background)\n\n\tx = [audio2mfcc(audio, mfcc_max_length, mfcc_num, sampling_rate) for audio in np.array(ds.audio)]\n\tx = np.array(x)\n\ty = np.array(ds.label)\n\treturn x, y, ds\n\n\nif __name__== \"main\":\n\tpath = '../input/wav_dataset/'\n\tsr = 16000\n\tsample_limit = None\n\tseconds = 2\n\tmfcc_num = 20\n\tmfcc_max_length = 35\n\n\tX, Y, df = get_dataset(path, sr, mfcc_num, mfcc_max_length, seconds, sample_limit)\n\n\n\t# import matplotlib.pyplot as plt\n\t# plt.imshow(X[0])\n\t# plt.show()\n\t# 
plt.savefig('mfcc.png')\n","repo_name":"Nannigalaxy/audio-preprocessing-tool","sub_path":"src/audio_preprocess.py","file_name":"audio_preprocess.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8577434915","text":"# @File :mib.py\n# @Time :https://github.com/mfederici/Multi-View-Information-Bottleneck\nimport torch\nimport torch.nn as nn\n\nfrom torch.distributions import Normal, Independent\nfrom torch.nn.functional import softplus\n\n\nclass ExponentialScheduler:\n def __init__(self, start_value, end_value, n_iterations,\n start_iteration=0, base=10):\n self.base = base\n self.start_value = start_value\n self.end_value = end_value\n self.n_iterations = n_iterations\n self.start_iteration = start_iteration\n self.m = (end_value - start_value) / n_iterations\n \n def __call__(self, iteration):\n if iteration > self.start_iteration + self.n_iterations:\n linear_value = self.end_value\n elif iteration <= self.start_iteration:\n linear_value = self.start_value\n else:\n linear_value = (iteration - self.start_iteration\n ) * self.m + self.start_value\n return self.base ** linear_value\n\n\n# Encoder architecture\nclass Encoder(nn.Module):\n def __init__(self, z_dim):\n super(Encoder, self).__init__()\n \n self.z_dim = z_dim\n \n # Vanilla MLP\n self.net = nn.Sequential(\n nn.Linear(28 * 28, 1024),\n nn.ReLU(True),\n nn.Linear(1024, 1024),\n nn.ReLU(True),\n nn.Linear(1024, z_dim * 2),\n )\n \n def forward(self, x):\n x = x.view(x.size(0), -1) # Flatten the input\n params = self.net(x)\n \n mu, sigma = params[:, :self.z_dim], params[:, self.z_dim:]\n sigma = softplus(sigma) + 1e-7 # Make sigma always positive\n # Return a factorized Normal distribution\n return Independent(Normal(loc=mu, scale=sigma), 1)\n\n\nclass Decoder(nn.Module):\n def __init__(self, z_dim, scale=0.39894):\n super(Decoder, self).__init__()\n \n self.z_dim = z_dim\n self.scale = scale\n \n # Vanilla MLP\n self.net = nn.Sequential(\n nn.Linear(z_dim, 1024),\n nn.ReLU(True),\n nn.Linear(1024, 1024),\n nn.ReLU(True),\n nn.Linear(1024, 28 * 28)\n )\n \n def forward(self, z):\n x = self.net(z)\n return Independent(Normal(loc=x, scale=self.scale), 1)\n\n\n# Auxiliary network for mutual information estimation\nclass MIEstimator(nn.Module):\n def __init__(self, size1, size2, low_dim=1024):\n super(MIEstimator, self).__init__()\n # Vanilla MLP\n self.net = nn.Sequential(\n nn.Linear(size1 + size2, low_dim),\n nn.ReLU(True),\n nn.Linear(low_dim, low_dim),\n nn.ReLU(True),\n nn.Linear(low_dim, 1),\n )\n \n # Gradient for JSD mutual information estimation and EB-based estimation\n def forward(self, x1, x2):\n pos = self.net(torch.cat([x1, x2], 1)) # Positive Samples\n neg = self.net(torch.cat([torch.roll(x1, 1, 0), x2], 1))\n return -softplus(-pos).mean() - softplus(\n neg).mean(), pos.mean() - neg.exp().mean() + 1\n\n\n######################\n# MV InfoMax #\n######################\nclass MVInfoMax(nn.Module):\n def __init__(self, z_dim, **params):\n super(MVInfoMax, self).__init__()\n self.z_dim = z_dim\n \n # Initialization of the mutual information estimation network\n self.mi_estimator = MIEstimator(self.z_dim, self.z_dim)\n \n # Intialization of the encoder(s)\n # In this example encoder_v1 and encoder_v2 completely\n # share their parameters\n self.encoder_v1 = Encoder(z_dim)\n self.encoder_v2 = self.encoder_v1\n \n def forward(self, data):\n # Read the two views v1 and v2 and ignore the label y\n v1, v2, _ = data\n \n # 
Encode a batch of data\n p_z1_given_v1 = self.encoder_v1(v1)\n p_z2_given_v2 = self.encoder_v2(v2)\n \n # Sample from the posteriors with reparametrization\n z1 = p_z1_given_v1.rsample()\n z2 = p_z2_given_v2.rsample()\n \n # Mutual information estimation\n mi_gradient, mi_estimation = self.mi_estimator(z1, z2)\n mi_gradient = mi_gradient.mean()\n mi_estimation = mi_estimation.mean()\n \n # Computing the loss function\n loss = - mi_gradient\n \n return loss, mi_estimation.item()\n\n\n###############\n# MIB #\n###############\nclass MIB(nn.Module):\n def __init__(self, z_dim, beta_start_value=1e-3, beta_end_value=1,\n beta_n_iterations=100000, beta_start_iteration=50000):\n # The neural networks architectures and initialization procedure\n # is analogous to Multi-View InfoMax\n super(MIB, self).__init__()\n \n # Initialization of the encoder(s)\n # encoder_v1 and encoder_v2 completely share their parameters\n self.encoder_v1 = Encoder(z_dim)\n self.encoder_v2 = self.encoder_v1\n \n # Initialization of the mutual information estimation network\n self.mi_estimator = MIEstimator(self.z_dim, self.z_dim)\n \n # Definition of the scheduler to update the value of\n # the regularization coefficient beta over time\n self.beta_scheduler = ExponentialScheduler(\n start_value=beta_start_value,\n end_value=beta_end_value,\n n_iterations=beta_n_iterations,\n start_iteration=beta_start_iteration\n )\n \n def forward(self, data, iterations):\n # Read the two views v1 and v2 and ignore the label y\n v1, v2, _ = data\n \n # Encode a batch of data\n p_z1_given_v1 = self.encoder_v1(v1)\n p_z2_given_v2 = self.encoder_v2(v2)\n \n # Sample from the posteriors with reparametrization\n z1 = p_z1_given_v1.rsample()\n z2 = p_z2_given_v2.rsample()\n \n # Mutual information estimation\n mi_gradient, mi_estimation = self.mi_estimator(z1, z2)\n mi_gradient = mi_gradient.mean()\n mi_estimation = mi_estimation.mean()\n \n # Symmetrized Kullback-Leibler divergence\n kl_1_2 = p_z1_given_v1.log_prob(z1) - p_z2_given_v2.log_prob(z1)\n kl_2_1 = p_z2_given_v2.log_prob(z2) - p_z1_given_v1.log_prob(z2)\n skl = (kl_1_2 + kl_2_1).mean() / 2.\n \n # Update the value of beta according to the policy\n beta = self.beta_scheduler(iterations)\n \n # Computing the loss function\n loss = - mi_gradient + beta * skl\n \n return loss, mi_estimation.item(), skl.item()\n","repo_name":"jingjing12110/CIB-VQA","sub_path":"module/ib_lib/mib.py","file_name":"mib.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71720990249","text":"from lib.useful_things import *\nfrom lib.surface_creation import surface_object_from_file, surface_to_file\nimport lib.option_formulas as opt\nfrom lib.option import get_years_before_expiration\nimport lib.plotter as pl\n\n\nclass HistoricalVolatility:\n def __init__(self, asset_file_name, rolling_size):\n self.file_name = asset_file_name\n self.spot_list = []\n self.historical_volatility_list = []\n self.rolling_size = rolling_size\n self.first_day = '2019-02-05 05-AM'\n\n def count_realised_volatility(self):\n self.spot_list = pd.read_csv(self.file_name, index_col='date')[['close']][::-1][self.first_day:]\n\n if self.rolling_size < len(self.spot_list):\n print('error')\n quit()\n\n asset = pd.read_csv(self.asset_file_name, index_col='date')[['close']][::-1][\n '2019-02-05 05-AM':last_day]\n\n self.historical_volatility_list = (np.log(asset / asset.shift(1)).rolling(24 * rolling_size).std()\n * np.sqrt(24 * 
rolling_size)).close[24 * use_size:]\n self.asset_spot = asset.close[-1]","repo_name":"karinabashirova/complect","sub_path":"lib/historical_volatility.py","file_name":"historical_volatility.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12464258891","text":"def run():\n my_list = [1, \"Hello\", True, 4.5]\n my_dict = {\"nombre\": \"Nerea\", \"apellido\": \"Nerever\"}\n\n super_list = [\n {\"nombre\": \"Nerea\", \"apellido\": \"Nerever\"},\n {\"nombre\": \"Bugs\", \"apellido\": \"Bunny\"},\n {\"nombre\": \"Peter\", \"apellido\": \"Salas\"},\n {\"nombre\": \"Coño\", \"apellido\": \"Sucio\"},\n {\"nombre\": \"Fleetwood\", \"apellido\": \"Mac\"},\n ]\n\n super_dict = {\n \"natural_nums\": [1,2,3,4,5],\n \"integer_nums\": [-1,-2,0,1,2],\n \"floating_nums\": [1.1,4.5,6.43]\n }\n\n for key, value in super_dict.items():\n print(key, \" - \", value)\n\n for item in super_list:\n print(item['nombre'] , ' - ', item['apellido'])\n\n\nif __name__ == '__main__':\n run()","repo_name":"nereacal/python","sub_path":"data-science-basics/lists_and_dicts.py","file_name":"lists_and_dicts.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23897856367","text":"class Organization:\r\n def __init__(self, name, salary):\r\n print('Organization : ', name)\r\n print('The Basic salary : ', salary)\r\n\r\n\r\nclass Benifit(Organization):\r\n def __init__(self,name, salary):\r\n super().__init__(name, salary)\r\n HRA = float(salary*0.10)\r\n print('The HRA : ', HRA)\r\n\r\nclass Benifit1(Benifit):\r\n def __init__(self, HRA, insentive):\r\n super().__init__(HRA)\r\n insentive = 2500\r\n print('The insentive : ', insentive)\r\n\r\nclass Benifit2(Benifit1):\r\n def __init__(self, bonus, insentive):\r\n super().__init__(insentive)\r\n bonus = 5000\r\n print('The Bonus : ', bonus)\r\n\r\nclass employee(Benifit2):\r\n def __init__(self, bonus, salary, insentive, total_salary):\r\n super().__init__(bonus)\r\n \r\n self.total_salary = total_salary\r\n total_salary = salary + bonus + insentive\r\n \r\now = input('Enter the Organization name : ') \r\nsal = input('Enter the Salary : ') \r\no1 = Organization(ow, sal)\r\n\r\no2 = employee(100, sal, 32, 100)\r\n# o2 = Employee()\r\n# o1 = employee(100, 30, 1, 11)\r\n\r\n# name = str(input(\"Enter name of employee : \"))\r\n# basic = float(input(\"Enter Basic Salary : \"))\r\n","repo_name":"Mahesh2357/Python_Tutorials_23","sub_path":"tut20.py","file_name":"tut20.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41872635428","text":"import time\r\nfrom dateutil.relativedelta import relativedelta\r\nfrom datetime import datetime, timedelta\r\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DF\r\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DT\r\nfrom odoo import models, fields, tools, api, _\r\nfrom odoo.exceptions import UserError, ValidationError\r\nfrom odoo.osv import expression\r\n\r\nclass HrAttendancePayroll(models.Model):\r\n _inherit = 'hr.attendance.payroll'\r\n\r\n operation_type_id = fields.Many2one('hr.operation.type', string='Hr Type')\r\n\r\n @api.multi\r\n def get_attendance_data(self):\r\n if self.line_ids:\r\n for unlink in self.line_ids:\r\n unlink.unlink()\r\n PayrollLine = self.env['hr.attendance.payroll.line']\r\n HrAttendance = 
self.env['hr.attendance']\r\n day_off = self.env['hr.holidays.public.line'].search([('date', '>=', self.date_start),('date', '<=', self.date_stop)])\r\n if day_off:\r\n day_off_dict = dict(map(lambda x: (x.date,'weekend' if x.is_weekend else 'national'), day_off))\r\n else:\r\n day_off_dict = {}\r\n\r\n overtime_datas = self.env['hr.overtime'].search([])\r\n if overtime_datas:\r\n overtime_dict = dict(map(lambda x: (x.hours, {'normal_day': x.normal_day,\r\n 'holiday': x.holiday, 'work_hours': x.work_hours}),overtime_datas))\r\n else:\r\n overtime_dict = {}\r\n\r\n list_employee = self.env['hr.minimum.wage'].search([('operation_type_id','=',self.operation_type_id.id)]).ids\r\n for employee in self.env['hr.employee'].search([('umr_ids.id','in',list_employee)]):\r\n min_wage = employee.get_salary(self.date_start)\r\n if min_wage:\r\n line_vals = {\r\n 'name' : self.name,\r\n 'payroll_id' : self.id,\r\n 'employee_id' : employee.id,\r\n 'min_wage_id' : min_wage.id,\r\n 'min_wage_month' : min_wage.umr_month,\r\n 'operation_type_id'\t: self.operation_type_id.id,\r\n }\r\n else:\r\n continue\r\n attendance_date_start = (datetime.strptime(self.date_start+' 00:00:00', \"%Y-%m-%d %H:%M:%S\") + timedelta(hours=-8)).strftime(\"%Y-%m-%d %H:%M:%S\") if self.date_start else 0\r\n attendance_date_stop = (datetime.strptime(self.date_stop+' 23:59:59', \"%Y-%m-%d %H:%M:%S\") + timedelta(hours=-8)).strftime(\"%Y-%m-%d %H:%M:%S\") if self.date_stop else 0\r\n attendances = self.env['hr.attendance'].search([('employee_id','=',employee.id),('valid','=',True), \\\r\n ('check_in','>=',attendance_date_start),('check_in','<=',attendance_date_stop)])\r\n att_to_update = self.env['hr.attendance']\r\n if attendances:\r\n effective_working_days = sum( \\\r\n attendances.filtered(lambda x: x.attendance_type_id.type=='effective_work_day').\\\r\n mapped('work_day'))\r\n non_effective_working_days = sum(\\\r\n attendances.filtered(lambda x: x.attendance_type_id.type == 'non_effective_work_day').\\\r\n mapped('work_day'))\r\n not_working = int(len(\\\r\n attendances.filtered(lambda x: x.attendance_type_id.type == 'not_working')))\r\n working_days_month = effective_working_days + non_effective_working_days + not_working\r\n working_days = effective_working_days + non_effective_working_days\r\n effective_working_salary_amt = 0.0\r\n non_effective_working_salary_amt = 0.0\r\n amount_natura = 0.0\r\n overtime_amt = 0.0\r\n penalty_amt = 0.0\r\n premi_amt = 0.0\r\n amount_pph21 = 0.0\r\n for att in attendances:\r\n if not att.attendance_type_id:\r\n continue\r\n attendance_check_in = (datetime.strptime(att.check_in, \"%Y-%m-%d %H:%M:%S\") + timedelta(hours=8)) if att.check_in else 0\r\n working_date = attendance_check_in.strftime(DF)\r\n day_of_week = attendance_check_in.weekday()\r\n if att.attendance_type_id.type=='effective_work_day':\r\n # if working_date in day_off_dict.keys() or day_of_week==6:\r\n # effective_working_salary_amt += (att.work_day * (min_wage.umr_month/working_days_month))\r\n # overtime_t = float(att.working_time or 0.0) + float(att.overtime or 0.0)\r\n # base_overtime = float(int(overtime_t))\r\n # overtime_ratio = overtime_dict.get(base_overtime, {}).get('holiday', 0.0)\r\n # overtime_amt += overtime_ratio * (min_wage.umr_month / 173)\r\n # print '============================1',att.check_in,overtime_t, overtime_ratio, employee.name, overtime_amt\r\n # next_hour_overtime = overtime_t - base_overtime\r\n # if next_hour_overtime:\r\n # overtime_ratio2 = overtime_dict.get((base_overtime + 1), {}).get('holiday', 
0.0)\r\n # overtime_amt += ((next_hour_overtime * 60) / 60) * (overtime_ratio2-overtime_ratio) * (min_wage.umr_month / 173)\r\n # else:\r\n effective_working_salary_amt += (att.work_day * (min_wage.umr_month/working_days_month))\r\n overtime_t = float(att.overtime or 0.0)\r\n base_overtime = float(int(overtime_t))\r\n overtime_ratio = overtime_dict.get(base_overtime, {}).get('normal_day', 0.0)\r\n overtime_amt += overtime_ratio * (min_wage.umr_month / 173)\r\n next_hour_overtime = overtime_t - base_overtime\r\n if next_hour_overtime:\r\n overtime_ratio2 = overtime_dict.get((base_overtime + 1), {}).get('normal_day', 0.0)\r\n overtime_amt += ((next_hour_overtime * 60) / 60) * (overtime_ratio2-overtime_ratio) * (min_wage.umr_month / 173)\r\n elif att.attendance_type_id.type=='non_effective_work_day':\r\n non_effective_working_salary_amt += (att.work_day * (min_wage.umr_month / working_days_month))\r\n elif att.attendance_type_id.type=='overtime':\r\n overtime_t = float(att.overtime or 0.0)\r\n base_overtime = float(int(overtime_t))\r\n overtime_ratio = overtime_dict.get(base_overtime, {}).get('holiday', 0.0)\r\n overtime_amt += overtime_ratio * (min_wage.umr_month / 173)\r\n next_hour_overtime = overtime_t - base_overtime\r\n if next_hour_overtime:\r\n overtime_ratio2 = overtime_dict.get((base_overtime + 1), {}).get('holiday', 0.0)\r\n overtime_amt += ((next_hour_overtime * 60) / 60) * (overtime_ratio2 - overtime_ratio) * (min_wage.umr_month / 173)\r\n elif att.attendance_type_id.type == 'not_available':\r\n overtime_t = float(att.overtime or 0.0)\r\n base_overtime = float(int(overtime_t))\r\n overtime_ratio = overtime_dict.get(base_overtime, {}).get('normal_day', 0.0)\r\n overtime_amt += overtime_ratio * (min_wage.umr_month / 173)\r\n next_hour_overtime = overtime_t - base_overtime\r\n if next_hour_overtime:\r\n overtime_ratio2 = overtime_dict.get((base_overtime + 1), {}).get('holiday', 0.0)\r\n overtime_amt += ((next_hour_overtime * 60) / 60) * (overtime_ratio2 - overtime_ratio) * (min_wage.umr_month / 173)\r\n penalty_amt = 0 \r\n premi_amt += att.premi_value\r\n att_to_update |= att\r\n\r\n\r\n amount_natura = working_days < working_days_month and min_wage.amount_natura * (working_days/working_days_month) or min_wage.amount_natura or 0.0\r\n allowance_structural = min_wage.allowance_structural or 0.0\r\n allowance_production = working_days < working_days_month and min_wage.allowance_production * (working_days/working_days_month) or min_wage.allowance_production or 0.0\r\n\r\n if employee.type_id.monthly_employee:\r\n effective_working_salary_amt = min_wage.umr_month\r\n non_effective_working_salary_amt = 0\r\n amount_natura = min_wage.amount_natura or 0.0\r\n allowance_production = min_wage.allowance_production or 0.0\r\n\r\n # RAPEL\r\n rapel_ids = self.env['hr.attendance.rapel.line'].search([('employee_id', '=', employee.id), ('rapel_id.state', '=', 'confirm')]).filtered(lambda rekap: self.date_start <= rekap.rapel_id.date <= self.date_stop)\r\n rapel_value = sum(rapel_ids.mapped('total')) if rapel_ids else 0\r\n line_vals.update({\r\n 'effective_work_days': effective_working_days,\r\n 'effective_work_days_value': effective_working_salary_amt,\r\n 'non_effective_work_days': non_effective_working_days,\r\n 'non_effective_work_days_value': non_effective_working_salary_amt,\r\n 'overtime_value': overtime_amt,\r\n 'natura_value': amount_natura,\r\n 'penalty_value': penalty_amt,\r\n 'premi_value': premi_amt,\r\n 'allowance_production': allowance_production,\r\n 'allowance_structural': 
allowance_structural,\r\n 'amount_pph21': amount_pph21,\r\n 'rapel_value': rapel_value,\r\n })\r\n\r\n insurance_dict = employee.with_context(date=self.date_start).get_insurance_values(min_wage)\r\n line_vals.update(insurance_dict)\r\n payroll_line = PayrollLine.create(line_vals)\r\n\r\n if att_to_update:\r\n att_to_update.write({'payroll_line_id': payroll_line.id})\r\n\r\n @api.multi\r\n def action_create_bill(self):\r\n self.ensure_one()\r\n # 1. Grouping Salary Expenses\r\n grouped_hke = {}\r\n grouped_hkne = {}\r\n grouped_overtime = {}\r\n grouped_tunjangan = {}\r\n grouped_natura = {}\r\n grouped_bpjs_kes = {}\r\n grouped_bpjs_kes_pot = {}\r\n grouped_bpjs_tk = {}\r\n grouped_bpjs_tk_pot = {}\r\n grouped_bpjs_pen = {}\r\n grouped_bpjs_pen_pot = {}\r\n default_type_none = self.env['account.location.type'].search(['|', ('name', '=', '-'), ('code', '=', '-')],limit=1)\r\n for line in self.line_ids:\r\n employee = line.employee_id\r\n if not (line.effective_work_days_value or line.natura_value or line.overtime_value or line.non_effective_work_days_value):\r\n continue\r\n # 0.0 Validasi data\r\n salary_ok = bpjs_kes_ok = bpjs_tk_ok = bpjs_pen_ok = True\r\n if employee.default_account_salary_id:\r\n salary_ok = True\r\n else:\r\n raise exceptions.ValidationError(_(\"Karyawan %s tidak memiliki Account Gaji.\\n\\\r\n Silahkan diinput di Data Karyawan tersebut terlebih dahulu.\")%employee.name)\r\n if line.potongan_bpjs_kes:\r\n if not employee.default_account_bpjs_allowance_id or not employee.inter_account_bpjs_allowance_id:\r\n raise exceptions.ValidationError(_(\"Karyawan %s tidak memiliki Account Beban BPJS Kesehatan.\\n\\\r\n Silahkan diinput di Data Karyawan tersebut terlebih dahulu.\") % employee.name)\r\n else:\r\n bpjs_kes_ok = True\r\n\r\n if line.potongan_bpjs_tk:\r\n if not employee.default_account_ketenagakerjaan_allowance_id or not employee.inter_account_ketenagakerjaan_allowance_id:\r\n raise exceptions.ValidationError(_(\"Karyawan %s tidak memiliki Account Beban BPJS Ketenagakerjaan.\\n\\\r\n Silahkan diinput di Data Karyawan tersebut terlebih dahulu.\")%employee.name)\r\n else:\r\n bpjs_tk_ok = True\r\n\r\n if line.potongan_bpjs_pensiun:\r\n if not employee.default_account_pensiun_allowance_id or not employee.inter_account_pensiun_allowance_id:\r\n raise exceptions.ValidationError(_(\"Karyawan %s tidak memiliki Account Beban BPJS Pensiun.\\n\\\r\n Silahkan diinput di Data Karyawan tersebut terlebih dahulu.\")%employee.name)\r\n else:\r\n bpjs_pen_ok = True\r\n\r\n # 1.1 Grouped by Location are made for Basic Salary Expenses, Overtime Expenses, BPJS Kes and BPJS TK\r\n location_type = employee.default_location_type_id or (default_type_none or False)\r\n location = employee.default_location_id\r\n if location_type.id not in grouped_hke.keys() and salary_ok:\r\n grouped_hke.update({location_type.id: {}})\r\n grouped_hkne.update({location_type.id: {}})\r\n grouped_tunjangan.update({location_type.id: {}})\r\n grouped_natura.update({location_type.id: {}})\r\n grouped_overtime.update({location_type.id: {}})\r\n if bpjs_kes_ok:\r\n grouped_bpjs_tk.update({location_type.id: {}})\r\n if bpjs_tk_ok:\r\n grouped_bpjs_kes.update({location_type.id: {}})\r\n if bpjs_pen_ok:\r\n grouped_bpjs_pen.update({location_type.id: {}})\r\n if location.id not in grouped_hke[location_type.id].keys() and salary_ok:\r\n grouped_hke[location_type.id].update({location.id:{}})\r\n grouped_hkne[location_type.id].update({location.id: {}})\r\n grouped_tunjangan[location_type.id].update({location.id: {}})\r\n 
grouped_natura[location_type.id].update({location.id: {}})\r\n grouped_overtime[location_type.id].update({location.id: {}})\r\n if bpjs_kes_ok:\r\n grouped_bpjs_tk[location_type.id].update({location.id: {}})\r\n if bpjs_tk_ok:\r\n grouped_bpjs_kes[location_type.id].update({location.id: {}})\r\n if bpjs_pen_ok:\r\n grouped_bpjs_pen[location_type.id].update({location.id: {}})\r\n\r\n # 1.2 Grouped by Salary Account for Basic Salary Expenses\r\n salary_account = employee.default_account_salary_id\r\n if salary_account.id not in grouped_hke[location_type.id][location.id].keys() and salary_ok:\r\n grouped_hke[location_type.id][location.id].update({salary_account.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': salary_account.id,\r\n 'name': 'Beban Gaji Hari Kerja Efektif',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_hke[location_type.id][location.id][salary_account.id]['price_unit'] += (line.effective_work_days_value + line.premi_value - line.penalty_value)\r\n if salary_account.id not in grouped_hkne[location_type.id][location.id].keys() and salary_ok:\r\n grouped_hkne[location_type.id][location.id].update({salary_account.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': salary_account.id,\r\n 'name': 'Beban Gaji Hari Kerja Non-efektif',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_hkne[location_type.id][location.id][salary_account.id]['price_unit'] += line.non_effective_work_days_value\r\n\r\n allowance_account = employee.default_account_salary_id\r\n if allowance_account.id not in grouped_tunjangan[location_type.id][location.id].keys() and salary_ok:\r\n grouped_tunjangan[location_type.id][location.id].update({allowance_account.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': allowance_account.id,\r\n 'name': 'Beban Gaji Tunjangan',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_tunjangan[location_type.id][location.id][allowance_account.id]['price_unit'] += line.allowance_structural + line.allowance_production\r\n\r\n # 1.3 Grouped by Overtime Account for Overtime Expenses\r\n overtime_account = employee.default_account_overtime_id or salary_account\r\n if overtime_account.id not in grouped_overtime[location_type.id][location.id].keys() and salary_ok:\r\n grouped_overtime[location_type.id][location.id].update({overtime_account.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': overtime_account.id,\r\n 'name': 'Beban Overtime',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_overtime[location_type.id][location.id][overtime_account.id]['price_unit'] += line.overtime_value\r\n\r\n # 1.4 Grouped by Natura Account for Overtime Expenses\r\n natura_account = employee.default_account_welfare_allowance_id or salary_account\r\n if natura_account.id not in grouped_natura[location_type.id][location.id].keys() and salary_ok:\r\n grouped_natura[location_type.id][location.id].update({natura_account.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': natura_account.id,\r\n 'name': 'Beban Natura',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_natura[location_type.id][location.id][natura_account.id]['price_unit'] += line.natura_value\r\n\r\n # BEBAN BPJS KES\r\n account_bpjs_kes = 
employee.default_account_bpjs_allowance_id\r\n if account_bpjs_kes.id not in grouped_bpjs_kes[location_type.id][location.id].keys() and bpjs_kes_ok:\r\n grouped_bpjs_kes[location_type.id][location.id].update({account_bpjs_kes.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': account_bpjs_kes.id,\r\n 'name': 'Beban BPJS Kesehatan',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_bpjs_kes[location_type.id][location.id][account_bpjs_kes.id]['price_unit'] += line.tunjangan_bpjs_kes\r\n # POTONGAN GAJI KARYAWAN UNTUK BPJS KESEHATAN\r\n account_bpjs_kes_pot = employee.inter_account_bpjs_allowance_id\r\n if account_bpjs_kes_pot.id not in grouped_bpjs_kes_pot.keys() and bpjs_kes_ok:\r\n grouped_bpjs_kes_pot.update({account_bpjs_kes_pot.id: {\r\n 'account_location_type_id': default_type_none and default_type_none.id or False,\r\n 'account_location_id': False,\r\n 'account_id': account_bpjs_kes_pot.id,\r\n 'name': 'Potongan Gaji untuk BPJS Kesehatan',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_bpjs_kes_pot[account_bpjs_kes_pot.id]['price_unit'] += line.potongan_bpjs_kes\r\n\r\n # BEBAN BPJS KETENAGAKERJAAN\r\n account_bpjs_tk = employee.default_account_ketenagakerjaan_allowance_id\r\n if account_bpjs_tk.id not in grouped_bpjs_tk[location_type.id][location.id].keys() and bpjs_tk_ok:\r\n grouped_bpjs_tk[location_type.id][location.id].update({account_bpjs_tk.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': account_bpjs_tk.id,\r\n 'name': 'Beban BPJS Ketenagakerjaan',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_bpjs_tk[location_type.id][location.id][account_bpjs_tk.id]['price_unit'] += line.tunjangan_bpjs_tk\r\n # POTONGAN GAJI KARYAWAN UNTUK BPJS KETENAGAKERJAAN\r\n account_bpjs_tk_pot = employee.inter_account_ketenagakerjaan_allowance_id\r\n if account_bpjs_tk_pot.id not in grouped_bpjs_tk_pot.keys() and bpjs_tk_ok:\r\n grouped_bpjs_tk_pot.update({account_bpjs_tk_pot.id: {\r\n 'account_location_type_id': default_type_none and default_type_none.id or False,\r\n 'account_location_id': False,\r\n 'account_id': account_bpjs_tk_pot.id,\r\n 'name': 'Potongan Gaji untuk BPJS Ketenagakerjaan',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_bpjs_tk_pot[account_bpjs_tk_pot.id]['price_unit'] += line.potongan_bpjs_tk\r\n\r\n # BEBAN BPJS PENSIUN\r\n account_bpjs_pen = employee.default_account_pensiun_allowance_id\r\n if account_bpjs_pen.id not in grouped_bpjs_pen[location_type.id][location.id].keys() and bpjs_pen_ok:\r\n grouped_bpjs_pen[location_type.id][location.id].update({account_bpjs_pen.id: {\r\n 'account_location_type_id': location_type.id,\r\n 'account_location_id': location.id,\r\n 'account_id': account_bpjs_pen.id,\r\n 'name': 'Beban BPJS Pensiun',\r\n 'price_unit': 0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_bpjs_pen[location_type.id][location.id][account_bpjs_pen.id]['price_unit'] += line.tunjangan_bpjs_pensiun\r\n # POTONGAN GAJI KARYAWAN UNTUK BPJS KETENAGAKERJAAN\r\n account_bpjs_pen_pot = employee.inter_account_pensiun_allowance_id\r\n if account_bpjs_pen_pot.id not in grouped_bpjs_pen_pot.keys() and bpjs_pen_ok:\r\n grouped_bpjs_pen_pot.update({account_bpjs_pen_pot.id: {\r\n 'account_location_type_id': default_type_none and default_type_none.id or False,\r\n 'account_location_id': False,\r\n 'account_id': account_bpjs_pen_pot.id,\r\n 'name': 'Potongan Gaji untuk BPJS Pensiun',\r\n 'price_unit': 
0.0,\r\n 'quantity': 1.0,\r\n }})\r\n grouped_bpjs_pen_pot[account_bpjs_pen_pot.id]['price_unit'] += line.potongan_bpjs_pensiun\r\n\r\n # Create Invoice AP\r\n AccountInvoice = self.env['account.invoice']\r\n AccountInvoiceLine = self.env['account.invoice.line']\r\n invoice_ids = []\r\n default_journal = self.env['account.journal'].search([('type','=','general')], limit=1)\r\n # 1. Create Invoice AP for Salary\r\n invoice1 = AccountInvoice.create({\r\n 'type': 'in_invoice',\r\n 'date_invoice': self.date_stop,\r\n 'partner_id': self.payroll_partner_id.id,\r\n 'account_id': self.payroll_partner_id.property_account_payable_id.id,\r\n 'journal_id': default_journal.id,\r\n 'currency_id': self.company_id.currency_id.id,\r\n 'company_id': self.company_id.id\r\n })\r\n # Salary and Overtime Expenses\r\n salary_lines = []\r\n for loctype in grouped_hke.values() + grouped_hkne.values() + grouped_tunjangan.values() + grouped_natura.values():\r\n for loc in loctype.values():\r\n for x in loc.values():\r\n salary_lines.append(x)\r\n for salary_expense in salary_lines:\r\n if not salary_expense['price_unit']:\r\n continue\r\n invoice_line_vals = salary_expense\r\n invoice_line_vals.update({'invoice_id': invoice1.id})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n overtime_lines = []\r\n for loctype in grouped_overtime.values():\r\n for loc in loctype.values():\r\n for x in loc.values():\r\n overtime_lines.append(x)\r\n for overtime_expense in overtime_lines:\r\n if not overtime_expense['price_unit']:\r\n continue\r\n invoice_line_vals = overtime_expense\r\n invoice_line_vals.update({'invoice_id': invoice1.id})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n # Potongan Gaji\r\n for bpjs_kes_pot in grouped_bpjs_kes_pot.values():\r\n if not bpjs_kes_pot['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_kes_pot\r\n invoice_line_vals.update({'invoice_id': invoice1.id, 'quantity': -1.0})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n for bpjs_tk_pot in grouped_bpjs_tk_pot.values():\r\n if not bpjs_tk_pot['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_tk_pot\r\n invoice_line_vals.update({'invoice_id': invoice1.id, 'quantity': -1.0})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n for bpjs_tk_pen in grouped_bpjs_pen_pot.values():\r\n if not bpjs_tk_pen['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_tk_pen\r\n invoice_line_vals.update({'invoice_id': invoice1.id, 'quantity': -1.0})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n invoice1.compute_taxes()\r\n invoice_ids.append(invoice1.id)\r\n\r\n # 2. 
Create Invoice AP for BPJS Kesehatan\r\n invoice2 = AccountInvoice.create({\r\n 'type': 'in_invoice',\r\n 'date_invoice': self.date_stop,\r\n 'partner_id': self.bpjs_kes_partner_id.id,\r\n 'account_id': self.bpjs_kes_partner_id.property_account_payable_id.id,\r\n 'journal_id': default_journal.id,\r\n 'currency_id': self.company_id.currency_id.id,\r\n 'company_id': self.company_id.id\r\n })\r\n # BPJS Expenses\r\n bpjs_lines = []\r\n for loctype in grouped_bpjs_kes.values():\r\n for loc in loctype.values():\r\n for x in loc.values():\r\n bpjs_lines.append(x)\r\n for bpjs_kes_expenses in bpjs_lines:\r\n if not bpjs_kes_expenses['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_kes_expenses\r\n invoice_line_vals.update({'invoice_id': invoice2.id})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n # Potongan Gaji\r\n for bpjs_kes_pot in grouped_bpjs_kes_pot.values():\r\n if not bpjs_kes_pot['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_kes_pot\r\n invoice_line_vals.update({'invoice_id': invoice2.id, 'quantity': 1.0})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n invoice2.compute_taxes()\r\n invoice_ids.append(invoice2.id)\r\n\r\n # 3. Create Invoice AP for BPJS Ketenagakerjaan\r\n invoice3 = AccountInvoice.create({\r\n 'type': 'in_invoice',\r\n 'date_invoice': self.date_stop,\r\n 'partner_id': self.bpjs_tk_partner_id.id,\r\n 'account_id': self.bpjs_tk_partner_id.property_account_payable_id.id,\r\n 'journal_id': default_journal.id,\r\n 'currency_id': self.company_id.currency_id.id,\r\n 'company_id': self.company_id.id\r\n })\r\n # BPJS Expenses\r\n bpjs_lines2 = []\r\n for loctype in grouped_bpjs_tk.values():\r\n for loc in loctype.values():\r\n for x in loc.values():\r\n bpjs_lines2.append(x)\r\n for bpjs_tk_expenses in bpjs_lines2:\r\n if not bpjs_tk_expenses['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_tk_expenses\r\n invoice_line_vals.update({'invoice_id': invoice3.id})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n bpjs_lines3 = []\r\n for loctype in grouped_bpjs_pen.values():\r\n for loc in loctype.values():\r\n for x in loc.values():\r\n bpjs_lines3.append(x)\r\n for bpjs_pen_expenses in bpjs_lines3:\r\n if not bpjs_pen_expenses['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_pen_expenses\r\n invoice_line_vals.update({'invoice_id': invoice3.id})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n # Potongan Gaji\r\n for bpjs_tk_pot in grouped_bpjs_tk_pot.values():\r\n if not bpjs_tk_pot['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_tk_pot\r\n invoice_line_vals.update({'invoice_id': invoice3.id, 'quantity': 1.0})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n for bpjs_pen_pot in grouped_bpjs_pen_pot.values():\r\n if not bpjs_pen_pot['price_unit']:\r\n continue\r\n invoice_line_vals = bpjs_pen_pot\r\n invoice_line_vals.update({'invoice_id': invoice3.id, 'quantity': 1.0})\r\n AccountInvoiceLine.create(invoice_line_vals)\r\n invoice3.compute_taxes()\r\n invoice_ids.append(invoice3.id)\r\n\r\n self.invoice_ids = [(6,0,invoice_ids)]\r\n self.state='done'\r\n\r\n action = self.env.ref('account.action_invoice_tree2')\r\n result = action.read()[0]\r\n result['context'] = {'type': 'in_invoice', 'default_journal_id': default_journal.id}\r\n # choose the view_mode accordingly\r\n if len(invoice_ids) != 1:\r\n result['domain'] = \"[('id', 'in', \" + str(invoice_ids) + \")]\"\r\n elif len(invoice_ids) == 1:\r\n res = self.env.ref('account.invoice_supplier_form', False)\r\n result['views'] = [(res and res.id or False, 'form')]\r\n 
result['res_id'] = invoice_ids[0]\r\n else:\r\n return False\r\n return result\r\n\r\nclass HrAttendancePayrollLine(models.Model):\r\n _inherit = 'hr.attendance.payroll.line'\r\n \r\n operation_type_id = fields.Many2one('hr.operation.type', related='payroll_id.operation_type_id', string='Hr Type')\r\n allowance_structural = fields.Float('Tunjangan Struktural')\r\n allowance_production = fields.Float('Tunjangan Produksi')\r\n amount_pph21 = fields.Float('PPH 21')\r\n premi_value = fields.Float('Nilai Premi')\r\n rapel_value = fields.Float(\"Rapel Value\")","repo_name":"hendrasaputra0501/od10bms","sub_path":"addons/bms_hr_attendance/models/hr_payroll.py","file_name":"hr_payroll.py","file_ext":"py","file_size_in_byte":31171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41494338142","text":"__author__ = 'Antoine Schonewille'\n__copyright__ = 'Copyright 2022, Luna2 Project'\n__license__ = 'GPL'\n__version__ = '2.0'\n__maintainer__ = 'Antoine Schonewille'\n__email__ = 'antoine.schonewille@clustervision.com'\n__status__ = 'Development'\n\nfrom utils.queue import Queue\nfrom utils.database import Database\nfrom utils.log import Log\nfrom utils.helper import Helper\nfrom utils.service import Service\n\n\nclass DNS():\n \"\"\"\n This class is responsible for all additional DNS related matter.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n This constructor will initialize all required variables here.\n \"\"\"\n self.logger = Log.get_logger()\n\n\n\n def get_dns(self, name=None):\n \"\"\"\n This method will return requested additional dns record for the network.\n \"\"\"\n status=False\n response=f\"No entries for network {name}\"\n dns = Database().get_record_join(['dns.*'],['dns.networkid=network.id'],[f\"network.name='{name}'\"])\n\n if dns:\n status=True\n response = {'config': {'dns': {name: [] }}}\n data=[]\n for host in dns:\n response['config']['dns'][name].append({ \"host\": host['host'], \"ipaddress\": host['ipaddress'] })\n else:\n network = Database().get_record(None, \"network\", f\"WHERE `name`='{name}'\")\n if not network:\n status=False\n response=f\"Network {name} does not exist\"\n return status, response\n\n\n def update_dns(self, name=None, request_data=None):\n \"\"\"\n This method will create or update additional dns hosts for a network.\n \"\"\"\n status=True\n response=\"Internal error\"\n data = {}\n if request_data:\n data = request_data['config']['dns'][name]\n network = Database().get_record(None, \"network\", f\"WHERE `name`='{name}'\")\n if network:\n status=True\n response='DNS entries added or changed'\n networkid=network[0]['id']\n for entry in data:\n if 'host' in entry and 'ipaddress' in entry:\n host=entry['host']\n ipaddress=entry['ipaddress']\n valid_ip = Helper().check_ip(ipaddress)\n if valid_ip:\n ndata={}\n ndata['host']=host\n ndata['ipaddress']=ipaddress\n ndata['networkid']=networkid\n row = Helper().make_rows(ndata)\n exist = Database().get_record(None, \"dns\", f\"WHERE `host`='{host}' AND `networkid`='{networkid}'\")\n if exist:\n where = [{\"column\": \"id\", \"value\": exist[0]['id']}]\n Database().update('dns', row, where)\n else:\n Database().insert('dns', row)\n Service().queue('dns','restart')\n else:\n status=False\n response=f'Network {name} not present in database'\n else:\n status=False\n response='Invalid request: Did not receive data'\n return status, response\n\n\n def delete_dns(self, name=None, network=None):\n \"\"\"\n This method deletes a single host entry for a network.\n 
\"\"\"\n status=False\n response=\"Entry does not present in database\"\n exist = Database().get_record_join(['dns.*'],['dns.networkid=network.id'],[f\"dns.host='{name}'\",f\"network.name='{network}'\"])\n if exist:\n Database().delete_row('dns', [{\"column\": \"id\", \"value\": exist[0]['id']}])\n status=True\n response=\"Entry removed\"\n Service().queue('dns','restart')\n return status, response\n\n","repo_name":"clustervision/luna2-daemon","sub_path":"daemon/base/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4647422859","text":"import os\nimport pickle\nimport numpy as np\nfrom colorama import Fore\nfrom utils.container import Container\nfrom utils.plot import plot_two_lines\n\n\ndef raise_memory(image_name):\n print(\n r\"\"\"\n .__ \n ____________ |__| ______ ____ _____ ____ _____ \n \\_ __ \\__ \\ | |/ ___// __ \\ / \\_/ __ \\ / \\ \n | | \\// __ \\| |\\___ \\\\ ___/ | Y Y \\ ___/| Y Y \\\n |__| (____ /__/____ >\\___ > |__|_| /\\___ >__|_| /\n \\/ \\/ \\/ \\/ \\/ \\/ \n \"\"\"\n )\n sample = Container(image_name, 1024, 0.25)\n sample.run()\n for i in range(3):\n sample.updateAllocation(cpu=sample.cpu + 0.25)\n sample.run()\n sample.delete()\n sample.display()\n\n # plot\n with open(f\"profiling/data/profiling/{image_name}.pkl\", \"rb\") as f:\n base_data = pickle.load(f)\n data = [recorder[2] for recorder in sample.recorder[-4:]]\n if not os.path.exists(\"profiling/image/raise_memory\"):\n os.mkdir(\"profiling/image/raise_memory\")\n plot_two_lines(\n fig_name=f\"{image_name}\",\n xticks=np.arange(0, 4),\n xiticklabels=np.arange(0.25, 1.25, 0.25),\n xlabel=\"CPU(s)\",\n y1lim=(0, (max(data) // 1000 + 2.5) * 1000),\n y1label=\"Latency (ms)\",\n y2lim=(0, 2),\n y2label=\"Proportion\",\n values1=data,\n label1=\"Raise memory\",\n values2=[now / base for now, base in zip(data, base_data[:4])],\n label2=\"Comparison\",\n path=\"profiling/image/raise_memory\",\n )\n","repo_name":"HongyuZh/ASUSC","sub_path":"profiling/raise_memory.py","file_name":"raise_memory.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1716129866","text":"from flask import render_template, request, session, redirect\nfrom flask_bcrypt import Bcrypt\n\nfrom flask_app import app\n\nfrom ..models.recipe_model import Recipe\n\nbcrypt = Bcrypt(app) #instantiating the Bcrypt class passing the flask app \n\n\n# gets all recipes and puts them in the table on the dashboard page after loggin in .. 
\n@app.route(\"/dashboard\")\ndef get_all_recipes_controller():\n recipes = Recipe.get_all_recipes()\n return render_template('dashboard.html', recipes = recipes)\n\n\n# this function takes you from the dashboard to the add/create recipe page\n@app.route(\"/recipes/new\")\ndef dashboard_to_add_recipe_page():\n return render_template(\"add_recipe.html\")\n\n# this route creates/adds a new recipe for the user joining by the foregin key \n\n@app.route(\"/recipes/new\", methods = [\"POST\"])\ndef create_recipe_controller():\n Recipe.create_recipe(request.form)\n\n return redirect(\"/recipes/new\")\n\n\n# this route updates my recipes on my edit recipe form \n@app.route(\"/recipes/edit//\", methods = ['POST'])\ndef edit_recipe_controller(recipe_id):\n data = {\n \"name\": request.form['name'],\n \"under_30_min\": request.form['under_30_min'],\n \"description\": request.form['description'],\n \"instructions\": request.form['instructions'],\n \"date_made_on\": request.form['date_made_on'],\n \"id\": recipe_id\n }\n Recipe.update(data)\n return redirect(\"/dashboard\")\n\n","repo_name":"TessaP97/Python","sub_path":"recipes_crud/flask_app/controllers/recipe_controller.py","file_name":"recipe_controller.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7979827202","text":"# coding=utf8\nfrom website.common.site import WebSite\nfrom website.ea3w.ea3wcomments import Ea3wcomments\n\n\n\nclass Ea3w(WebSite):\n def __init__(self):\n WebSite.__init__(self)\n self.name = 'ea3w'\n self.pattern = r'^http[s]{0,1}://\\w+\\.ea3w\\.com\\/*'\n self.setcommentimpl(Ea3wcomments())","repo_name":"ErBingBing/django-tonado-crawler","sub_path":"ZG-PhaseFour/code/website/ea3w/ea3w.py","file_name":"ea3w.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17808801954","text":"'''\nFornecedor:\n- Possui estoque de peças ilimitado\n- Consideraremos que está localizado a uma faixa aleatória de distância em metros do almoxarifado\n- Fica a critério do monitor checar o nível de estoque no almoxarifado e solicitar peças aos fornecedores\n- Pode entregar uma ou mais peças de uma vez\n'''\nimport random\n\n# código para identificar operação destinatário no início do payload \nds_code = 1 # decrease stock\nsa_code = 2 # solicitação de reposição almoxarife\nsf_code = 3 # solicitação de reposição fornecedor \nra_code = 4 # resposta de de reposição almoxarife\nrf_code = 5 # resposta de de reposição forncedor\nfp_code = 6 # \n\n# Tópicos para receber e enviar dados ao monitor\ntopic_monitor = 'topic/monitor'\ntopic_estoque = 'topic/estoque'\ntopic_dashboard = 'topic/dashboard'\n\nmax_stock_fabrica = 50\n\n# número de peças diferentes que a fábrica utiliza\nnum_pecas = 10\nnum_pecas_base = 7\nnum_pecas_varia = 3\n\n# estoque de peças de uma linha de produção\nestoque_pecas = [10] * num_pecas\n\nestoque_fabrica = {\n 'l1' : [max_stock_fabrica] * num_pecas,\n 'l2' : [max_stock_fabrica] * num_pecas,\n 'l3' : [max_stock_fabrica] * num_pecas,\n 'l4' : [max_stock_fabrica] * num_pecas,\n 'l5' : [max_stock_fabrica] * num_pecas,\n 'l6' : [max_stock_fabrica] * num_pecas,\n 'l7' : [max_stock_fabrica] * num_pecas,\n 'l8' : [max_stock_fabrica] * num_pecas,\n 'l9' : [max_stock_fabrica] * num_pecas,\n 'l10' : [max_stock_fabrica] * num_pecas,\n 'l11' : [max_stock_fabrica] * num_pecas,\n 'l12' : [max_stock_fabrica] * num_pecas,\n 'l13' : 
[max_stock_fabrica] * num_pecas,\n}\n\n# estoque de peças do almoxarifado\nestoque_almoxarifado = [100] * num_pecas\n\n# gera uma lista de peças utilizadas na montagem de um produto\n# por simplicidade, vamos considerar que cada peça deve ser utilizada na ordem de seu índice\nproduto = [random.randint(1, 6) if random.choice([True, False]) else 0 for _ in range(num_pecas)]\n\nkit_base = [random.randint(1, 6) if random.choice([True, False]) else 0 for _ in range(num_pecas_base)]\npv1 = kit_base + [random.randint(1, 6) if random.choice([True, False]) else 0 for _ in range(num_pecas - num_pecas_base)]\npv2 = kit_base + [random.randint(1, 6) if random.choice([True, False]) else 0 for _ in range(num_pecas - num_pecas_base)]\npv3 = kit_base + [random.randint(1, 6) if random.choice([True, False]) else 0 for _ in range(num_pecas - num_pecas_base)]\npv4 = kit_base + [random.randint(1, 6) if random.choice([True, False]) else 0 for _ in range(num_pecas - num_pecas_base)]\npv5 = kit_base + [random.randint(1, 6) if random.choice([True, False]) else 0 for _ in range(num_pecas - num_pecas_base)]\n\n# gera uma lista de tempo utilizado na montagem de cada peça do produto\ntimer = [random.randint(1, 5)/10 for _ in range(num_pecas)]\n\n# define um limiar para a quantidade de peças viável \nalmoxarife_threshold = 10\nalmoxarife_baseline = 30\nproduction_threshold = 5\n\n# estoque de peças do almoxarifado\n# solicita_almoxarifado = [0] * num_pecas\n\n# estoque de peças do almoxarifado\nsolicita_fornecedor= [0] * num_pecas\n\nsolicita_almoxarifado = {\n 'l1' : [0] * num_pecas,\n 'l2' : [0] * num_pecas,\n 'l3' : [0] * num_pecas,\n 'l4' : [0] * num_pecas,\n 'l5' : [0] * num_pecas,\n 'l6' : [0] * num_pecas,\n 'l7' : [0] * num_pecas,\n 'l8' : [0] * num_pecas,\n 'l9' : [0] * num_pecas,\n 'l10' : [0] * num_pecas,\n 'l11' : [0] * num_pecas,\n 'l12' : [0] * num_pecas,\n 'l13' : [0] * num_pecas,\n}\n\nprod_name = ['pv1', 'pv2', 'pv3', 'pv4', 'pv5']\nprodutos = [pv1, pv2, pv3, pv4, pv5]\n\n\nimport paho.mqtt.client as mqtt\nimport json\nimport time\n\n\nclass Fornecedor:\n def __init__(self, name):\n self.name = name\n self.client = mqtt.Client(self.name)\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n self.client.connect(\"localhost\", 1883, 60)\n\n\n def on_connect(self, client, userdata, flags, rc):\n print(f\"{self.name} conectado ao broker\")\n self.client.subscribe(topic_monitor)\n\n\n def on_message(self, client, userdata, msg):\n data_json = msg.payload.decode(\"utf-8\")\n remetente, destinatario, code, part_index, quantidade = json.loads(data_json)\n\n # recebeu uma mensagem de solicitação de reposição vinda do almoxarifado/monitor\n if code == sf_code:\n # Considera um tempo de entrega\n time.sleep(random.randint(1,2))\n\n # envia resposta de entrega\n data = json.dumps((self.name, \"almoxarifado\", rf_code, part_index, quantidade))\n self.client.publish(topic_estoque, data)\n\n print(f\"reposição peça {part_index}, quantidade {quantidade}\")\n \n \n def start(self):\n try:\n self.client.loop_forever()\n except KeyboardInterrupt:\n self.client.loop_stop()\n self.client.disconnect()\n print(f\"\\nconexão {self.name} encerrada!\")\n\n\n#\n# Inicializa objeto do fornecedor\nfornecedor = 
Fornecedor(\"fornecedor\")\nfornecedor.start()\n","repo_name":"rad-silva/stock_monitoring","sub_path":"stock/fornecedor/fornecedor.py","file_name":"fornecedor.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3766235413","text":"import os\nos.system(\"cls\")\n#desarrolle el programa que determine la longitud total recorrida en metros y en yardas\n\n \nt1_km = float(input(\"ingrese la longitud en km del tramo 1: \"))\nt2_pies = float(input(\"ingrese la longitud en pies del tramo 2: \"))\nt3_millas = float(input(\"ingrese la longitud en millass del tramo 3: \"))\n\n\n \nt1_m = t1_km * 1000\nt2_m = t2_pies / 3.281\nt3_m = t3_millas * 1609\n\n \ntotalmetros = t1_m + t2_m + t3_m\ntotalyardas = totalmetros * 1.094\n\n\nprint( f\"Longitud total en metros: {totalmetros:.2f} m\")\nprint( f\"Longitud total en yardas: {totalyardas:.2f} yd\")","repo_name":"jakeline7/Ejercicios-Secuenciales","sub_path":"ejercicios_de_Rosa/ejercicio03.py","file_name":"ejercicio03.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39293969258","text":"\n\nclass Node:\n def __init__(self, data = None, next = None, prev = None ):\n self.data = data\n self.next = next\n self.prev = prev\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n\n def print_forward(self):\n if self.head is None:\n print(\"Linkedlist is empty\")\n return\n itr = self.head\n llstr = ''\n while itr:\n llstr += str(itr.data) + ' --> ' if itr.next else str(itr.data)\n itr = itr.next\n print(llstr)\n\n def print_backward(self):\n if self.head is None:\n print(\"Linkedlist is empty\")\n return\n\n itr = self.get_last_node()\n llstr = ''\n while itr:\n llstr += str(itr.data) + ' --> ' if itr.prev else str(itr.data)\n itr = itr.prev\n print(llstr)\n\n def get_last_node(self):\n itr = self.head\n while itr.next:\n itr = itr.next\n return itr\n\n def get_length(self):\n count = 0\n itr = self.head\n while itr:\n count += 1\n itr = itr.next\n return count\n\n def insert_at_begining(self,data):\n if self.head is None:\n node = Node(data, self.head, None)\n self.head = node\n else:\n node = Node(data, self.head,None)\n self.head.prev = node\n self.head = node\n \n def insert_at_end(self, data):\n if self.head is None:\n node = Node(data, None, None)\n return\n\n itr = self.head \n while itr.next:\n itr = itr.next\n\n itr.next = Node(data, None, itr)\n\n def insert_at(self, index, data):\n if index < 0 or index >= self.get_length():\n raise Exception(\"Invalid Index\")\n\n if index == 0:\n node = self.insert_at_begining(data)\n return\n\n itr = self.head\n count = 0\n while itr:\n if count == index - 1:\n node = Node(data, itr.next, itr)\n if node.next:\n node.next.prev = node\n itr.next = node\n break\n\n itr = itr.next\n count += 1\n def remove_at(self, index):\n if index < 0 or index >= self.get_length():\n raise Exception(\"Invalid Index\")\n\n if index == 0:\n self.head = self.head.next\n return\n\n itr = self.head\n count = 0\n while itr:\n if count == index - 1:\n itr.next= itr.next.next\n itr.next.prev = itr\n \n itr = itr.next\n count += 1\n def insert_values(self,data_list):\n for data in data_list:\n self.insert_at_end(data)\n \nif __name__ == \"__main__\": \n ll = Linkedlist()\n ll.insert_at_begining(3)\n ll.insert_at_begining(2)\n ll.insert_at_begining(1)\n ll.insert_at_end(6)\n ll.insert_at(3,5)\n ll.insert_at(3,4)\n 
ll.print_forward()\n ll.print_backward()\n ll.remove_at(4)\n ll.print_forward()\n ll.print_backward()\n ll.insert_values([7,8,9,10])\n ll.print_forward()\n ll.print_backward()\n ","repo_name":"prasad-kumar/DSA-Python","sub_path":"doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34986708915","text":"import os\n\nfrom moviepy.video.compositing.concatenate import concatenate_videoclips\nfrom moviepy.video.io.VideoFileClip import VideoFileClip\nfrom PIL import ImageFont, Image, ImageDraw\nimport moviepy.editor as mpy\nimport numpy as np\n\nWIDTH = 960\nHEIGHT = 540\n\n\ndef concatenate_videos(video_clip_paths, output_folder_path, uuid):\n \"\"\" Concatenates several video files and save it to `output_path_folder`.\n `method` can be either 'compose' or 'reduce':\n `reduce`: Reduce the quality of the video to the lowest quality on the list of `video_clip_paths`.\n `compose`: type help(concatenate_videoclips) for the info\n \"\"\"\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path)\n\n # create VideoFileClip object for each video file\n clips = [VideoFileClip(c) for c in video_clip_paths]\n\n # resize the videos to the maximum\n clips = [c.resize(newsize=(WIDTH, HEIGHT)) for c in clips]\n # concatenate the final video\n final_clip = concatenate_videoclips(clips)\n\n # write the output video file\n final_clip.write_videofile(output_folder_path + '/' + uuid + '.mp4', fps=24, threads=1, codec=\"libx264\")\n\n\ndef create_video_with_text(text, database_path):\n \"\"\"if a word is missing then we create a video displaying the word\"\"\"\n output_folder_path = database_path + \"/\" + text\n\n img = Image.new('RGB', (WIDTH, HEIGHT), color=(0, 0, 0))\n\n d = ImageDraw.Draw(img)\n\n font = ImageFont.truetype(\"arial.ttf\", 40)\n d.text((WIDTH/2, HEIGHT/2), text, font=font, fill=(255, 255, 255), anchor=\"mm\")\n\n pixels = list(img.getdata())\n width, height = img.size\n pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]\n\n frame = np.asarray(pixels)\n\n def make_frame(t):\n return frame\n\n text = mpy.VideoClip(make_frame, duration=1)\n\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path)\n\n video_with_text = mpy.CompositeVideoClip(\n [\n text.set_position((\"center\", \"top\"))\n ],\n size=(640, 480)). 
\\\n on_color(\n color=(0, 0, 0),\n col_opacity=1).set_duration(1)\n\n video_path = output_folder_path + '/text.mp4'\n video_with_text.write_videofile(video_path, fps=30, codec=\"mpeg4\", audio_codec=\"aac\")\n return video_path\n\n\nif __name__ == \"__main__\":\n create_video_with_text('aa')\n","repo_name":"carolineRe13/Speech_to_sign_language","sub_path":"codeBase/code/VideoCreator.py","file_name":"VideoCreator.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73834587688","text":"# Justin Oakley\n# CSC-526 Assignment 4: Machine Learning\n# 04/01/19\n\nimport pandas as pd, numpy as np\n\ndef print_list(param_list):\n for item in param_list:\n print(item)\n\n# Import cervical cancer data.\ncerv_cancer = pd.read_csv(\"risk_factors_cervical_cancer.csv\")\ncerv_cancer\n\n# Drop null values from dataframe.\ncerv_cancer = cerv_cancer.drop(columns=['STDs: Time since first diagnosis','STDs: Time since last diagnosis'])\ncerv_cancer = cerv_cancer.replace(to_replace='?', value=np.nan).dropna()\n\n#Basic statistical analysis\ncerv_cancer_mean = [(col, cerv_cancer[col].astype('float64').mean()) for col in cerv_cancer.columns]\nprint(\"The mean of each feature:\")\nprint_list(cerv_cancer_mean)\nprint('\\n')\ncerv_cancer_std = [(col, cerv_cancer[col].astype('float64').std()) for col in cerv_cancer.columns]\nprint(\"The standard deviation of each feature:\")\nprint_list(cerv_cancer_std)\nprint('\\n')\n\n# Drop columns with mean and/or standard deviation values of 0.0\ncerv_cancer = cerv_cancer.drop(columns=[mean_item[0] for mean_item, stddev_item in zip(cerv_cancer_mean, cerv_cancer_std)\n if mean_item[1] == 0 or stddev_item[1] == 0])\n\n# Split current dataframe to prepare for train_test_split()\nX = cerv_cancer.drop(columns=['Dx:Cancer'])\ny = cerv_cancer['Dx:Cancer']\n\n# Create training and testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=100)\n\n# Standardize datasets for Random Forest Classifier\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler().fit(X_train)\nX_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)\n\n# Create Random Forest Classifier Model and find the most important features in the dataset.\nfrom sklearn.ensemble import RandomForestClassifier\n\nrf = RandomForestClassifier(n_estimators=100, max_depth=2,random_state=0)\nrf_model = rf.fit(X_train, y_train)\nfeat_importances = [(X.columns[idx], col) for col,idx in zip(rf_model.feature_importances_,\n range(len(rf_model.feature_importances_)))]\nmost_important_feats = [feat for feat in feat_importances if feat[1] > 0]\n\n# Print column names with their accuracy scores.\nprint(\"\\nAll important features in determining cervical cancer:\")\nprint_list(feat_importances)\nprint('\\n')\nprint(\"Most important features in determining cervical cancer:\")\nprint_list(most_important_feats)\nprint('\\n')\n\n# Calculate prediction accuracy of Random Forest Model\nfrom sklearn import metrics\npredictions = rf_model.predict(X_test)\nprint(\"Accuracy of Random Forest Model: %s\" % metrics.accuracy_score(y_test, predictions))\n","repo_name":"jmoakle2/CSC-526_Assignments","sub_path":"CSC-526_Assignment4/MachineLearning.py","file_name":"MachineLearning.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"13689629274","text":"from page import Page\nimport tool\nimport status\nfrom collections import deque\nfrom status import LayoutChangeStatus\n\n\nclass Window:\n def __init__(self, manifest):\n self.manifest = manifest\n self.exitStatus = ['KEYCODE_BACK']\n self.pageBackStack = deque()\n self.prePage = None\n self.newPage = None\n\n def check(self, page):\n if self.manifest is None:\n return True\n if self.manifest.contains(page.getPackageName()):\n return True \n return False \n\n def append(self, page): \n if not self.check(page):\n raise Exception(\"*****package not right*****\", page.getPackageName(), self.manifest.packages)\n self.pageBackStack.append(page) \n\n def createPage(self): \n page = Page(self) \n page.parsing()\n return page\n\n def createPageWithAppend(self, depth=0, page=None):\n if page is None:\n page = self.createPage()\n\n if self.prePage is None:\n self.prePage = page\n else:\n self.prePage = self.newPage\n self.newPage = page \n \n self.append(page) \n page.depth = depth\n return page\n\n\n def sameActivityCheck(self, prePage, newPage):\n # a empty page should exit directly\n if newPage.elementsBesideExit == []:\n newPage.exit() \n return LayoutChangeStatus.PAGE_EXIT, prePage, newPage\n \n diff = newPage.difference(prePage) \n if len(diff) == 0: # nothing change\n return LayoutChangeStatus.NONE, prePage, newPage \n elif diff == set(newPage.elementsBesideExit): # page defintly not same \n if len(self.pageBackStack) >= 2:\n # check it is circle calling, like this situation:\n # eque([BusHomeActivity_7609395336725850600, BusHomeActivity_7186982384285961855, BusHomeActivity_7609395336725850600, BusHomeActivity_7186982384285961855])\n lastPageInStack = self.pageBackStack[-2]\n if lastPageInStack.getPageName() == newPage.getPageName():\n return LayoutChangeStatus.PAGE_EMPTY_EXIT, prePage, newPage \n return LayoutChangeStatus.NEW_LAYOUT, prePage, newPage \n else: # page partly same \n return LayoutChangeStatus.LAYOUT_MERGE, prePage, newPage \n\n\n def diffActivityCheck(self, prePage, newPage):\n lastPage = self.pageBackStack[-1]\n if lastPage.getPageName() == newPage.getPageName():\n newPage.exit() # a empty page should exit directly\n return LayoutChangeStatus.PAGE_EXIT, prePage, newPage\n else: \n # page name not same \n return LayoutChangeStatus.NEW_LAYOUT, prePage, newPage \n\n '''\n if new page is a dialog in the activity\n '''\n def checkChange(self, prePage): \n newPage = self.createPage()\n if not self.manifest.contains(newPage.getPackageName()): \n if len(self.pageBackStack) == 0:\n return LayoutChangeStatus.APPLICATION_EXIT, None, None\n else:\n return LayoutChangeStatus.ERROR, prePage, None\n \n if prePage.getActivityName() == newPage.getActivityName(): \n return self.sameActivityCheck(prePage, newPage)\n else:\n return self.diffActivityCheck(prePage, newPage)\n\n \n def getManifest(self): \n return self.manifest\n\n def dump(self):\n return getPage(self).dump() \n\n def size(self): \n return len(self.pageBackStack)\n","repo_name":"alexwang3322/android_auto_dump","sub_path":"src/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24620145581","text":"from flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello():\n return \"Hello World!\"\n\n\n@app.route(\"/about\")\ndef aboutsection():\n return \"this is about section\"\n\n\n@app.route(\"/best\")\ndef myresume():\n data = [{\n 
'name': 'sanketbisne',\n 'div': 'A,',\n 'email': 'sanketbisne@gmail.com',\n 'arr': [1, 2, 1, 45, 56]\n },\n {\n 'name': 'netfix',\n 'cloud': 'AWS',\n 'SERVICES ': 'sagemaker',\n 'music': 'spotify'\n }]\n return jsonify(data)\n\n\n@app.route('/add_two_numbers', methods=[\"POST\"])\ndef add_two_nums():\n dataDictt = request.get_json()\n x = dataDictt[\"x\"]\n y = dataDictt[\"y\"]\n z = x+y\n\n retJSON = {\n \"z\": z\n }\n return jsonify(retJSON), 200\n# 200 = Success\n# 500 = NOT FOUND\n\n\nif (__name__ == \"__main__\"):\n app.run()\n","repo_name":"sanketbisne/Flask-api-integrated-with-Docker","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11328054062","text":"def frequency(string):\n hashmap = {}\n for item in string:\n if item not in hashmap:\n hashmap[item] = 1\n else:\n hashmap[item] = hashmap[item]+1\n \n print(hashmap)\n\n res_ls = []\n for values in hashmap.values():\n res_ls.append(values)\n \n print(res_ls)\n\n maxN = max(res_ls)\n if maxN == 1:\n return 1\n\n hashT = [0]*(maxN+1)\n\n for i in range(len(res_ls)):\n hashT[res_ls[i]] = hashT[res_ls[i]]+1\n \n print(hashT)\n \n if len(hashT) == 3:\n for i in hashT:\n if i == 1:\n return 1\n\n return 0\n\n\n\n \n \n\nif __name__ == \"__main__\":\n print(\"\\n\")\n # string = 'xyz'\n # string = 'xxxxyyzz'\n # string = 'xyyz'\n # string = 'evjxpnqgmvfjl'\n string = 'ehuuroaidj'\n # string = 'cceea'\n # string = 'lptpgwgjrwlgtdhdui'\n\n res = frequency(string)\n print(res)\n\n\n print(\"\\n\")\n","repo_name":"maxkashyap41/pythonDSA","sub_path":"Hashing/Check_Frequencies_BeEqual(hashing).py","file_name":"Check_Frequencies_BeEqual(hashing).py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6018717608","text":"from PySide6.QtWidgets import QMainWindow, QPushButton, QFileDialog, QToolBar, QStatusBar\nfrom PySide6.QtCore import QSize, QPointF\nfrom PySide6.QtGui import QAction, QPainter\nfrom PySide6.QtCharts import QChart, QChartView, QLineSeries\nfrom graph import Chart\n\nfrom investmentData import InvestmentData\n\nclass MainWindow(QMainWindow):\n def __init__(self, app):\n super().__init__()\n self.setWindowTitle(\"Investment Tracker\")\n self.app = app\n\n #Menubar and menu\n menu_bar = self.menuBar()\n\n file_menu = menu_bar.addMenu(\"&File\")\n file_open = file_menu.addAction(\"Open file...\")\n file_open.triggered.connect(self.open_file)\n quit_action = file_menu.addAction(\"Quit\")\n quit_action.triggered.connect(self.quit_app)\n\n #toolbar\n toolbar = QToolBar(\"Main Tool Bar\")\n toolbar.setIconSize(QSize(16,16))\n self.addToolBar(toolbar)\n\n toolbar.addAction(quit_action)\n\n toolbar.addSeparator()\n\n action1 = QAction(\"Some Action\", self)\n action1.setStatusTip(\"Status message for some action\")\n action1.triggered.connect(self.toolbar_button_click)\n toolbar.addAction(action1)\n\n #status bar\n self.setStatusBar(QStatusBar(self))\n\n path = \"C:\\\\Users\\\\Trevor\\\\Dropbox\\\\My PC (DESKTOP-7CBII96)\\\\Documents\\\\Test\\\\test.txt\"\n investmentData = InvestmentData()\n investmentData = Chart.file_reader(path)\n\n chartData = Chart(investmentData)\n self.setCentralWidget(chartData._chart_view)\n\n def quit_app(self):\n self.app.quit()\n\n #Open file explorer\n def open_file(self):\n dialog = QFileDialog(self)\n dialog.setFileMode(QFileDialog.AnyFile)\n 
dialog.setViewMode(QFileDialog.Detail)\n if dialog.exec():\n fileNames = dialog.selectedFiles()\n \n def toolbar_button_click(self):\n self.statusBar().showMessage(\"Message!\", 3000)","repo_name":"twigmytwig/InvestmentTracker","sub_path":"InvestmentTracker/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34282359313","text":"import numpy as np\r\nimport random\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import stats\r\nimport seaborn as sns; \r\nimport cv2\r\n\r\n\r\nt = 1000\r\n\r\ndef cost_fun(length, arrival):\r\n\treturn (2*length + arrival)*arrival/2/t\r\n\r\ndef distribute (left,right,quant):\r\n\tleft_add = 0\r\n\tright_add = 0\r\n\tif (abs(left-right) > quant):\r\n\t\tleft_add = quant if left < right else 0\r\n\t\tright_add = quant if left > right else 0\r\n\telif (left+right+quant)%2: # is odd\r\n\t coin = random.randint(0,1)\r\n\t left_add = (-left+right+quant-1)/2 + coin\r\n\t right_add = (left-right+quant-1)/2 + 1-coin\r\n\telse:\r\n\t\tleft_add = (-left+right+quant)/2 \r\n\t\tright_add = (left-right+quant)/2\r\n\treturn left_add,right_add\r\n\r\nflow = np.random.poisson(1/0.7, size=t)\r\ndef calculate_cost(d,a,p):\r\n\troad1_real_traffic = 0\r\n\troad2_real_traffic = 0\r\n\troad1_observed_traffic = 0\r\n\troad2_observed_traffic = 0\r\n\tcost = 0\r\n\t#d = 0.4 # probability of checking\r\n\t#a = 0 # probalibity of attack\r\n\t#p = 0.7 # probality of geometrical distribution \r\n\tfor i in range(t):\r\n\t\tcurrent = flow[i]\t\t\r\n\t\tattack = random.randint(0,100-1)\r\n\t\tif attack < 100*a: attack = 1 # give an attack\r\n\t\telse: attack = 0 \r\n\t\tcheck = random.randint(0,100-1) # check the traffic\r\n\t\tif check < 100*d : attack = 0 \r\n\t\tleft_add, right_add = distribute(road1_observed_traffic, road2_observed_traffic, current + attack)\r\n\t\troad1_observed_traffic += left_add\r\n\t\troad2_observed_traffic += right_add\r\n\t\tfalse_goes_to_left = attack if road1_real_traffic < road2_real_traffic else 0\r\n\t\r\n\t\tcost += cost_fun(road1_real_traffic,left_add - false_goes_to_left) + cost_fun(road2_real_traffic,right_add - (attack - false_goes_to_left))\r\n\t\r\n\t\troad1_observed_traffic -= 1 if road1_observed_traffic >0 else 0\r\n\t\troad1_real_traffic -= 1 if road1_real_traffic >0 else 0\r\n\t\troad2_observed_traffic -= 1 if road2_observed_traffic >0 else 0\r\n\t\troad2_real_traffic -= 1 if road2_real_traffic >0 else 0\r\n\t\r\n\t\troad1_real_traffic += left_add - false_goes_to_left\r\n\t\troad2_real_traffic += right_add - (attack - false_goes_to_left)\r\n\t\t\r\n\treturn cost*p + d - a\r\n\t\t\r\nx = np.linspace(0,1,21)\r\nheatmap = np.zeros((11,21))\r\nfor i in range(0,11):\r\n\tcost_sequence = []\r\n\ta = 0.03*i\r\n\tfor k in range(21):\r\n\t\tcost = 0\r\n\t\td = k*0.05\r\n\t\tfor j in range(100):\r\n\t\t\tcost += calculate_cost(d,a,0.7)\r\n\t\tcost_sequence.append(cost/100)\r\n\t\theatmap[10-i][k] = cost/100\r\n\t#plt.plot(x,cost_sequence,label = \"a = \"+ str(a))\r\n\r\nax = sns.heatmap(heatmap,xticklabels=False, yticklabels=False,cmap=\"YlGnBu\",vmax=2.5)\r\nax.set_xlabel(\"d\",fontsize = 15)\r\nax.set_ylabel(\"a\",fontsize = 15)\r\nplt.savefig(\"heatmap\" + '.png', dpi=300)\r\n#plt.legend()\r\n#plt.show()\r\n'''\r\nstart_a = 5 # a = 3*0.05\r\nstart_d = 5 # d = 5*0.05\r\ninitial_cost = heatmap[start_a][start_d]\r\ntime = 100\r\nwhile time :\r\n\ttime -= 1\r\n\tline = heatmap[start_a]\r\n\td_max = 
10\r\n\td_loc = 0\r\n\tfor i in range(21):\r\n\t\tif line[i] == 0:\r\n\t\t\tcontinue\r\n\t\tif line[i] < d_max :\r\n\t\t\td_loc = i\r\n\t\t\td_max = line[i]\t\r\n\ta_min = 0\r\n\ta_loc = 0\r\n\tline = heatmap[:,d_loc]\r\n\tfor i in range(11):\r\n\t\tif line[i] > a_min :\r\n\t\t\ta_loc = i\r\n\t\t\ta_min = line[i]\r\n\tprint(d_loc)\r\n\tprint(a_loc)\r\n\tprint(heatmap[a_loc][d_loc])\r\n\tif (abs(heatmap[a_loc][d_loc] - heatmap[start_a][start_d]) < 0.01*(heatmap[a_loc][d_loc] + heatmap[start_a][start_d])) :\r\n\t\tprint(\"d is \"+str(0.05*start_d))\r\n\t\tprint(\"a is \"+str(0.05*start_a))\r\n\t\tprint(\"initial cost is \"+str(initial_cost))\r\n\t\tprint(\"cost after game is \"+str(heatmap[a_loc][d_loc]))\r\n\t\tbreak\r\n\tstart_a = a_loc\r\n\tstart_d = d_loc\r\n\t'''\r\n\t\r\n","repo_name":"shellfish007/Two-Query-System","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3227971795","text":"def split(word):\n return [char for char in word]\n\ninput_file = open('input.txt', 'r')\nLines = input_file.readlines()\n\narray_length = len(Lines[0].strip())\nline_count= len(Lines)\nhalf_count = line_count/2\nbit_counts = []\nbit_counts = [0 for i in range(array_length)] \n\nfor line in Lines:\n\treport = split(line.strip())\n\tfor i in range(array_length):\n\t\tbit_counts[i] += int(report[i])\n \ngamma = \"\"\nfor bit in bit_counts:\n\t# print(bit)\n\tif int(bit)> half_count:\n\t\tgamma+=\"1\"\n\telse:\n\t\tgamma+=\"0\"\n\ngamma_int = int(gamma,2)\n\nxor_string = \"\"\nfor i in range(array_length):\n\txor_string += \"1\"\n\t\nxor = int(xor_string,2)\nepsilon_int = gamma_int^xor\n\nprint(int(gamma,2))\npower = gamma_int*epsilon_int\n\nprint(\"gamma:{}, epsilon:{}, power consumption:{} \".format(gamma_int,epsilon_int,power))\t","repo_name":"cyberphilia/adventofcode2021","sub_path":"day03/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7392341967","text":"location_cat = [\n \"upper leg\",\n \"lower leg\",\n \"upper arm\",\n \"lower arm\",\n \"hand\",\n \"(? 
signedMax:\n\t\tusInput = ((~(usInput-1))&mask)\n\t\tusInput = usInput*(-1)\n\treturn usInput\n\ndef dataUnpack(data, unpack_no):\n dataOut = np.empty(0)\n for i in range(0, len(data),unpack_no):\n temp = (int(data[i])&0xFFFF)+((int(data[i+1])&0xFFFF)<<16)+((int(data[i+2])&0xFFFF)<<32)+((int(data[i+3])&0xFFFF)<<48)\n temp = unsignedToSigned(temp, 64)\n dataOut= np.append(dataOut, temp)\n return dataOut\n \nclass SD_FPGA(keysightSD1.SD_AIN):\n def __init__(self):\n super(SD_FPGA, self).__init__()\n self._bitModeDefine()\n self._FPGA = self.bitMode_Keysight\n\n def assignCard(self, cardName:str):\n \"\"\" select different digitizeer by setting the model name of digitizer\n Args:\n cardName: the model name of the digitizer, currently only M3102A is available\n \"\"\"\n if cardName == \"M3102A\":\n self._assignM3102A() \n else:\n raise TypeError(cardName +\"is not defined\") \n \n def loadFPGABitFile(self, bitFileMode:int):\n \"\"\"\n load different bitfile by setting different bitFileMode\n Args:\n bitFileMode: use the mode setting constant defined in bitModeDefine\n \"\"\"\n if bitFileMode == self.bitMode_Keysight:\n self._loadBitFile(self.bitPath_Keysight)\n self._FPGA = self.bitMode_Keysight\n elif bitFileMode == self.bitMode_AVE:\n self._loadBitFile(self.bitPath_AVE)\n self._FPGA = self.bitMode_AVE \n elif bitFileMode == self.bitMode_SingleDDC:\n self._loadBitFile(self.bitPath_SingleDDC)\n self._FPGA=self.bitMode_SingleDDC\n elif bitFileMode == self.bitMode_SingleDDC_Int:\n self._loadBitFile(self.bitPath_SingleDDC_Int)\n self._FPGA = self.bitMode_SingleDDC_Int\n elif bitFileMode == self.bitMode_SingleDDC_Spt:\n self._loadBitFile(self.bitPath_SingleDDC_Spt)\n self._FPGA = self.bitMode_SingleDDC_Spt\n elif bitFileMode == self.bitMode_DualDDC:\n self._loadBitFile(self.bitPath_DualDDC)\n self._FPGA = self.bitMode_DualDDC\n elif bitFileMode == self.bitMode_DualDDC_Int:\n self._loadBitFile(self.bitPath_DualDDC_Int)\n self._FPGA = self.bitMode_DualDDC_Int\n elif bitFileMode == self.bitMode_DualDDC_Spt:\n self._loadBitFile(self.bitPath_DualDDC_Spt)\n self._FPGA = self.bitMode_DualDDC_Spt \n elif bitFileMode == self.bitMode_AVE_SingleDDC:\n self._loadBitFile(self.bitPath_AVE_SingleDDC)\n self._FPGA = self.bitMode_AVE_SingleDDC\n elif bitFileMode == self.bitMode_AVE_SingleDDC_Int:\n self._loadBitFile(self.bitPath_AVE_SingleDDC_Int)\n self._FPGA = self.bitMode_AVE_SingleDDC_Int\n elif bitFileMode == self.bitMode_AVE_DualDDC:\n self._loadBitFile(self.bitPath_AVE_DualDDC)\n self._FPGA = self.bitMode_AVE_DualDDC\n elif bitFileMode == self.bitMode_AVE_DualDDC_Int:\n self._loadBitFile(self.bitPath_AVE_DualDDC_Int)\n self._FPGA = self.bitMode_AVE_DualDDC_Int\n else:\n raise ValueError(\"bit Mode assign Error\")\n self._getOffsetReg()\n self._getAveReg()\n self._getDDCReg()\n \n def adjustOffset(self, CH:int, offset:int):\n \"\"\" set the input offset value\n Args:\n CH: input channel, only 1,2,3,4 is valid\n offset: offset value of each channel, the unit is digit\n \"\"\"\n if offset < -8192 or offset > 8191:\n raise ValueError(\"the offset value is out of range\")\n if self._FPGA == self.bitMode_Keysight:\n return \n if self._FPGA & self.bitMode_Dual:\n if CH == 1:\n self.regCh1Offset.writeRegisterInt32(offset)\n elif CH ==2:\n self.regCh2Offset.writeRegisterInt32(offset)\n elif CH ==3:\n self.regCh3Offset.writeRegisterInt32(offset)\n elif CH ==4:\n self.regCh4Offset.writeRegisterInt32(offset)\n else:\n raise ValueError(\"CH number setting error\")\n else:\n if CH == 1:\n 
self.regCh1Offset.writeRegisterInt32(offset)\n elif CH ==2:\n self.regCh2Offset.writeRegisterInt32(offset)\n else:\n raise ValueError(\"CH number setting error\") \n \n def setDDCFreq(self, CH, frequency):\n \"\"\" Args:\n CH: input channel number only 1 or 3 is valid\n frequency: the local oscillator frequency. the unit is Hz\n \"\"\"\n if not self._FPGA & self.bitMode_DDC:\n return \n temp_a_b = ((frequency*self.superSampling)*(2**25))/(self.samplingRate*self.Lo_T)\n A = int(temp_a_b)\n B = round((temp_a_b -A)*(5**10))\n if CH == 1:\n self.regCh1LoA.writeRegisterInt32(A)\n self.regCh1LoB.writeRegisterInt32(B)\n self.regCh1SetFreq.writeRegisterInt32(1)\n self.regCh1SetFreq.writeRegisterInt32(0)\n \n elif CH ==2:\n self.regCh2LoA.writeRegisterInt32(A)\n self.regCh2LoB.writeRegisterInt32(B)\n self.regCh2SetFreq.writeRegisterInt32(1)\n self.regCh2SetFreq.writeRegisterInt32(0)\n \n def DAQconfigFPGA(self, CH, pt_per_shot, shots, trig_delay, trigger_mode):\n \"\"\"\n replace the original DAQconfig function. the parameter is the same\n \"\"\"\n if self._FPGA & self.bitMode_AVE:\n print(\"config AVE\")\n self._DAQconfigAVE(CH, pt_per_shot, shots, int(trig_delay/5), trigger_mode)\n elif self._FPGA & self.bitMode_DDC:\n print(\"config DDC\")\n self._DAQconfigDDC(CH, pt_per_shot, shots, int(trig_delay/5), trigger_mode)\n else:\n error = self.DAQconfig(CH, pt_per_shot, shots, trig_delay, trigger_mode)\n if error != 0:\n raise ValueError(\"DAQ configure error in channel \",CH)\n\n def aveMemoryClear(self):\n if self._FPGA & self.bitMode_AVE or self._FPGA & self.bitMode_Spt:\n print(\"clear memory\")\n self.regCh1Clear.writeRegisterInt32(1)\n self.regCh2Clear.writeRegisterInt32(1)\n\n \n def checkFinished(self, Channel, pt_per_shot, shots, timeout_in_s =1):\n tstart = time.time()\n if self._FPGA & self.bitMode_AVE or self._FPGA & self.bitMode_Spt:\n self._checkStatus(Channel, tstart, timeout_in_s)\n elif self._FPGA == self.bitMode_Keysight or self._FPGA & self.bitMode_Int:\n self._checkDataCount(Channel, pt_per_shot*shots, tstart, timeout_in_s)\n elif self._FPGA == self.bitMode_DualDDC or self._FPGA == self.bitMode_SingleDDC :\n self._checkDataCount(Channel, int(pt_per_shot*shots/5), tstart, timeout_in_s)\n\n def _checkDataCount(self, Channel, totolPts, tstart, timeout_in_s):\n \"\"\"Arg:\n Channel: DAQ channel for 1 to 4\n totalPts: the data points expect to read back\n tstart: starting timestamp of timeout \n \"\"\"\n counter = 0\n tcheck = 0\n while(tcheck < timeout_in_s and counter != totolPts):\n counter = self.DAQcounterRead(Channel)\n tcheck = time.time()-tstart\n if tcheck >=timeout_in_s:\n raise RuntimeError(\"time out in Reading data points from Channel:\"+str(Channel)+\" datacount=\"+str(counter))\n\n def _checkStatus(self, Channel, tstart, timeout_in_s):\n \"\"\"\n check the status for AVE function:\n Args:\n Channel: DAQ channel for 1 to 4\n tstart: starting timestamp of timeout \n \"\"\"\n status = 0\n tcheck = 0\n while(tcheck < timeout_in_s and status != 26):\n if Channel == 1:\n status = self.regCh1State.readRegisterInt32()\n elif Channel == 2:\n status = self.regCh2State.readRegisterInt32()\n print(\"ch2 status:\", status)\n elif Channel ==3:\n status = self.regCh3State.readRegisterInt32()\n elif Channel == 4:\n status = self.regCh4State.readRegisterInt32()\n else:\n raise ValueError(\" incorrect channel setting\")\n tcheck = time.time()-tstart\n if tcheck >= timeout_in_s:\n raise RuntimeError(\"AVE status runtime error in channel:\"+str(Channel))\n\n\n def getAVEDebugInfo(self, 
enable_ch12= True):\n if not (self._FPGA & self.bitMode_AVE or self._FPGA & self.bitMode_Spt):\n return\n if enable_ch12:\n regCh1TriggerCnt = self.FPGAgetSandBoxRegister(\"Mem_Ch1_TriggerCnt\")\n regCh2TriggerCnt = self.FPGAgetSandBoxRegister(\"Mem_Ch2_TriggerCnt\")\n tn0 = regCh1TriggerCnt.readRegisterInt32()\n tn1 = regCh2TriggerCnt.readRegisterInt32()\n shots1 = self.regCh1Shot.readRegisterInt32()\n shots2 = self.regCh2Shot.readRegisterInt32()\n info = \"tn0 = \"+str(tn0)+\", tn1 = \"+str(tn1)+\", shots1=\"+str(shots1)+ \",shots2=\"+str(shots2)\n else: \n regCh3TriggerCnt = self.FPGAgetSandBoxRegister(\"Mem_Ch3_TriggerCnt\")\n regCh4TriggerCnt = self.FPGAgetSandBoxRegister(\"Mem_Ch4_TriggerCnt\")\n tn0 = regCh3TriggerCnt.readRegisterInt32()\n tn1 = regCh4TriggerCnt.readRegisterInt32()\n shots= self.regCh2Shot.readRegisterInt32()\n info = \"tn3 = \"+str(tn0)+\", tn4 = \"+str(tn1)+\",shots2=\"+str(shots)\n return info\n\n def DAQreadFPGA(self, CH, timeout):\n datacounter = self.DAQcounterRead(CH)\n print(\"datacount of CH\", CH, \"=\", datacounter)\n \n if datacounter == 0:\n raise ValueError(\" There is no output data in the acquired channel\")\n if datacounter % 10:\n datacounter = int(datacounter/10)*10 \n data = self.DAQread(CH, datacounter, timeout)\n \n if self._FPGA & self.bitMode_Int or self._FPGA == self.bitMode_AVE:\n data = dataUnpack(data, self.superSampling)\n\n return data\n \n def _assignM3102A(self):\n self.samplingRate =5e8\n self.superSampling = 5\n self.Lo_T = 8\n self.bitPath_Keysight = str(FPGA_FOLDER / \"M3102A.k7z\")\n self.bitPath_SingleDDC = str(FPGA_FOLDER / \"M3102A_SingleDDC.k7z\")\n self.bitPath_SingleDDC_Int = str(FPGA_FOLDER / \"M3102A_SingleDDC_Int.k7z\")\n self.bitPath_SingleDDC_Spt = str(FPGA_FOLDER / \"M3102A_SingleDDC_Spt.k7z\")\n self.bitPath_DualDDC = str(FPGA_FOLDER / \"M3102A_DualDDC.k7z\")\n self.bitPath_DualDDC_Int = str(FPGA_FOLDER / \"M3102A_DualDDC_Int.k7z\")\n self.bitPath_DualDDC_Spt = str(FPGA_FOLDER / \"M3102A_DualDDC_Spt.k7z\")\n self.bitPath_AVE = str(FPGA_FOLDER / \"M3102A_AVE.k7z\")\n self.bitPath_AVE_SingleDDC = str(FPGA_FOLDER / \"M3102A_AVE_SingleDDC.k7z\")\n self.bitPath_AVE_SingleDDC_Int = str(FPGA_FOLDER / \"M3102A_AVE_SingleDDC_Int.k7z\")\n self.bitPath_AVE_DualDDC = str(FPGA_FOLDER / \"M3102A_AVE_DualDDC.k7z\")\n self.bitPath_AVE_DualDDC_Int = str(FPGA_FOLDER / \"M3102A_AVE_DualDDC_Int.k7z\")\n\n def _bitModeDefine(self):\n \"\"\" Define the bitMode constant\n 0x01: AVE\n 0x02: DDC\n 0x04: Single DDC\n 0x08: Dual DDC\n 0x10: Integration\n 0x20: Single Point \n \"\"\"\n self.bitMode_Keysight = 0x00\n self.bitMode_AVE = 0x01\n self.bitMode_DDC = 0x02 \n self.bitMode_Single = 0x04\n self.bitMode_Dual = 0x08\n self.bitMode_Int = 0x10\n self.bitMode_Spt = 0x20\n # DDC COMBO:\n self.bitMode_SingleDDC = self.bitMode_DDC|self.bitMode_Single # 6\n self.bitMode_SingleDDC_Int =self.bitMode_SingleDDC|self.bitMode_Int # 22\n self.bitMode_SingleDDC_Spt =self.bitMode_SingleDDC_Int|self.bitMode_Spt # 54\n self.bitMode_DualDDC = self.bitMode_DDC|self.bitMode_Dual # 10\n self.bitMode_DualDDC_Int = self.bitMode_DualDDC|self.bitMode_Int # 26\n self.bitMode_DualDDC_Spt = self.bitMode_DualDDC_Int|self.bitMode_Spt # 58\n self.bitMode_AVE_SingleDDC = self.bitMode_AVE|self.bitMode_SingleDDC # 7\n self.bitMode_AVE_SingleDDC_Int = self.bitMode_AVE_SingleDDC|self.bitMode_Int # 23\n self.bitMode_AVE_DualDDC = self.bitMode_AVE|self.bitMode_DualDDC # 11\n self.bitMode_AVE_DualDDC_Int = self.bitMode_AVE_DualDDC| self.bitMode_Int # 27\n \n def 
_getAveReg(self):\n if not (self._FPGA & self.bitMode_AVE or self._FPGA & self.bitMode_Spt):\n return \n if self._FPGA & self.bitMode_Spt or (self._FPGA & self.bitMode_Dual and self._FPGA & self.bitMode_AVE):\n self.regCh1State = self.FPGAgetSandBoxRegister(\"Mem_Ch1_State_I\")\n self.regCh2State = self.FPGAgetSandBoxRegister(\"Mem_Ch1_State_Q\")\n self.regCh3State = self.FPGAgetSandBoxRegister(\"Mem_Ch2_State_I\")\n self.regCh4State = self.FPGAgetSandBoxRegister(\"Mem_Ch2_State_Q\")\n else:\n self.regCh1State = self.FPGAgetSandBoxRegister(\"Mem_Ch1_State\")\n self.regCh2State = self.FPGAgetSandBoxRegister(\"Mem_Ch2_State\")\n self.regCh1Point = self.FPGAgetSandBoxRegister(\"Mem_Ch1_Point\")\n self.regCh1Shot = self.FPGAgetSandBoxRegister(\"Mem_Ch1_Shot\")\n self.regCh1Clear = self.FPGAgetSandBoxRegister(\"Mem_Ch1_Clear\")\n self.regCh2Point = self.FPGAgetSandBoxRegister(\"Mem_Ch2_Point\")\n self.regCh2Shot = self.FPGAgetSandBoxRegister(\"Mem_Ch2_Shot\")\n self.regCh2Clear = self.FPGAgetSandBoxRegister(\"Mem_Ch2_Clear\")\n \n def _getOffsetReg(self):\n if self._FPGA == self.bitMode_Keysight:\n return \n if self._FPGA& self.bitMode_Dual:\n self.regCh1Offset = self.FPGAgetSandBoxRegister(\"Mem_Ch1_Offset_I\")\n self.regCh2Offset = self.FPGAgetSandBoxRegister(\"Mem_Ch1_Offset_Q\")\n self.regCh3Offset = self.FPGAgetSandBoxRegister(\"Mem_Ch2_Offset_I\")\n self.regCh4Offset = self.FPGAgetSandBoxRegister(\"Mem_Ch2_Offset_Q\") \n else :\n self.regCh1Offset = self.FPGAgetSandBoxRegister(\"Mem_Ch1_Offset\")\n self.regCh2Offset = self.FPGAgetSandBoxRegister(\"Mem_Ch2_Offset\")\n\n def _getDDCReg(self):\n if self._FPGA& self.bitMode_DDC:\n self.regCh1SetFreq = self.FPGAgetSandBoxRegister(\"Mem_Ch1_SetFreq\")\n # self.regCh1PhaseReset = self.FPGAgetSandBoxRegister(\"Mem_Ch1_phRst\")\n self.regCh1LoA = self.FPGAgetSandBoxRegister(\"Mem_Ch1_LoA\")\n self.regCh1LoB = self.FPGAgetSandBoxRegister(\"Mem_Ch1_LoB\")\n self.regCh2SetFreq = self.FPGAgetSandBoxRegister(\"Mem_Ch2_SetFreq\")\n # self.regCh2PhaseReset = self.FPGAgetSandBoxRegister(\"Mem_Ch2_phRst\")\n self.regCh2LoA = self.FPGAgetSandBoxRegister(\"Mem_Ch2_LoA\")\n self.regCh2LoB = self.FPGAgetSandBoxRegister(\"Mem_Ch2_LoB\")\n \n def _loadBitFile(self, filepath):\n error = self.FPGAload(filepath)\n if error !=0:\n raise FileExistsError(\"can't load bitFile\"+str(filepath)) \n \n def _DAQconfigAVE(self,CH, pt_per_shot, shots, trig_delay, trigger_mode):\n if self._FPGA & self.bitMode_AVE:\n if pt_per_shot % 10 != 0:\n raise ValueError(\"pt_per_shot should be multiple of 10\") \n if pt_per_shot >65530:\n raise ValueError(\"pt_per_shot can't exceed 65530\")\n if self._FPGA & self.bitMode_DDC:\n self._DAQconfigDDC(CH, pt_per_shot, 1, trig_delay, trigger_mode)\n elif (self._FPGA == self.bitMode_AVE):\n error = self.DAQconfig(CH, pt_per_shot*5, 1, trig_delay*5, trigger_mode)\n if error:\n raise ValueError(\"DAQ configure error in channel \",CH)\n else:\n error = self.DAQconfig(CH, pt_per_shot*5, 1, trig_delay, trigger_mode)\n if error:\n raise ValueError(\"DAQ configure error in channel \",CH)\n pt_per_shot = round(pt_per_shot/5)-1\n if CH == 1:\n self.regCh1Point.writeRegisterInt32(pt_per_shot)\n self.regCh1Shot.writeRegisterInt32(shots)\n elif CH ==2:\n self.regCh2Point.writeRegisterInt32(pt_per_shot)\n self.regCh2Shot.writeRegisterInt32(shots)\n else:\n pass\n # raise ValueError(\"in Fpga mode, CH value only can be configure CH=1 or CH=2\")\n\n def _DAQconfigDDC(self, CH, pt_per_shot, shots, trig_delay, trigger_mode):\n if self._FPGA & 
self.bitMode_DDC:\n if pt_per_shot % 10 !=0:\n raise ValueError(\" pt_per_shot should be multiples of 10\")\n if pt_per_shot > 65535:\n raise ValueError(\" pt_per_shot can't exceed 65530\")\n if self._FPGA & self.bitMode_Spt:\n if CH ==1:\n self.regCh1Point.writeRegisterInt32(round(pt_per_shot/5)-1)\n self.regCh1Shot.writeRegisterInt32(shots)\n elif CH ==2 or CH==3:\n self.regCh2Point.writeRegisterInt32(round(pt_per_shot/5)-1)\n self.regCh2Shot.writeRegisterInt32(shots)\n\n if self._FPGA & self.bitMode_Int==0:\n pt_per_shot = round(pt_per_shot/5)\n if self._FPGA & self.bitMode_Spt:\n shots = 1\n print(\"pt_per_shot=\", pt_per_shot, \"shots=\", shots)\n if CH == 1:\n error = self.DAQconfig(1, pt_per_shot, shots, trig_delay, trigger_mode)\n error = self.DAQconfig(2, pt_per_shot, shots, trig_delay, trigger_mode)\n if error != 0:\n raise ValueError(\"DAQ configure error in channel \",CH)\n elif CH ==2 or CH==3:\n print(\"configure 3, 4 for DDC\")\n error = self.DAQconfig(3, pt_per_shot, shots, trig_delay, trigger_mode)\n error = self.DAQconfig(4, pt_per_shot, shots, trig_delay, trigger_mode)\n\n else: \n raise ValueError(\"DAQconfigDDC only can be used in DDC mode\")\n\n \n \n \n\n\n\n\n\n \n\n\n","repo_name":"asqum/PYQUM","sub_path":"TEST/FACE/pyqum/instrument/machine/SD_FPGA.py","file_name":"SD_FPGA.py","file_ext":"py","file_size_in_byte":18777,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"27459903301","text":"# fibonacci\n\ndef fibonacci(n: int) -> int:\n assert n >= 0 and isinstance(n, int), \"n must be positive integer\"\n \n if n == 0 or n == 1:\n return 1\n return fibonacci(n - 1) + fibonacci(n - 2)\n\n\nprint(fibonacci(0))\nprint(fibonacci(1))\nprint(fibonacci(5))","repo_name":"chen-qian-dan/Algorithms_And_Data_Structures_20211227Mon","sub_path":"recursion/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41196544382","text":"import json\nimport signal\n\nimport sqlalchemy\nfrom kafka import KafkaConsumer\nfrom scanf import scanf\n\ndef read_configs(config_path):\n '''\n Read configs from JSON\n '''\n\n dbConf = None\n kafkaConf = None\n with open(config_path) as cfg:\n configs = json.load(cfg)\n dbConf = configs['db']\n kafkaConf = configs['kafka']\n\n return dbConf, kafkaConf\n\n\ndef connect_db(dbConf):\n '''\n Setup DB connection\n '''\n\n # Build connection URL\n dburl = 'postgresql://{}:{}@{}:{}/{}'.format(\n dbConf['username'],\n dbConf['password'],\n dbConf['host'],\n dbConf['port'],\n dbConf['db_name']\n )\n\n # Create DB connection\n con = sqlalchemy.create_engine(dburl, client_encoding='utf8')\n\n # Build metadata\n meta = sqlalchemy.MetaData(bind=con, reflect=True)\n\n return con, meta\n\n\ndef connect_kafka(kafkaConf):\n '''\n Setup Kafka broker connection\n (NOTE: SSH Tunnel to brokers needs to be running at the time of execution)\n '''\n consumer = KafkaConsumer(\n kafkaConf['topic_name'],\n bootstrap_servers=kafkaConf['bootstrap_servers'],\n auto_offset_reset='earliest',\n group_id=kafkaConf['group_id'],\n enable_auto_commit=True,\n auto_commit_interval_ms=1000\n )\n\n return consumer\n\n\ndef evaluate_ratings(recommendations_table, rating_event_table, consumer, db_con,model_id=1):\n # Start consuming from Kafka topic\n for message in consumer:\n # Get the timestamp in BIGINT\n ts = message.timestamp\n\n # Read message byetes into comma-separated string\n event = 
message.value.decode('utf-8')\n\n # Parse values from event\n values = event.split(',')\n if(len(values) == 3 and '/rate/' in values[2]):\n user_id = values[1]\n movie_id, rating = scanf('GET /rate/%s=%s', values[2])\n year = movie_id.split('+')[-1]\n check_valid=validate(ts, user_id, movie_id, rating, year)\n if check_valid:\n # Insert into database table\n user = int(user_id)\n rating = int(rating)\n\n insert_clause = rating_event_table.insert().values(\n kafka_ts=ts,\n user_id=user_id,\n movie_id=movie_id,\n rating=rating,\n model_id =model_id\n )\n db_con.execute(insert_clause)\n # print(\"Inserted\")\n\n user_liking = get_user_liking(rating)\n\n update_clause = recommendations_table.update().where(\n (recommendations_table.c.user_id == user_id) &\n (recommendations_table.c.movie_id == movie_id) & \n (recommendations_table.c.rating == 0) ).values({'rating':user_liking,'model_id':1})\n db_con.execute(update_clause)\n\n if killswitch:\n break\n\ndef get_user_liking(rating):\n user_liking = 1 if rating >= 3 else -1\n\n return user_liking\n\ndef validate_timestamp(timestamp):\n if (timestamp > 0 and type(timestamp)==int):\n return True\n \n return False\n\ndef valiate_year(year):\n try:\n year=int(year)\n if(year <= 2020 and year > 1800):\n return True\n \n return False\n except:\n return False\n \n\ndef validate_rating(rating):\n try:\n rating = int(rating)\n if(type(rating) == int and rating >=1 and rating <=5):\n return True\n \n return False\n except:\n return False\n\ndef validate_userid(user_id):\n try:\n user_id=int(user_id)\n if(type(user_id)==int and user_id > 0):\n return True\n \n return False\n except:\n return False\n\n \n\ndef validate(timestamp, user_id, movie_id, rating, year):\n '''\n # Set data type validations \n # Range of ratings from 1 to 5\n # Check if the movie_id is valid\n # Check if the user id is valid\n '''\n try:\n if(valiate_year(year) and validate_timestamp(timestamp) and validate_rating(rating) and validate_userid(user_id)):\n return True\n \n return False\n\n except:\n return False\n\n\ndef cleanup(consumer, db_con):\n consumer.close()\n db_con.dispose()\n\n\ndef toggle_killswitch(signalNumber, frame):\n global killswitch\n killswitch = True\n\n\nif __name__ == \"__main__\":\n # Setup killswitch for clean exit\n killswitch = False\n signal.signal(signal.SIGTERM, toggle_killswitch)\n\n # Read configs\n config_path = \"/home/harshjai/movie-prediction-group-project-jurassicpark/confs/config.json\"\n dbConf, kafkaConf = read_configs(config_path)\n\n # Connect to DB\n con, meta = connect_db(dbConf)\n\n # Get table from database\n recommendations_table = meta.tables['recommendations']\n rating_event_table = meta.tables['rating_event']\n\n # Connect to Kafka\n consumer = connect_kafka(kafkaConf)\n\n # Set pointer to end of partition\n consumer.poll()\n consumer.seek_to_end()\n\n # Begin online evaluation\n evaluate_ratings(recommendations_table, rating_event_table, consumer, con)\n\n # Cleanup after breaking loop\n cleanup(consumer, con)\n","repo_name":"rohanrb302/End-to-End-Movie-Recommendation--Service","sub_path":"s3/online_eval_mod.py","file_name":"online_eval_mod.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4232694802","text":"class Queue(object):\n\n def __init__(self):\n self.items = []\n\n def enqueue(self, item):\n self.items.append(item)\n\n def dequeue(self):\n self.items.pop(0)\n\n def display(self):\n return self.items[0]\n\nqueries = 
[]\nfor _ in range(10):\n queries.append(list(map(int, input().rstrip().split())))\nqueue = Queue()\nfor val in queries:\n if val[0] == 1:\n queue.enqueue(val[1])\n elif val[0] == 2:\n queue.dequeue()\n else:\n print(queue.display())\n\n\n#print(queries)\n\n","repo_name":"boddugopikrishna/LearningOne","sub_path":"venv/All functions/testFile.py","file_name":"testFile.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17255113881","text":"from __future__ import division\r\nfrom __future__ import print_function\r\nfrom random import randint\r\nfrom random import seed\r\nfrom random import randint\r\nimport hashlib\r\nimport Crypto.Util.number\r\nimport sys\r\nfrom Crypto import Random\r\nimport random\r\nfrom numpy.polynomial.polynomial import Polynomial as Poly\r\nimport numpy.polynomial.polynomial as polynomial\r\n\r\nimport functools\r\n\r\n_RINT = functools.partial(random.SystemRandom().randint, 0)\r\n\r\n\r\ndef convert_string_asciisum(m):\r\n asc = [ord(c) for c in m]\r\n return sum(asc)\r\n\r\ndef calculate_z(g,q):\r\n temp = randint(1,q-1)\r\n z = (g**temp)%q\r\n return z\r\n\r\ndef hash_function(x1,x2,g,z,q):\r\n hash_val = ((g**x1)%q * (z**x2)%q)%q\r\n return hash_val\r\n\r\ndef loop_exponent(exponent, nr, r, p):\r\n while(nr != 1):\r\n nr = (nr*r)%p\r\n exponent= exponent+1\r\n return exponent\r\n\r\ndef generating_x(g):\r\n x = randint(1,g-1)\r\n return x\r\n\r\ndef loop_gen(nr, exponent, r, p, g):\r\n exponent = loop_exponent(exponent, nr, r, p)\r\n if(exponent == p-1 and exponent != None):\r\n g.append(r)\r\n\r\ndef generator(p):\r\n g = []\r\n for i in range(1,p):\r\n r = i\r\n exponent = 1\r\n nr = r%p\r\n loop_gen(nr, exponent, r, p, g)\r\n return random.choice(g)\r\n\r\ndef choosing_p(n):\r\n q = Crypto.Util.number.getPrime(n, randfunc=Random.get_random_bytes)\r\n return q\r\n\r\n\r\ndef digital_signature(m,q,g,x,z):\r\n M = convert_string_asciisum(m)\r\n k = 13 #randint(1, q-1)\r\n r = (g**k)%q\r\n e = (hash_function(r,M, g,z,q))\r\n s = (k-(x*e))%(q-1)\r\n return s,e\r\n\r\n\r\ndef _eval_at(poly, x, prime):\r\n accum = 0\r\n for coeff in reversed(poly):\r\n accum *= x\r\n accum += coeff\r\n accum %= prime\r\n return accum\r\n\r\ndef make_random_shares(k, n, prime):\r\n if k > n:\r\n raise ValueError(\"Pool secret would be irrecoverable.\")\r\n poly = [_RINT(prime - 1) for i in range(k)]\r\n points = [(i, _eval_at(poly, i, prime))\r\n for i in range(1, n + 1)]\r\n return poly[0], points\r\n\r\ndef _extended_gcd(a, b):\r\n x = 0\r\n last_x = 1\r\n y = 1\r\n last_y = 0\r\n while b != 0:\r\n quot = a // b\r\n a, b = b, a % b\r\n x, last_x = last_x - quot * x, x\r\n y, last_y = last_y - quot * y, y\r\n return last_x, last_y\r\n\r\ndef _divmod(num, den, p):\r\n inv, _ = _extended_gcd(den, p)\r\n return num * inv\r\n\r\ndef _lagrange_interpolate(x, x_s, y_s, p):\r\n k = len(x_s)\r\n assert k == len(set(x_s)),\"points must be distinct\"\r\n def PI(vals):\r\n accum = 1\r\n for v in vals:\r\n accum *= v\r\n return accum\r\n nums = [] # avoid inexact division\r\n dens = []\r\n for i in range(k):\r\n others = list(x_s)\r\n cur = others.pop(i)\r\n nums.append(PI(x - o for o in others))\r\n dens.append(PI(cur - o for o in others))\r\n den = PI(dens)\r\n num = sum([_divmod(nums[i] * den * y_s[i] % p, dens[i], p)\r\n for i in range(k)])\r\n return (_divmod(num, den, p) + p) % p\r\n\r\ndef recover_secret(points, prime):\r\n if len(points) < 2:\r\n raise ValueError(\"need at least two 
shares\")\r\n print(*points)\r\n x_s, y_s = zip(*points)\r\n return _lagrange_interpolate(0, x_s, y_s, prime)\r\n\r\ndef receiver(n,k,sending_mesage,p,g,x,z):\r\n count = 0\r\n new_message = []\r\n for i in sending_mesage:\r\n s,e = digital_signature(str(i[0][1]),p,g,x,z)\r\n if(int(s) != int(i[1]) or int(e) != int(i[2])):\r\n count = count+1\r\n continue\r\n new_message.append(i[0])\r\n if(count > n-k):\r\n print(\"Unable to recover the data\")\r\n else:\r\n print(\"The secret in receiver is \",recover_secret(new_message,p))\r\n\r\nif __name__ == \"__main__\":\r\n S = 1234\r\n n = 6\r\n k = 3\r\n p = choosing_p(5)\r\n g = generator(p)\r\n z = calculate_z(g, p)\r\n x = generating_x(g)\r\n n = 6\r\n k = 3\r\n\r\n secret, points = make_random_shares(k,n,p)\r\n\r\n sending_mesage = []\r\n for i in points:\r\n temp = []\r\n sign, hash_ = digital_signature(str(i[1]),p,g,x,z)\r\n temp.append(i)\r\n print(type(i))\r\n temp.append(sign)\r\n temp.append(hash_)\r\n sending_mesage.append(temp)\r\n\r\n print('Secret in sender ',secret)\r\n\r\n receiver(n,k,sending_mesage,p,g,x,z)\r\n","repo_name":"indranilpradhan/Fault-Tolerant-Storage-Scheme","sub_path":"evaluation_2.py","file_name":"evaluation_2.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70315601127","text":"import glob\nimport os\nfrom collections import OrderedDict, defaultdict\n\nfrom polygraphy import constants, mod, util\nfrom polygraphy.logger import G_LOGGER\nfrom polygraphy.tools.base import Tool\n\nalgorithm_selector = mod.lazy_import(\"polygraphy.backend.trt.algorithm_selector\")\n\n\nclass DiffTactics(Tool):\n \"\"\"\n Determine potentially bad TensorRT tactics given sets of good and bad Polygraphy tactic\n replay files, such as those saved by `--save-tactics`.\n \"\"\"\n\n def __init__(self, _issue_deprecation_warning=None):\n super().__init__(\"diff-tactics\")\n self._issue_deprecation_warning = util.default(_issue_deprecation_warning, False)\n\n if self._issue_deprecation_warning:\n self.__doc__ = \"[DEPRECATED - use `inspect diff-tactics`] \" + self.__doc__\n\n def add_parser_args(self, parser):\n parser.add_argument(\n \"--dir\",\n help=\"A directory containing good and bad Polygraphy tactic replay files, such as the ones saved by --save-tactics. \"\n \"By default, this tool will search for files in directories called 'good' and 'bad'\",\n default=\"\",\n )\n parser.add_argument(\n \"--good\",\n help=\"Either a directory containing good Polygraphy tactic replay files or a single good file. \",\n default=None,\n )\n parser.add_argument(\n \"--bad\",\n help=\"Either a directory containing bad Polygraphy tactic replay files or a single bad file. 
\",\n default=None,\n )\n\n def run_impl(self, args):\n if self._issue_deprecation_warning:\n mod.warn_deprecated(\n \"debug diff-tactics\", use_instead=\"inspect diff-tactics\", remove_in=\"0.48.0\", always_show_warning=True\n )\n\n if args.dir is None and (args.good is None or args.bad is None):\n G_LOGGER.critical(\"Either `--dir`, or both `--good` and `--bad` must be specified.\")\n\n def load_tactics(dirpath):\n \"\"\"\n Load all tactic replays from the specified directory into a single dictionary.\n\n Args:\n dirpath (str): Directory containing zero or more tactic replay files.\n\n Returns:\n dict[str, Set[polygraphy.backend.trt.algorithm_selector.Algorithm]]:\n Maps layer names to the set of algorithms present in the tactic replays.\n \"\"\"\n\n def try_load_replay(path):\n try:\n return algorithm_selector.TacticReplayData.load(path)\n except:\n return None\n\n tactics = defaultdict(set)\n replay_paths = []\n search_paths = (\n glob.iglob(os.path.join(dirpath, \"**\"), recursive=True) if os.path.isdir(dirpath) else [dirpath]\n )\n for path in search_paths:\n replay = try_load_replay(path)\n if replay is None:\n G_LOGGER.verbose(f\"{path} does not look like a tactic replay file, skipping.\")\n continue\n\n replay_paths.append(path)\n for name, algo in replay.items():\n tactics[name].add(algo)\n return tactics, replay_paths\n\n good_dir = util.default(args.good, os.path.join(args.dir, \"good\"))\n good_tactics, good_paths = load_tactics(good_dir)\n G_LOGGER.info(f\"Loaded {len(good_paths)} good tactic replays.\")\n G_LOGGER.verbose(f\"Good tactic replays: {good_paths}\")\n\n bad_dir = util.default(args.bad, os.path.join(args.dir, \"bad\"))\n bad_tactics, bad_paths = load_tactics(bad_dir)\n G_LOGGER.info(f\"Loaded {len(bad_paths)} bad tactic replays.\")\n G_LOGGER.verbose(f\"Bad tactic replays: {bad_paths}\")\n\n # Walk bad tactics and remove all the known good tactics.\n potential_bad_tactics = OrderedDict()\n for name, algo_set in bad_tactics.items():\n if name in good_tactics:\n algo_set -= good_tactics[name]\n\n if algo_set:\n potential_bad_tactics[name] = algo_set\n\n if potential_bad_tactics:\n G_LOGGER.info(\"Found potentially bad tactics:\")\n for name, algo_set in potential_bad_tactics.items():\n algo_set_str = list(map(str, algo_set))\n G_LOGGER.info(f\"Layer: {name}\\n{constants.TAB}Algorithms: {algo_set_str}\")\n else:\n G_LOGGER.info(\n \"Could not determine potentially bad tactics. 
Try providing more tactic replay files if possible.\"\n )\n","repo_name":"NVIDIA/TensorRT","sub_path":"tools/Polygraphy/polygraphy/tools/inspect/subtool/diff_tactics.py","file_name":"diff_tactics.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","stars":8187,"dataset":"github-code","pt":"53"} +{"seq_id":"20425569394","text":"import os\nfrom random import randint, sample\n\nimport gymnasium as gym\n\nfrom sinergym.utils.constants import *\nfrom sinergym.utils.env_checker import check_env\n\n\ndef test_reset(env_demo):\n obs, info = env_demo.reset()\n # obs check\n assert len(obs) == len(DEFAULT_5ZONE_OBSERVATION_VARIABLES) + \\\n 4 # year, month, day and hour\n assert env_demo.simulator._episode_existed\n # info check\n assert isinstance(info, dict)\n assert len(info) > 0\n\n\ndef test_step(env_demo):\n env_demo.reset()\n action = randint(0, 9)\n obs, reward, terminated, _, info = env_demo.step(action)\n\n assert len(obs) == len(DEFAULT_5ZONE_OBSERVATION_VARIABLES) + \\\n 4 # year, month, day and hour\n assert not isinstance(reward, type(None))\n assert not terminated\n assert info['timestep'] == 1\n assert info['time_elapsed'] == env_demo.simulator._eplus_run_stepsize * \\\n info['timestep']\n\n action = randint(0, 9)\n obs, reward, terminated, _, info = env_demo.step(action)\n\n assert len(obs) == 20\n assert not isinstance(reward, type(None))\n assert not terminated\n assert info['timestep'] == 2\n assert info['time_elapsed'] == env_demo.simulator._eplus_run_stepsize * \\\n info['timestep']\n\n\ndef test_close(env_demo):\n env_demo.reset()\n assert env_demo.simulator._episode_existed\n env_demo.close()\n assert not env_demo.simulator._episode_existed\n assert env_demo.simulator._conn is None\n\n\ndef test_all_environments():\n\n envs_id = [env_id for env_id in gym.envs.registration.registry.keys(\n ) if env_id.startswith('Eplus')]\n # Select 10 environments randomly (test would be too large)\n samples_id = sample(envs_id, 5)\n for env_id in samples_id:\n # Create env with TEST name\n env = gym.make(env_id)\n\n check_env(env)\n\n # Rename directory with name TEST for future remove\n os.rename(env.simulator._env_working_dir_parent, 'Eplus-env-TEST' +\n env.simulator._env_working_dir_parent.split('/')[-1])\n\n env.close()\n","repo_name":"kad99kev/sinergym","sub_path":"tests/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"28117153695","text":"import RPi.GPIO as GPIO\nimport time\ndac = [26,19,13,6,5,11,9,10]\nbits = len(dac)\nlevels = 2**bits\nmaxVoltage = 3.3\ndef decimal2binary(decimal):\n return [int(bit) for bit in bin(decimal)[2:].zfill(8)]\ndef bin2dac(value):\n signal = decimal2binary(value)\n return signal\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(dac, GPIO.OUT, initial = GPIO.LOW)\ntry:\n while True:\n time.sleep(0.5)\n for i in range(0,255):\n signal = decimal2binary(i)\n GPIO.output(dac,signal)\n print(i)\n time.sleep(0.01)\n for i in range(255,0,-1):\n signal = decimal2binary(i)\n GPIO.output(dac,signal)\n print(i)\n time.sleep(0.01)\n \n \n\n\nexcept KeyboardInterrupt:\n print(\"The program was stopped by the keyboard\")\nelse:\n print(\"No exceptions\")\nfinally:\n GPIO.output(dac, GPIO.LOW)\n GPIO.cleanup(dac)\n print(\"GPIO cleanup\") 
","repo_name":"El1pse/Get","sub_path":"dac2.py","file_name":"dac2.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40174502984","text":"import itertools\n\nfrom flask import url_for, request, current_app\nfrom sqlalchemy.ext.mutable import MutableDict\nfrom sqlalchemy import desc\nfrom wtforms import SelectField, RadioField\nfrom flaskshop.database import Column, Model, db\nfrom flaskshop.corelib.mc import cache, cache_by_args, rdb\nfrom flaskshop.corelib.db import PropsItem\nfrom flaskshop.settings import Config\nfrom flaskshop.resources.resources import get_presigned_url\nimport os\nimport datetime\nMC_KEY_FEATURED_PRODUCTS = \"product:featured:{}\"\nMC_KEY_PRODUCT_IMAGES = \"product:product:{}:images\"\nMC_KEY_PRODUCT_VARIANT = \"product:product:{}:variant\"\nMC_KEY_PRODUCT_DISCOUNT_PRICE = \"product:product:{}:discount_price\"\nMC_KEY_ATTRIBUTE_VALUES = \"product:attribute:values:{}\"\nMC_KEY_COLLECTION_PRODUCTS = \"product:collection:{}:products:{}\"\nMC_KEY_CATEGORY_PRODUCTS = \"product:category:{}:products:{}\"\nMC_KEY_CATEGORY_CHILDREN = \"product:category:{}:children\"\n\n\n\nclass Product(Model):\n __tablename__ = \"product_product\"\n\n title = Column(db.String(255), nullable=False)\n on_sale = Column(db.Boolean(), default=False)\n is_active = Column(db.Boolean(), default=True)\n is_featured = Column(db.Boolean())\n in_front_banner= Column(db.Boolean(), default=False)\n rating = Column(db.DECIMAL(10, 2), default=5.0)\n sold_count = Column(db.Integer(), default=0)\n review_count = Column(db.Integer(), default=0)\n basic_price = Column(db.DECIMAL(10, 2))\n discount_price = Column(db.DECIMAL(10, 2))\n category_id = Column(db.Integer())\n category_name = Column(db.String(255))\n need_check_stock=Column(db.Boolean(), default=False)\n product_type_id = Column(db.Integer())\n attributes = Column(MutableDict.as_mutable(db.JSON()),nullable=True)\n description = Column(db.Text(),nullable=True)\n background_img = Column(db.String(255), nullable=True, default=None)\n\n if Config.USE_REDIS:\n description = PropsItem(\"description\")\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n if self.has_variants:\n return url_for(\"product.show_single\", id=self.id)\n else:\n return url_for(\"product.show\", id=self.id)\n\n @property\n @cache(MC_KEY_PRODUCT_IMAGES.format(\"{self.id}\"))\n def images(self):\n return ProductImage.query.filter(ProductImage.product_id == self.id).all()\n\n\n\n @property\n def background_img_url(self):\n return url_for(\"static\", filename=\"uploads/\"+ self.first_img)\n\n def image_url(self):\n name= self.first_img\n urt=None;\n category= Category.get_by_id(self.category_id)\n if self.first_img:\n urt = get_presigned_url(self.first_img)\n elif category:\n urt= category.get_background_img_AWS()\n\n return urt\n\n @property\n def first_img(self):\n if self.images:\n im =self.images[0].image\n return str(im)\n\n return \"\"\n @property\n def is_in_stock(self):\n return any(variant.is_in_stock for variant in self.variant)\n\n @property\n def category(self):\n return Category.get_by_id(self.category_id)\n\n @property\n def product_type(self):\n return ProductType.get_by_id(self.product_type_id)\n\n @property\n def get_is_featured(self):\n if self.is_featured:\n return True\n return False\n\n @property\n def get_is_on_sale(self):\n if self.on_sale:\n return True\n return False\n\n @property\n def get_in_front_banner(self):\n if 
self.in_front_banner:\n return True\n return False\n\n @property\n def has_variants(self):\n product_type = ProductType.get_by_id(self.product_type_id)\n return product_type.has_variants\n\n @property\n\n def get_has_attributes(self):\n product_type = ProductType.get_by_id(self.product_type_id)\n return product_type.has_attributes\n\n def variant_attributes(self):\n product_type = ProductType.get_by_id(self.product_type_id).first()\n return product_type.variant_attributes()\n\n\n @property\n def is_discounted(self):\n if float(self.discounted_price) > 0:\n return True\n return False\n @property\n @cache(MC_KEY_PRODUCT_DISCOUNT_PRICE.format(\"{self.id}\"))\n def discounted_price(self):\n from flaskshop.discount.models import Sale\n\n return Sale.get_discounted_price(self)\n\n @property\n def price(self):\n if self.is_discounted:\n return self.basic_price - self.discounted_price\n return self.basic_price\n\n\n @property\n def price_human(self):\n return \"$\" + str(self.price)\n\n @property\n def on_sale_human(self):\n return \"Y\" if self.on_sale else \"N\"\n\n @property\n @cache(MC_KEY_PRODUCT_VARIANT.format(\"{self.id}\"))\n def variant(self):\n return ProductVariant.query.filter(ProductVariant.product_id == self.id).all()\n\n @property\n def attribute_map(self):\n items = {\n ProductAttribute.get_by_id(k): AttributeChoiceValue.get_by_id(v)\n for k, v in self.attributes.items()\n }\n return items\n\n @property\n\n def attribute_values(self):\n items = {\n ProductAttribute.get_by_id(k): AttributeChoiceValue.get_by_id(v)\n for k, v in self.attributes.items()\n }\n return items\n @classmethod\n @cache(MC_KEY_FEATURED_PRODUCTS.format(\"{num}\"))\n def get_featured_product(cls, num=8):\n return cls.query.filter_by(is_featured=True).limit(num).all()\n\n @classmethod\n\n def search_product(cls, num=8):\n return cls.query.filter_by(is_featured=True).limit(num).all()\n\n @classmethod\n @cache(MC_KEY_FEATURED_PRODUCTS.format(\"{num}\"))\n def get_on_sale(cls, num=8):\n return cls.query.filter_by(on_sale=True).limit(num).all()\n\n @classmethod\n @cache(MC_KEY_FEATURED_PRODUCTS.format(\"{num}\"))\n def get_all(cls):\n return cls.query.all()\n\n @classmethod\n def update_images(cls, new_images,product_id):\n\n origin_ids = (\n ProductImage.query.with_entities(ProductImage.id)\n .filter_by(product_id=product_id)\n .all()\n )\n\n origin_ids = set(i for i, in origin_ids)\n new_images = set(int(i) for i in new_images)\n need_del = origin_ids - new_images\n need_add = new_images - origin_ids\n for id in need_del:\n ProductImage.get_by_id(id).delete(commit=False)\n for id in need_add:\n image = ProductImage.get_by_id(id)\n image.product_id = product_id\n image.save(commit=False)\n db.session.commit()\n\n def update_attributes(self, attr_values):\n\n attr_entries = [str(item.id) for item in self.product_type.product_attributes]\n attributes = dict(zip(attr_entries, attr_values))\n self.attributes = attributes\n\n def generate_variants(self):\n if not self.product_type.has_variants:\n ProductVariant.create(sku=str(self.id) + \"-1337\", product_id=self.id, title= self.title)\n else:\n sku_id = 1337\n variant_attr_map = {\n attr: attr.values for attr in self.product_type.variant_attributes\n }\n all_combinations = itertools.product(*variant_attr_map.values())\n variant_combination= [\n {str(attr_value.attribute.id): str(attr_value.id) for attr_value in combination}\n for combination in all_combinations\n ]\n\n\n for variant_attributes in variant_combination:\n sku = str(self.id) + \"-\" + str(sku_id)\n 
variant=ProductVariant.create(\n sku=sku,\n title=\"\",\n product_id=self.id,\n attributes=variant_attributes,\n )\n sku_id += 1\n\n def set_all_stocks_infi(self):\n for item in self.variant:\n item.quantity_allocated=10000,000;\n\n\n\n\n\n def delete_variants(self):\n for item in itertools.chain(\n self.variant\n ):\n item.delete(commit=False)\n db.session.commit()\n\n def delete(self):\n need_del_collection_products = ProductCollection.query.filter_by(\n product_id=self.id\n ).all()\n for item in itertools.chain(\n self.images, self.variant, need_del_collection_products\n ):\n item.delete(commit=False)\n\n db.session.delete(self)\n db.session.commit()\n\n\n @staticmethod\n def clear_mc(target):\n rdb.delete(MC_KEY_PRODUCT_DISCOUNT_PRICE.format(target.id))\n keys = rdb.keys(MC_KEY_FEATURED_PRODUCTS.format(\"*\"))\n for key in keys:\n rdb.delete(key)\n\n @staticmethod\n def clear_category_cache(target):\n keys = rdb.keys(MC_KEY_CATEGORY_PRODUCTS.format(target.category_id, \"*\"))\n for key in keys:\n rdb.delete(key)\n\n @classmethod\n def __flush_insert_event__(cls, target):\n super().__flush_insert_event__(target)\n\n if current_app.config[\"USE_ES\"]:\n from flaskshop.public.search import Item\n\n Item.add(target)\n\n @classmethod\n def __flush_before_update_event__(cls, target):\n\n super().__flush_before_update_event__(target)\n target.clear_category_cache(target)\n\n @classmethod\n def __flush_after_update_event__(cls, target):\n\n super().__flush_after_update_event__(target)\n target.clear_mc(target)\n target.clear_category_cache(target)\n if current_app.config[\"USE_ES\"]:\n from flaskshop.public.search import Item\n\n Item.update_item(target)\n\n @classmethod\n def __flush_delete_event__(cls, target):\n from flaskshop.public.search import Item\n\n super().__flush_delete_event__(target)\n target.clear_mc(target)\n target.clear_category_cache(target)\n # Item.delete(target) ******TO DO- Chaya\n\n\nclass Category(Model):\n __tablename__ = \"product_category\"\n title = Column(db.String(255), nullable=False)\n parent_id = Column(db.Integer(), default=0)\n background_img = Column(db.String(255),nullable=True,default=None)\n is_active = Column(db.Boolean(), default=True)\n description = Column(db.Text())\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n\n\n return url_for(\"product.show_category\", id=self.id)\n\n @property\n def background_img_url(self):\n url = self.get_background_img()\n return url\n def get_background_img(self):\n if self.background_img:\n return self.background_img\n\n return \"\"\n\n def get_background_img_AWS(self):\n url=None\n if self.background_img:\n url= get_presigned_url(self.background_img)\n\n return url\n\n\n\n @property\n def products(self):\n all_category_ids = [child.id for child in self.children] + [self.id]\n return Product.query.filter(Product.category_id.in_(all_category_ids)).all()\n\n @property\n def product_number(self):\n all_category_ids = [child.id for child in self.children] + [self.id]\n category_products= Product.query.filter(Product.category_id.in_(all_category_ids))\n return category_products.count()\n\n @property\n @cache(MC_KEY_CATEGORY_CHILDREN.format(\"{self.id}\"))\n def children(self):\n return Category.query.filter(Category.parent_id == self.id).all()\n\n @property\n def parent(self):\n return Category.get_by_id(self.parent_id)\n\n @property\n def attr_filter(self):\n attr_filter = set()\n for product in self.products:\n product_type= product.product_type\n product_tpe_list= 
ProductType.query.filter_by(title= product.title).first()\n if product.product_type.has_attributes:\n for attr in product.product_type.product_attributes:\n attr_filter.add(attr)\n return attr_filter\n\n @classmethod\n @cache_by_args(MC_KEY_CATEGORY_PRODUCTS.format(\"{category_id}\", \"{page}\"))\n def get_product_by_category(cls, category_id, page):\n category = Category.get_by_id(category_id)\n all_category_ids = [child.id for child in category.children] + [category.id]\n query = Product.query.filter(Product.category_id.in_(all_category_ids))\n ctx, query = get_product_list_context(query, category)\n pagination = query.paginate(page, per_page=16)\n del pagination.query\n ctx.update(object=category, pagination=pagination, products=pagination.items)\n return ctx\n\n @classmethod\n def first_level_items(cls):\n return cls.query.filter(cls.parent_id == 0).all()\n\n def delete(self):\n for child in self.children:\n child.parent_id = 0\n db.session.add(child)\n need_update_products = Product.query.filter_by(category_id=self.id).all()\n for product in need_update_products:\n product.category_id = 0\n db.session.add(product)\n db.session.delete(self)\n db.session.commit()\n\n\n @staticmethod\n def clear_mc(target):\n rdb.delete(MC_KEY_CATEGORY_CHILDREN.format(target.id))\n keys = rdb.keys(MC_KEY_CATEGORY_PRODUCTS.format(target.id, \"*\"))\n for key in keys:\n rdb.delete(key)\n\n @classmethod\n def __flush_after_update_event__(cls, target):\n super().__flush_after_update_event__(target)\n target.clear_mc(target)\n\n @classmethod\n def __flush_delete_event__(cls, target):\n super().__flush_delete_event__(target)\n target.clear_mc(target)\n\n\nclass ProductTypeAttributes(Model):\n \"\"\"存储的产品的属性是包括用户可选和不可选\"\"\"\n\n __tablename__ = \"product_type_attribute\"\n product_type_id = Column(db.Integer(),default=0)\n product_attribute_id = Column(db.Integer())\n\n\nclass ProductTypeVariantAttributes(Model):\n \"\"\"存储的产品SKU的属性是可以给用户去选择的\"\"\"\n\n __tablename__ = \"product_type_variant_attribute\"\n product_type_id = Column(db.Integer())\n product_attribute_id = Column(db.Integer())\n\nclass ProductType(Model):\n __tablename__ = \"product_type\"\n title = Column(db.String(255), nullable=False)\n has_variants = Column(db.Boolean(), default=False)\n has_attributes= Column(db.Boolean(), default=False)\n is_shipping_required = Column(db.Boolean(), default=False)\n\n def __str__(self):\n return self.title\n\n def get_by_name(cls,title):\n return cls.query.filter_by(title=title)\n\n @property\n def product_attributes(self):\n at_ids = (\n ProductTypeAttributes.query.with_entities(\n ProductTypeAttributes.product_attribute_id\n )\n .filter(ProductTypeAttributes.product_type_id == self.id)\n .all()\n )\n return ProductAttribute.query.filter(\n ProductAttribute.id.in_(id for id, in at_ids)\n ).all()\n\n @property\n def variant_attributes(self):\n at_ids = (\n ProductTypeVariantAttributes.query.with_entities(\n ProductTypeVariantAttributes.product_attribute_id\n )\n .filter(ProductTypeVariantAttributes.product_type_id == self.id)\n .all()\n )\n return ProductAttribute.query.filter(\n ProductAttribute.id.in_(id for id, in at_ids)\n ).all()\n\n @property\n def variant_attr_id(self):\n if self.variant_attributes:\n return self.variant_attributes[0].id\n else:\n return None\n\n def update_product_attr(self, new_attrs):\n origin_ids = (\n ProductTypeAttributes.query.with_entities(\n ProductTypeAttributes.product_attribute_id\n )\n .filter_by(product_type_id=self.id)\n .all()\n )\n origin_ids = set(i for i, in 
origin_ids)\n new_attrs = set(int(i) for i in new_attrs)\n need_del = origin_ids - new_attrs\n need_add = new_attrs - origin_ids\n for id in need_del:\n ProductTypeAttributes.query.filter_by(\n product_type_id=self.id, product_attribute_id=id\n ).first().delete(commit=False)\n for id in need_add:\n new = ProductTypeAttributes(\n product_type_id=self.id, product_attribute_id=id\n )\n db.session.add(new)\n db.session.commit()\n\n def update_variant_attr(self, new_attrs):\n origin_ids = (ProductTypeVariantAttributes.query.filter_by(\n product_type_id=self.id\n ).all())\n\n origin_ids = set(i.product_attribute_id for i in origin_ids)\n new_attrs = set(int(i) for i in new_attrs)\n need_del = origin_ids - new_attrs\n need_add = new_attrs - origin_ids\n\n for id in need_del:\n need_del_variant_attrs= ProductTypeVariantAttributes.query.filter_by(\n product_type_id=self.id, product_attribute_id=id\n ).first()\n\n if need_del_variant_attrs:\n need_del_variant_attrs.delete(commit=False)\n\n for id in need_add:\n new = ProductTypeVariantAttributes(\n product_type_id=self.id, product_attribute_id=id\n )\n db.session.add(new)\n db.session.commit()\n\n\n\n def del_all_variant_attr(self):\n need_del_variant_attrs = ProductTypeVariantAttributes.query.filter_by(\n product_type_id=self.id\n ).all()\n for item in itertools.chain(need_del_variant_attrs):\n item.delete(commit=False)\n db.session.commit()\n\n def delete(self):\n need_del_product_attrs = ProductTypeAttributes.query.filter_by(\n product_type_id=self.id\n ).all()\n need_del_variant_attrs = ProductTypeVariantAttributes.query.filter_by(\n product_type_id=self.id\n ).all()\n\n for item in itertools.chain(need_del_product_attrs, need_del_variant_attrs):\n item.delete(commit=False)\n need_update_products = Product.query.filter_by(product_type_id=self.id).all()\n for product in need_update_products:\n product.product_type_id = 0\n db.session.add(product)\n db.session.delete(self)\n db.session.commit()\n\n\nclass ProductVariant(Model):\n __tablename__ = \"product_variant\"\n sku = Column(db.String(32), unique=True)\n title = Column(db.String(255))\n price_override = Column(db.DECIMAL(10, 2), default=0.00)\n quantity = Column(db.Integer(), default=0)\n quantity_allocated = Column(db.Integer(), default=0)\n product_id = Column(db.Integer(), default=0)\n attributes = Column(MutableDict.as_mutable(db.JSON()))\n\n def __str__(self):\n return self.title or self.sku\n\n\n def display_product(self):\n return f\"{self.product} ({str(self)})\"\n\n @property\n def sku_id(self):\n return self.sku.split(\"-\")[1]\n\n @sku_id.setter\n def sku_id(self, data):\n pass\n\n @property\n def is_shipping_required(self):\n return self.product.product_type.is_shipping_required\n\n @property\n def quantity_available(self):\n return max(self.quantity - self.quantity_allocated, 0)\n\n @classmethod\n def search_varint_by_attributs(cls,variantAttributes,product_id):\n\n variants= ProductVariant.query.filter_by(product_id=product_id).all()\n for aa in variants:\n res= all( variantAttributes[k] == v for k, v in aa.attributes.items() if k in variantAttributes)\n if res:\n return aa\n return None\n @property\n def is_in_stock(self):\n product= Product.get_by_id(self.product_id)\n if not product.need_check_stock:\n return True\n stock = self.quantity - self.quantity_allocated\n if stock > 0:\n return True\n return False\n\n @property\n def stock(self):\n return self.quantity - self.quantity_allocated\n\n @property\n def price(self):\n return self.price_override or self.product.price\n\n @property\n def 
product(self):\n return Product.get_by_id(self.product_id)\n\n def get_absolute_url(self):\n return url_for(\"product.show\", id=self.product.id)\n\n @property\n def attribute_map(self):\n items = {\n ProductAttribute.get_by_id(k): AttributeChoiceValue.get_by_id(v)\n for k, v in self.attributes.items()\n }\n return items\n\n def check_enough_stock(self, quantity):\n product = Product.get_by_id(self.product_id)\n if not product.need_check_stock:\n return True, \"success\"\n\n if self.stock < quantity:\n return False, f\"{self.display_product()} has not enough stock\"\n return True, \"success\"\n\n @staticmethod\n def clear_mc(target):\n rdb.delete(MC_KEY_PRODUCT_VARIANT.format(target.product_id))\n\n @classmethod\n def __flush_insert_event__(cls, target):\n super().__flush_insert_event__(target)\n target.clear_mc(target)\n\n @classmethod\n def __flush_after_update_event__(cls, target):\n super().__flush_after_update_event__(target)\n target.clear_mc(target)\n\n @classmethod\n def __flush_delete_event__(cls, target):\n super().__flush_delete_event__(target)\n target.clear_mc(target)\n\n\nclass ProductAttribute(Model):\n __tablename__ = \"product_attribute\"\n\n title = Column(db.String(255), nullable=False)\n image= Column(db.String(255), nullable=True, default=None)\n\n def __str__(self):\n return self.title\n\n def get_by_name(cls, title):\n return cls.query.filter_by(title=title)\n\n @property\n @cache(MC_KEY_ATTRIBUTE_VALUES.format(\"{self.id}\"))\n def values(self):\n return AttributeChoiceValue.query.filter(\n AttributeChoiceValue.attribute_id == self.id\n ).all()\n\n @property\n def values_label(self):\n return \",\".join([value.title for value in self.values])\n\n @property\n def types(self):\n at_ids = (\n ProductTypeAttributes.query.with_entities(\n ProductTypeAttributes.product_type_id\n )\n .filter_by(product_attribute_id=self.id)\n .all()\n )\n return ProductType.query.filter(ProductType.id.in_(id for id, in at_ids)).all()\n\n @property\n def types_label(self):\n return \",\".join([t.title for t in self.types])\n\n def update_values(self, new_values):\n origin_values = AttributeChoiceValue.query.filter_by(attribute_id=self.id).all()\n for value in origin_values:\n value.delete(commit=False)\n for key in new_values.keys():\n new = AttributeChoiceValue(title=key, image=new_values[key], attribute_id=self.id)\n db.session.add(new)\n db.session.commit()\n\n\n def update_values_(self, new_values):\n origin_values = list(value.title for value in self.values)\n\n need_del = set()\n need_add = set()\n for value in self.values:\n if value.title not in new_values.keys():\n need_del.add(value)\n for value in new_values.keys():\n if value not in origin_values:\n need_add.add(value)\n for value in need_del:\n value.delete(commit=False)\n for value in need_add:\n new = AttributeChoiceValue(title=value, attribute_id=self.id)\n db.session.add(new)\n db.session.commit()\n\n\n\n def update_types(self, new_types):\n origin_ids = (\n ProductTypeAttributes.query.with_entities(\n ProductTypeAttributes.product_type_id\n )\n .filter_by(product_attribute_id=self.id)\n .all()\n )\n origin_ids = set(i for i, in origin_ids)\n new_types = set(int(i) for i in new_types)\n need_del = origin_ids - new_types\n need_add = new_types - origin_ids\n for id in need_del:\n ProductTypeAttributes.query.filter_by(\n product_attribute_id=self.id, product_type_id=id\n ).first().delete(commit=False)\n for id in need_add:\n new = ProductTypeAttributes(\n product_attribute_id=self.id, product_type_id=id\n )\n 
db.session.add(new)\n db.session.commit()\n\n def delete(self):\n need_del_product_attrs = ProductTypeAttributes.query.filter_by(\n product_attribute_id=self.id\n ).all()\n need_del_variant_attrs = ProductTypeVariantAttributes.query.filter_by(\n product_attribute_id=self.id\n ).all()\n for item in itertools.chain(\n need_del_product_attrs, need_del_variant_attrs, self.values\n ):\n item.delete(commit=False)\n db.session.delete(self)\n db.session.commit()\n\n @classmethod\n def __flush_after_update_event__(cls, target):\n super().__flush_after_update_event__(target)\n rdb.delete(MC_KEY_ATTRIBUTE_VALUES.format(target.id))\n\n @classmethod\n def __flush_delete_event__(cls, target):\n super().__flush_delete_event__(target)\n rdb.delete(MC_KEY_ATTRIBUTE_VALUES.format(target.id))\n\n\nclass AttributeChoiceValue(Model):\n __tablename__ = \"product_attribute_value\"\n title = Column(db.String(255), nullable=False)\n image = Column(db.String(255), nullable=False)\n attribute_id = Column(db.Integer())\n\n def __str__(self):\n return self.title\n\n @property\n def attribute(self):\n return ProductAttribute.get_by_id(self.attribute_id)\n def image_url(self):\n urt = get_presigned_url(self.image)\n return urt\n\nclass ProductImage(Model):\n __tablename__ = \"product_image\"\n image = Column(db.String(255))\n order = Column(db.Integer())\n product_id = Column(db.Integer())\n\n @classmethod\n def del_product_imgs(cls,product_id):\n items= cls.query.filter_by(product_id = product_id).all()\n for item in items:\n item.delete()\n\n def __str__(self):\n return url_for(\"static\", filename=self.image, _external=True)\n\n @staticmethod\n def clear_mc(target):\n rdb.delete(MC_KEY_PRODUCT_IMAGES.format(target.product_id))\n\n @classmethod\n def __flush_insert_event__(cls, target):\n super().__flush_insert_event__(target)\n target.clear_mc(target)\n\n @classmethod\n def __flush_delete_event__(cls, target):\n super().__flush_delete_event__(target)\n target.clear_mc(target)\n image_file = current_app.config[\"UPLOAD_FOLDER\"] / target.image\n if image_file.exists():\n os.remove(image_file)\n\n\n\nclass Collection(Model):\n __tablename__ = \"product_collection\"\n title = Column(db.String(255), nullable=False)\n background_img = Column(db.String(255))\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return url_for(\"product.show_collection\", id=self.id)\n\n @property\n def background_img_url(self):\n return url_for(\"static\", filename=self.background_img)\n\n @property\n def products(self):\n at_ids = (\n ProductCollection.query.with_entities(ProductCollection.product_id)\n .filter_by(collection_id=self.id)\n .all()\n )\n return Product.query.filter(Product.id.in_(id for id, in at_ids)).all()\n\n @property\n def attr_filter(self):\n attr_filter = set()\n for product in self.products:\n for attr in product.product_type.product_attributes:\n attr_filter.add(attr)\n return attr_filter\n\n def update_products(self, new_products):\n origin_ids = (\n ProductCollection.query.with_entities(ProductCollection.product_id)\n .filter_by(collection_id=self.id)\n .all()\n )\n origin_ids = set(i for i, in origin_ids)\n new_products = set(int(i) for i in new_products)\n need_del = origin_ids - new_products\n need_add = new_products - origin_ids\n for id in need_del:\n ProductCollection.query.filter_by(\n collection_id=self.id, product_id=id\n ).first().delete(commit=False)\n for id in need_add:\n new = ProductCollection(collection_id=self.id, product_id=id)\n db.session.add(new)\n 
db.session.commit()\n\n def delete(self):\n need_del = ProductCollection.query.filter_by(collection_id=self.id).all()\n for item in need_del:\n item.delete(commit=False)\n db.session.delete(self)\n db.session.commit()\n if self.background_img:\n image = current_app.config[\"STATIC_DIR\"] / self.background_img\n if image.exists():\n image.unlink()\n\n\nclass ProductCollection(Model):\n __tablename__ = \"product_collection_product\"\n product_id = Column(db.Integer())\n collection_id = Column(db.Integer())\n\n @classmethod\n @cache_by_args(MC_KEY_COLLECTION_PRODUCTS.format(\"{collection_id}\", \"{page}\"))\n def get_product_by_collection(cls, collection_id, page):\n collection = Collection.get_by_id(collection_id)\n at_ids = (\n ProductCollection.query.with_entities(ProductCollection.product_id)\n .filter(ProductCollection.collection_id == collection.id)\n .all()\n )\n query = Product.query.filter(Product.id.in_(id for id, in at_ids))\n ctx, query = get_product_list_context(query, collection)\n pagination = query.paginate(page, per_page=16)\n del pagination.query\n ctx.update(object=collection, pagination=pagination, products=pagination.items)\n return ctx\n\n @staticmethod\n def clear_mc(target):\n keys = rdb.keys(MC_KEY_COLLECTION_PRODUCTS.format(target.collection_id, \"*\"))\n for key in keys:\n rdb.delete(key)\n\n @classmethod\n def __flush_insert_event__(cls, target):\n target.clear_mc(target)\n\n @classmethod\n def __flush_after_update_event__(cls, target):\n super().__flush_after_update_event__(target)\n target.clear_mc(target)\n\n @classmethod\n def __flush_delete_event__(cls, target):\n super().__flush_delete_event__(target)\n target.clear_mc(target)\n\n\ndef get_product_list_context(query, obj):\n \"\"\"\n obj: collection or category, to get it`s attr_filter.\n \"\"\"\n args_dict = {}\n price_from = request.args.get(\"price_from\", None, type=int)\n price_to = request.args.get(\"price_to\", None, type=int)\n if price_from:\n query = query.filter(Product.basic_price > price_from)\n if price_to:\n query = query.filter(Product.basic_price < price_to)\n args_dict.update(price_from=price_from, price_to=price_to)\n\n sort_by_choices = {\"title\": \"title\", \"price\": \"price\"}\n arg_sort_by = request.args.get(\"sort_by\", \"\")\n is_descending = False\n if arg_sort_by.startswith(\"-\"):\n is_descending = True\n arg_sort_by = arg_sort_by[1:]\n if arg_sort_by in sort_by_choices:\n if is_descending:\n query = query.order_by(desc(getattr(Product, arg_sort_by)))\n else:\n query = query.order_by(getattr(Product, arg_sort_by))\n now_sorted_by = arg_sort_by or \"title\"\n args_dict.update(\n sort_by_choices=sort_by_choices,\n now_sorted_by=now_sorted_by,\n is_descending=is_descending,\n )\n\n args_dict.update(default_attr={})\n attr_filter = obj.attr_filter\n for attr in attr_filter:\n value = request.args.get(attr.title)\n if value:\n query = query.filter(Product.attributes.__getitem__(str(attr.id)) == value)\n args_dict[\"default_attr\"].update({attr.title: int(value)})\n args_dict.update(attr_filter=attr_filter)\n\n if request.args:\n args_dict.update(clear_filter=True)\n\n return args_dict, query\n","repo_name":"chaya7282/flask-shop-master","sub_path":"flaskshop/product/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":31719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"831165548","text":"import sys\nfrom pytube import YouTube\nimport time\nimport pafy\n\n\ndef get_video_info(url):\n try:\n ptvideo = 
YouTube(url)\n pfvideo = pafy.new(url)\n print(ptvideo.title)\n except Exception as e:\n msg = f'Erro: {e}\\n'\n sys.exit(msg)\n\n\ndef download_video(url):\n ptvideo = YouTube(url, on_progress_callback=progress)\\\n .streams\\\n .filter(file_extension='mp4', resolution='720p', progressive=True)\\\n .first()\\\n .download()\n # .download('Baixando/') # Define o diretório destino\n\n\ndef progress(stream, chunck, bytes_remaining):\n filesize = stream.filesize\n current = ((filesize - bytes_remaining)/filesize)\n percent = ('{0:.1f}').format(current*100)\n progress = int(50*current)\n status = '█' * progress + '-' * (50 - progress)\n sys.stdout.write(' ↳ |{bar}| {percent}%\\r'.format(\n bar=status, percent=percent))\n sys.stdout.flush()\n\n\n# url = 'http://youtube.com/watch?v=9bZkp7q19f0'\nurl = ''\nwhile url == '':\n url = input('URL do video: ')\n\nget_video_info(url)\ndownload_video(url)\n\ntime.sleep(3)\n","repo_name":"saulotarsobc/baixar-video-youtube-terminal","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70180304809","text":"import gym\nfrom gym import core, spaces\nfrom gym.envs.registration import register\nimport numpy as np\nfrom gym.utils import seeding\nimport copy\n\nclass PuddleSimpleEnv(gym.Env):\n\n def __init__(self, goal=[1.0, 1.0], goal_threshold=0.1,\n noise=0.025, thrust=0.05, puddle_center=[[.5, .5]],\n puddle_width=[[.3, .3]]):\n self.goal = np.array(goal)\n self.goal_threshold = goal_threshold\n self.noise = noise\n self.thrust = thrust\n self.puddle_center = [np.array(center) for center in puddle_center]\n self.puddle_width = [np.array(width) for width in puddle_width]\n\n self.action_space = spaces.Discrete(4)\n self.observation_space = spaces.Box(0.0, 1.0, shape=(2,))\n\n self.actions = [np.zeros(2) for i in range(4)]\n for i in range(4):\n self.actions[i][i//2] = thrust * (i%2 * 2 - 1)\n\n self._seed()\n self.viewer = None\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n\n self.pos += self.actions[action] + self.np_random.uniform(low=-self.noise, high=self.noise, size=(2,))\n self.pos = np.clip(self.pos, 0.0, 1.0)\n\n reward = 0.\n\n done = np.linalg.norm((self.pos - self.goal), ord=1) < self.goal_threshold\n\n if done == True:\n reward = 50.0\n\n return self.pos, reward, done, {}\n\n def reset(self):\n self.pos = self.observation_space.sample()\n return self.pos\n#\nregister(\n id='PuddleEnv-v0',\n entry_point='puddlesimple:PuddleSimpleEnv',\n timestep_limit=5000,\n)\n\n","repo_name":"arushijain94/SafeOptionCritic","sub_path":"puddlesimple.py","file_name":"puddlesimple.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"70292401769","text":"import discord\nimport random\n#replace the world token with your discord bot token\nmy_secret = \"TOKEN\"\nHorror=[\"1922\",\"Alive (2020)\",\"Apostle (2018)\",\"Army of the Dead (2021)\",\"Berlin Syndrome (2017)\",\"The Conjuring (2013)\",\"The Conjuring 2 (2016)\",\"Creep (2014)\",\"Fear Street (2021)(trilogy)\",\"Hush (2016)\",\"*Jaws (1975)\",\"Pan’s Labyrinth (2006)\"]\nthriller=[\" Venom: Let There Be Carnage (2021)\",\"I See You (2019)\",\"Red Dragon (2002)\",\"Mulholland Drive 
(2001)\",\"thesis\",\"Buried (2010)\",\"Run(2020)\",\"The Revenant(2015)\",\"Panic Room(2002)\"]\nadventure=[\"Free Guy (2021)\",\"Black Widow (2021)\",\"Shang-Chi and the Legend of the Ten Rings\",\"Skyfall (2012)\",\"THE THREE MUSKETEERS (1921)\",\"THE WIZARD OF OZ (1939)\",\"Star Wars: Episode VII - The Force Awakens (2015)\",\"War for the Planet of the Apes (2017)\",\"Shazam! (2019)\",\"The Jungle Book (2016)\",\"Mission: Impossible Rogue Nation (2015)\",\"Harry Potter and the Deathly Hallows - Part 2 (2011)\",\"Star Trek (2009)\"]\nanimated=[\"Kubo and the two strings\",\"Aladdin\", \"Lion King\",\"Bambi\",\"Megamind\",\"Hotel Transylvania 1\",\"Hotel Transylvania 2\",\"Hotel Transylvania 3\",\"Big Hero 6\",\"Toy Story 1\",\"Toy Story 2\",\"Toy Story 3\",\"Toy Story 4\",\"Justice Leaugue Dark\",\"Justice League Dark: Apokolips War\"]\nromcom=[\"Just go for it\",\"17 again\",\"New Year's Eve\",\"La La Land\",\"You've got mail\",\"Music and Lyrics\",\"The Holiday\",\"Roxanne\",\"Sleepless in Seattle\",\"When Harry met Sally\",\"Crazy Rich Asians\",\"Tanu weds Manu\",\"Varane Avashyamund\",\"Kannum Kannum Kollaiyadithaal\",\"qarib qarib singlle\"]\nall_movies=Horror+thriller+adventure+animated+romcom\nclient = discord.Client()\n@client.event\nasync def on_read():\n print(\"Bot logged in as{0.user}\".format(client))\n@client.event\nasync def on_message(msg):\n if msg.author==client.user:\n return\n if msg.content.startswith('!movies-horror'):\n await msg.channel.send(random.choice(Horror))\n elif msg.content.startswith('!movies-thriller'):\n await msg.channel.send(random.choice(thriller))\n elif msg.content.startswith('!movies-adventure'):\n await msg.channel.send(random.choice(adventure))\n elif msg.content.startswith('!movies-animated'):\n await msg.channel.send(random.choice(animated))\n elif msg.content.startswith('!movies-romcom'):\n await msg.channel.send(random.choice(romcom))\n elif msg.content.startswith('!movies'):\n await msg.channel.send(random.choice(all_movies)) \nclient.run(my_secret)","repo_name":"Akito7011/MOVIE-BOT","sub_path":"MOVIE-BOT.py","file_name":"MOVIE-BOT.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3614610873","text":"while True:\n try:\n numero = int(input(\"Ingrese un número(distinto de cero): \"))\n resultado = 10/numero\n print(f\"el resultado es {resultado}\")\n break\n except ZeroDivisionError:\n print(\"El valor ingresado debe ser un número distinto de cero, vuelva a intentarlo!!!\")\n except ValueError:\n print(\"el número ingresado no puede ser un texto, no sea terco!!!\")","repo_name":"reinaldoca/taller_practico_python","sub_path":"clase_26/ejemplo_3.py","file_name":"ejemplo_3.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"es","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"10147915985","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Galactic vs Extragalactic Objects\n# \n# The astronomical transients that appear in this challenge can be separated into two distinct groups: ones that are in our Milky Way galaxy (galactic) and ones that are outside of our galaxy (extragalactic). As described in the data note, all of the galactic objects have been assigned a host galaxy photometric redshift of 0. We can use this information to immediately classify every object as either galactic or extragalactic and remove a lot of potential options from the classification. 
Doing so results in matching the naive benchmark.\n# \n# We find that all of the classes are either uniquely galactic or extragalactic except for class 99 which represents the unknown objects that aren't in the training set.\n\n# ## Load the data\n# \n# For this notebook, we'll only need the metadata.\n\n# In[ ]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# In[ ]:\n\n\n\n# In[ ]:\n\n\nmeta_data = pd.read_csv('../input/training_set_metadata.csv')\ntest_meta_data = pd.read_csv('../input/test_set_metadata.csv')\n\n# Map the classes to the range 0-14. We manually add in the 99 class that doesn't show up in the training data.\n\n# In[ ]:\n\n\ntargets = np.hstack([np.unique(meta_data['target']), [99]])\ntarget_map = {j:i for i, j in enumerate(targets)}\ntarget_ids = [target_map[i] for i in meta_data['target']]\nmeta_data['target_id'] = target_ids\n\n# Let's look at which classes show up in galactic vs extragalactic hosts. We can use the hostgal_specz key which is 0 for galactic objects.\n\n# In[ ]:\n\n\ngalactic_cut = meta_data['hostgal_specz'] == 0\nplt.figure(figsize=(10, 8))\nplt.hist(meta_data[galactic_cut]['target_id'], 15, (0, 15), alpha=0.5, label='Galactic')\nplt.hist(meta_data[~galactic_cut]['target_id'], 15, (0, 15), alpha=0.5, label='Extragalactic')\nplt.xticks(np.arange(15)+0.5, targets)\nplt.gca().set_yscale(\"log\")\nplt.xlabel('Class')\nplt.ylabel('Counts')\nplt.xlim(0, 15)\nplt.legend();\n\n# There is no overlap at all between the galactic and extragalactic objects in the training set. Class 99 isn't represented in the training set at all. Let's make a classifier that checks if an object is galactic or extragalactic and then assigns a flat probability to each class in that group. We'll include class 99 in both the galactic and extragalactic groups.\n\n# In[ ]:\n\n\n# Build the flat probability arrays for both the galactic and extragalactic groups\ngalactic_cut = meta_data['hostgal_specz'] == 0\ngalactic_data = meta_data[galactic_cut]\nextragalactic_data = meta_data[~galactic_cut]\n\ngalactic_classes = np.unique(galactic_data['target_id'])\nextragalactic_classes = np.unique(extragalactic_data['target_id'])\n\n# Add class 99 (id=14) to both groups.\ngalactic_classes = np.append(galactic_classes, 14)\nextragalactic_classes = np.append(extragalactic_classes, 14)\n\ngalactic_probabilities = np.zeros(15)\ngalactic_probabilities[galactic_classes] = 1. / len(galactic_classes)\nextragalactic_probabilities = np.zeros(15)\nextragalactic_probabilities[extragalactic_classes] = 1. / len(extragalactic_classes)\n\n# Apply this prediction to the data. We simply choose which of the two probability arrays to use based off of whether the object is galactic or extragalactic.\n\n# In[ ]:\n\n\n# Apply this prediction to a table\nimport tqdm\ndef do_prediction(table):\n probs = []\n for index, row in tqdm.tqdm(table.iterrows(), total=len(table)):\n if row['hostgal_photoz'] == 0:\n prob = galactic_probabilities\n else:\n prob = extragalactic_probabilities\n probs.append(prob)\n return np.array(probs)\n\npred = do_prediction(meta_data)\ntest_pred = do_prediction(test_meta_data)\n\n# Now write the prediction out and submit it. 
This notebook gets a score of 2.158 which matches the naive benchmark.\n\n# In[ ]:\n\n\ntest_df = pd.DataFrame(index=test_meta_data['object_id'], data=test_pred, columns=['class_%d' % i for i in targets])\ntest_df.to_csv('./naive_benchmark.csv')\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample734.py","file_name":"sample734.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7539901853","text":"import sys,traceback\nfrom multiprocessing import Pool\nfrom Src.Reader import Reader\nfrom Src.Writer import Writer\n\n\nclass runner:\n def __init__(self ,cmds):\n\n\n func = (hasattr(self, cmds['runModel']))\n\n if(func == False):\n print('-m 只支持 read 或者 write')\n return\n\n self.cmds = cmds\n\n\n self.__getattribute__(cmds['runModel'])()\n\n\n def read(self ):\n Reader(\n logPath = self.cmds['logPath'],\n redisKey = self.cmds['redisKey'],\n ).startTailf()\n\n def write(self):\n Writer(\n redisKey = self.cmds['redisKey'],\n withStatic = self.cmds['withStatic']\n ).start()\n\n\n\n\ndef formCommands(cmdArgs):\n _map = {\n 'logPath':None,\n 'redieKey':None,\n 'runModel':None,\n 'proccessNum':2,\n 'withStatic':0,\n }\n\n # log file path\n if ('-f' in cmdArgs):\n _map['logPath'] = cmdArgs[cmdArgs.index('-f') + 1].strip()\n _map['runModel'] = 'read'\n\n # redis key name\n if ('-k' in cmdArgs):\n _map['redisKey'] = cmdArgs[cmdArgs.index('-k') + 1].strip()\n\n # run model\n if ('-m' in cmdArgs):\n _map['runModel'] = cmdArgs[cmdArgs.index('-m') + 1].strip()\n\n # write model proccess number\n if ('-p' in cmdArgs):\n _map['proccessNum'] = int(cmdArgs[cmdArgs.index('-p') + 1].strip())\n _map['runModel'] = 'write'\n\n if ('-with-static' in cmdArgs):\n _map['withStatic'] = 1\n\n return _map\n\nif __name__ == '__main__':\n commond = sys.argv\n\n \"\"\"\n 参数说明 : \n -f your access.log path \n -k your redis key name \n -m run model -m [read | write] \n -p writer model Proccess Number defualt 2\n -with-static writer model Proccess will not filter static file request\n read model example:\n python3 watcher.py -k access_log_80_server -m read -f /wwwlogs/access.log\n \n write model example:\n python3 watcher.py -k access_log_80_server -m write -p 4 [-with-static]\n \n \"\"\"\n\n try:\n\n args = formCommands(commond)\n\n runModel = args['runModel']\n\n if runModel == 'write':\n\n poolNum = args['proccessNum']\n pool = Pool(poolNum)\n for i in range(poolNum):\n pool.apply_async(runner, args=(args,))\n\n pool.close()\n pool.join()\n\n else:\n\n runner(args)\n\n except TypeError as e:\n print('参数错误')\n print('参数说明 :')\n print(' -f your access.log path')\n print(' -k your redis key name')\n print(' -m run model -m [read | write]')\n print(' -p writer model Proccess Number defualt 2 ')\n print(' -with-static writer model Proccess will not filter static file request ')\n print('read model example :')\n print(' python3 watcher.py -k access_log_80_server -m read -f /wwwlogs/access.log ')\n print('write model example :')\n print(' python3 watcher.py -k access_log_80_server -m write -p 4 [-with-static]')\n\n\n except Exception as e:\n traceback.print_exc()\n\n\n\n\n","repo_name":"jyolo/nginxWatcher","sub_path":"watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"29718306255","text":"from .address import create_program_address, find_program_address\nfrom 
.commitment import Commitment\nfrom .instruction import Instruction\nfrom .memo import PROGRAM_KEY as MEMO_PROGRAM_KEY, memo_instruction, decompile_memo\nfrom .system import create_account, decompile_create_account\nfrom .token import initialize_account, transfer, decompile_initialize_account, \\\n decompile_transfer\nfrom .transaction import Transaction, SIGNATURE_LENGTH\n\n__all__ = [\n 'create_program_address',\n 'find_program_address',\n 'Commitment',\n 'Instruction',\n 'MEMO_PROGRAM_KEY',\n 'memo_instruction',\n 'decompile_memo',\n 'create_account',\n 'decompile_create_account',\n 'initialize_account',\n 'transfer',\n 'decompile_initialize_account',\n 'decompile_transfer',\n 'Transaction',\n 'SIGNATURE_LENGTH',\n]\n","repo_name":"kinecosystem/kin-python","sub_path":"agora/solana/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"72357259688","text":"import PySimpleGUI as sg\nfrom utils import ver_categoria_homens, ver_categoria_mulheres, calcula_agua\nfrom webbrowser import open_new\nfrom frases_request import frase_motivacional\n\n\ndef beneficios():\n layout = [\n [sg.Image(filename='imagens/beneficios.png')]\n ]\n janela = sg.Window('Benefícios da água', layout=layout)\n while True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED:\n break\n janela.close()\n\n\ndef falta_de_agua():\n layout = [\n [sg.Image(filename='imagens/pouca_agua.png')]\n ]\n janela = sg.Window('Sinais de que você bebe pouca água', layout=layout)\n while True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED:\n break\n janela.close()\n\n\ndef agua():\n layout = [\n [sg.Push(), sg.Text('Seu peso(kg):'), sg.Input(\n key='peso', size=4), sg.Push()],\n [sg.Push(), sg.Button('Calcular'), sg.Push()],\n [sg.Push(), sg.Text('', key='resultado', size=(\n 30, 3), justification='center'), sg.Push()],\n [sg.Push(), sg.Button('Benefícios de se beber água', key='Beneficios'), sg.Push()],\n [sg.Push(), sg.Button('Sinais de que você bebe pouca água',\n key='pouca_agua'), sg.Push()]\n ]\n janela = sg.Window('Calcular Água', layout=layout, size=(400, 210))\n while True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED:\n break\n elif evento == 'Calcular':\n try:\n calculo = calcula_agua(float(valores['peso']))\n janela['resultado'].update(calculo)\n except:\n janela['resultado'].update('ERRO: Valor inválido!')\n elif evento == 'Beneficios':\n beneficios()\n elif evento == 'pouca_agua':\n falta_de_agua()\n\n janela.close()\n\n\ndef busca_frases():\n layout = [\n [sg.Text('Clique no botão para gerar uma frase motivacional!',\n key='frase', justification='center', size=(50, 4))],\n [sg.Push(), sg.Button('Gerar', key='gerar_frase'), sg.Push()]\n ]\n janela = sg.Window('Frases Motivacionais', layout=layout)\n while True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED:\n break\n elif evento == 'gerar_frase':\n janela['frase'].update(frase_motivacional())\n janela.close()\n\n\ndef tabela():\n layout = [\n [sg.Image(filename='imagens/tabela1.png')]\n ]\n janela = sg.Window('Tabela IMC', layout=layout)\n\n while True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED:\n break\n janela.close()\n\n\ndef calculo_imc():\n opcoes = ['Masculino', 'Feminino']\n layout = [\n [sg.Text('Seu peso(kg)', justification='center', size=(100, 1))],\n [sg.Input(size=(100, 1), key='peso', justification='center')],\n [sg.Text('Sua altura(m):', 
justification='center', size=(100, 1))],\n [sg.Input(size=(100, 1), key='altura', justification='center')],\n [sg.Text('Sexo:'), sg.Combo(['Masculino', 'Feminino'],\n default_value='Masculino', key='sexo')],\n [sg.Push(), sg.Button('Calcular IMC'), sg.Push()],\n [sg.Push(), sg.Text('', key='resultado'), sg.Push()],\n [sg.Push(), sg.Button('Visualizar Tabela IMC', key='visualizar'), sg.Push()]\n ]\n\n janela = sg.Window(title='Calculadora IMC', layout=layout, size=(250, 300))\n\n while True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED:\n break\n elif evento == 'Calcular IMC':\n try:\n peso = float(valores['peso'])\n altura = float(valores['altura'])\n calculo = peso / (altura * altura)\n txt = f\"Seu IMC é {round(calculo, 2)}\\n{ver_categoria_mulheres(calculo) if valores['sexo'] == 'Feminino' else ver_categoria_homens(calculo)}\"\n janela['resultado'].update(txt)\n except:\n janela['resultado'].update('ERRO: Valores inválidos!')\n elif evento == 'visualizar':\n tabela()\n\n janela.close()\n\n\ndef main():\n sg.theme('LightBlue2')\n layout_esquerda = [\n [sg.Image(filename='imagens/logo_inicio.png'), ]]\n layout_direita = [\n [sg.Image(filename='imagens/logotipo.png')],\n [sg.Push(), sg.Text('O que deseja fazer?'), sg.Push()],\n [sg.Push(), sg.Button('Calculadora IMC', key='imc'), sg.Push()],\n [sg.Push(), sg.Button('Gerar frases motivacionais', key='gerador'), sg.Push()],\n [sg.Push(), sg.Button('Calcular quantidade de água',\n key='calcular_agua'), sg.Push()],\n [sg.Push(), sg.Button('Sobre o App', key='sobre'), sg.Push()],\n ]\n layout = [\n [sg.Column(layout_esquerda), sg.VSeparator(),\n sg.Column(layout_direita)]\n ]\n janela = sg.Window('FitnessLife App', layout=layout)\n\n while True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED:\n break\n elif evento == 'imc':\n calculo_imc()\n elif evento == 'gerador':\n busca_frases()\n elif evento == 'calcular_agua':\n agua()\n elif evento == 'sobre':\n open_new('https://github.com/ElissonDouglas/fitnesslife-app')\n\n janela.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ElissonDouglas/fitnesslife-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24863940386","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# @Time : 2020/11/11 19:08\n# @Author : Chenchen Wei\n# @Description:\nimport os\nimport pickle\nimport time\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nclass get_data(object):\n \"\"\"\n Return missing data in different missing categories and missing rates\n Return: Samples * node * lags\n \"\"\"\n\n def __init__(self, path,\n file_name,\n loss_category='mcar',\n loss_rate=0.1,\n lags=12,\n split_rate=0.8,\n ):\n self.path = path\n self.file_name = file_name\n self.loss_category = loss_category\n self.loss_rate = loss_rate\n self.lags = lags\n self.split_rate = split_rate\n self.data_names = '{}_{}_lrt{}_sl{}.npz'.format(self.file_name[:-4],\n self.loss_category,\n self.loss_rate,\n self.lags)\n self.scalerfile = os.path.join(self.path, self.file_name[:-4] + 'rec_scaler.sav')\n\n def data_process(self):\n data = pd.read_csv(os.path.join(self.path, self.file_name)).values\n self.road_num = data.shape[1]\n data_num = data.shape[0] // self.lags\n split_num = int(0.8 * data_num)\n train, test = data[:split_num * self.lags], data[split_num * self.lags:]\n\n scaler = 
MinMaxScaler(feature_range=(0, 1))\n # Standardization operation, standardize the data of each detector by column\n train = scaler.fit_transform(train)\n test = scaler.transform(test) # Standardize the test set with the training set scale\n\n train = np.array(np.split(train, train.shape[0] // self.lags, axis=0))\n test = np.array(np.split(test, test.shape[0] // self.lags, axis=0))\n train, test = np.transpose(train, (0, 2, 1)), np.transpose(test, (0, 2, 1))\n\n train_mask, test_mask = self.get_mask(train.shape), self.get_mask(test.shape)\n train_x, train_y = train * train_mask, train\n test_x, test_y = test * test_mask, test\n\n np.savez(os.path.join(self.path, self.data_names),\n train_x, train_y, test_x,\n test_y, train_mask, test_mask) # data file pre_save\n pickle.dump(scaler, open(self.scalerfile, 'wb')) # save scaler\n\n return train_x, train_y, test_x, test_y, scaler, train_mask, test_mask\n\n def main(self):\n if os.path.exists(os.path.join(self.path, self.data_names)): # If it exists, load directly\n a = time.time()\n print('#' + self.data_names + ' are Exists, Begin Loading')\n all_data = np.load(os.path.join(self.path, self.data_names))\n train_x, train_y, test_x = all_data['arr_0'], all_data['arr_1'], all_data['arr_2']\n test_y, train_mask, test_mask = all_data['arr_3'], all_data['arr_4'], all_data['arr_5']\n scaler = pickle.load(open(self.scalerfile, 'rb'))\n use_time = time.time() - a\n print('#Loading Success! Use time= {:.2f}s'.format(use_time))\n else: # If it does not exist, call the function to read and save data\n a = time.time()\n print('#' + self.data_names + ' are Not exists, Begin Loading and Saving')\n train_x, train_y, test_x, test_y, scaler, train_mask, test_mask = self.data_process()\n use_time = time.time() - a\n print('#Save and Loading Success Use time= {:.2f}s'.format(use_time))\n return train_x, train_y, test_x, test_y, train_mask, test_mask, scaler\n\n def get_mask(self, shape):\n if self.loss_category == 'mcar':\n return self.get_mcar_mask(shape)\n\n elif self.loss_category == 'tmcar':\n return self.get_time_mask(shape)\n\n elif self.loss_category == 'smcar':\n return self.get_space_mask(shape)\n\n else:\n raise ValueError('No loss_category')\n\n def get_time_mask(self, shape):\n \"\"\"\n tmcar\n Missing in time, no data at certain moments\n :param shape:\n :return:\n \"\"\"\n loss_nums = int(shape[-1] * self.loss_rate)\n mask = np.ones(shape=shape)\n for num in range(shape[0]):\n indexs = np.arange(shape[-1]).astype(np.int)\n np.random.shuffle(indexs)\n index = indexs[:loss_nums]\n mask[num, :, index] = 0\n return mask\n\n def get_space_mask(self, shape):\n \"\"\"\n smcar\n Spatially missing, some detectors have no data\n :param shape:\n :return:\n \"\"\"\n loss_nums = int(shape[1] * self.loss_rate)\n mask = np.ones(shape=shape)\n for num in range(shape[0]):\n indexs = np.arange(shape[1]).astype(np.int)\n np.random.shuffle(indexs)\n index = indexs[:loss_nums]\n mask[num, index, :] = 0\n return mask\n\n def get_mcar_mask(self, shape):\n \"\"\"\n mcar\n All missing at random\n :param shape:\n :return:\n \"\"\"\n array = []\n flag = int(shape[1] * shape[2] * (self.loss_rate))\n for n in range(0, shape[0]):\n array_first = []\n array_mid = []\n array_fianl = np.ones([shape[1], shape[2]])\n for i in range(0, shape[1]):\n for j in range(0, shape[2]):\n array_mid.append(int(array_fianl[i][j]))\n for k in range(0, flag):\n array_mid[k] = 0\n np.random.shuffle(array_mid) # 得到随机排序后的array_mid\n for z in range(0, shape[2] * shape[1], shape[2]):\n 
array_first.append(array_mid[z:z + shape[2]])\n array.append(array_first)\n\n return np.asarray(array)\n","repo_name":"pihang/GA-GAN","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"27747243675","text":"# -*- encoding: utf-8 -*-\n\n\"\"\"Тесты для обработки столкновений\"\"\"\n\nimport unittest\n\nfrom asteroids_lib.vector import Vector\nfrom asteroids_lib.collision import closest_vector\n\nclass ClosestVectorTest(unittest.TestCase):\n \"\"\"Тестирование поиска ближайшего вектора в mod n пространстве\"\"\"\n\n def setUp(self):\n # Создаём переменную для хранения размера окна\n self.ss = Vector(10, 10)\n\n # Создаём тестовые вектора.\n self.v00 = Vector(0, 0)\n self.v50 = Vector(5, 0)\n self.v05 = Vector(0, 5)\n self.v11 = Vector(1, 1)\n self.v99 = Vector(9, 9)\n\n def test_main(self):\n \"\"\"Общий тест\"\"\"\n\n self.assertEqual(\n closest_vector(self.ss, self.v00, self.v00),\n self.v00)\n self.assertEqual(\n closest_vector(self.ss, self.v11, self.v99),\n Vector(-2, -2))\n # ...\n","repo_name":"rutsky/python-course","sub_path":"08/asteroids/asteroids_lib/tests/test_collision.py","file_name":"test_collision.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"7841867757","text":"import numpy as np\nimport numba as nb\n\n@nb.jit\ndef py_fib(n=4):\n p = np.zeros(n)\n p[1] = 1\n i = 2\n while i < n:\n p[i] = p[i-1]+p[i-2]\n i += 1\n return p\n\n","repo_name":"SouthernMethodistUniversity/fast_python","sub_path":"possible_solutions/numba_fib.py","file_name":"numba_fib.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"4488567669","text":"from django import forms\nfrom .models import Manuntencao\nfrom django.utils.translation import ugettext as _\n\n\nclass ManutencaoForm(forms.ModelForm):\n data = forms.TimeField(label=_('Data'))\n valor_gasto = forms.DecimalField(label=_('Valor Gasto'), max_digits=6,\n decimal_places=2)\n\n def __init__(self, *args, **kwargs):\n super(ManutencaoForm, self).__init__(*args, **kwargs)\n self.fields['data'].widget.attrs.update({'class': 'form-control'})\n self.fields['valor_gasto'].widget.attrs.update({'class': 'form-control'})\n self.fields['produto'].widget.attrs.update({'class': 'form-control'})\n\n class Meta:\n model = Manuntencao\n fields = ['data', 'valor_gasto', 'produto']\n","repo_name":"wl4dek/pbd","sub_path":"pbd/manutencao/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4252052041","text":"import re\r\n\r\nlist1=[]\r\ncard_number=\"4455667767495369\"\r\nmatcher1=re.search(\"[456]\\d{3}\\d{4}\\d{4}\\d{4}\",card_number)\r\nmatcher2=re.search(\"^[456]\\d{3}-\\d{4}-\\d{4}-\\d{4}\",card_number)\r\nk=0\r\nif matcher1!=None or matcher2!=None:\r\n for i in card_number:\r\n if i!='-':\r\n list1.append(i)\r\n count=0\r\n for i in range(4,16):\r\n if int(list1[i])==int(list1[i-1]):\r\n if int(list1[i])==int(list1[i-2]):\r\n if int(list1[i])==int(list1[i-3]):\r\n print('Invalid')\r\n k=1\r\n break\r\nelse:\r\n k=1\r\n print('Invalid')\r\nif k==0:\r\n 
print('Valid')\r\n","repo_name":"gokulkannaniprimed/Python_practice","sub_path":"credit_card_validation.py","file_name":"credit_card_validation.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9166386101","text":"import logging\nfrom yaml import load, dump\nimport time\nfrom opcuaClient.browse_server_structure import BROWSEServer\nfrom canmops.analysis_utils import AnalysisUtils\n# from browse_server_structure import BROWSEServer\n# from analysisUtils import AnalysisUtils\nfrom datetime import date\nfrom datetime import datetime\nimport opcua\nimport re\nimport yaml\nfrom opcua import ua\nfrom opcua.common import node\n\n_logger = logging.getLogger('asyncua')\n\n\nclass OPCClient(BROWSEServer):\n\n def __init__(self, url=\"opc.tcp://localhost:4840/freeopcua/server/\", parent=None, client=None):\n\n if parent is not None:\n self.parent = parent\n if client is None:\n self.server_url = url\n self.client = opcua.Client(self.server_url, timeout=1500)\n else:\n self.client = client\n self.maxBUS_count = 8\n self.maxCIC_count = 4\n self.cicADCChannel_count = 5\n self.server_dict = dict()\n BROWSEServer.__init__(self)\n\n def start_connection(self, url=None):\n try:\n if url is not None:\n self.client = opcua.Client(url)\n opcua.Client.connect(self.client)\n return True\n except Exception as e:\n self.parent.textBox.append(f\"{e}\")\n return False\n\n def close_connection(self):\n opcua.Client.disconnect(self.client)\n if self.parent is not None:\n self.parent.textBox.append(\"Connection closed\")\n\n def get_mops_nodes(self, cic_id: int, bus_id: int, node_id: int):\n adc_nodes = []\n mon_nodes = []\n mon_desc = []\n\n for entry in self.server_dict:\n if f\"CIC {cic_id}\" in entry:\n if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n if f\"MOPS {node_id}\" in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n for channel_id in range(3, 35):\n try:\n adc_nodes.append(self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"]\n [f\"MOPS {node_id}\"][f\"ADCChannel {channel_id:02}\"]\n [\"monitoringValue\"]))\n except Exception as e:\n print(e)\n\n for entry in self.server_dict:\n if f\"CIC {cic_id}\" in entry:\n if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n if f\"MOPS {node_id}\" in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n for mon_item in self.server_dict[entry][f\"CANBus {bus_id}\"][f\"MOPS {node_id}\"][\"MOPSMonitoring\"]:\n try:\n mon_nodes.append(self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"]\n [f\"MOPS {node_id}\"][\"MOPSMonitoring\"][mon_item]))\n mon_desc.append(mon_item)\n except Exception as e:\n print(e)\n return None\n\n return adc_nodes, mon_nodes, mon_desc\n\n def get_cic_adc_nodes(self, cic_id: int, bus_id: int):\n bus_nodes = []\n if f\"CANBus {bus_id}\" in self.server_dict[f\"CIC {cic_id}\"]:\n for channel in self.server_dict[f\"CIC {cic_id}\"][f\"CANBus {bus_id}\"][f\"ADC CANBus {bus_id}\"]:\n try:\n bus_nodes.append(self.client.get_node(self.server_dict[f\"CIC {cic_id}\"][f\"CANBus {bus_id}\"]\n [f\"ADC CANBus {bus_id}\"][channel]\n [\"monitoringValue\"]))\n except Exception as e:\n print(e)\n return None\n return bus_nodes\n\n def read_mops_adc(self, cic_id: int, bus_id: int, node_id: int, nodes):\n values = self.client.get_values(nodes)\n return values\n # for entry in self.server_dict:\n # if f\"CIC {cic_id}\" in entry:\n # if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n # if f\"MOPS {node_id}\" in 
self.server_dict[entry][f\"CANBus {bus_id}\"]:\n # # values = []\n # nodes = []\n # for channel_id in range(3, 35):\n # try:\n # nodes.append(self.client.get_node(\n # self.server_dict[entry][f\"CANBus {bus_id}\"][f\"MOPS {node_id}\"]\n # [f\"ADCChannel {channel_id:02}\"][\"monitoringValue\"]))\n # except Exception as e:\n # print(e)\n # return None\n # for channel_id in range(3, 35):\n # try:\n # values = self.client.get_values(nodes)\n # except Exception as e:\n # print(e)\n # print(values)\n # return values\n\n def read_bus_adc(self, cic_id: int, bus_id: int, nodes):\n values = self.client.get_values(nodes)\n return values\n # for entry in self.server_dict:\n # if f\"CIC {cic_id}\" in entry:\n # if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n # try:\n # node = self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"][f\"ADC CANBus {bus_id}\"]\n # [f\"ADCChannel {channel:02}\"][\"monitoringValue\"])\n # value = node.get_value()\n # return value\n # except Exception as e:\n # print(e)\n # return None\n\n def read_mops_monitoring(self, cic_id: int, bus_id: int, node_id: int, nodes):\n values = self.client.get_values(nodes)\n return values\n # for entry in self.server_dict:\n # if f\"CIC {cic_id}\" in entry:\n # if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n # if f\"MOPS {node_id}\" in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n # mon_list = []\n # try:\n # for mon_item in self.server_dict[entry][f\"CANBus {bus_id}\"][f\"MOPS {node_id}\"][\"MOPSMonitoring\"]:\n # node = self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"]\n # [f\"MOPS {node_id}\"][\"MOPSMonitoring\"][mon_item])\n # mon_list.append([node.get_value(), mon_item])\n # except Exception as e:\n # print(e)\n # return None\n # return mon_list\n\n def read_mops_conf(self, cic_id: int, bus_id: int, node_id: int):\n for entry in self.server_dict:\n if f\"CIC {cic_id}\" in entry:\n if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n if f\"MOPS {node_id}\" in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n conf_list = []\n try:\n for conf_item in self.server_dict[entry][f\"CANBus {bus_id}\"][f\"MOPS {node_id}\"][\"MOPSInfo\"]:\n node = self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"]\n [f\"MOPS {node_id}\"][\"MOPSInfo\"][conf_item])\n conf_list.append((node.get_value(), conf_item))\n except Exception as e:\n print(e)\n return None\n return conf_list\n\n def disable_power(self, cic_id, bus_id):\n for entry in self.server_dict:\n if f\"CIC {cic_id}\" in entry:\n if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n if f\"PE Signal CANBus {bus_id}\" in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n try:\n methode = self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"]\n [f\"PE Signal CANBus {bus_id}\"]\n [f\"Power Disable Bus {bus_id}\"])\n parent = methode.get_parent()\n parent.call_method(methode)\n return True\n except Exception as e:\n print(e)\n return False\n else:\n return False\n\n def enable_power(self, cic_id, bus_id):\n for entry in self.server_dict:\n if f\"CIC {cic_id}\" in entry:\n if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n if f\"PE Signal CANBus {bus_id}\" in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n try:\n methode = self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"]\n [f\"PE Signal CANBus {bus_id}\"][f\"Power Enable Bus {bus_id}\"])\n parent = methode.get_parent()\n parent.call_method(methode)\n return True\n except Exception as e:\n print(e)\n return False\n else:\n return False\n\n def check_power_status(self, 
cic_id, bus_id):\n for entry in self.server_dict:\n if f\"CIC {cic_id}\" in entry:\n if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n if f\"PE Signal CANBus {bus_id}\" in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n try:\n node = self.client.get_node(self.server_dict[entry][f\"CANBus {bus_id}\"]\n [f\"PE Signal CANBus {bus_id}\"][f\"Current Status\"])\n value = node.get_value()\n return value\n except Exception as e:\n print(e)\n return None\n else:\n return \"N/A\"\n\n def search_endpoints(self, cic_id, bus_id):\n mops_list = []\n for entry in self.server_dict:\n if f\"CIC {cic_id}\" in entry:\n if f\"CANBus {bus_id}\" in self.server_dict[entry]:\n for CANBusChild in self.server_dict[entry][f\"CANBus {bus_id}\"]:\n if \"MOPS\" in CANBusChild:\n mops_list.append(self.server_dict[entry][f\"CANBus {bus_id}\"][CANBusChild][\"NodeID\"])\n if not mops_list:\n return []\n else:\n return mops_list\n\n def load_configuration(self, file):\n with open(file, 'r') as stream:\n self.server_dict = yaml.safe_load(stream)\n\n def browse_server_structure(self, directory=None):\n self.browse_server(self.client)\n print(self.server_dict)\n if directory is None:\n with open('opcuaClient/config/setup.yml', 'w') as ymlfile:\n dump(self.server_dict, ymlfile, sort_keys=False)\n elif directory:\n now = datetime.now()\n current_time = now.strftime(\"%H-%M-%S\")\n file_path = directory + f'/setup_Date-{date.today()}_Time-{current_time}.yml'\n with open(file_path, 'w') as ymlfile:\n dump(self.server_dict, ymlfile, sort_keys=False)\n return file_path\n","repo_name":"ahmedqamesh/canmops","sub_path":"opcuaClient/opcua_client.py","file_name":"opcua_client.py","file_ext":"py","file_size_in_byte":11864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37525170452","text":"import datetime\n\nfrom django.test import TestCase\nfrom django.test import RequestFactory\n\nfrom homepage.views import HomePageView\nfrom races.models import Season, Organizer, Location, Race\n\n\nclass HomePageTest(TestCase):\n\n def setUp(self):\n self.request = RequestFactory().get('/')\n self.view = HomePageView.as_view()\n\n self.season = Season.objects.create(\n opening_year=2014,\n closing_year=2015,\n slug='2014-15',\n is_current_season=True,\n )\n\n self.organizer = Organizer.objects.create(\n name='Joe Organizer',\n phone='555-555-5555',\n email='joe@gmail.com',\n website='http://racenow.com',\n )\n\n self.location = Location.objects.create(\n city='Winston-Salem',\n state='NC',\n zip_code=27101,\n address='151 Piedmont Avenue',\n description='Take the first left past the golf course.'\n )\n\n self.race_this_weekend = Race.objects.create(\n date=datetime.date(2015, 1, 3),\n season=self.season,\n location=self.location,\n organizer=self.organizer,\n description='A very muddy race.',\n pre_registration_link='http://pre-reg.com',\n )\n self.race_this_month = Race.objects.create(\n date=datetime.date(2015, 1, 10),\n season=self.season,\n location=self.location,\n organizer=self.organizer,\n description='A very muddy race.',\n pre_registration_link='http://pre-reg.com',\n )\n\n def test_home_page_renders_template(self):\n response = self.view(self.request)\n self.assertEqual(response.template_name, ['home.html'])\n\n def test_home_page_returns(self):\n response = self.view(self.request)\n self.assertEqual(response.status_code, 200)\n\n def test_home_page_returns_season(self):\n response = self.view(self.request, current_season=self.season)\n 
self.assertEqual(response.context_data['current_season'], self.season)\n\n def test_home_page_returns_upcoming_races_for_weekend(self):\n response = self.view(self.request, weekend_race_list=self.race_this_weekend)\n self.assertEqual(response.context_data['weekend_race_list'][0], self.race_this_weekend)\n\n def test_home_page_returns_upcoming_races_for_month(self):\n response = self.view(self.request, remaining_races_for_month_list=self.race_this_month)\n self.assertEqual(response.context_data['remaining_races_for_month_list'][0], self.race_this_month)\n\n def tearDown(self):\n season = Season.objects.all()\n season.delete()\n organizer = Organizer.objects.all()\n organizer.delete()\n location = Location.objects.all()\n location.delete()\n race = Race.objects.all()\n race.delete()\n","repo_name":"patrickbeeson/north-carolina-cyclocross-series","sub_path":"racemanager/homepage/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18986432956","text":"import logging\n\nimport gym\n# noinspection PyUnresolvedReferences\nimport gym_multiplexer\nfrom lcs.agents.yacs.yacs import Configuration, YACS\n\nfrom examples.real_multiplexer import HashedObservation\n\nlogging.basicConfig(level=logging.INFO)\n\nRMPX_SIZE = 3\nRMPX_HASH = 'md5'\nRMPX_BINS = 10\n\nif __name__ == '__main__':\n cfg = Configuration(classifier_length=RMPX_SIZE + 1,\n number_of_possible_actions=2,\n learning_rate=0.1,\n discount_factor=0.8,\n trace_length=3,\n estimate_expected_improvements=False,\n feature_possible_values=[{str(i) for i in range(RMPX_BINS)}] * RMPX_SIZE + [{'F', 'T'}],\n metrics_trial_frequency=1)\n\n env = HashedObservation(\n gym.make(f'real-multiplexer-{RMPX_SIZE}bit-v0'), RMPX_HASH, RMPX_BINS)\n\n agent = YACS(cfg)\n\n print(\"\\n*** EXPLORE ***\")\n pop, metrics = agent.explore(env, 500)\n","repo_name":"ParrotPrediction/pyalcs-experiments","sub_path":"examples/real_multiplexer/yacs.py","file_name":"yacs.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23830961444","text":"\nfrom django.urls import path\nfrom . import views\nurlpatterns = [\n path ('',views.home, name='home'),\n path ('announcement/',views.announcement,name='announcement'),\n path ('contact-us/',views.contact_us,name='contact'),\n path ('gallery/',views.gallery, name= 'gallery'),\n path ('Ariful-Islam-Juwel/',views.aijuwel, name='juwel'),\n]\n","repo_name":"juwelariful/DIU-swe-Alumni","sub_path":"other/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11639698570","text":"# Description\n# Complete the function that returns an array of length n, starting with the given number x and the squares of the\n# previous number. 
If n is negative or zero, return an empty array/list.\n\ndef squares(x, n):\n if n <= 0:\n return []\n prod = x\n seq = [x]\n for i in range(n - 1):\n prod **= 2\n seq.append(prod)\n return seq\n","repo_name":"orlando1080/codewars","sub_path":"7_kyu/square_sequence.py","file_name":"square_sequence.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25501201384","text":"import numpy as np\nfrom vpython import *\nA, N = 0.10, 50\nsize, m, k, d = 0.06, 0.1, 10.0, 0.4\nscene = canvas(title='Wave vector', width=800, height=300, background=vec(0.5,0.5,0), center = vec(0, 0, 0))\nd_graph = graph(width = 400, height = 400, title='dispersion relationship',xtitle = 'wave vector',ytitle='angular frequency')\nfunc = gcurve(graph = d_graph, color = color.red, width = 4)\n\nUnit_K = 2 * pi/(N*d)\n\nfor n in range(1, N//2):\n Wavevector = n * Unit_K\n phase = Wavevector * arange(N) * d\n ball_pos, ball_v, spring_len = np.arange(N)*d + A*np.sin(phase), np.zeros(N), np.ones(N)*d\n ori_second = ball_pos[1]\n t, dt = 0, 0.0003\n f = False\n while True:\n t += dt\n if ball_pos[1] < ori_second:\n f = True\n if f and ball_pos[1] > ori_second:\n break\n spring_len[:-1] = [ball_pos[i] - ball_pos[i+1] for i in range(N-1)]\n spring_len[-1] = ball_pos[-1] - ball_pos[0] - N*d\n ball_v[1:] += [(k*(spring_len[i]-spring_len[i+1])/m*dt) for i in range(N-1)]#6\n ball_v[0] += k*(spring_len[-1] - spring_len[0])/m*dt\n ball_pos += ball_v*dt\n func.plot(pos = (Wavevector, 2*pi/t))\n","repo_name":"khhung906/NTU-Vpython","sub_path":"hw-4.py","file_name":"hw-4.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74923138729","text":"import sys\nimport urllib.request\nimport urllib.parse\n\narg1Lista = sys.argv[1:]\n\nfor urlElementListy in arg1Lista:\n url = \"\"\n url += urlElementListy\n\nnazwaPliku = \"\"\n\ntry:\n if url[-1] == \"/\":\n nazwaPliku = \"index.html\"\n else:\n for i in range(1, len(url)):\n if url[-i] == \"/\":\n nazwaPliku = url[-i + 1 : len(url)] + \".html\"\n break\nexcept:\n print(\"Nie podano adresu URL\")\n\ntry:\n html = urllib.request.urlopen(url)\n with open(nazwaPliku, \"w\") as sys.stdout:\n print(html.read())\n\nexcept Exception as e:\n print(\"Napotkano błąd:\", e)\n","repo_name":"tTargiel/UNI-Python-Programming","sub_path":"Lista 11/wget.py","file_name":"wget.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74603979686","text":"from .builder import DATASETS\nfrom .custom import CustomDataset\n\nfrom . 
import FishDataset\n\n@DATASETS.register_module()\nclass FlatDataset(CustomDataset):\n \"\"\"Flat dataset.\n \"\"\"\n\n CLASSES = FishDataset.CLASSES\n PALETTE = FishDataset.PALETTE\n\n def __init__(self, **kwargs):\n assert kwargs.get('split') in [None, 'train']\n if 'split' in kwargs:\n kwargs.pop('split')\n super(FlatDataset, self).__init__(\n img_suffix='.png',\n seg_map_suffix='.png',\n split=None,\n **kwargs)\n \n","repo_name":"qqplot/sait-uda","sub_path":"seg/mmseg/datasets/flat.py","file_name":"flat.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22557929705","text":"# %%\nimport os\nimport matplotlib\n# matplotlib.use('agg')\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\nfrom matplotlib.gridspec import GridSpec\nimport pandas as pd\nimport numpy as np\nfrom pandas.core.frame import DataFrame\nimport scipy.stats as stats\nimport seaborn as sns\n# import statsmodels.api as sm\n# from statsmodels.stats.diagnostic import lillifors\nmatplotlib.rcParams['hatch.linewidth'] = 0.1\ncolor_pool = plt.get_cmap('tab10').colors\n# %matplotlib inline\n# %config InlineBackend.figure_format = 'svg'\n\n\ndef bar(df, x_col='Sample', y_col='count', group_col='stage', group_col2='tumor_type', \n ax=None, title=None, out=None, labelsize=6, labelrotation=90,\n ordered_group=None, ordered_group2=None, \n hatch_lst=(None, '***', '///', '+++', '...', 'xxx', 'ooo')):\n \"\"\"\n :param df:\n :param x_col: column name for data source of bar label\n :param y_col: column name for data source of bar height\n :param group_col: column name for data source of bar color group. 柱子颜色分组信息\n :param group_col2: column name for data source of bar hatch(decorative pattern) group. 
柱子里的花纹分组信息,可以赋值None\n :param ax: \n :param title:\n :param out:\n :param labelsize: xtick label size\n :param labelrotation: xtick label rotation angle\n :param ordered_group: ordered group color, 决定柱子的顺序\n :param ordered_group2: ordered group hatch,与hatch_lst一一对应,决定哪些分组对应什么纹理\n :param hatch_lst: 柱子的花纹选项\n :return:\n \"\"\"\n ax = plt.subplots()[1] if ax is None else ax\n # color_pool = plt.get_cmap('Set2').colors\n ordered_group = sorted(set(df[group_col])) if ordered_group is None else ordered_group\n color_dict = dict(zip(ordered_group, color_pool))\n colors = [color_dict[x] if x in color_dict else \"gray\" for x in df[group_col]]\n \n # 画柱子\n ax.grid(axis='y', linestyle='--', linewidth=0.5, alpha=0.5)\n bars = ax.bar(df[x_col], df[y_col], color=colors, align='center', width=0.7)\n \n # 设置柱子的面子颜色和纹理\n hatch_dict = dict()\n if group_col2 is not None:\n ordered_group2 = sorted(set(df[group_col2])) if ordered_group2 is None else ordered_group2\n hatch_dict = dict(zip(ordered_group2, hatch_lst))\n for ind, patch in enumerate(bars.patches):\n patch.set_hatch(hatch_dict[df[group_col2][ind]])\n patch.set_facecolor(color_dict[df[group_col][ind]])\n\n # customise legend\n group_count = df[group_col].value_counts()\n legend_patches = [mpatches.Patch(color=color_dict[x], label=x+f'({group_count[x]})') for x in ordered_group]\n if group_col2 is not None:\n group2_count = df[group_col2].value_counts()\n legend_patches += [mpatches.Patch(hatch=hatch_dict[x], label=x+f'({group2_count[x]})', facecolor='w', edgecolor='gray') for x in ordered_group2]\n ncol = 1 if len(legend_patches) <= 4 else 2\n ax.legend(handles=legend_patches, loc='upper right', ncol=ncol)\n\n # fine tune plot\n ax.xaxis.set_tick_params(labelsize=labelsize, labelrotation=labelrotation)\n if title:\n ax.set_title(title)\n ax.set_xlim(-1, df.shape[0])\n if out:\n plt.tight_layout()\n plt.savefig(f'{out}', dpi=300)\n return ax\n\n\ndef test_normality(data):\n # 正太分布检验\n if len(data) < 50:\n p_value = stats.normaltest(data)[1]\n return 'normalTest', p_value\n if len(data) < 300:\n p_value = stats.shapiro(data)[1]\n return \"shapiro\", p_value\n if len(data) >= 300:\n p_value = stats.kstest(data, 'norm')[1]\n return \"kstest\", p_value\n\n\ndef displot(d1, d2, label1, label2, ax=None, kind='kde'):\n s1 = pd.Series(d1)\n s2 = pd.Series(d2)\n # color_pool = plt.get_cmap('Set2').colors\n ax = plt.subplots()[1] if not ax else ax\n if kind == 'kde':\n ax = s1.plot.kde(label=label1, ax=ax, alpha=0.7, color=color_pool[0])\n ax = s2.plot.kde(label=label2, ax=ax, alpha=0.7, color=color_pool[1])\n ax.tick_params(labelsize='small')\n ax.legend(fontsize='small', loc='best')\n ax.set_title(\"Kernel density estimation\")\n elif kind == 'hist':\n ax = s1.plot.hist(label=label1, ax=ax, alpha=0.7, color=color_pool[0])\n ax = s2.plot.hist(label=label2, ax=ax, alpha=0.6, color=color_pool[1])\n ax.yaxis.tick_right()\n ax.tick_params(labelsize='small')\n ax.legend(fontsize='small', loc='best')\n ax.set_title(\"Histogram plot\")\n elif kind == 'box':\n def get_limit(series, k=3):\n q1 = series.quantile(q=0.25)\n q3 = series.quantile(q=0.75)\n iqr = q3 - q1\n upper = q3 + 3 * iqr\n lower = q1 - 3 * iqr\n return upper, lower\n # d1 = s1.clip(*get_limit(s1))\n # d2 = s2.clip(*get_limit(s1))\n d1, d2 = s1, s2\n df = pd.DataFrame({'value': list(d1)+list(d2), 'group': [label1]*len(d1) + [label2]*len(d2)})\n sns.boxplot(data=df, x='group', y='value', ax=ax, palette=color_pool)\n sns.swarmplot(data=df, x='group', y='value', ax=ax, color='0.25')\n else:\n raise 
Exception(f'unsupported kind {kind}')\n return ax\n\n\ndef pvalue_plot(d1, d2, label1, label2, axes=None):\n if axes is None:\n fig, axes = plt.subplots(2, 2, tight_layout=True)\n axes = [x for y in axes for x in y]\n\n def format_pvalue(x):\n return f'{x:.3e}' if x < 0.0001 else round(x, 4)\n\n # color_pool = plt.get_cmap('Set2').colors\n # 正态性检验: get pvalue\n method1, pvalue1 = test_normality(d1)\n method2, pvalue2 = test_normality(d2)\n\n # 正态性检验:Generates a probability plot\n for data, label, method, pvalue, ax, color in zip(\n [d1, d2], \n [label1, label2],\n [method1, method2],\n [pvalue1, pvalue2],\n axes[2:], color_pool\n ):\n (osm, osr), (slope, intercept, r) = stats.probplot(data, dist=\"norm\", plot=None)\n ax.plot(osm, osr, 'o', osm, slope*osm + intercept, 'r--', markerfacecolor=color)\n ax.annotate(\n f'{method}: pvalue={format_pvalue(pvalue)} and ' + \"$R^2=%1.4f$\" % r,\n xy=(0.05, 0.9), xycoords='axes fraction', fontsize=7\n )\n ax.set_title(f'Probability Plot of group {label}')\n\n # 方差齐性检验\n s, pvalue = stats.levene(d1, d2, center='median')\n\n # 两组均值差异:t检验\n t, t_test_pvalue = stats.ttest_ind(d1, d2, equal_var=(pvalue <= 0.05 or False))\n\n # 两组非参数检验\n s, rank_test_pvalue = stats.mannwhitneyu(d1, d2)\n\n # distribution plot:\n displot(d1, d2, label1, label2, ax=axes[0], kind='box')\n\n # plot table, 不使用rowLabels是为了避免空间占用,它占用的是axis外部的空间\n data = [\n ['T-Test', format_pvalue(t_test_pvalue)],\n ['Mann Whitney U Test', format_pvalue(rank_test_pvalue)],\n [f'\"{label1}\" Normality Test', format_pvalue(pvalue1)],\n [f'\"{label2}\" Normality Test', format_pvalue(pvalue2)],\n ]\n axes[1].set_title('P-value Table')\n table = axes[1].table(\n cellText=data, \n colLabels=['Method', 'P-value'],\n cellLoc='center', loc='center', fontsize=8,\n colColours=['lightblue', 'lightblue'],\n colWidths=[0.62, 0.38]\n )\n axes[1].set_axis_off()\n # 调整table row height\n for i in [1, 2, 3, 4]:\n for j in [0, 1]:\n table[i, j].set_height(.2)\n return axes\n\n\ndef count_distribution(df, prefix='MHC-I_neoantigen_count', y_col='count', log2=True,\n group_col='tumor_type', label1='adeno', label2='squamous'):\n # data process\n if log2:\n df[y_col] = np.log2(df[y_col])\n d1 = df[df[group_col] == label1][y_col]\n d2 = df[df[group_col] == label2][y_col]\n # layout setting\n fig = plt.figure(constrained_layout=True, figsize=(8, 9))\n gs = GridSpec(1+2, 2, figure=fig)\n ax1 = fig.add_subplot(gs[0, :])\n stat_axes = []\n for i in [1, 2]:\n for j in [0, 1]:\n stat_axes.append(fig.add_subplot(gs[i, j]))\n\n # drawing\n bar(df, ax=ax1, x_col='Sample', y_col='count', group_col='stage', group_col2='tumor_type')\n pvalue_plot(d1, d2, label1=label1, label2=label2, axes=stat_axes)\n # save\n plt.savefig(f'{prefix}.stats.pdf', dpi=300)\n # plt.show()\n return fig\n\n\ndef cell_fraction_boxplot(file, hue='tumor_type'):\n # stats\n df = pd.read_csv(file)\n df['stage_group'] = ['I-II' if x in ['I', 'II'] else 'III-IV' for x in df['stage']]\n \n def get_data(x_col, hue, y_col):\n # group by x_col\n var_lst = sorted(set(df[x_col]))\n group_lst = sorted(set(df[hue]))\n box_data_dict = dict()\n d1 = df.groupby(x_col)\n for v in var_lst:\n d2 = d1.get_group(v).groupby(hue)\n for g in group_lst:\n if not g in d2.groups:\n box_data = []\n else:\n box_data = d2.get_group(g)[y_col].values\n box_data_dict.setdefault(v, dict())[g] = box_data\n return box_data_dict\n \n gd = get_data(x_col='cell_type', hue=hue, y_col='cell_fraction')\n pvalues = dict()\n man_pvalues = dict()\n for cell, value_dict in gd.items():\n 
print(value_dict.values())\n pvalues[cell] = stats.ttest_ind(*value_dict.values())\n man_pvalues[cell] = stats.mannwhitneyu(*value_dict.values())\n print(pvalues)\n \n # boxplot with table\n fig, ax = plt.subplots()\n sns.boxplot(data=df, x='cell_type', y='cell_fraction', hue_order=sorted(set(df[hue])),\n hue=hue, ax=ax, linewidth=1.5)\n ax.xaxis.set_tick_params(rotation=90)\n pvalues = [pvalues[x.get_text()][1] for x in ax.xaxis.get_majorticklabels()]\n man_pvalues = [man_pvalues[x.get_text()][1] for x in ax.xaxis.get_majorticklabels()]\n pvalue_lst = [f'{x:.3f}' for x in pvalues]\n man_pvalue_lst = [f'{x:.3f}' for x in man_pvalues]\n cell_text = pd.DataFrame({'ttest-pvalue': pvalue_lst, 'ranksum-pvalue': man_pvalue_lst}).T.values\n # print(cell_text)\n cmap = plt.get_cmap('RdYlBu')\n # 截取色谱中间的颜色\n mid_cmap = cmap(np.linspace(0.2, 0.8, 100))\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list('mycmap', mid_cmap)\n # cmap = plt.get_cmap('bwr')\n cell_colors = [[cmap(x) for x in pvalues], [cmap(x) for x in man_pvalues]]\n cell_colors = [[cmap(x) for x in pvalues], cmap(man_pvalues)]\n table = ax.table(\n cellText=cell_text,\n rowLabels=['ttest-pvalue', 'ranksum-pvalue'], \n # colLabels=ax.get_xticklabels(), \n cellColours=cell_colors,\n cellLoc='center', loc='top',\n # fontsize=8,\n alpha=0.5\n )\n # ax.set_xticklabels([])\n plt.subplots_adjust(left=0.2, top=0.8)\n \n # swarmplot\n fig, ax = plt.subplots(1, 1)\n sns.swarmplot(data=df, x='cell_type', y='cell_fraction', hue_order=sorted(set(df[hue])),\n hue=hue, dodge=True, ax=ax, size=3)\n ax.xaxis.set_tick_params(rotation=90)\n \n \ndef cell_fraction_stackbar(file, group_field='tumor_type'):\n df = pd.read_csv(file)\n df['stage_group'] = ['I-II' if x in ['I', 'II'] else 'III-IV' for x in df['stage']]\n # stackbar\n fig, ax = plt.subplots(1, 1)\n df2 = df.pivot(columns='cell_type', index='Sample', values='cell_fraction')\n # get order of stack and get order of sample\n col_order = df2.iloc[0, :].sort_values(ascending=False).index\n df2['cellfraction_sum'] = df2.sum(axis=1)\n tumor_type_dict = dict(zip(df['Sample'], df[group_field]))\n stage_dict = dict(zip(df['Sample'], df['stage_group']))\n df2[group_field] = [tumor_type_dict[x] for x in df2.index]\n row_order = df2.sort_values(by=[group_field, 'cellfraction_sum'], ascending=False).index\n df2 = df2.loc[row_order, col_order]\n ax = df2.plot.bar(stacked=True, rot=90, ax=ax)\n first_legend = ax.legend(bbox_to_anchor=(1.02, 0.65), title='cell_type')\n ax.xaxis.set_tick_params(labelsize=5)\n\n # add tumor type group bar\n ymin, ymax = ax.get_ylim()\n group_bar_height = ymax*0.05\n labels = [tumor_type_dict[x] for x in row_order]\n color_dict = dict(zip(set(labels), plt.get_cmap('Accent').colors))\n colors = [color_dict[x] for x in labels]\n new_ymax = ymax+group_bar_height\n ax.set_ylim(ymin, new_ymax)\n bars = ax.bar(range(len(labels)), bottom=ymax, height=new_ymax, color=colors)\n assigned_labels = set()\n assigned_patches = []\n for label, patch in zip(labels, bars.patches):\n if label not in assigned_labels:\n patch.set_label(label)\n assigned_labels.add(label)\n assigned_patches.append(patch)\n ax.axhline(ymax, color='gray')\n second_legend = ax.legend(handles=assigned_patches, bbox_to_anchor=(1.02, 1), title=group_field)\n # 查看源码似乎不可能获得legend的宽度\n\n # stage info\n ymin, ymax = ax.get_ylim()\n labels = [stage_dict[x] for x in row_order]\n color_dict = dict(zip(set(labels), plt.get_cmap('Paired').colors))\n colors = [color_dict[x] for x in labels]\n new_ymax = 
ymax+group_bar_height\n ax.set_ylim(ymin, new_ymax)\n bars = ax.bar(range(len(labels)), bottom=ymax, height=new_ymax, color=colors)\n assigned_labels = set()\n assigned_patches = []\n for label, patch in zip(labels, bars.patches):\n if label not in assigned_labels:\n patch.set_label(label)\n assigned_labels.add(label)\n assigned_patches.append(patch) \n third_legend = ax.legend(handles=assigned_patches, bbox_to_anchor=(1.02+0.5, 1), title='clinic_stage')\n # as first legend will be replaced by second one, we need to add it back\n ax.add_artist(first_legend)\n ax.add_artist(second_legend)\n # add grid\n ax.xaxis.grid(color='gray', linestyle='--', linewidth=0.2)\n return df2\n\n\ndef neocount_vs_cellfraction(count_file, cellfraction_file, hue=None, out='neocount_vs_cellfraction.pdf'):\n neo_count = pd.read_csv(count_file, index_col=0)\n cf = pd.read_csv(cellfraction_file)\n cf = cf.pivot(columns='cell_type', index='Sample', values='cell_fraction')\n df = neo_count.join(cf)\n df['log2Count'] = np.log2(df['count'])\n fig, axes = plt.subplots(cf.shape[1])\n for ind, cell_type in enumerate(cf.columns):\n if not hue:\n j = sns.jointplot(data=df[['log2Count', cell_type, 'tumor_type']], \n ax=axes[ind], x='log2Count', y=cell_type, kind='reg')\n r, p = stats.spearmanr(df['count'], df[cell_type])\n j.ax_joint.annotate(\n 'r={:f}\\nspearman_pval={:f}'.format(r,p),\n xy=(0.05, 0.9),\n xycoords='axes fraction'\n )\n else:\n j = sns.jointplot(data=df[['log2Count', cell_type, 'tumor_type']], ax=axes[ind],\n x='log2Count', y=cell_type, hue=hue)\n fig.savefig(out, dpi=300, bbox_inches='tight')\n\n \nif __name__ == '__main__':\n # for mhc_type, file in zip(\n # ['MHC-I', 'MHC-II', 'MHC-both'],\n # ['MHC_I.count.csv', 'MHC_II.count.csv', 'MHC_I_II.count.csv']\n # ):\n # df = pd.read_csv(file)\n # df['stage_group'] = ['I-II' if x in ['I', 'II'] else 'III-IV' for x in df['stage']]\n # count_distribution(df, prefix=f'{mhc_type}.adeno_vs_squamous', y_col='count', \n # group_col='tumor_type', label1='adeno', label2='squamous')\n # count_distribution(df, prefix=f'{mhc_type}.I-II_vs_III-IV', y_col='count', \n # group_col='stage_group', label1='I-II', label2='III-IV')\n # cell_fraction_boxplot('./immuneCellFraction.csv')\n # cell_fraction_boxplot('./immuneCellFraction.csv', hue='stage_group')\n # cell_fraction_stackbar('./immuneCellFraction.csv')\n neocount_vs_cellfraction('./MHC_I.count.csv', './immuneCellFraction.csv')\n neocount_vs_cellfraction('./MHC_II.count.csv', './immuneCellFraction.csv', hue='tumor_type')\n \n\n# %%\n","repo_name":"gudeqing/biodev","sub_path":"plots/pvalue_plots.py","file_name":"pvalue_plots.py","file_ext":"py","file_size_in_byte":16002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44034601880","text":"from deck import Deck\nfrom tile import blank_tile\nfrom utils import json_dump, json_load\nfrom constants import ROW, COL, GRID, TILES\n\n\nclass Grid:\n ''' Class for managing ROWxCOL matrix of tiles '''\n\n def __init__(self, sprites, width, height, tile_width, tile_height, scale,\n color):\n # the playing surface\n self.grid = []\n self.blanks = []\n\n for i in range(ROW * COL):\n self.grid.append(None)\n\n # tile spacing\n self.left_hand = int(tile_width / 2)\n self.left = int((width - (tile_width * COL)) / 2 + tile_width)\n self.xinc = int(tile_width)\n self.top = 0\n self.yinc = int(tile_height)\n\n for i in range(ROW * COL):\n self.blanks.append(blank_tile(sprites, scale=scale, color=color))\n 
self.blanks[i].move(self.grid_to_xy(i))\n self.blanks[i].set_layer(GRID)\n\n def clear(self):\n for i in range(ROW * COL):\n self.grid[i] = None\n\n def tiles_in_grid(self):\n ''' How many tiles are on the grid? '''\n return ROW * COL - self.grid.count(None)\n\n def serialize(self):\n ''' Serialize the grid for passing to share and saving '''\n grid = []\n for i in range(ROW * COL):\n if self.grid[i] is not None:\n grid.append([self.grid[i].number, self.grid[i].orientation])\n else:\n grid.append([None, None])\n return json_dump(grid)\n\n def restore(self, grid_as_text, deck):\n ''' Restore tiles to grid upon resume or share. '''\n self.hide()\n grid = json_load(grid_as_text)\n for i in range(ROW * COL):\n if grid[i][0] is None:\n self.grid[i] = None\n else:\n for k in range(ROW * COL):\n if deck.tiles[k].number == grid[i][0]:\n self.add_tile_to_grid(k, grid[i][1], i, deck)\n break\n self.show()\n\n def add_tile_to_grid(self, tile_number, orientation, grid_number, deck):\n ''' Add tiles[tile_number] to grid[grid_number] at orientation '''\n self.grid[grid_number] = deck.tiles[tile_number]\n self.grid[grid_number].spr.move(self.grid_to_xy(grid_number))\n self.grid[grid_number].spr.set_layer(TILES)\n while orientation > 0:\n self.grid[grid_number].rotate_clockwise()\n orientation -= 90\n\n def place_a_tile(self, c, x, y):\n ''' Place a tile at position x,y and display it. '''\n if c is not None:\n c.spr.move((x, y))\n c.spr.set_layer(TILES)\n\n def xy_to_grid(self, x, y):\n ''' Convert from sprite x,y to grid index. '''\n if x > self.left:\n return COL * int((y - self.top) / self.yinc) + \\\n int((x - self.left) / self.xinc)\n else:\n return None\n\n def grid_to_xy(self, i):\n ''' Convert from grid index to sprite x,y. '''\n return (int((self.left + i % COL * self.xinc)),\n int((self.top + (i / COL) * self.yinc)))\n\n def grid_to_spr(self, i):\n ''' Return the sprite in grid-position i. '''\n return self.grid[i].spr\n\n def spr_to_grid(self, spr):\n ''' Return the index of a sprite in grid. '''\n for i in range(ROW * COL):\n if self.grid[i] is not None and self.grid[i].spr == spr:\n return(i)\n return None\n\n def hide(self):\n ''' Hide all of the tiles on the grid. '''\n for i in range(ROW * COL):\n if self.grid[i] is not None:\n self.grid[i].hide()\n\n def show(self):\n ''' Restore all tile on the grid to their x,y positions. 
'''\n for i in range(ROW * COL):\n self.place_a_tile(self.grid[i], self.grid_to_xy(i)[0],\n self.grid_to_xy(i)[1])\n","repo_name":"sugarlabs/paths","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21757407270","text":"import datetime\nimport json\nimport sys\nimport urllib\nfrom time import sleep\n\nfrom bson import CodecOptions\nfrom bson.codec_options import TypeRegistry\nfrom requests import Session\n\nfrom config import config\nfrom library import setup_logger, DecimalCodec, authorize, send_mail\nfrom mongodb import mongo_client\n\nlogger = setup_logger(\"BTC-Dominance-Watcher\")\ncmc_key = config.get_parameter('cmc_key')\n\n\n\nurl = 'https://pro-api.coinmarketcap.com/v1/global-metrics/quotes/latest'\nparameters = {\n 'start': '1',\n 'limit': '5000',\n 'convert': 'USD'\n}\nheaders = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': cmc_key,\n}\n\nsession = Session()\nsession.headers.update(headers)\n\ndb = mongo_client.market_data\ndecimal_codec = DecimalCodec()\ntype_registry = TypeRegistry([decimal_codec])\ncodec_options = CodecOptions(type_registry=type_registry)\ncollection = db.get_collection(\"cmc\", codec_options=codec_options)\n\narguments = len(sys.argv) - 1\nif arguments != 2:\n print(\"You have to specify type the level of BTC.D and type-of-break you want to watch...)\")\n exit(0)\nlogger.info(\"Starting global market data crawling...\")\n\n\ndef get_current_cmc_cap(_url):\n response = session.get(_url)\n _data = json.loads(response.text)\n return _data['data']['btc_dominance']\n\n\ndef get_data(_url):\n response = session.get(_url)\n now = datetime.datetime.now().timestamp()\n collection.insert_one({'data': get_current_cmc_cap(json.loads(response.text)), 'timestamp': now})\n\n\ndef notify_when_break_up(_url, _level):\n _btcd = round(get_current_cmc_cap(_url), 3)\n if _btcd > _level:\n send_mail(f\"ZZZ BTC.D level {_level} BREAK UP ZZZ\", f\"Current BTC.D : {_btcd} > observed : {_level}\")\n\n\ndef notify_when_break_down(_url, _level):\n _btcd = get_current_cmc_cap(_url)\n if _btcd < _level:\n send_mail(f\"ZZZ BTC.D level {_level} BREAK DOWN ZZZ\", f\"Current BTC.D : {_btcd} < observed : {_level}\")\n\n\ndef manage_notification(_url, _level, _type):\n if _type == \"up\":\n notify_when_break_up(_url, _level)\n elif _type == \"down\":\n notify_when_break_down(_url, _level)\n\n\ndef validate_args(_args):\n _level = float(sys.argv[1])\n _type = sys.argv[2]\n assert 10 < _level < 90\n assert _type == \"up\" or _type == \"down\"\n logger.info(f\"All validations done : btcd : {btcd_level} type : {_type}\")\n\n\ndef get_line(_btcd_open1, _btcd_open2, _dt):\n _b = _btcd_open1\n _a = (_btcd_open2 - _btcd_open1)/_dt\n return _a, _b\n\n\ndef break_line(_url, _btcd_open1, _btcd_open2, _dt, _type):\n _a, _b = get_line(_btcd_open1, _btcd_open2, _dt)\n _btcd = round(get_current_cmc_cap(_url), 3)\n\n _res = False\n if _type == \"down\":\n _res = True if 0 < _a * (_dt + 1) + _b - _btcd else False\n else:\n _res = True if 0 > _a * (_dt + 1) + _b - _btcd else False\n\n return _res\n\n\n# authorize()\n\n\n\nbtcd_level = float(sys.argv[1])\nbreakout_type = sys.argv[2]\n\n# break_line(url, 65.26, 65.41, 3, \"down\")\nbreak_line(url, 65.37, 65.47, 8, \"up\")\n\nvalidate_args(sys.argv)\n\nlogger.info(f\"BTC.D level to watch : {btcd_level}\")\n\nwhile 1:\n try:\n manage_notification(url, btcd_level, breakout_type)\n except Exception as e:\n 
logger.error(e)\n sleep(5)\n manage_notification(url, btcd_level, breakout_type)\n sleep(1800)\n\n\n\n","repo_name":"sroziewski/trading-bot","sub_path":"btcd_watcher.py","file_name":"btcd_watcher.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27594198822","text":"import logging\n\nimport mariadb\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataBaseClient:\n\n def __init__(self, database_name: str, user: str, password: str, port=3306):\n self._database_name = database_name\n self._user = user\n self._password = password\n self._host = 'localhost'\n self._port = port\n self._connection = None\n\n def connect(self):\n try:\n self._connection = mariadb.connect(\n user=self._user,\n password=self._password,\n host=self._host,\n port=self._port,\n database=self._database_name\n )\n self._test_connection()\n except mariadb.Error as e:\n logger.error(f'Error connecting to MariaDB Platform: {e}')\n self._connection = None\n\n def insert_new_tele_info_frame(self, tele_info_frame):\n try:\n logger.debug(f'Insert new teleinfo frame into database: key={tele_info_frame.timestamp_db}')\n sql_request = self._prepare_insert_frame_request(tele_info_frame)\n cursor = self._connection.cursor()\n\n cursor.execute(sql_request)\n self._connection.commit()\n except mariadb.Error as e:\n logger.error(f'Error executing request to MariaDB Platform: {e}')\n\n def is_connected(self):\n return True if self._connection else False\n\n def _test_connection(self):\n cursor = self._connection.cursor()\n cursor.execute('select count(*) row_count from teleinfoframes')\n result = cursor.fetchone()\n\n if result:\n logger.debug(f'Successfully accessed to database: Number of frames detected: {result[0]}')\n else:\n logger.error('Failed to access to database: connection test returned nothing')\n\n @staticmethod\n def _prepare_insert_frame_request(tele_info_frame):\n request = 'INSERT INTO teleinfodb.teleinfoframes (' \\\n 'timestamp, ' \\\n 'meter_identifier, ' \\\n 'subscription_type, ' \\\n 'subscription_power_in_a, ' \\\n 'total_base_index_in_wh, ' \\\n 'current_pricing_period, ' \\\n 'instantaneous_intensity_in_a, ' \\\n 'intensity_max_in_a, ' \\\n 'power_consumption_in_va, ' \\\n 'peak_off_peak_schedule, ' \\\n 'meter_state_code' \\\n ') ' \\\n 'VALUES (' \\\n f\"'{tele_info_frame.timestamp_db}', \" \\\n f\"'{tele_info_frame.meter_identifier}', \" \\\n f\"'{tele_info_frame.subscription_type}', \" \\\n f'{tele_info_frame.subscription_power_in_a}, ' \\\n f'{tele_info_frame.total_base_index_in_wh}, ' \\\n f\"'{tele_info_frame.current_pricing_period}', \" \\\n f'{tele_info_frame.instantaneous_intensity_in_a}, ' \\\n f'{tele_info_frame.intensity_max_in_a}, ' \\\n f'{tele_info_frame.power_consumption_in_va}, ' \\\n f\"'{tele_info_frame.peak_off_peak_schedule}', \" \\\n f\"'{tele_info_frame.meter_state_code}'\" \\\n ')'\n return request\n\n def get_database_name(self) -> str:\n return self._database_name\n","repo_name":"jlesauce/TeleInfoReader","sub_path":"teleinforeader/database/database_client.py","file_name":"database_client.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7034953882","text":"from tkinter import *\nventana=Tk()\nventana.geometry(\"700x700+0+0\")\nventana.config(bg=\"yellow\")\nventana.title(\"Imagenes\")\n#Creamos la 
imagen\nimagenL=PhotoImage(file=\"pizza.GIF\")\nlblImagen=Label(ventana,image=imagenL).place(x=100,y=100)\nfondo=PhotoImage(file=\"estrellitas.gif\")\nlblFondo=Label(ventana,image=fondo).place(x=0,y=0)\nventana.mainloop()\n","repo_name":"LuisEnriqueSosaHernandez/Python","sub_path":"imagenes.py","file_name":"imagenes.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30253317661","text":"\n#一个test的小例子\nfrom mmdet.apis import inference_detector, init_detector, show_result_pyplot\n\n# Choose to use a config and initialize the detector\nconfig = './vfnet_coco.py'\n# Setup a checkpoint file to load\ncheckpoint = './pretrain/vfnet_r50_dcn_ms_2x_47.8.pth'\n\n# initialize the detector\nmodel = init_detector(config, checkpoint, device='cuda:0')\n# Use the detector to do inference\nimg = 'example.jpg'\nresult = inference_detector(model, img)\n#print(result)\n# Let's plot the result\nshow_result_pyplot(model, img, result, score_thr=0.3,out_file='./result_example.jpg')\n","repo_name":"dataslab/CV","sub_path":"VarifocalNet/coco_single_test.py","file_name":"coco_single_test.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15911905526","text":"__author__ = 'inozemcev'\n\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('',\n url(r'^$', 'weapon.views.weapon_list', name='weapons_list'),\n url(r'^create_weapon/$',\n 'weapon.views.create_weapon', name='create_weapon'),\n url(r'^edit_weapon/(?P\\d+)/$$',\n 'weapon.views.edit_weapon', name='edit_weapon'),\n url(r'^delete_weapon/(?P\\d+)/$$',\n 'weapon.views.delete_weapon', name='delete_weapon'),\n )\n","repo_name":"officefish/la_server","sub_path":"weapon/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2316586237","text":"\"\"\"\nUnpack Docker class\n\n\"\"\"\n\n__author__ = \"Valentin Giannini\"\n__copyright__ = \"Copyright 2016, LAMA\"\n__credits__ = [\"\"]\n__license__ = \"GPL\"\n__version__ = \"3\"\n__maintainer__ = \"Valentin Giannini - CSE Team\"\n__email__ = \"cse.contact -at- post.lu\"\n__status__ = \"Production\"\n\n\nimport os\nimport json\n\nfrom html import escape\n\nfrom lama.utils.type import Type\nfrom lama.input.input import Input\nfrom lama.models.indicator import Indicator\nfrom lama.analyzer.module import Module\nfrom lama.analyzer.docker_module import DockerModule\n\n\nclass UnpackDocker(DockerModule):\n \"\"\"UnpackDockerDocker class\n\n Args :\n **malware** (malware) : Malware which will be analyzed\n \"\"\"\n\n _module_name = \"Unpack\"\n\n def __init__(self, malware, local_path):\n super().__init__(\"Unpack\", malware, local_path, \"unpack\")\n\n @Module.dec_parse_result\n def parse_result(self):\n \"\"\"\n Abstract parse_result method.\n It calls when analyze is finished.\n It uptade malware with indicators.\n \"\"\"\n if not self._result:\n return\n\n json_unpack = self.json_decode(self._result)\n if not json_unpack:\n return\n\n if \"res\" in json_unpack and json_unpack[\"res\"] == \"ok\":\n for path, subdirs, files in os.walk(self._out_tmp_path):\n for name in files:\n file_path = os.path.join(path, name)\n extract_malware = self.malware.add_extract_malware_path(self.module_cls_name, file_path, name)\n Input.analyse_malware(extract_malware)\n\n if \"error\" in json_unpack:\n 
indicator = Indicator.factory(module_cls_name=self.module_cls_name,\n name=\"error\",\n content_type=Type.BASE64,\n content=json_unpack[\"error\"],\n score=-1)\n self._malware.get_module_status(self.module_cls_name\n ).add_indicator(indicator)\n\n def html_report(content):\n html = \"
    \"\n for item in content:\n if item.name == \"error\":\n html += \"Error : {}\".format(escape(item.content))\n else:\n html += \"LAMA PARSE ERROR\"\n html += \"
    \"\n return html\n","repo_name":"post-cyberlabs/lama","sub_path":"lama/analyzer/modules/unpack.py","file_name":"unpack.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"35677635364","text":"import argparse\nimport github\nimport os\nimport subprocess\n\nparser = argparse.ArgumentParser(description='Batch-upload git repos to the demoscene-source-archive org')\nparser.add_argument('token', help='GitHub access token')\nparser.add_argument('repos', help='Directory containing repos to upload')\nargs = parser.parse_args()\n\ng = github.Github(args.token)\norg = g.get_organization('demoscene-source-archive')\n\n# name = 'test'\n# description = 'foo'\n# repo = org.create_repo(name, description, private=False, has_issues=False, has_wiki=False, has_downloads=False, has_projects=False)\n# print(repo.clone_url)\n\ncwd = os.getcwd()\ntry:\n for f in os.scandir(args.repos):\n if f.is_dir():\n print(f.path)\n os.chdir(f.path)\n name = f.name\n description = None\n with open('.git/description', 'r') as file:\n description = file.read()\n print('{0} : \"{1}\"'.format(f.name, description))\n repo = org.create_repo(f.name, description, private=False, has_issues=False, has_wiki=False, has_downloads=False, has_projects=False)\n print(repo.clone_url)\n subprocess.run(['git', 'remote', 'add', 'origin', repo.clone_url])\n subprocess.run(['git', 'push', '-u', 'origin', 'master'])\n os.chdir(cwd)\nfinally:\n os.chdir(cwd)\n\n\n","repo_name":"kusma/demoscene-archive-scripts","sub_path":"upload-repos.py","file_name":"upload-repos.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5684248456","text":"import sys\nfrom codingTest import CheckTime\n\n\n@CheckTime.CheckTime\ndef solution1(prices: list) -> int:\n max_price = 0\n for i, price in enumerate(prices):\n for j in range(i, len(prices)):\n max_price = max(prices[j] - price, max_price)\n return max_price\n\n\n@CheckTime.CheckTime\ndef solution2(prices):\n answer = 0\n min_price = sys.maxsize\n for price in prices:\n min_price = min(price, min_price)\n answer = max(price - min_price, answer)\n return answer\n\n\nsolution1([7, 1, 5, 3, 6, 4])\nsolution2([7, 1, 5, 3, 6, 4])\n","repo_name":"Leekm0912/codingTest","sub_path":"배열/주식을 사고팔기 가장 좋슨 시점.py","file_name":"주식을 사고팔기 가장 좋슨 시점.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72051470889","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport tensorflow as tf\n\n# get Dataset\ntrain = pd.read_csv(r\"Titanic\\train.csv\")\ntest = pd.read_csv(r\"Titanic\\test.csv\")\nX_train = train.iloc[:,[0,2,4,5,6,7,9]].values\nX_test = test.iloc[:,[0,1,3,4,5,6,8]].values\ny_train = train.iloc[:, 1].values\nprint(X_train)\nprint(y_train)\n\n# Take care of missing data\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values = 
np.nan,strategy ='median')\nimputer.fit(X_train[:,3:])\nimputer.fit(X_test[:,3:])\nX_train[:,3:] = imputer.transform(X_train[:,3:])\nX_test[:,3:] = imputer.transform(X_test[:,3:])\nprint(X_train)\n\n# Encoding Indipendent var\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nct = ColumnTransformer(transformers=[('encoder',OneHotEncoder(),[2])],remainder='passthrough')\nX_train = ct.fit_transform(X_train)\nX_test = ct.fit_transform(X_test)\nprint(X_train)\nprint(X_test)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train[:,1:] = sc.fit_transform(X_train[:,1:])\nX_test[:,1:] = sc.transform(X_test[:,1:])\n\n# Train\n# from xgboost import XGBClassifier\n# classifier = XGBClassifier()\n# classifier.fit(X_train,y_train)\n# # from sklearn.naive_bayes import GaussianNB\n# # classifier = GaussianNB()\n# # classifier.fit(X_train, y_train)\n# # from sklearn.ensemble import RandomForestClassifier\n# # classifier = RandomForestClassifier(n_estimators = 100, criterion = 'entropy', random_state = 0)\n# # classifier.fit(X_train, y_train)\n# # from sklearn.neighbors import KNeighborsClassifier\n# # classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)\n# # classifier.fit(X_train, y_train)\n# # from sklearn.svm import SVC\n# # classifier = SVC(kernel = 'linear', random_state = 0)\n# # classifier.fit(X_train, y_train)\n\n# # Pridict test Results\n# y_pred = classifier.predict(X_train)\n# np.set_printoptions(precision=2)\n# print(np.concatenate((y_pred.reshape(len(y_pred),1), y_train.reshape(len(X_train),1)),1))\n\n#Initialization\nann = tf.keras.models.Sequential()\n\n#add neutron 1\nann.add(tf.keras.layers.Dense(units = 6,activation = 'relu'))\n\n#add neutron 2\nann.add(tf.keras.layers.Dense(units = 12,activation = 'relu'))\n\n#add neutron 3\nann.add(tf.keras.layers.Dense(units = 12,activation = 'relu'))\n\n#add neutron 4\nann.add(tf.keras.layers.Dense(units = 12,activation = 'relu'))\n\n#add neutron 5\nann.add(tf.keras.layers.Dense(units = 6,activation = 'relu'))\n\n#output neutron\nann.add(tf.keras.layers.Dense(units = 1,activation = 'sigmoid'))\n\n#compile\nann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n#training\nann.fit(X_train,y_train,batch_size = 32,epochs = 100)\n\n#prediction\n# print(ann.predict(sc.transform([[1,0,0,600,1,40,3,60000,2,1,1,50000]])))\n\n# Pridict test Results\ny_pred = ann.predict(X_test)\ny_pred = (y_pred > 0.5)\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), X_test.reshape(len(X_test),1)),1))\n\n# Confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_train, y_pred)\nprint(cm)\n\n# Evaluating the Model Performance\nfrom sklearn.metrics import accuracy_score\naccuracy = accuracy_score(y_train, y_pred)\nprint(accuracy)\n\n\nprint(ann.predict(X_train))\n\nsubmission=pd.DataFrame()\nsubmission['PassengerId'] = test['PassengerId']\nsubmission['Survived'] = ann.predict(X_test)\nsubmission.to_csv('submissionrd.csv',index=False)","repo_name":"dhanuvanth/Data_science_projects","sub_path":"Titanic/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38599292788","text":"from rest_framework import serializers\nfrom .models import Profile\nfrom django.contrib.auth.models import User\n\nclass UserSerializer(serializers.ModelSerializer):\n profile_url = 
serializers.HyperlinkedRelatedField(\n view_name='user-profile-detail',\n read_only=True,\n lookup_field='user',\n lookup_url_kwarg='pk'\n )\n\n class Meta:\n model = User\n fields = ('id', 'username', 'email', 'profile_url')","repo_name":"Bekbolsunn/Messenger","sub_path":"src/users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22178599405","text":"class DocumentOutput:\n def __init__(self,betaL1 =0.1,betaG1 = 1.9):\n self.betaL1 = betaL1\n self.betaG1 = betaG1\n self.RR = []\n self.RI = []\n self.NR = []\n self.presition= [] \n self.recovery= []\n self.FL1= []\n self.FG1= []\n self.F1= [] \n\n def CalculateStatistics(self):\n for i in range(len(self.RR)):\n RR = self.RR[i]\n RI = self.RI[i]\n NR = self.NR[i]\n\n rrUri = (len(RR | RI)) \n rrUnr = (len(RR | NR))\n\n if rrUri !=0: presition = len(RR)/rrUri\n else: presition = 0\n\n if rrUnr !=0: recovery = len(RR) /rrUnr\n else: recovery=0\n\n FL1 = ((1 + self.betaL1**2) * presition * recovery) / (self.betaL1*presition + recovery) if presition + recovery > 0 else 0\n FG1 = ((1 + self.betaG1**2) * presition * recovery) / (self.betaG1*presition + recovery) if presition + recovery > 0 else 0\n F1 = (2*presition * recovery) / (presition + recovery) if presition + recovery > 0 else 0\n self.presition.append(presition)\n self.recovery.append(recovery)\n self.FL1.append(FL1)\n self.FG1.append(FG1)\n self.F1.append(F1) \n\n\n\n def PrintAverages(self):\n print(\"\\nCalculing Statistics...\", end=\"\")\n self.CalculateStatistics()\n print(\"\\rDONE!! \")\n print(\"Averages: \")\n print(' Presicion:', self.mean(self.presition) )\n print(' Recobrado:', self.mean(self.recovery) )\n print(' F_(b<1):',self.mean( self.FL1) )\n print(' F_(b>1):', self.mean(self.FG1) )\n print(' F1:', self.mean(self.F1) )\n\n @staticmethod\n def mean( lis):\n return sum(lis) / len(lis)\n ","repo_name":"dcruzp/gugul-back","sub_path":"testers/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13573921513","text":"import ffmpeg\nimport boto3\nimport os\nimport re\nimport glob\nimport math\nimport subprocess\nfrom urllib.parse import unquote_plus\nfrom botocore.config import Config\nimport json\n\ns3_client = boto3.client('s3', os.environ['AWS_REGION'], config=Config(s3={'addressing_style': 'path'}, user_agent_extra=os.environ['user_agent_extra']))\nefs_path = os.environ['EFS_PATH']\ninstance_types = os.environ['instance_types']\nparallel_groups = int(os.environ['PARALLEL_GROUPS'])\n\ndef slice_video(key, segment_time, scale):\n key_store = key.replace('/','#')\n s3_client.download_file(os.environ['S3_BUCKET'], key, f'{efs_path}/{key_store}')\n\n try:\n probe = ffmpeg.probe(f'{efs_path}/{key_store}')\n except Exception as e:\n print(e.stderr.decode('utf8'))\n \n video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)\n if instance_types == 'inf1.xlarge':\n if video_stream['height']*video_stream['width']*scale*scale > 3840*2160:\n return -1\n \n if 'duration' in video_stream:\n duration = video_stream['duration']\n else:\n duration = probe['format']['duration']\n segment_num = math.ceil(float(duration)/int(segment_time))\n #segment_time = math.ceil(float(duration)/segment_num)\n #segment_files = glob.glob(os.path.join(efs_path, segment_prefix) + 
'_seg_[0-9][0-9][0-9]'+ segment_ext)\n #os.remove(f'{efs_path}/{key}')\n #for file in segment_files:\n # os.remove(file)\n return segment_num\n\ndef handler(event, context):\n if isinstance(event['body'], str):\n body = json.loads(event['body'])\n else:\n body = event['body']\n key = body['key']\n task = body.get('task', 'inference')\n scale = str(body.get('scale', '2'))\n env = body.get('env', 'spot')\n segment_time = body.get('segment_time', '60')\n if env == 'onDemand':\n env = 'SuperResolution_queue_onDemand'\n else:\n env = 'SuperResolution_queue_Spot'\n batchClient = boto3.client('batch')\n if task == 'debug':\n response = batchClient.submit_job(\n jobName=key.replace('#','_').replace('.','_') + \"-debug\",\n jobQueue=env,\n jobDefinition='SuperResolution',\n parameters={\n 'File': key,\n 'Scale': scale,\n 'SegmentTime': segment_time,\n 'TaskFlag': 'debug'\n },\n )\n return {\"statusCode\": 200, \"body\": key + \" debug\"}\n video_segments_num = slice_video(key, segment_time, int(scale))\n key_store = key.replace('/','#')\n if video_segments_num == -1:\n return {\n \"statusCode\": 400,\n \"body\": \"Video size exceeded the limit\"}\n print(key_store.replace('#','_').replace('.','_') + \"-Split\")\n response = batchClient.submit_job(\n jobName=key_store.replace('#','_').replace('.','_') + \"-Split\",\n jobQueue=env,\n jobDefinition='SuperResolution',\n parameters={\n 'File': key_store,\n 'Scale': scale,\n 'SegmentTime': segment_time,\n 'TaskFlag': 'split'\n },\n )\n if video_segments_num<=1:\n response = batchClient.submit_job(\n jobName=key_store.replace('/','_').replace('.','_') + \"-SR\",\n jobQueue=env,\n jobDefinition='SuperResolution',\n dependsOn=[{\n 'jobId': response['jobId']\n }],\n parameters={\n 'File': key_store,\n 'Scale': scale,\n 'SegmentTime': segment_time,\n 'TaskFlag': 'inference'\n },\n )\n else:\n response = batchClient.submit_job(\n jobName=key_store.replace('#','_').replace('.','_') + \"-SR\",\n jobQueue=env,\n arrayProperties={\n 'size': video_segments_num\n },\n jobDefinition='SuperResolution',\n dependsOn=[{\n 'jobId': response['jobId']\n }],\n parameters={\n 'File': key_store,\n 'Scale': scale,\n 'SegmentTime': segment_time,\n 'TaskFlag': 'inference'\n },\n )\n response = batchClient.submit_job(\n jobName=key_store.replace('#','_').replace('.','_') + \"-Merge\",\n jobQueue=env,\n jobDefinition='SuperResolution',\n dependsOn=list(map(lambda id: {\n 'jobId': id,\n 'type': 'SEQUENTIAL'\n }, [response['jobId']])),\n parameters={\n 'File': key_store,\n 'Scale': scale,\n 'SegmentTime': segment_time,\n 'TaskFlag': 'merge'\n },\n )\n return {\n \"statusCode\": 200,\n \"body\": key + \" started\"}\n","repo_name":"hoai/ai-video-super-resolution","sub_path":"lambda/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"9786805117","text":"import numpy as np\nimport pandas as pd\n\nclass TimeSeriesFeatures():\n\n def __init__(self, file_ts:str, file_ma:str):\n \n time_series = np.recfromtxt(file_ts)\n self.time_series = np.reshape(time_series, (1, time_series.size))\n ma_part = np.recfromtxt(file_ma)\n self.ma_part = np.reshape(ma_part, (1, ma_part.size))\n \n def create_feature_matrix(self, ar:int, ma:int):\n \n max_par = ar if ar > ma else ma\n len_matr = self.time_series.size - max_par\n\n feature_matrix = pd.DataFrame({\"intercept\":np.ones((1,len_matr))[0]}, index = range(len_matr))\n for i in range(1, ar+1):\n feature_matrix[f\"y(k-{i})\"] = 
self.time_series[0, max_par-i:-i]\n feature_matrix[\"v(k)\"] = self.ma_part[0, max_par:]\n for i in range(1, ma+1):\n feature_matrix[f\"v(k-{i})\"] = self.ma_part[0, max_par-i:-i]\n \n y = self.time_series[0,max_par:]\n return y, feature_matrix\n","repo_name":"artemkavara/time_series_kpi","sub_path":"Lab_1/feature_generation.py","file_name":"feature_generation.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11577043095","text":"from asyncio import sleep\nfrom aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom loader import dp\nfrom utils.db_api import quick_commands as commands\nfrom states import bot_mailing\nfrom data.config import admins\n\n@dp.message_handler(text='/mailing', chat_id=admins)\nasync def start_mailing(message: types.Message):\n await message.answer(f'Введите текст рассылки')\n # установление значения, используемое для отслеживания ввода рассылки\n await bot_mailing.text.set()\n\n@dp.message_handler(state=bot_mailing.text, chat_id=admins)\nasync def mailing_text(message: types.Message, state: FSMContext):\n answer = message.text\n markup = InlineKeyboardMarkup(row_width=2,\n inline_keyboard=[\n [\n InlineKeyboardButton(text='Добавить фото', callback_data='add_photo'),\n InlineKeyboardButton(text='Далее', callback_data='next'),\n InlineKeyboardButton(text='Отмена', callback_data='quit')\n ]\n ])\n await state.update_data(text=answer) # обновление состояния данными ответа\n await message.answer(text=answer, reply_markup=markup)\n await bot_mailing.state.set() # установление состояния\n\n@dp.callback_query_handler(text='next', state=bot_mailing.state, chat_id=admins)\nasync def start(call: types.CallbackQuery, state: FSMContext):\n users = await commands.select_all_users() # извлечение пользователей из БД\n data = await state.get_data()\n text = data.get('text')\n await state.finish() # завершение состояния\n for user in users:\n try:\n await dp.bot.send_message(chat_id=user.user_id, text=text)\n await sleep(0.33) # задержка каждого сообщения\n except Exception:\n pass\n await call.message.answer('Рассылка выполнена!')\n\n@dp.callback_query_handler(text='add_photo', state=bot_mailing.state, chat_id=admins)\nasync def add_photo(call: types.CallbackQuery):\n await call.message.edit_text(text='Пришлите фото')\n await bot_mailing.photo.set() # установление состояния\n\n@dp.message_handler(state=bot_mailing.photo, content_types=types.ContentType.PHOTO, chat_id=admins)\nasync def mailing_text(message: types.Message, state: FSMContext):\n photo_file_id = message.photo[-1].file_id # получение самой большой фото\n await state.update_data(photo=photo_file_id) # обновление состояния\n data = await state.get_data() # извлечение текущих данных\n text = data.get('text')\n photo = data.get('photo')\n markup = InlineKeyboardMarkup(row_width=2,\n inline_keyboard=[\n [\n InlineKeyboardButton(text='Далее', callback_data='next'),\n InlineKeyboardButton(text='Отмена', callback_data='quit')\n ]\n ])\n await message.answer_photo(photo=photo, caption=text, reply_markup=markup)\n\n@dp.callback_query_handler(text='next', state=bot_mailing.photo, chat_id=admins)\nasync def start(call: types.CallbackQuery, state: FSMContext):\n users = await commands.select_all_users() # извлечение пользователей из БД\n data = await state.get_data() # извлечение текущих данных\n text = data.get('text')\n photo = 
data.get('photo')\n await state.finish() # завершение состояния\n for user in users:\n try:\n await dp.bot.send_photo(chat_id=user.user_id, photo=photo, caption=text)\n await sleep(0.33) # задержка каждого сообщения\n except Exception:\n pass\n await call.message.answer('Рассылка выполнена!')\n\n@dp.message_handler(state=bot_mailing.text, chat_id=admins)\nasync def no_photo(message: types.Message):\n markup = InlineKeyboardMarkup(row_width=2,\n inline_keyboard=[\n [\n InlineKeyboardButton(text='Отмена', callback_data='quit')\n ]\n ])\n await message.edit_text('Пришлите фото', reply_markup=markup)\n\n@dp.callback_query_handler(text='quit', state=[bot_mailing.text, bot_mailing.photo, bot_mailing.state], chat_id=admins)\nasync def quit(call: types.CallbackQuery, state: FSMContext):\n await state.finish() # завершение состояния\n await call.message.answer('Рассылка отменена')","repo_name":"mwh4t/aiomtk","sub_path":"handlers/users/admin/bot_mailing.py","file_name":"bot_mailing.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37540592177","text":"class Solution:\n def isValid(self, s: str) -> bool:\n stack = []\n bracket_pairs = {')': '(', ']': '[', '}': '{'}\n\n for char in s:\n if char in bracket_pairs:\n # Current character is a closing bracket\n if not stack or stack.pop() != bracket_pairs[char]:\n return False # Invalid closing bracket\n else:\n # Current character is an opening bracket, push onto the stack\n stack.append(char)\n\n return not stack # String is valid if the stack is empty at the end\n","repo_name":"NitkarshChourasia/leetcodeJourneyThrough","sub_path":"0020-valid-parentheses/0020-valid-parentheses.py","file_name":"0020-valid-parentheses.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26955009197","text":"import subprocess\nimport socket\nfrom SerialManager import SerialManager\n\nHOST = \"\"\nPORT = 2727\nPACKET_LENGTH = 18\n\nprint(\"Starting serial communication...\")\nserialManager = SerialManager()\nprint(\"Serial communication started successfully\")\n\nprint(\"Starting control socket...\")\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind((HOST, PORT))\nprint(\"Control socket started\")\nwhile True:\n try: \n data, address = s.recvfrom(1024)\n print(\"Length: \" + str(len(data)))\n if(len(data) == PACKET_LENGTH ):\n serialManager.sendData(data)\n except Exception as e:\n print(e)\nif(s != null):\n s.close();\n","repo_name":"ezhor/BROOM","sub_path":"raspberry/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1088114345","text":"import os\nimport re\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport random\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport cv2\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom utils.utils import xywh2xyxy, xyxy2xywh\nfrom utils.data_utils import get_yolo_format, extract_annotation_file, split_data, create_yaml, get_hash, \\\n cache_labels, random_affine, load_image, load_mosaic, augment_hsv\n\nlabels = ['with_mask', 'mask_weared_incorrect', 'without_mask']\n\n\nclass Dataset(Dataset):\n def 
__init__(self, data_dir, idx1, idx2, hyp=None, batch_size=16, augment=None, rect=None):\n super(Dataset, self).__init__()\n self.hyp = hyp\n img_files = [data_dir + '/images/' + img_file for img_file in os.listdir(os.path.join(data_dir, 'images'))\n if img_file[-4:] == '.png']\n ann_files = [img_file.replace('images', 'annotations')[:-4] + '.xml' for img_file in img_files]\n split = split_data(img_files)\n img_files.sort(key=lambda x: int(re.sub('[^0-9]', '', x)))\n ann_files.sort(key=lambda x: int(re.sub('[^0-9]', '', x)))\n\n self.img_files = img_files[split[idx1]:split[idx2]+split[idx1]]\n self.ann_files = ann_files[split[idx1]:split[idx2]+split[idx1]]\n self.file_dir = data_dir\n self.augment = augment\n self.rect = rect\n self.mosaic_border = [-640 // 2, -640 // 2]\n cache_path = str(Path(ann_files[0]).parent) + 'cache'\n if os.path.isfile(cache_path):\n cache = torch.load(cache_path)\n if cache['hash'] != get_hash(self.ann_files + self.img_files):\n cache = cache_labels(self, cache_path)\n else:\n cache = cache_labels(self, cache_path)\n\n # Get labels\n labels, shapes = zip(*[cache[x] for x in self.img_files])\n self.shapes = np.array(shapes, dtype=np.float64)\n self.labels = list(labels)\n\n if self.rect:\n bi = np.floor(np.arange(len(self.img_files)) / batch_size).astype(np.int)\n nb = bi[-1] + 1\n\n ar = self.shapes[:, 1] / self.shapes[:, 0]\n irect = ar.argsort()\n self.img_files = [self.img_files[i] for i in irect]\n self.ann_files = [self.ann_files[i] for i in irect]\n self.labels = [self.labels[i] for i in irect]\n self.shapes = self.shapes[irect]\n ar = ar[irect]\n\n shapes = [[1, 1]] * nb\n for i in range(nb):\n ari = ar[bi == i]\n mini, maxi = ari.min(), ari.max()\n if maxi < 1:\n shapes[i] = [maxi, 1]\n elif mini > 1:\n shapes[i] = [1, 1 / mini]\n\n self.batch_shapes = np.ceil(np.array(shapes) * 640 / 32).astype(np.int) * 32 # 640 : img_size, 32 : stride\n\n gb = 0\n pbar = tqdm(range(len(self.img_files)), desc='caching image')\n self.imgs = [None] * self.__len__()\n self.img_hw0 = [None] * self.__len__()\n self.img_hw = [None] * self.__len__()\n for i in pbar:\n self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i)\n gb += self.imgs[i].nbytes\n pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)\n\n def __len__(self):\n return len(self.img_files)\n\n def __getitem__(self, index):\n image, label = load_mosaic(self, index)\n shapes = None\n if self.augment:\n augment_hsv(image, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])\n\n nL = len(label)\n if nL:\n label[:, 1:5] = xyxy2xywh(label[:, 1:5])\n\n label[:, [2, 4]] /= image.shape[0]\n label[:, [1, 3]] /= image.shape[1]\n\n # random left-right flip\n if self.augment:\n if random.random() < 0.5:\n image = np.fliplr(image)\n if nL:\n label[:, 1] = 1 - label[:, 1]\n\n label_out = torch.zeros((nL, 6))\n if nL:\n label_out[:, 1:] = torch.from_numpy(label)\n\n # Convert\n image = image[:, :, ::-1].transpose(2, 0, 1)\n image = np.ascontiguousarray(image)\n\n data = {'image': torch.from_numpy(image), 'label': label_out}\n\n return data","repo_name":"IDWSM/detector","sub_path":"data/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15319435608","text":"vertexList = ['A', 'B', 'C', 'D', 'E', 'F', 'G']\nedgeList = [(0,1), (1,2), (1,3), (3,4), (4,5), (1,6)]\ngraphs = (vertexList, edgeList)\n\ndef bfs(graph, start):\n vertexList, edgeList = graph\n 
visitedList = []\n queue = [start]\n adjacencyList = [[] for vertex in vertexList]\n\n # fill adjacencyList from graph\n for edge in edgeList:\n adjacencyList[edge[0]].append(edge[1])\n\n # bfs\n while queue:\n current = queue.pop()\n for neighbor in adjacencyList[current]:\n if not neighbor in visitedList:\n queue.insert(0,neighbor)\n visitedList.append(current)\n return visitedList\n\nprint(bfs(graphs, 0))\n\n\n#Detect Cycle in Undirect Graph (BFS): while Breadth First Search (BFS) traveral, if an already visited node is found, graph this cycle using bfs.\n#interview question? Find the cycle in the graph using Breadth First Search (BFS)? the output should be: [0, 1, 2, 3, 6, 4, 5]\n#interview question:find the shortest path? you can use bfs to find the shortest path using the dykstra algo\n","repo_name":"adamphopal/software-engineering","sub_path":"graph/bfs_queue.py","file_name":"bfs_queue.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34045432142","text":"# Author: Alejandro Sanchez\n# Class: BTE 499\n# Date: 2 April 2019\n# Assignment: Take Home Midterm - Question 2\n\n\ndef is_leap_year(year):\n leap = False\n\n if year % 4 == 0:\n leap = True\n if year % 100 == 0:\n leap = False\n if year % 400 == 0:\n leap = True\n\n return leap\n\n\ndef len_of_month(year, month):\n length = 31\n\n if (month % 2 == 0 and month < 8) or (month % 2 == 1 and month > 8):\n if month == 2:\n if is_leap_year(year):\n length = 29\n else:\n length = 28\n else:\n length = 30\n\n return length\n\n\ndef day_of_year(year, month, day):\n total_days = 0\n\n if month > 1:\n prev_months = [i for i in range(1, month)]\n\n for prev_month in prev_months:\n total_days += len_of_month(year, prev_month)\n\n total_days += day\n\n return total_days\n\n\ndef day_of_century(year, month, day, start_year=2000):\n total_days = 0\n\n if year > start_year:\n prev_years = [i for i in range(start_year, year)]\n\n for prev_year in prev_years:\n total_days += day_of_year(prev_year, 12, 31)\n\n total_days += day_of_year(year, month, day)\n\n return total_days\n\n\ndef day_of_forever(year, month, day):\n return day_of_century(year, month, day, 0)\n\n\ndef day_of_week(year, month, day, start_day=0):\n day_count = day_of_forever(year, month, day)\n\n weekday = (day_count - start_day) % 7\n\n return weekday\n\n\ndef month_calendar(start_day=0):\n weekdays = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']\n\n year_str, month_str = input('Enter a year and a month separated by a space: ').split()\n start_weekday = input('Enter a calendar start day [Mo, Tu, We, Th, Fr, Sa, Su]: ').title()[:2]\n year = eval(year_str)\n month = eval(month_str)\n\n output_start_day = weekdays.index(start_weekday)\n resorted_weekdays = weekdays[output_start_day:] + weekdays[:output_start_day]\n\n col_counter = 0\n\n print()\n\n for weekday in resorted_weekdays:\n print(weekday, end=' ')\n\n print()\n\n if day_of_week(year, month, 1, start_day) != output_start_day:\n for i in range(resorted_weekdays.index(weekdays[day_of_week(year, month, 1, start_day)])):\n print(' ', end=' ')\n col_counter += 1\n\n for i in range(1, len_of_month(year, month)+1):\n str_i = str(i) if i > 9 else ' ' + str(i)\n print(str_i, end=' ')\n col_counter += 1\n\n if col_counter % 7 == 0:\n print()\n\n\ndef sundays_in_year(year, start_day=0):\n total_sundays = 0\n for i in range(1, 13):\n for j in range(1, len_of_month(year, i)+1):\n if day_of_week(year, i, j, start_day) == 6:\n 
total_sundays += 1\n\n print('Total Sundays in {x}:'.format(x=year), total_sundays)\n\n\ndef main():\n # Align to correct day, with Jan 1 0 being a Thursday\n starting_day = 3\n\n # Part 1 - Length of Month\n print('Length of January 2009:', len_of_month(2009, 1))\n print('Length of February 2009:', len_of_month(2009, 2))\n print('Length of February 2008:', len_of_month(2008, 2))\n print()\n\n # Part 2 - Day of the Year\n print('Day of the Year value of January 1:', day_of_year(2009, 1, 1))\n print('Day of the Year value of January 2:', day_of_year(2009, 1, 2))\n print('Day of the Year value of February 1:', day_of_year(2009, 2, 1))\n print()\n\n # Part 3 - Day of the Century\n print('Day of the Century value of January 1, 2000:', day_of_century(2000, 1, 1))\n print('Day of the Century value of December 31, 2000:', day_of_century(2000, 12, 31))\n print('Day of the Century value of January 1, 2001:', day_of_century(2001, 1, 1))\n print()\n\n # Part 4 - Day of Forever\n print('Day of the Century value of January 1, 2000:', day_of_forever(2000, 1, 1))\n print('Day of the Century value of July 4, 1776:', day_of_forever(1776, 7, 4))\n print('Day of the Century value of October 2, 2012:', day_of_forever(2012, 10, 2))\n print('Day of the Century value of October 3, 2012:', day_of_forever(2012, 10, 3))\n print('Day of the Century value of October 4, 2012:', day_of_forever(2012, 10, 4))\n print('Day of the Century value of November 27, 2737:', day_of_forever(2737, 11, 27))\n print('Day of the Century value of January 1, 10:', day_of_forever(10, 1, 1))\n print()\n\n # Part 5 - Day of the Week\n print('Day of the week of April 1, 2019:', day_of_week(2019, 4, 1, starting_day))\n print('Day of the week of April 2, 2019:', day_of_week(2019, 4, 2, starting_day))\n print('Day of the week of April 3, 2019:', day_of_week(2019, 4, 3, starting_day))\n print('Day of the week of April 4, 2019:', day_of_week(2019, 4, 4, starting_day))\n print('Day of the week of April 5, 2019:', day_of_week(2019, 4, 5, starting_day))\n print('Day of the week of April 6, 2019:', day_of_week(2019, 4, 6, starting_day))\n print('Day of the week of April 7, 2019:', day_of_week(2019, 4, 7, starting_day))\n print()\n\n # Part 6 + 7 - Calendar for a month, any year, any start day\n month_calendar(starting_day)\n print('\\n')\n month_calendar(starting_day)\n print('\\n')\n\n # Part 8 - Sundays per year\n sundays_in_year(2019, starting_day)\n\n\nmain()\n","repo_name":"alex-sa-ur/python-bte499","sub_path":"bte499.hw6.alejandrosanchezuribe/Question 2.py","file_name":"Question 2.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70445950890","text":"from functools import reduce\n\nanimals = [\"dog\", \"cat\", \"bird\"]\n\n\n# 1 Capitalize all of the pet names and print the list\ndef capitallize(item):\n return item.upper()\n\n\nmy_pets = [\"sisi\", \"bibi\", \"titi\", \"carla\"]\npets = map(capitallize, my_pets)\n\nprint(*pets)\n# 2 Zip the 2 lists into a list of tuples, but sort the numbers from lowest to highest.\nmy_strings = [\"a\", \"b\", \"c\", \"d\", \"e\"]\nmy_numbers = [5, 4, 3, 2, 1]\nprint(list(zip(my_strings, my_numbers)))\n\n\n# 3 Filter the scores that pass over 50%\ndef over50(item):\n return item > 50\n\n\nscores = [73, 20, 65, 19, 76, 100, 88]\nprint(list(filter(over50, scores)))\n\n\n# 4 Combine all of the numbers that are in a list on this file using reduce (my_numbers and scores). 
What is the total?\ndef accumulate(acc, item):\n print(acc + item)\n return acc + item\n\n\nprint(reduce(accumulate, my_numbers, (reduce(accumulate, scores, 0))))\n\nmylist = [5, 4, 3]\n\nprint(list(map(lambda item: item**2, mylist)))\n\na = [(0, 2), (4, 3), (9, 9), (10, -1)]\n\na.sort(key=lambda x: x[1])\nprint(a)\n\nsome_list2 = [\"a\", \"b\", \"c\", \"b\", \"d\", \"m\", \"n\", \"n\"]\n\nnon_repeated = list[{char for char in some_list2 if some_list2.count(char) > 1}]\n\nprint(non_repeated)\n","repo_name":"OmarHeshamShehab/ZTM-Python","sub_path":"Section 7 Functional Programing/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42674841893","text":"import re\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom math import inf\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nfrom days.day import Day\nfrom tests import test_day16\nfrom tools.visualizer import GraphvizVisualizer\n\n\n@dataclass(frozen=True)\nclass Room:\n id: str\n flow: int\n links_to: (str,)\n\n def __str__(self):\n return f\"{self.id} ({self.flow})\"\n\n\nLINE_REGEX = r\"^Valve (?P[A-Z]{2}) has flow rate=(?P\\d+); tunnels? leads? to valves? (?P.+)$\"\n\n\ndef parse_room(line):\n result = re.search(LINE_REGEX, line)\n return Room(\n id=result.group(\"valve\"),\n flow=int(result.group(\"flow\")),\n links_to=tuple(result.group(\"links\").split(\", \")),\n )\n\n\nREMAINING_TOTAL = 30\n\nSTART_ROOM_ID = \"AA\"\n\n\nclass Day16(Day):\n def solve_part_1(self):\n def plot_dp():\n df = pd.DataFrame(dp)\n sns.heatmap(df, annot=True, vmin=0)\n plt.show()\n\n rooms = self.input_lines(parse_room)\n room_map: dict[Room] = {r.id: r for r in rooms}\n dp = {r.id: [-inf] * (REMAINING_TOTAL + 1) for r in rooms}\n opened = {r.id: [False] * (REMAINING_TOTAL + 1) for r in rooms}\n\n next_queue = deque()\n next_queue.append(START_ROOM_ID)\n dp[START_ROOM_ID][0] = 0\n elapsed = 1\n breaker = 0\n break_at = 20\n\n while elapsed <= 30 and breaker < break_at:\n plot_dp()\n print(\"elapsed =\", elapsed)\n remaining = REMAINING_TOTAL - elapsed\n queue = next_queue\n next_queue = deque()\n\n while queue and breaker < break_at:\n breaker += 1\n curr: Room = room_map[queue.pop()]\n print(curr)\n visited[curr.id][elapsed] = True\n\n max_if_moving = max(\n (dp[linked][elapsed - 1] for linked in curr.links_to),\n default=0,\n )\n max_if_opening = (\n dp[curr.id][elapsed - 1] + curr.flow * remaining\n )\n dp[curr.id][elapsed] = max(max_if_moving, max_if_opening)\n for linked in curr.links_to:\n if not visited[linked][elapsed]:\n next_queue.append(linked)\n\n elapsed += 1\n\n # show_rooms(room_map)\n\n def solve_part_2(self):\n raise RuntimeError(\"Not yet implemented!\")\n\n\nclass GraphvizRoomsVisualizer(GraphvizVisualizer):\n def build_graph(self, room_map: dict[Room]):\n for room in room_map.values():\n self.graph.node(str(room))\n\n added = set()\n for room in room_map.values():\n for l in room.links_to:\n linked_room: Room = room_map[l]\n if (linked_room, room) not in added:\n added.add((room, linked_room))\n self.graph.edge(str(room), str(linked_room))\n\n def show(self):\n self.graph.view()\n\n\ndef show_rooms(room_map):\n visualizer = GraphvizRoomsVisualizer()\n visualizer.build_graph(room_map)\n visualizer.show()\n\n\nif __name__ == \"__main__\":\n 
Day16(test_day16.EXAMPLE_INPUT).solve_part_1()\n","repo_name":"HelloThisIsFlo/AdventOfCode","sub_path":"2022/python/days/day_16.py","file_name":"day_16.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32165722013","text":"'''\nSe abordan los temas:\n\nRegresión lineal.\nDescenso del gradiente.\n'''\n#Un modelo lineal se define como: 𝑌=𝑊𝑋+𝑏\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(2) #aseguramos que el resultado del random es el mismo en cada ejecucion\nm = 100\nX = 2 * np.random.rand(m,1)\nW = np.random.randint(3,8)+np.random.rand()\nb = np.random.randint(3,8)+np.random.rand()\nY = W * X + b #regreson lineal\nY += np.random.randn(m,1) #ruido\n\nplt.scatter(X, Y, color='red')\nplt.xlabel(\"x\", fontsize=15)\nplt.ylabel(\"y\", fontsize=15)\nplt.show()\n\n'''\nLa regresión lineal consiste en hallar los valores de 𝑊 y 𝑏 tales que definan una recta\nque se acerque lo mejor posible a los valores originales de 𝑌 dados los 𝑋 de entrada.\nEn este punto, podemos adivinar dichos valores?\n'''\n\n# W = None\n# b = None\nY_pred = W * X + b\nplt.scatter(X, Y, color='red')\nplt.plot(X, Y_pred)\nplt.xlabel(\"x\", fontsize=15)\nplt.ylabel(\"y\", fontsize=15)\nplt.show()\n","repo_name":"atehortua1907/IA_ITM_David","sub_path":"Repaso/Machine_Learning/MachineLearningRegresionLineal.py","file_name":"MachineLearningRegresionLineal.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12969726027","text":"# Python Class 1889\n# Lesson 7 Problem 2\n# Author: madmathninja (272729)\n\nimport turtle\n\ndef teleport_and_draw(x,y):\n carol.pendown()\n carol.goto(x, y)\n\ndef teleport_and_no_draw(x,y):\n carol.penup()\n carol.goto(x, y)\n\n\n# set up window and TT\nwn = turtle.Screen()\ncarol = turtle.Turtle()\n\n# listeners to teleport\nwn.onclick(teleport_and_draw,1) # left click\nwn.onclick(teleport_and_no_draw,3) # right click\n\n# turn on the listeners and run\nwn.listen()\nwn.mainloop()","repo_name":"matthewru/PythonLearning","sub_path":"AOPS_Intermediate_Python/Week7/ChallengeProblem2.py","file_name":"ChallengeProblem2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2649594893","text":"# coding=utf-8\nfrom cqhttp import CQHttp\nimport re\nimport sys\nimport argparse\nimport requests\nimport traceback\nimport time\n\nbot = CQHttp(api_root='')\npigeon= False\ngroup_list=(253370800,781269903,808234958)\npower_list=(895853325,)\nwelcomeList=[]\ntimestamp=time.time()\nwelcome_newbie= \"Hi,欢迎加入 CNSS 2018 招新群 XD \\n\\n \"\\\n \"请先阅读以下事项:\\n\\n\" \\\n \"* 夏令营平台: summer.cnss.io\\n\\n\"\\\n \"* 为了让大家更好的相互了解,请先阅读群公告本群须知,更改一下群名片\\n\"\\\n \"* 如有任何疑问,请在群里艾特管理员提问\"\nwhy_at_me=\"为啥AT我?咕咕咕~\"\npower_at_me=\"大哥好~我只是一只小鸽子\"\ncard_pattern = R\"\\d{2}[-].*\"\n\ndef check_group_card():\n try:\n info = bot.get_group_member_list(group_id=781269903)\n to_shot_list = []\n to_shot_msg = ''\n for each in info:\n member = bot.get_group_member_info(group_id=int(each['group_id']), user_id=int(each['user_id']),no_cache=False)\n card = member['card']\n print(card)\n if not re.match(card_pattern, card):\n to_shot_list.append(member)\n for each in to_shot_list:\n to_shot_msg+= ('[CQ:at,qq=%d]'% each['user_id'])\n to_shot_msg+='\\n群名片不符合规范,请参照格式更改'\n 
bot.send_group_msg(group_id=253370800,message=to_shot_msg) #bot messsage群\n except:\n print('check_group_card error')\n\n\ndef check(context): #检测群\n group=context.get('group_id')\n person=context.get('user_id')\n if group in group_list:\n print(\"botMessage\")\n return True\n return False\n\ndef check_man(context): #检测powerMan\n person=context.get('user_id')\n if person in power_list:\n return True\n return False\n\ndef switch_pigeon(pigeon): #复读模式切换\n if pigeon:\n return False\n else:\n return True\n\n\ndef menu(context):\n menu_list = \"施工中ing 请耐心等待\"\n\n context['message'] = context['message'].strip()\n context['message_no_CQ'] = re.sub(R'\\[CQ:[^\\]]*\\]', '', context['message']) #去除CQ码\n context['message_no_CQ']=context['message_no_CQ'].strip()\n print(context['message_no_CQ'])\n if not (context['message'] and context['message_no_CQ'][0] in ('-',)):\n bot.send(context,power_at_me)\n return\n if '-h' in context['message_no_CQ']:\n bot.send(context, message=menu_list)\n return\n if '-checkCard' in context['message_no_CQ']:\n check_group_card()\n return\n bot.send(context,power_at_me)\n return\n\n\n\n@bot.on_message('private','group')\ndef handle_msg(context):\n global timestamp\n global welcomeList\n is_group=bool(context.get('group_id'))\n if is_group:\n at_me = '[CQ:at,qq=%d]' % 1751065040 # 机器人被@\n if at_me in context['message']:\n at = '[CQ:at,qq=%d] ' % context['user_id']\n if not check_man(context):\n bot.send(context,why_at_me)\n else:\n menu(context)\n if check(context) and pigeon:\n print(context)\n return {'reply': context['message'], 'at_sender': False}\n if \"鸽\" in context['message']:\n return {'reply':\"咕咕咕\",'at_sender':False}\n if (time.time()-timestamp)>=600 and len(welcomeList)!=0:\n welcomMessage = ''\n for each in welcomeList:\n at = ('[CQ:at,qq=%d] ' % each)\n welcomMessage += at\n group = context.get('group_id')\n bot.send_group_msg(group_id=group, message=welcomMessage+welcome_newbie)\n welcomeList.clear()\n timestamp=time.time()\n return\n\n@bot.on_notice('group_increase')\ndef handle_group_increase(context):\n if check(context):\n print(context)\n welcomeList.append(context['user_id'])\n welcomMessage= ''\n if len(welcomeList) == 6 :\n for each in welcomeList:\n at = ('[CQ:at,qq=%d] ' % each)\n welcomMessage += at\n group = context.get('group_id')\n bot.send_group_msg(group_id=group, message=welcomMessage+welcome_newbie)\n welcomeList.clear()\n print(context['user_id'])\n #bot.send(context, message=at+welcome_newbie,is_raw=False) # 发送欢迎新人\n return\n\n@bot.on_request('group', 'friend')\ndef handle_request(context):\n return {'approve': True} # 同意所有加群、加好友请求\n\nbot.run(host='0.0.0.0',port=8080)\n","repo_name":"TangentHuang/cnss-qqbot","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12210944342","text":"f = [1 for _ in range(1000001)] # 미리 최대치까지 저장할 리스트 만든다.\nfor i in range(2, 1000001): # 2부터 1000000까지 반복 횟수 만들다.\n cnt = 1 # 자기자신도 횟수에 포함된다.\n n = i #원 값을 보존하다.\n while 1:\n if n % 2 == 1: # 만약 홀수일 경우\n n = n*3 + 1\n cnt += 1\n n //= 2 #짝수 일때 2로 나눈다.\n #만약 자기자신값보다 낮으면 그 값과 지금 까지 반복횟수를 구하고 빠져나온다.\n if n < i: \n f[i] = cnt + f[n]\n break\n cnt +=1 #자기자신보다 낮기전까지 반복횟수 구한다.\n\ndata = ','.join(iter(input,''))\nfor i in data.split(','):\n a, b = map(int, i.split())\n x, y = a, b #만약 뒤보다 앞이 크면 바꾼다.\n if x> y: x, y= y, x\n print(a, b, max(f[x:y]))","repo_name":"jih3508/Study-Algorithm","sub_path":"Python/CodingStar/Gold1/G1_-01The 
3n+1 problem.py","file_name":"G1_-01The 3n+1 problem.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27085366529","text":"frase='Teria maior confiança no desempenho de um homem que espera ter uma grande recompensa do que no daquele que já a recebeu/Não devemos de forma alguma preocupar-nos com o que diz a maioria, mas apenas com a opinião dos que têm conhecimento do justo e do injusto, e com a própria verdade.'\n\ni=0\nqtd_apareceu_mais=0\nletra_apareceu_mais_vezes=''\nwhile i x:\n right = mid - 1\n else:\n left = mid + 1\n return right\n\n\n# 牛顿迭代\nclass Solution:\n def mySqrt(self, x: int) -> int:\n if x == 0:\n return 0\n \n C, x0 = float(x), float(x)\n while True:\n xi = 0.5 * (x0 + C / x0) # 像是面积为C 长x0 宽C/x0 为了使长与宽相等 xi = (x0 + C / x0) / 2\n if abs(x0 - xi) < 1e-7:\n break\n x0 = xi\n \n return int(x0)\n\n'''\n如果使用 >= 或者 < \nleft返回第一个大于等于目标值的数,如果存在目标数,则返回的就是该值,如果均小于\nrihgt返回第一个小于该目标值的数,如果列表中均大于,指向-1\n\n如果使用 > 或者 <= \nleft返回第一个大于目标值的数,\nrihgt返回第一个小于等于该目标值的数,如果存在目标数,则返回的就是该值\n'''\n\n\ns = 9\nS = Solution() \nre = S.mySqrt(s)\nprint(re) \n \n ","repo_name":"He1o/NootBook_LeetCode","sub_path":"old/Def/69.x的平方根.py","file_name":"69.x的平方根.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70508026088","text":"from reprlib import aRepr\nfrom pathlib import Path\nimport scanpy as sc\nimport scanpy.external as sce\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport argparse\nimport os\nimport utils\nimport warnings\n\n'''\nIntegrate the merged samples using Harmony and save the AnnData object\n'''\n############################### BOOOORIING STUFF BELOW ############################### \n# Warning settings\nwarnings.simplefilter(action='ignore')\nsc.settings.verbosity = 0\n# Set figure params\nsc.set_figure_params(scanpy=True, facecolor=\"white\", dpi=80, dpi_save=300)\n# Read command line and set args\nparser = argparse.ArgumentParser(prog='qc', description='Run intergration by Harmony')\nparser.add_argument('-i', '--input_path', help='Input path to merged object', required=True)\nparser.add_argument('-o', '--output_dir', help='Output directory where to store the object', required=True)\nparser.add_argument('-an', '--analysis_name', help='Analysis name', required=True)\nparser.add_argument('-st', '--sample_type', default=\"sc\", help='Sample type', required=False)\nargs = vars(parser.parse_args())\ninput_path = args['input_path']\noutput_path = args['output_dir']\nanalysis_name = args['analysis_name'] # sc_integrate\nsample_type = args['sample_type']\n# Get necesary paths and create folders if necessary\nS_PATH, DATA_PATH, OUT_DATA_PATH, PLOT_PATH = utils.set_n_return_paths(analysis_name)\n############################### BOOOORIING STUFF ABOVE ############################### \n\nprint(\"Reading merged object...\")\n# Read merged object\nadata = sc.read_h5ad(input_path)\n# print(adata.obs)\nprint(f\"Number of cells: {adata.shape[0]}\")\nprint(\"Running harmony ...\")\n\nif sample_type==\"atlas\":\n # Run harmony\n print(\"Correcting for batch, technology, study\")\n sce.pp.harmony_integrate(adata, ['batch',\"technology\", \"study\"], adjusted_basis='X_pca', max_iter_harmony=30)\nelse:\n sce.pp.harmony_integrate(adata, 'batch', adjusted_basis='X_pca', max_iter_harmony=30)\n\nprint(\"Computing neighbours ...\")\n# Run umap with updated 
connectivity\nsc.pp.neighbors(adata)\nsc.tl.umap(adata)\n\nplt.rcParams['figure.dpi']= 300\nplt.rcParams[\"figure.figsize\"] = (10,10)\nplt.rcParams[\"legend.fontsize\"] = 'xx-small'\nplt.rcParams[\"legend.loc\"] = \"upper right\"\nplt.rcParams['axes.facecolor'] = \"white\"\n\n\"\"\"# the number of genes expressed in the count matrix\nsc.pl.umap(\n adata, color=[\"condition\", \"n_genes_by_counts\"], color_map =plt.cm.afmhot, \n title= [\"Condition\", \"Num of exp. genes\"], s=10, frameon=False, ncols=2, show=True, save=f\"{sample_type}_all_condition_harmony\"\n)\"\"\"\nif sample_type==\"atlas\":\n plt.rcParams['figure.dpi']= 300\n plt.rcParams['figure.figsize']= (45, 30)\nelse:\n plt.rcParams['figure.dpi']= 300\n plt.rcParams['figure.figsize']= (15, 10)\n\nsc.pl.umap(\n adata, color=\"condition\",\n title= \"Condition\", size=10, frameon=False, show=True, save=f\"{sample_type}_all_condition_harmony\"\n)\n\n\"\"\"rows = 2\ncolumns = 2\ngrid = plt.GridSpec( rows, columns, wspace = .4, hspace = .4)\nplot_list = [\"condition\", \"doublet_score\", \"n_genes_by_counts\", \"pct_counts_mt\"]\nfor i in range(rows * columns):\n plt.subplot(grid[i])\n c_ax = plt.gca()\n sc.pl.umap(adata, ax=c_ax, color=[plot_list[i]], color_map =plt.cm.afmhot, frameon=False, show=True)\"\"\"\n\nprint(\"Saving the integrated object...\")\n# Write to file\nadata.write(os.path.join(output_path, f'{sample_type}_integrated.h5ad'))\n\n# python integrate.py -i ../data/out_data/sc_merged.h5ad -o ../data/out_data\n# python sc_integrate.py -i ../data/out_data/atlas_merged.h5ad -o ../data/out_data -st atlas -an atlas_integrate\n# python sc_integrate.py -i ../data/out_data/sc_merged.h5ad -o ../data/out_data -st atlas -an sc_integrate\n\n# python sc_integrate.py -i ../data/out_data/sc_merged.h5ad -o ../data/out_data -st atlas -an sc_integrate\n\n# python sc_integrate.py -i ../data/out_data/sc_epicells_merged.h5ad -o ../data/out_data -st sc_epicells -an sc_epicells_aom_noaom_integrate\n# python sc_integrate.py -i ../data/out_data/sc_epicells_aom_noaom_merged.h5ad -o ../data/out_data -st sc_epicells_aom_noaom -an sc_epicells_aom_noaom_integrate_2\n","repo_name":"saezlab/CRCDiet","sub_path":"bin/sc_integrate.py","file_name":"sc_integrate.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70069584808","text":"import pytest\nfrom rftg.card import (\n load_file,\n load_one,\n TYPE_DEVELOPMENT,\n TYPE_WORLD,\n GOODTYPE_NOVELTY,\n GOODTYPE_RARE,\n GOODTYPE_GENE,\n GOODTYPE_ALIEN,\n build_base_set_1st_edition,\n get_subset,\n)\n\n\ndef test_load_one():\n lines = [\n \"N:Contact Specialist\",\n \"T:2:1:1\",\n \"E@0:2@1:1\",\n \"P:3:EXTRA_MILITARY:-1:0\",\n \"P:3:PAY_MILITARY:1:0\",\n ]\n res = load_one(lines)\n assert \"Contact Specialist\" == res.name\n assert TYPE_DEVELOPMENT == res.type\n\n\ndef test_load_one_with_flags():\n lines = [\n \"N:Gateway Station\",\n \"T:1:0:0\",\n \"E@0:1\",\n \"#E:1:1:1:1:1\",\n \"G:ANY\",\n \"F:PROMO\",\n ]\n res = load_one(lines)\n assert res.flags is not None\n assert TYPE_WORLD == res.type\n assert \"PROMO\" in res.flags\n\n\ndef test_load_one_with_extra_victory():\n lines = [\n \"N:Pan-Galactic League\",\n \"T:2:6:0\",\n \"E@0:1\",\n \"P:3:EXTRA_MILITARY:-1:0\",\n \"P:5:DRAW_WORLD_GENE:1:0\",\n \"V:2:GENE_PRODUCTION:N/A\",\n \"V:2:GENE_WINDFALL:N/A\",\n \"V:1:MILITARY:N/A\",\n \"V:3:NAME:Contact Specialist\",\n ]\n res = load_one(lines)\n assert TYPE_DEVELOPMENT == res.type\n assert 
len(res.extra_victory) > 0\n\n\ndef test_load_one_with_produce():\n lines = [\n \"N:New Vinland\",\n \"T:1:2:1\",\n \"E@0:1\",\n \"G:NOVELTY\",\n \"P:4:CONSUME_ANY | GET_2_CARD:1:1\",\n \"P:5:PRODUCE:0:0\",\n ]\n res = load_one(lines)\n assert TYPE_WORLD == res.type\n assert len(res.powers) == 2\n assert GOODTYPE_NOVELTY == res.goodtype\n assert \"5\" in res.powers\n assert len(res.powers[\"5\"]) == 1\n\n\ndef test_load_file():\n CARD_TYPES = load_file(\"cards.txt\")\n assert len(CARD_TYPES) == 280\n\n\ndef test_gambling_world():\n lines = [\n \"N:Gambling World\",\n \"T:1:1:1\",\n \"E@0:1@2:-1\",\n \"P:4:CONSUME_ANY | GET_VP:1:1\",\n \"P:4:DRAW_LUCKY:0:0\",\n ]\n res = load_one(lines)\n assert TYPE_WORLD == res.type\n assert len(res.expansion) == 2\n assert \"0\" in res.expansion\n assert res.expansion[\"0\"] == 1\n assert \"2\" in res.expansion\n assert res.expansion[\"2\"] == -1\n\n\ndef test_build_base_set_1st_edition():\n CARD_TYPES = load_file(\"cards.txt\")\n cards = build_base_set_1st_edition(CARD_TYPES)\n\n nb_base = 0\n nb_military_worlds = 0\n nb_no_military_worlds = 0\n nb_dev_6 = 0\n nb_dev_no_6 = 0\n nb_military_worlds_all = 0\n nb_military_worlds = dict()\n nb_military_worlds = dict()\n for i in range(8):\n nb_military_worlds[i] = list()\n nb_worlds_windfall_total = 0\n nb_worlds_windfall = dict()\n nb_worlds_windfall[GOODTYPE_NOVELTY] = 0\n nb_worlds_windfall[GOODTYPE_RARE] = 0\n nb_worlds_windfall[GOODTYPE_GENE] = 0\n nb_worlds_windfall[GOODTYPE_ALIEN] = 0\n nb_worlds_produce_total = 0\n nb_worlds_produce = dict()\n nb_worlds_produce[GOODTYPE_NOVELTY] = 0\n nb_worlds_produce[GOODTYPE_RARE] = 0\n nb_worlds_produce[GOODTYPE_GENE] = 0\n nb_worlds_produce[GOODTYPE_ALIEN] = 0\n\n nb_starts = 0\n\n nb_cost = dict()\n for i in range(8):\n nb_cost[i] = list()\n for card in cards:\n card_type = card[\"type\"]\n if \"START\" in card_type.flags:\n nb_base = nb_base + 1\n else:\n if (\"MILITARY\" not in card_type.flags) and (card_type.type == TYPE_WORLD):\n nb_no_military_worlds = nb_no_military_worlds + 1\n if (\n (card_type.cost == 6)\n and (card_type.type == TYPE_DEVELOPMENT)\n and (len(card_type.extra_victory) > 0)\n ):\n nb_dev_6 = nb_dev_6 + 1\n if (\n (card_type.cost < 6)\n and (card_type.type == TYPE_DEVELOPMENT)\n and (len(card_type.extra_victory) == 0)\n ):\n nb_dev_no_6 = nb_dev_no_6 + 1\n\n if (card_type.type == TYPE_WORLD) and (\"MILITARY\" in card_type.flags):\n nb_military_worlds_all = nb_military_worlds_all + 1\n nb_military_worlds[card_type.cost].append(card)\n\n if (card_type.type == TYPE_WORLD) and (\"WINDFALL\" in card_type.flags):\n nb_worlds_windfall[card_type.goodtype] = (\n nb_worlds_windfall[card_type.goodtype] + 1\n )\n nb_worlds_windfall_total = nb_worlds_windfall_total + 1\n\n if (\n (card_type.type == TYPE_WORLD)\n and (len(card_type.powers) > 0)\n and (card_type.goodtype in [GOODTYPE_NOVELTY, \"RARE\", \"GENE\", \"ALIEN\"])\n and (\"WINDFALL\" not in card_type.flags)\n ):\n if card_type.goodtype != \"ANY\":\n nb_worlds_produce[card_type.goodtype] = (\n nb_worlds_produce[card_type.goodtype] + 1\n )\n nb_worlds_produce_total = nb_worlds_produce_total + 1\n\n if \"MILITARY\" not in card_type.flags:\n nb_cost[card_type.cost].append(card)\n\n if \"START\" in card_type.flags:\n nb_starts = nb_starts + 1\n\n # for c in nb_cost[1]:\n # print(f\" {c['type'].name} :\\t\\t\\t {c['type'].type} \")\n\n assert 5 == nb_base\n # assert 22 == len(nb_military_worlds)\n # assert 37 == nb_no_military_worlds\n assert 12 == nb_dev_6\n assert 38 == nb_dev_no_6\n\n assert 23 == 
nb_military_worlds_all\n assert len(nb_military_worlds[1]) == 6\n assert len(nb_military_worlds[2]) == 7\n assert len(nb_military_worlds[3]) == 3\n assert len(nb_military_worlds[4]) == 2\n\n assert 25 == nb_worlds_windfall_total\n assert 5 == nb_worlds_windfall[GOODTYPE_NOVELTY]\n assert 7 == nb_worlds_windfall[GOODTYPE_RARE]\n assert 7 == nb_worlds_windfall[GOODTYPE_GENE]\n assert 6 == nb_worlds_windfall[GOODTYPE_ALIEN]\n\n assert 21 == nb_worlds_produce_total\n assert 9 == nb_worlds_produce[GOODTYPE_NOVELTY]\n assert 6 == nb_worlds_produce[GOODTYPE_RARE]\n assert 4 == nb_worlds_produce[GOODTYPE_GENE]\n assert 2 == nb_worlds_produce[GOODTYPE_ALIEN]\n\n assert len(nb_cost[0]) == 2\n assert len(nb_cost[1]) == 18\n assert len(nb_cost[2]) == 23\n assert len(nb_cost[3]) == 14\n assert len(nb_cost[4]) == 13\n assert len(nb_cost[5]) == 7\n assert len(nb_cost[6]) == 14\n\n assert 114 == len(cards)\n\n assert nb_starts == 5\n\n\ndef test_get_subset():\n CARD_TYPES = load_file(\"cards.txt\")\n cards = build_base_set_1st_edition(CARD_TYPES)\n starts = get_subset(\"START\", cards)\n assert len(cards) == 109\n assert len(starts) == 5\n","repo_name":"damiencarol/rftg","sub_path":"tests/card_test.py","file_name":"card_test.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10257385644","text":"\nimport pandas as pd\nimport os\nimport re\n\ntotal = sum(1 for line in open('data/train_set_0520.csv'))\ntrain_num=int(total*1/2)\ndev_num=int(total*1/4)\n\ndef clear(obj):\n text= re.sub(r'[^A-Za-z0-9]+',' ',obj)\n return re.sub(r'^(\\s+)|(\\s+)$', '', text)\n\n# 切分数据——训练集\ntrain_data = pd.read_csv('data/train_set_0520.csv', encoding='utf-8',nrows=train_num)\nwith open('data/train.tsv', 'w', encoding='utf-8') as train_f:\n for line in train_data.values:\n line[5]=str(line[5]).replace('\\n','')\n train_f.write((str(line[6])+'\\t'+clear(str(line[5]))+'\\n'))\ntrain_f.close()\n\n# 切分数据——验证集\ndev_data = pd.read_csv('data/train_set_0520.csv', encoding='utf-8',header=train_num+1,nrows=dev_num)\nwith open('data/dev.tsv', 'w', encoding='utf-8') as dev_f:\n for line in dev_data.values:\n line[5]=str(line[5]).replace('\\n','')\n dev_f.write((str(line[6])+'\\t'+clear(str(line[5]))+'\\n'))\ndev_f.close()\n\n# 切分数据——测试集\ntest_data = pd.read_csv('data/train_set_0520.csv', encoding='utf-8',header=train_num+dev_num+1)\nwith open('data/test.tsv', 'w', encoding='utf-8') as test_f:\n for line in test_data.values:\n line[5]=str(line[5]).replace('\\n','')\n test_f.write((str(line[6])+'\\t'+clear(str(line[5]))+'\\n'))\ntest_f.close()\n\n\n","repo_name":"CodeUp-Yang/sentence-classifier","sub_path":"bert/train_set.py","file_name":"train_set.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72878949608","text":"# link : https://leetcode.com/problems/convert-an-array-into-a-2d-array-with-conditions/description/\n# author : Mohamed Ibrahim\n\nclass Solution:\n def findMatrix(self, nums: List[int]) -> List[List[int]]:\n \n res , vis = collections.defaultdict(list),[[] for i in range(len(nums)+1)]\n for indx,val in enumerate(nums):\n l = len(vis[val])\n res[l].append(val)\n vis[val].append(indx)\n return list(res.values())\n \n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"LeetCode/2610. Convert an Array Into a 2D Array With Conditions.py","file_name":"2610. 
Convert an Array Into a 2D Array With Conditions.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"31004435558","text":"\"\"\"\nFF example output:\n\n[...]\n\nff: found legal plan as follows\n\nstep 0: UP F0 F1\n 1: BOARD F1 P0\n 2: DOWN F1 F0\n 3: DEPART F0 P0\n\n\ntime spent: 0.00 seconds instantiating 4 easy, 0 hard action templates\n 0.00 seconds reachability analysis, yielding 4 facts and 4 actions\n 0.00 seconds creating final representation with 4 relevant facts\n 0.00 seconds building connectivity graph\n 0.00 seconds searching, evaluating 5 states, to a max depth of 2\n 0.00 seconds total time\n\"\"\"\n\nimport re\n\nfrom lab.parser import Parser\n\n\ndef error(content, props):\n if props[\"planner_exit_code\"] == 0:\n props[\"error\"] = \"plan-found\"\n else:\n props[\"error\"] = \"unsolvable-or-error\"\n\n\ndef coverage(content, props):\n props[\"coverage\"] = int(props[\"planner_exit_code\"] == 0)\n\n\ndef get_plan(content, props):\n # All patterns are parsed before functions are called.\n if props.get(\"evaluations\") is not None:\n props[\"plan\"] = re.findall(r\"^(?:step)?\\s*\\d+: (.+)$\", content, re.M)\n\n\ndef get_times(content, props):\n props[\"times\"] = re.findall(r\"(\\d+\\.\\d+) seconds\", content)\n\n\ndef trivially_unsolvable(content, props):\n props[\"trivially_unsolvable\"] = int(\n \"ff: goal can be simplified to FALSE. No plan will solve it\" in content\n )\n\n\nclass FFParser(Parser):\n def __init__(self):\n super().__init__()\n self.add_pattern(\n \"node\", r\"node: (.+)\\n\", type=str, file=\"driver.log\", required=True\n )\n self.add_pattern(\n \"planner_exit_code\",\n r\"run-planner exit code: (.+)\\n\",\n type=int,\n file=\"driver.log\",\n )\n self.add_pattern(\"evaluations\", r\"evaluating (\\d+) states\")\n self.add_function(error)\n self.add_function(coverage)\n self.add_function(get_plan)\n self.add_function(get_times)\n self.add_function(trivially_unsolvable)\n","repo_name":"aibasel/lab","sub_path":"examples/ff/ff_parser.py","file_name":"ff_parser.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"43999282175","text":"import numpy as np\nfrom math import sin,cos,exp\nimport matplotlib.pyplot as plt\nimport pylab as pl\n\nfrom cc.plotting import Plotting2\n\n\n\ndef expDensFunc(r,rSw,exponent):\n\n return exp(-(r/rSw)**exponent)\n\n\n\n@np.vectorize\ndef densityFunc(r,theta,A,B,C,D,E,F,rMin,rSw):\n\n exponent = -B*(1+C*(sin(theta)**F)*(expDensFunc(r,rSw,D)/\\\n expDensFunc(rMin,rSw,D)))\n rho = (r/rMin)**exponent\n rho = rho * (1+A*((1-cos(theta))**F)*(expDensFunc(r,rSw,E)/\\\n expDensFunc(rMin,rSw,E)))\n\n return rho\n\n\n\ndef plotDens(A,B,C,D,E,F,rPlot,rMin,rSw,nRad,nTheta,filename=None,\\\n extension='pdf',landscape=0,show=0,figsize=(5.5, 10)):\n \n #-- Define the radial and angular grids\n R = np.logspace(np.log10(rMin), np.log10(rPlot), num=nRad, endpoint=True)\n Theta = np.arange(0, np.pi/2.0+1.0/float(nTheta), (np.pi/2.0)/(nTheta))\n R,Theta = np.meshgrid(R,Theta)\n \n #-- Calculate the Meixner density distribution\n rho = densityFunc(R,Theta,A,B,C,D,E,F,rMin,rSw)\n\n #-- Figure consists of two subplots, one with the density map, and one with\n # profiles at 0 and 90 degrees.\n fig = plt.figure(figsize=figsize)\n \n #-- Make the color scale density plot\n ax = fig.add_subplot(211)\n 
pl.pcolor(R*np.sin(Theta),R*np.cos(Theta),np.log10(rho/rho.max()))\n pl.pcolor(-R*np.sin(Theta),R*np.cos(Theta),np.log10(rho/rho.max()))\n pl.pcolor(-R*np.sin(Theta),-R*np.cos(Theta),np.log10(rho/rho.max()))\n pl.pcolor(R*np.sin(Theta),-R*np.cos(Theta),np.log10(rho/rho.max()))\n fig.subplots_adjust(right=0.8)\n fig.subplots_adjust(bottom=0.1)\n cbar_ax = fig.add_axes([0.85, 0.54, 0.05, 0.36])\n pl.colorbar(cax=cbar_ax)\n \n #-- Add density distribution at 0 and 90 degrees, and a r^-2 distribution\n ax2 = fig.add_subplot(212)\n ax2.plot(np.log10(R[0]),np.log10(rho[0]/rho[0].max()),'black',\\\n lw=2,marker='x',label=r'$\\theta = 0^\\circ$')\n ax2.plot(np.log10(R[-1]),np.log10(rho[-1]/rho[-1].max()),'magenta',\\\n lw=2,marker='|', label=r'$\\theta = 90^\\circ$')\n ax2.plot(np.log10(R[0]),np.log10(rMin*rMin/(R[0]*R[0])),'green',\\\n lw=2,label=r'$r^{-2}$')\n ax2.legend()\n label = r'$\\rho_0[r_{\\rm min}] / \\rho_{90}[r_{\\rm min}] = $'+\\\n '{:10.3e}'.format(rho[0][0]/rho[-1][0])\n ax2.text(0.25,1.02,label,transform=ax2.transAxes)\n\n \n if filename: filename = Plotting2.saveFig(filename,extension,landscape)\n if show: pl.show()\n \n return filename\n\n\n\nif __name__ == '__main__':\n\n A = 100.0\n B = 2.0\n C = 1.0\n D = 0.5\n E = 0.0\n F = 2.0\n rPlot = 1E16\n rMin = 1E13\n rSw = 5E13\n nTheta = 50\n nRad = 50\n\n plotDens(A,B,C,D,E,F,rPlot,rMin,rSw,nRad,nTheta,show=1)\n","repo_name":"IvS-KULeuven/ComboCode","sub_path":"cc/plotting/PlotMeixner.py","file_name":"PlotMeixner.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6422586144","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/10/24 12:36\n# @Author : YuChou\n# @Site : \n# @File : QueryDeri.py\n# @Software: PyCharm\nimport csv\nimport os\nimport re\n#更改文件名字\ndef reName():\n path = \"E://scripts//4//pick-up-sku//\"\n csvList = os.listdir(path)\n for csv in csvList:\n l = csv.split('-')\n l[7] = \"400\" + l[7]\n newName = path + \"-\".join(l)\n oldName = path + csv\n os.rename(oldName, newName)\n# reName()\n#比较两个文件是否一致\ndef compileFile():\n erpPath=\"E://4//log.csv//log.csv\"\n csvPath=\"E://4//pick-up-sku//\"\n notExist=[]\n csvCaseList=[]\n for line in os.listdir(csvPath):\n csvCaseList.append(line.split('-')[7])\n with open(erpPath,\"r\",encoding=\"utf8\") as f:\n reads=csv.reader(f)\n rows=[row for row in reads]\n for j in rows[1:]:\n if j[0] not in csvCaseList:\n notExist.append(j[0])\n print(\"下列csv文件无法在log里找到ERP: \",end=\"\")\n print(notExist)\n return csvCaseList\ndef reLog():\n newData=[]\n\n\n with open(\"E://scripts//4//log.csv//log1.csv\", \"w\", encoding=\"utf8\") as k:\n writer = csv.writer(k)\n with open(\"E://scripts//4//log.csv//log.csv\", \"r\", encoding=\"utf8\") as f:\n reads = csv.reader(f)\n rows = [row for row in reads]\n newData.append(tuple(rows[0]))\n for j in rows[1:]:\n j[0] = \"400\" + j[0]\n print(j)\n writer.writerow(j)\n\nreLog()\n\n","repo_name":"mrzhouyu/ServerPr","sub_path":"QueryDeri.py","file_name":"QueryDeri.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70580758570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 14 23:41:03 2018\n\n@author: KEEL\n\"\"\"\n\n\nimport os\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nimport glob\n\nIMG_SIZE = 28\nIMG_PIXELS = 
IMG_SIZE*IMG_SIZE*3\n\n#----------------------------------------------------------------------\n#画像をNumpy配列に変換する\n# 画像のあるディレクトリ\ntrain_img_dirs = ['KizunaAI','MiraiAkari','Nekomasu','Shiro','KaguyaLuna','Cafeno-Zombiko','DD','Fuji-Aoi',\n 'Fujisaki-Yua','hoonie','Kurumi-chan','MIDI','Miial','Mochi-Hiyoko','Moscowmule','MyuMyu',\n 'Neets','Nemu','Nora-cat','Raiden-Kasuka', 'Suzuki-Secil', 'Todoki-Uka', 'Tokinosora', 'Umakoshi-Kentaro']\nCLASSES_NUM = len(train_img_dirs)\n\nxml_path = \"./lbpcascade_animeface.xml\"\nout_path = \"./face/\"\ndef faceDetect(img_path):\n classifier = cv2.CascadeClassifier(xml_path)\n \n img_count = 1\n face_imgs = []\n #for img_path in img_list:\n org_img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n gray_img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n face_points = classifier.detectMultiScale(gray_img, \\\n scaleFactor=1.2, minNeighbors=2, minSize=(1,1))\n\n for points in face_points:\n \n x, y, width, height = points\n\n dst_img = org_img[y:y+height, x:x+width]\n dst_img = cv2.resize(dst_img, (IMG_SIZE,IMG_SIZE))\n face_imgs.append(dst_img)\n\n face_img = cv2.rectangle(org_img, (x,y), (x+width,y+height), (0, 0, 0), 2)\n new_img_name = out_path + str(img_count) + 'face.jpg'\n cv2.imwrite(new_img_name, face_img)\n \n img_count += 1\n print(img_count)\n if img_count != 1:\n return face_points[0]\n\n\ndef inference(images_placeholder, keep_prob):\n \"\"\" モデルを作成する関数\n\n 引数: \n images_placeholder: inputs()で作成した画像のplaceholder\n keep_prob: dropout率のplace_holder\n\n 返り値:\n cross_entropy: モデルの計算結果\n \"\"\"\n W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 3, 32], stddev=0.1),name='W_conv1')\n b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]),name='b_conv1')\n\n x_image = tf.reshape(images_placeholder, [-1,IMG_SIZE,IMG_SIZE,3])\n\n h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)\n h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n x_image = tf.reshape(images_placeholder, [-1,IMG_SIZE,IMG_SIZE,3])\n\n W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1),name='W_conv2')\n b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]),name='b_conv2')\n h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)\n h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1),name='W_fc1')#変更元は7*7*64\n b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]),name='b_fc1')\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])#変更元は7*7*64\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n W_fc2 = tf.Variable(tf.truncated_normal([1024, CLASSES_NUM], stddev=0.1),name='W_fc2')\n b_fc2 = tf.Variable(tf.constant(0.1, shape=[CLASSES_NUM]),name='b_fc2')\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n #cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))\n return y_conv\n\n\n \nimages_placeholder = tf.placeholder(\"float\", shape=(None, IMG_PIXELS))\nkeep_prob = tf.placeholder(\"float\")\n\ninit = tf.global_variables_initializer()\n\nlogits = inference(images_placeholder, keep_prob)\n\nsess = tf.InteractiveSession()\nsess.run(init)\n\nsaver = tf.train.Saver()\nsaver = tf.train.import_meta_graph('./Model/model.ckpt.meta')#注意\nsaver.restore(sess, \"./Model/model.ckpt\")\n\ninput_path = './test_img/'\nfilename = glob.glob(input_path + '*.jpg')\n\nfor img_path in 
input_path:\n frame = cv2.imread(img_path)\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n#顔の検出\ndets = faceDetect(filename)\n \nif not isinstance(dets,type(None)):\n x, y, width, height = dets\n image = frame[y:y+height, x:x+width]\n cv2.rectangle(frame, (x,y), (x+width, y+height), (0, 0, 0), 4)\n cv2.imwrite(filename, frame)\n cv2.imshow(\"1\",image)\n cv2.waitKey(0)\n img = cv2.resize(img.copy(), (28, 28))\n ximage = []\n ximage.append(img.flatten().astype(np.float32)/255.0)\n ximage = np.asarray(ximage)\n\n print(ximage.shape)\n pred = np.argmax(logits.eval(session=sess,feed_dict={ images_placeholder : ximage, keep_prob: 1.0 }))\n print(pred)\n","repo_name":"keel-210/face_detect_youtuber","sub_path":"label_test2.py","file_name":"label_test2.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"44804665488","text":"from django.urls import path\nfrom rest_framework.authtoken.views import obtain_auth_token\n\nfrom . import views\n\nurlpatterns = [\n path('connection/', views.RootView.as_view(), name='root'),\n\n # REST Interface\n path('register/', views.RegisterView.as_view(), name='register'),\n path('login/', obtain_auth_token, name='login'),\n path('extract/', views.ExtractorEndpoint.as_view(), name='extractor'),\n path('logout/', views.LogoutView.as_view(), name='logout'),\n\n # Web app\n path('', views.WebView.as_view(get_template=\"index.html\", post_template=\"result.html\"))\n]\n","repo_name":"axbg/yuz","sub_path":"yuz/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74466496808","text":"import unittest\nimport unittest.mock as um\nimport tempfile\nfrom src.privacy_analysis.system_analysis.system_privacy_package_upgrades import SystemPrivacyPackageUpgrades\n\n\n# Getting a \"ResourceWarning: unclosed file\" warning, but this is all kosher according to the docs and examples\nclass TestSystemPrivacyEncryption(unittest.TestCase):\n def setUp(self) -> None:\n self.mock_stdout = tempfile.TemporaryFile()\n\n @um.patch(\"src.dashboard.alerts.alert.Alert.alert\")\n @um.patch(\"subprocess.Popen\")\n def test_package_upgrades_available(self, mock_popen, mock_alert):\n self.mock_stdout.write(b'HELLO')\n self.mock_stdout.seek(0)\n mock_popen.return_value.stdout = self.mock_stdout\n SystemPrivacyPackageUpgrades()()\n mock_popen.return_value.stdout.close()\n\n self.assertEqual(1, mock_alert.call_count)\n\n @um.patch(\"src.dashboard.alerts.alert.Alert.alert\")\n @um.patch(\"subprocess.Popen\")\n def test_package_upgrades_absent(self, mock_popen, mock_alert):\n self.mock_stdout.write(b'')\n self.mock_stdout.seek(0)\n mock_popen.return_value.stdout = self.mock_stdout\n SystemPrivacyPackageUpgrades()()\n mock_popen.return_value.stdout.close()\n\n self.assertEqual(0, mock_alert.call_count)\n\n def tearDown(self) -> None:\n self.mock_stdout.close()\n\n\n# Source:\n# https://blog.samuel.domains/blog/programming/how-to-mock-stdout-runtime-attribute-of-subprocess-popen-python\n\n","repo_name":"briweinstein/tinyHIPPO","sub_path":"tests/privacy_tests/system_tests/test_system_privacy_package_upgrades.py","file_name":"test_system_privacy_package_upgrades.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"73856207528","text":"import numpy as np\nimport 
matplotlib.pyplot as plt\n\nB = 1.5\nBPositive = 2.5\nBNegative = 0.5\ngyroRatio = 42.6\nw = gyroRatio * B\nwPositive = gyroRatio * BPositive\nwNegative = gyroRatio * BNegative\nT1 = 490/1000\nT2 = 43/1000\nt = np.arange(start=0, stop=10, step=0.0001)\n\nomega = 2*np.pi*w*t\nomegaPositive = 2*np.pi*wPositive*t + np.pi/8\nomegaNegative = 2*np.pi*wNegative*t - np.pi/8\n\n\nMx = np.exp(-1*t/T2)*np.sin(omega)\nMxPositive = np.exp(-1*t/T2)*np.sin(omegaPositive)\nMxNegative = np.exp(-1*t/T2)*np.sin(omegaNegative)\n\n\nMy = np.exp(-1*t/T2)*np.cos(omega)\nMyPositive = np.exp(-1*t/T2)*np.cos(omegaPositive)\nMyNegative = np.exp(-1*t/T2)*np.cos(omegaNegative)\n\n\nMxy = np.sqrt(Mx**2 + My**2)\nMxyPositive = np.sqrt(MxPositive**2 + MyPositive**2)\nMxyNegative = np.sqrt(MxNegative**2 + MyNegative**2)\n\nplt.figure(1)\nplt.plot(t[:1000], Mx[:1000], 'r', label=\"No Noise\")\nplt.plot(t[:1000], MxPositive[:1000], 'b', label=\"Positive Noise\")\nplt.plot(t[:1000], MxNegative[:1000], 'y', label=\"Negative Noise\")\nplt.title(\"$M_x/M_o$ vs time\")\nplt.xlabel(\"time\")\nplt.ylabel(\"$M_x/M_o$\")\nplt.legend()\n\n\nplt.figure(2)\nplt.plot(t[:1000], My[:1000], 'r', label=\"No Noise\")\nplt.plot(t[:1000], MyPositive[:1000], 'b', label=\"Positive Noise\")\nplt.plot(t[:1000], MyNegative[:1000], 'y', label=\"Negative Noise\")\nplt.title(\"$M_y/M_o$ vs time\")\nplt.xlabel(\"time\")\nplt.ylabel(\"$M_y/M_o$\")\nplt.legend()\n\nplt.figure(3)\nplt.plot(Mx, My, 'r', label=\"No Noise\")\nplt.plot(MxPositive, MyPositive, 'b', label=\"Positive Noise\")\nplt.plot(MxNegative, MyNegative, 'y', label=\"Negative Noise\")\nplt.title(\"$M_{xy}$ in X-Y Plane\")\nplt.xlabel(\"$M_x/M_o$\")\nplt.ylabel(\"$M_y/M_o$\")\nplt.legend()\n\nplt.show()","repo_name":"AhmedKhaled8/MRITask2","sub_path":"noiseEffect.py","file_name":"noiseEffect.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4755267718","text":"#chat gpt version \nimport sys, os, socket\nfrom datetime import datetime\nfrom threading import Thread\nimport netifaces as nt\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import pyqtSignal, QThread, Qt\nfrom PyQt5.QtGui import QImage, QPixmap\nfrom PyQt5.QtWidgets import QLabel, QWidget, QHBoxLayout, QApplication\nimport pigpio as pp\n\nport = 7500\n\nclass VideoRec(QThread):\n new_frm = pyqtSignal(bytes)\n Log_signal = pyqtSignal(str)\n \n def __init__(self, port):\n super().__init__()\n self.port = port\n self.is_running = True\n \n def run(self):\n self.Log_signal.emit(\"Starting Socket\")\n #self.log(\"starting socket\")\n buffer = b''\n tunnel = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.Log_signal.emit(\"after socket\")\n #self.log(\"after socket\")\n # host = socket.gethostname()\n # ip = socket.gethostbyname(host)\n \n # self.log(\"sup\")\n # self.log(\"socket ip {0} and hostname {1}\".format(ip, host))\n try:\n wi_ip = nt.ifaddresses('wlan0')[nt.AF_INET][0]['addr']\n self.Log_signal.emit(\"socket ip {0}\".format(wi_ip))\n #self.log(\"socket ip {0}\".format(wi_ip))\n tunnel.bind((wi_ip, self.port))\n tunnel.listen(3)\n self.Log_signal.emit(\"Only recieving data\")\n clntunnel, addrs = tunnel.accept() \n self.Log_signal.emit(\"coneccted to ip: {0} on port: {1}\".format(addrs[0], addrs[1] ))\n while self.is_running:\n try:\n data = clntunnel.recv(1024) # check recv(1024).decode(\"utf-8\")\n if not data: \n #self.Log_signal.emit(\"no data\")\n break\n buffer += data\n \n while True:\n 
#self.Log_signal.emit(\"inside while\")\n #frame, buffer = self.jpeg_EoL(buffer)\n frame, buffer = self.extract_jpeg(buffer)\n #self.log(\"Checking\")\n if frame is None:\n #self.Log_signal.emit(\"no data on frame\")\n break\n self.new_frm.emit(frame)\n #self.Log_signal.emit(\"end of while\")\n #break \n except socket.error as e:\n self.Log_signal.emit(\"Socket error: {0}\".format(e))\n #self.log(\"Someting went wrong, no conecction\")\n break\n except socket.error as e:\n self.Log_signal.emit(\"Socket error: {0}\".format(e))\n \n # this one works fine \n # def run(self):\n # self.Log_signal.emit(\"Starting Socket\")\n # #self.log(\"starting socket\")\n # buffer = b''\n # tunnel = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # self.Log_signal.emit(\"after socket\")\n # #self.log(\"after socket\")\n # # host = socket.gethostname()\n # # ip = socket.gethostbyname(host)\n \n # # self.log(\"sup\")\n # # self.log(\"socket ip {0} and hostname {1}\".format(ip, host))\n # try:\n # wi_ip = nt.ifaddresses('wlan0')[nt.AF_INET][0]['addr']\n # self.Log_signal.emit(\"socket ip {0}\".format(wi_ip))\n # #self.log(\"socket ip {0}\".format(wi_ip))\n # tunnel.bind((wi_ip, self.port))\n # tunnel.listen(3)\n \n # clntunnel, addrs = tunnel.accept() \n # self.Log_signal.emit(\"coneccted to ip: {0} on port: {1}\".format(addrs[0], addrs[1] ))\n # #self.log(\"conected to ip: {0} on port: {1}\".format( addrs[0],addrs[1] ) )\n # while self.is_running:\n # try:\n # data = clntunnel.recv(1024) # check recv(1024).decode(\"utf-8\")\n # if not data: \n # self.Log_signal.emit(\"no data\")\n # break\n # buffer += data\n # #if not buffer:\n # # self.log(\"No data on buffer\")\n # # else:\n # # self.log(\"new data\")\n # #self.Log_signal.emit(\"before while\")\n # while True:\n # #self.Log_signal.emit(\"inside while\")\n # #frame, buffer = self.jpeg_EoL(buffer)\n # frame, buffer = self.extract_jpeg(buffer)\n # #self.log(\"Checking\")\n # if frame is None:\n # #self.Log_signal.emit(\"no data on frame\")\n # break\n # else: \n # try:\n # send_text = \"ESP_OK\".encode(\"utf-8\")\n # clntunnel.send(send_text)\n # except socket.error as e:\n # self.Log_signal.emit(\"cannont send confirmation error: {0}\".format(e))\n # break\n \n # response = clntunnel.recv(10).decode(\"utf-8\", errors=\"replace\")\n # #response = response.decode()\n # # check encoding on the esp32cam\n # if response is None:\n # self.Log_signal.emit(\"not recieved confirm\")\n # break\n # self.Log_signal.emit(response)\n # if \"ESP_OK\" in response:\n # self.Log_signal.emit(\"Ok: {0}\".format(response))\n # self.new_frm.emit(frame)\n # if \"ESP_ERROR\" in response:\n # self.Log_signal.emit(\"confirmation error: {0}\".format(response))\n # break\n # self.Log_signal.emit(\"end of while\")\n # #break \n # except socket.error as e:\n # self.Log_signal.emit(\"Socket error: {0}\".format(e))\n # #self.log(\"Someting went wrong, no conecction\")\n # break\n # except socket.error as e:\n # self.Log_signal.emit(\"Socket error: {0}\".format(e))\n \n def stop(self):\n self.is_running = False\n \n def extract_jpeg(self, buffer):\n start = buffer.find(b'\\xff\\xd8')\n end = buffer.find(b'\\xff\\xd9')\n if start != -1 and end !=-1:\n jpeg = buffer[start:end+2]\n buffer = buffer[end+2:]\n return jpeg, buffer\n else:\n return None, buffer\n \n def jpeg_EoL(self, buffer):\n inicio = 0\n postn = buffer.find(b'\\n', inicio)\n if postn == -1:\n return None, buffer # not found\n \n if postn != -1:\n frame = buffer[inicio:postn]\n buffer = buffer[postn + 1:]\n return frame, 
buffer\n else:\n return None, buffer\n \n \nclass VideoStrRecv(QWidget):\n def __init__(self, port):\n super().__init__()\n self.cam1 = QLabel(\"HELLO\", alignment=Qt.AlignCenter) #cam1\n self.cam1.setStyleSheet(\"font-size:90px; font-weight:bold; color:Black; border: 1px solid black;\")\n self.Video = VideoRec(port)\n self.Video.new_frm.connect(self.update_frame)\n self.Video.Log_signal.connect(self.Logging)\n\n layout = QHBoxLayout(self)\n layout.addWidget(self.cam1)\n self.setLayout(layout)\n \n def start_this_shiat(self):\n self.log(\"starting thread\")\n self.Video.start()\n \n def update_frame(self, frame):\n self.log(\"update frame\")\n q_image = QImage.fromData(frame)\n pismap = QPixmap.fromImage(q_image)\n #pismap.scaled(300,226, Qt.KeepAspectRatio)\n self.cam1.setPixmap(pismap)\n self.cam1.setFixedWidth(800)\n self.cam1.setFixedHeight(600)\n \n def log(self, datalog):\n try:\n with open(\"LogTrilla2.txt\",\"a\") as file:\n file.write(\"{0} -- {1}\\n\".format(datetime.now().strftime(\"%H:%M %d-%m-%Y\"), datalog))\n except Exception as a:\n print(f\"Error on log file {a}\")\n \n def Logging(self, error):\n self.log(\"- {0}\".format(error))\n\n\nif __name__ == \"__main__\":\n print ('program start')\n app = QApplication([])\n w = VideoStrRecv(port)\n w.log(\"--------------\\nStarting Screen\")\n w.resize(1024,530) # X,Y\n #w.showFullScreen() # setfor later, fullscreen\n #w.setWindowFlag(Qt.FramelessWindowHint)\n w.setWindowTitle(\"You son of a biatch\")\n #w.setWindowIcon(QIcon('novideo.png'))\n w.show()\n w.start_this_shiat()\n sys.exit(app.exec_())","repo_name":"lightshadown/ServerEsp32cam","sub_path":"Server_esp32cam.py","file_name":"Server_esp32cam.py","file_ext":"py","file_size_in_byte":8816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"957342799","text":"import os\n# import pickle\nimport json\n\n# e.g.: returns 'p2_p0' from 'utilization_p2_p0.rpt'\ndef get_n_rpt(rpt_file):\n\treturn (rpt_file.split('utilization_')[1]).split('.')[0]\n\ndef main():\n\t# rpt_files = [f for f in os.listdir('.') if f.endswith('.rpt')]\n\t# # print(rpt_files)\n\t# n = len(rpt_files) + 1 # returns 10 when overlay_p10\n\t# # print(n)\n\t# rpt_files = ['utilization'+ str(i) +'.rpt' for i in range(2,n+1)] # sorted\n\n\trpt_files = [f for f in os.listdir('.') if f.endswith('.rpt')]\n\t# print(rpt_files)\n\t# filedata=''\n\tutil_dict = {}\n\t(num_clb, num_ram36, num_ram18, num_dsp) = (0, 0, 0, 0)\t\n\tfor rpt_file in rpt_files:\n\t\twith open(rpt_file, 'r') as file:\n\t\t\tfor line in file:\n\t\t\t\tif(line.startswith('| CLB LUTs')):\n\t\t\t\t\tnum_clb = line.split()[16] # 16 is magic number for \"Available\" \n\t\t\t\t\t# print(line.split()[16]) # 16 is magic number for \"Available\"\n\t\t\t\telif(line.startswith('| RAMB36')):\n\t\t\t\t\tnum_ram36 = line.split()[15] # 15 is magic number for \"Available\"\n\t\t\t\t\t# print(line.split()[15]) # 15 is magic number for \"Available\"\n\t\t\t\telif(line.startswith('| RAMB18')):\n\t\t\t\t\tnum_ram18 = line.split()[15] # 15 is magic number for \"Available\"\n\t\t\t\t\t# print(line.split()[15]) # 15 is magic number for \"Available\"\n\t\t\t\telif(line.startswith('| DSPs')):\n\t\t\t\t\tnum_dsp = line.split()[15] # 15 is magic number for \"Available\"\n\t\t\t\t\t# print(line.split()[15]) # 15 is magic number for \"Available\"\n\t\tutil_dict[get_n_rpt(rpt_file)] = (num_clb, num_ram36, num_ram18, num_dsp)\n\t\t# filedata = filedata + rpt_file + ': ' + str((num_clb, num_ram36, num_ram18, num_dsp)) + 
'\\n'\n\n\t# print(util_dict)\n\t# print(len(util_dict))\n\twith open('util_all_pre_blocked.json', 'w') as outfile:\n\t\tjson.dump(util_dict, outfile)\n\n\n\twith open('blocked_util.json', 'r') as infile:\n\t\tblocked_resource_count_dict = json.load(infile)\n\tprint(blocked_resource_count_dict)\n\n\tfor pblock_name in util_dict:\n\t\tnum_blocked_clb = blocked_resource_count_dict[pblock_name]['SLICE_LUTs']\n\t\tnum_blocked_ram36 = blocked_resource_count_dict[pblock_name]['RAMB36']\n\t\tnum_blocked_ram18_extra = blocked_resource_count_dict[pblock_name]['RAMB18_extra']\n\t\tnum_blocked_dsp = blocked_resource_count_dict[pblock_name]['DSP48E2']\n\n\t\tnum_clb = str(int(util_dict[pblock_name][0]) - int(num_blocked_clb))\n\t\tnum_ram36 = str(int(util_dict[pblock_name][1]) - int(num_blocked_ram36))\n\t\tnum_ram18 = str(int(util_dict[pblock_name][2]) - int(num_blocked_ram36)*2 - int(num_blocked_ram18_extra))\n\t\tnum_dsp = str(int(util_dict[pblock_name][3]) - int(num_blocked_dsp))\n\t\tutil_dict[pblock_name] = (num_clb, num_ram36, num_ram18, num_dsp) # rewrite util_dict reflecting blocked resources\n\tprint(util_dict)\n\twith open('util_all.json', 'w') as outfile:\n\t\tjson.dump(util_dict, outfile)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"icgrp/prflow_nested_dfx","sub_path":"common/script_src/parse_ovly_util.py","file_name":"parse_ovly_util.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"23372058249","text":"#!/usr/bin/env python3\n# angle analysis considering errors in angles and counts\n#\n# @haslbeck\n# 5 July\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import odr\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\n\n\n# read in data\ndata = pd.read_csv('../data/counts_angle.csv', skiprows = 1, header = 0, names = ['folder', 'angle', 'height', 'counts', 'time', 'angle_err'])\ndata = data.sort_values(by = ['angle']).reset_index()\n\ncounts = data['counts'].values\ntimes = data['time'].values\nangles = data['angle'].values\nangles_errs = data['angle_err'].values\nnorm_counts = counts /times\nnorm_counts_errs = np.sqrt(counts) / times\n\n\nprint(\n type(counts), len(counts),\n type(times), len(times),\n type(angles), len(angles),\n type(norm_counts), len(norm_counts),\n type(norm_counts_errs), len(norm_counts_errs),\n \n)\n\n\ndef func(x, a):\n return a * np.cos(x) ** 2 #+ c\n\n''' \n# Model object\nmodel = odr.Model(func)\n\n# Create a RealData object\ndata = odr.RealData(angles, norm_counts, sx=angles_errs, sy=norm_counts_errs) #FIXME\n\n# Set up ODR with the model and data and intial guess\nodr = odr.ODR(data, model, beta0=[0.1])\n\n# Run the regression.\nout = odr.run()\n\n#print fit parameters and 1-sigma estimates\npopt = out.beta\nperr = out.sd_beta\nprint('fit parameter 1-sigma error')\nprint('———————————–')\nfor i in range(len(popt)): print('%s +- %s'%(popt[i],perr[i]))\n\n# prepare confidence level curves\nnstd = 5. 
# to draw 5-sigma intervals\npopt_up = popt + nstd * perr\npopt_dw = popt - nstd * perr\n# fit line to draw\nx_fit = np.linspace(min(angles), max(angles), 100)\nfit = func(popt, x_fit)\nfit_up = func(popt_up, x_fit)\nfit_dw= func(popt_dw, x_fit)\n''' \n \ndef fit(func, xdata = angles, ydata = norm_counts, yerrs = norm_counts_errs, p0 = None):\n \"\"\" perform a chi2 fit \"\"\"\n \n popt, pcov = curve_fit(func, xdata, ydata, sigma = yerrs, absolute_sigma = True, p0 = p0)\n npars = len(popt)\n \n # goodness of the fit\n res = (ydata - func(xdata, *popt))\n chi2 = np.sum((res/yerrs)**2)\n chi2red = chi2/(len(xdata)-len(popt))\n \n \n # fit values\n print(20*'=')\n print('%s, par, par err\\n%s'%(func.__name__,20*'-'))\n print('chi2 %.2f chi2red %.2f'%(chi2,chi2red))\n for i in range(npars): print('p%i:\\t%.3f\\t%.3f'%(i,popt[i],np.sqrt(pcov[i][i])))\n print(20*'=')\n \n vals , errs = np.asarray([popt[i] for i in range(npars)]), np.asarray([np.sqrt(pcov[i][i]) for i in range(npars)])\n return vals , errs, chi2red\n \n'''\n# std fit\n#popt, pcov = curve_fit(func, angles, norm_counts)\npopt, pcov = curve_fit(func, angles, norm_counts, sigma = norm_counts_errs, absolute_sigma = True, p0 = [0.06])\nprint('I = %.3f'%popt[0])\nprint('I_err = %.3f'%np.sqrt(pcov[0][0]))\n'''\n\ndef cos2(x, a):\n return a * np.cos(x) ** 2 \n \ndef cos2plusb(x, a,b):\n return a * np.cos(x) ** 2 + b\n\nv1, e1, c1 = fit(cos2)\nv2, e2, c2 = fit(cos2plusb)\n\n\n\n\n\n#plot\nxplot = np.linspace(min(angles), max(angles), 100)\n\nplt.figure()\n#rcParams['font.size']= 20\nplt.errorbar(angles, norm_counts, yerr=norm_counts_errs, xerr=angles_errs, ecolor='k', fmt='ok')\n\n# fits\n\n\n\nplt.plot(xplot, cos2plusb(xplot,*v2), 'r', lw=2, label=\"$I_{0}~cos(x)^2 + c$ and 3(5)$\\sigma$, $\\chi^2_{red}$=%.1f\\n\"%c2 + \\\n \"$I_{0}$=(%.1f$\\pm$%.1f)$10^{-2}$ [$\\\\frac{N}{s}$]\\n\"%(v2[0]*100,e2[0]*100) + \\\n \"$c$ =(%.1f$\\pm$%.1f)$10^{-2}$ [$\\\\frac{N}{s}$]\"%(v2[1]*100,e2[1]*100))\nplt.fill_between(xplot, cos2plusb(xplot,*(v2-3*e2)), cos2plusb(xplot,*(v2+3*e2)),\\\n color=\"red\",alpha=0.25,edgecolor=\"r\",hatch='||')\n\nplt.fill_between(xplot, cos2plusb(xplot,*(v2-5*e2)), cos2plusb(xplot,*(v2+5*e2)), \\\n color=\"red\",alpha=0.2,edgecolor=\"r\")\n \n \n\n\n\n\nplt.plot(xplot, cos2(xplot,*v1), 'b', lw=2, label=\"$I_{0} ~cos(x)}^2$ and 3(5)$\\sigma$, $\\chi^2_{red}$=%.1f\\n\"%c1 + \\\n \"$I_{0}$=(%.1f$\\pm$%.1f)$10^{-2}$ [$\\\\frac{N}{s}$]\"%(*v1*100,*e1*100))\nplt.fill_between(xplot, cos2(xplot,*(v1-3*e1)), cos2(xplot,*(v1+3*e1)), color='blue',alpha=.25, hatch = '---')\n\nplt.fill_between(xplot, cos2(xplot,*(v1-5*e1)), cos2(xplot,*(v1+5*e1)), color='blue',alpha=.2)\n \nplt.plot(xplot, cos2plusb(xplot,*v2), 'r' , lw=2)\n\n\nplt.ylim(-.005,0.09)\n#plt.xlim(-.05,1.8)\n\n\n\n\nplt.legend(loc='best',fontsize=10, frameon = False)\nplt.xlabel('Zenith angle [Radians]', fontsize=12)\nplt.ylabel('Count rate [$\\\\frac{N}{s}$ $\\pm$ $\\\\frac{\\sqrt{N}}{s}$] ', fontsize=12)\n\nplt.savefig('../data/angle_fit.png',dpi=300, bbox_inches='tight')\n\n\n\n# close the plot when pressing a key\nplt.draw()\nplt.pause(1)\ninput('press any key to close')\nplt.close('all')\nprint('goodbye')\n\n ","repo_name":"cfuselli/Nikhef-Project-2021","sub_path":"Results/share/angle_fit.py","file_name":"angle_fit.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42308450399","text":"# Yield from\n\ndef gen1():\n print('COMEÇOU GEN 1')\n yield 1\n yield 2\n yield 
3\n yield 4\n yield 5\n print('ACABOU GEN 1')\n\n# def gen2():\n# yield from gen1() # Começa já do \"gen1\"\n# yield 6\n# yield 7\n# yield 8\n\n# Assim só precisa pegar o \"gen2\"\n\n# g = gen2()\n\n# for numero in g:\n# print(numero)\n\n# ____________________________________________________________________\n\n\n# Mas para ser feito de maneira mais dinâmica apenas fazer isto :\n\ndef gen2(gen=None):\n print('COMEÇOU GEN 2')\n # yield from gen() # Irá começar da função que selecionar # támbem pode ser feito assim : yield from gen\n if gen is not None:\n yield from gen\n yield 6\n yield 7\n yield 8\n print('ACABOU GEN 2')\n\ndef gen3():\n print('COMEÇOU GEN 3')\n yield 10\n yield 20\n yield 30\n print('ACABOU GEN 3')\n\n\ng1 = gen2(gen1())\ng2 = gen2(gen3())\ng3 = gen2()\n\nfor numero in g1:\n print(numero)\nprint()\nfor numero in g2:\n print(numero)\nprint()\nfor numero in g3:\n print(numero)\nprint()","repo_name":"CidineiPuto/aulaDePython","sub_path":"aula98.py","file_name":"aula98.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7704626647","text":"from argparse import ArgumentParser, FileType\nfrom sys import stdin\n\nfrom pprint import pprint\nfrom dataclasses import dataclass\nimport itertools as it\nfrom collections import defaultdict\nfrom math import prod\nimport bisect\n\nimport networkx as nx\n\n\ndef intersects_1d(l1, l2):\n a1, a2 = l1\n b1, b2 = l2\n\n return max(a1, b1) <= min(a2, b2)\n\n\n@dataclass(frozen=True)\nclass Cuboid:\n x: (int, int)\n y: (int, int)\n z: (int, int)\n fill: bool\n\n @property\n def dims(self):\n for n in \"xyz\":\n yield getattr(self, n)\n\n def fully_encloses(self, other):\n return all(\n a1 <= b1 <= b2 <= a2 for (a1, a2), (b1, b2) in zip(self.dims, other.dims)\n )\n\n def __contains__(self, pt):\n raise NotImplementedError\n\n def intersects_edge(self, edge):\n return all(it.starmap(intersects_1d, zip(zip(*edge), self.dims)))\n\n @property\n def corners(self):\n yield from it.product(*self.dims)\n\n @property\n def edges(self):\n for c1, c2 in it.combinations(self.corners, 2):\n if sum(a == b for (a, b) in zip(c1, c2)) != 2:\n continue\n\n yield tuple(sorted([c1, c2]))\n\n def intersects(self, other):\n return (\n self.fully_encloses(other)\n or other.fully_encloses(self)\n or any(\n a.intersects_edge(e)\n for (a, b) in it.permutations([self, other])\n for e in b.edges\n )\n )\n\n def chop_along(self, axis, value, d):\n assert d in {1, -1}\n d1, d2 = {1: (0, d), -1: (d, 0)}[d]\n\n n1 = dict(zip(\"xyz\", self.dims))\n n2 = n1.copy()\n\n a = getattr(self, axis)\n assert a[0] <= value + d1 < value + d2 <= a[1]\n n1[axis] = (a[0], value + d1)\n n2[axis] = (value + d2, a[1])\n\n c1, c2 = (Cuboid(*(n[k] for k in \"xyz\"), self.fill) for n in (n1, n2))\n\n assert not c1.intersects(c2), f\"{c1} intersects with {c2}\"\n return [c1, c2]\n\n def split(self, other):\n if self.fully_encloses(other):\n return []\n\n # assert not self.fully_encloses(other)\n res = []\n\n for axis in \"xyz\":\n for end in [0, 1]:\n a = getattr(self, axis)\n b = getattr(other, axis)\n\n if not intersects_1d(a, b) or a[0] <= b[0] <= b[1] <= a[1]:\n continue\n\n cbs = None\n if end == 0 and a[1] < b[1]:\n cbs = other.chop_along(axis, a[1], 1)\n elif end == 1 and b[0] < a[0]:\n cbs = other.chop_along(axis, a[0], -1)\n\n if cbs is not None:\n (other,) = (c for c in cbs if self.intersects(c))\n res.extend(c for c in cbs if not self.intersects(c))\n assert not any(self.intersects(c) 
for c in res)\n\n # res.append(other)\n assert self.fully_encloses(other)\n assert not any(a.intersects(b) for a, b in it.combinations(res, 2))\n assert len(res) <= 6\n return res\n\n @property\n def volume(self):\n return prod(b - a + 1 for (a, b) in self.dims)\n\n\ndef parse(file):\n for line in file:\n line = line.strip()\n if not line:\n continue\n\n action, dims = line.split(\" \", 1)\n\n if action == \"on\":\n action = True\n elif action == \"off\":\n action = False\n else:\n raise ValueError(\"Unknown action\")\n\n ns = []\n for s in dims.split(\",\"):\n _, s = s.split(\"=\")\n ns.append(tuple(sorted([int(n) for n in s.split(\"..\")])))\n\n yield Cuboid(*ns, action)\n\n\ndef parse_dag(file, boundry=None):\n parsed = list(parse(file))\n sort_key = lambda c: c.x[1]\n in_order = sorted(parsed, key=sort_key)\n\n dag = nx.DiGraph()\n for cuboid in parsed:\n if boundry is not None and not boundry.fully_encloses(cuboid):\n continue\n dag.add_node(cuboid)\n\n i = bisect.bisect_left(in_order, cuboid.x[0], key=sort_key)\n while i < len(in_order) and cuboid.x[0] <= in_order[i].x[1]:\n other = in_order[i]\n i+= 1\n\n if other == cuboid or other not in dag:\n continue\n\n if cuboid.intersects(other):\n dag.add_edge(other, cuboid)\n\n return dag\n\n\ndef to_disjoint(G):\n res = []\n G = G.copy()\n while G.number_of_nodes() > 0:\n print(G.number_of_nodes())\n for node in [n for n, d in G.in_degree() if d == 0]:\n candidates = list(G.neighbors(node))\n\n G.remove_node(node)\n if len(candidates) == 0:\n res.append(node)\n continue\n\n split_on = candidates[0]\n\n new_nodes = split_on.split(node)\n\n G.add_nodes_from(new_nodes)\n G.add_edges_from(\n (a, b) for a in new_nodes for b in candidates if a.intersects(b)\n )\n\n return res\n\n\ndef solve(file, boundry=None):\n if boundry is not None:\n x = boundry\n boundry = Cuboid((-x, x), (-x, x), (-x, x), None)\n g = parse_dag(file, boundry=boundry)\n fills = to_disjoint(g)\n pprint(sorted(fills, key=lambda x: x.volume))\n\n res = sum(x.volume for x in fills if x.fill)\n print(res)\n\n\ndef getopts():\n opts = ArgumentParser()\n opts.add_argument(\"files\", nargs=\"*\", default=[stdin], type=FileType(\"r\"))\n opts.add_argument(\"--boundry\", default=None, type=int)\n return opts.parse_args()\n\n\ndef main():\n opts = getopts()\n for file in opts.files:\n print(solve(file, opts.boundry))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Synthetica9/AoC2021","sub_path":"day22/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22867188289","text":"\nclass node:\n def __init__(self, x):\n self.value = x\n self.left = None\n self.right = None\n\ndef getleaves(r: node):\n\n if not r:\n return []\n \n l = []\n q = [r]\n lv = []\n\n while q:\n cur_node = q.pop(0)\n\n l.append(cur_node.value)\n \n if cur_node.left == None and cur_node.right == None:\n lv.append(cur_node.value)\n\n if cur_node.left:\n q.append(cur_node.left)\n if cur_node.right:\n q.append(cur_node.right)\n \n return lv\n\n\ndef getleavesdfs(r:node):\n\n if not r:\n return []\n \n lv = []\n\n if r.left:\n lv = getleavesdfs(r.left)\n if r.left == None and r.right == None:\n lv.append(r.value)\n if r.right:\n lv = lv + getleavesdfs(r.right)\n \n return lv\n\ndef leafSimilar(root1: node, root2: node):\n \n if root1 == None and root2:\n return False\n if root2 == None and root1:\n return False\n \n #---bsf will not work becuse it doesn't preserver the order 
accross trees.\n #lv1 = getleaves(root1) \n #lv2 = getleaves(root2)\n lv1 = getleavesdfs(root1)\n lv2 = getleavesdfs(root2)\n\n if len(lv1) != len(lv2):\n return False\n \n for i in range(len(lv1)):\n if lv1[i] != lv2[i]:\n return False\n \n return True\n\n\n\nr = node(3)\nr.left = node(5)\nr.right = node(1)\n\n(r.left).left = node(6)\n(r.left).right = node(2)\n((r.left).right).left = node(7)\n((r.left).right).right = node(4)\n\n(r.right).left = node(9)\n(r.right).right = node(8)\n\nprint(getleaves(r.left))\nprint(getleavesdfs(r.right))\n\nprint(leafSimilar(r.left, r.right))\n\na = [i for i in range(10)]\nb = [i for i in range(10)]\n\nif a == b:\n print(a,b)\n\n\n\n","repo_name":"jcravener/PythonWorkroom","sub_path":"Leaf-SimilarTrees.py","file_name":"Leaf-SimilarTrees.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1094591866","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Conv_Block(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1):\n super(Conv_Block, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride)\n self.relu = torch.nn.LeakyReLU()\n self.bn = nn.BatchNorm2d(out_channels)\n \n def forward(self, x):\n x = self.conv(x)\n x = self.relu(x)\n x = self.bn(x)\n return x\n\nclass Dense_Block(nn.Module):\n def __init__(self, in_features, out_features):\n super(Dense_Block, self).__init__()\n self.fc = nn.Linear(in_features, out_features)\n self.relu = torch.nn.LeakyReLU()\n self.bn = nn.BatchNorm1d(out_features)\n \n def forward(self, x):\n x = self.fc(x)\n x = self.relu(x)\n x = self.bn(x)\n return x\n\n\nclass Generator_u2m(nn.Module):\n def __init__(self, outdim=500):\n super(Generator_u2m, self).__init__()\n self.conv1 = Conv_Block(1, 20, kernel_size=5)\n self.pool1 = nn.MaxPool2d(2, stride=2)\n self.conv2 = Conv_Block(20, 50, kernel_size=5)\n self.pool2 = nn.MaxPool2d(2, stride=2)\n self.drop = nn.Dropout()\n self.fc = Dense_Block(800, outdim)\n \n def forward(self, x):\n x = self.conv1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.pool2(x)\n x = x.view(x.size(0), -1)\n x = self.drop(x)\n x = self.fc(x)\n return x\n \nclass decoder(nn.Module):\n def __init__(self, task='u2m', outdim=500):\n super(decoder, self).__init__()\n self.fc = Dense_Block(outdim, 800)\n self.layer = nn.Sequential(\n nn.ConvTranspose2d(50,20,13,stride=1),\n nn.ReLU(),\n nn.BatchNorm2d(20),\n nn.ConvTranspose2d(20,1,13,stride=1),\n nn.ReLU())\n def forward(self, x):\n x = self.fc(x)\n x = x.view(x.size(0), 50, 4, 4)\n x = self.layer(x)\n return x\n\nclass Classifier_u2m(nn.Module):\n def __init__(self, n_output, outdim=500):\n super(Classifier_u2m, self).__init__()\n self.fc = nn.Linear(outdim, n_output)\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n\nclass Net_f(nn.Module):\n def __init__(self, task='s2m', outdim=500):\n super(Net_f, self).__init__()\n self.generator = Generator_u2m(outdim=outdim)\n \n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x, constant = 1, adaption = False):\n x = self.generator(x)\n return x\n\nclass Net_c_cway(nn.Module):\n def 
__init__(self, task='s2m', outdim=500):\n super(Net_c_cway, self).__init__()\n self.classifier = Classifier_u2m(10, outdim=outdim)\n \n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n x = self.classifier(x)\n return x\n","repo_name":"xymtxwd/concept_drift_detection","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"9129821032","text":"from time import sleep\n\nfrom memory_profiler import profile\n\nfrom ambush.snapshot import SnapShot\n\n\n@profile\ndef main():\n\n try:\n snapshot = SnapShot(\n root_path='.',\n glob='**/*.py',\n stability_threshold=10,\n )\n\n while True:\n sleep(1)\n snapshot = snapshot.next()\n for event in snapshot.iter_events():\n print('[{event_type}] {path}'.format(**event))\n except KeyboardInterrupt:\n return\n\n\nmain()\n","repo_name":"leucoide/ambush","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14108209419","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Attendee',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n options={\n 'db_table': 'attendee',\n },\n ),\n migrations.CreateModel(\n name='Organizer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n options={\n 'db_table': 'organizer',\n },\n ),\n migrations.AlterField(\n model_name='myprofile',\n name='events_assisted',\n field=models.IntegerField(verbose_name='Events already assisted'),\n ),\n migrations.AddField(\n model_name='organizer',\n name='profile',\n field=models.ForeignKey(to='accounts.MyProfile'),\n ),\n migrations.AddField(\n model_name='attendee',\n name='profile',\n field=models.ForeignKey(to='accounts.MyProfile'),\n ),\n ]\n","repo_name":"Juanvulcano/fossasia_eventmanager","sub_path":"accounts/migrations/0002_auto_20160102_1816.py","file_name":"0002_auto_20160102_1816.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72270697127","text":"import streamlit as st\nimport mysql.connector\nimport pandas as pd\nfrom st_aggrid import AgGrid, GridOptionsBuilder\nfrom config import DATABASE_CONFIG\n\n\ndef app():\n st.title(\"Property Management Dashboard\")\n\n # Function to get database connection\n def get_db_connection():\n connection = mysql.connector.connect(**DATABASE_CONFIG)\n return connection\n\n # Function to execute read query\n def execute_read_query():\n connection = get_db_connection()\n query = \"\"\"SELECT * FROM Unit\"\"\" # Adjust the query as needed\n df = pd.read_sql(query, connection)\n connection.close()\n return df\n\n # Function to execute write query (update, delete)\n def execute_write_query(query):\n 
connection = get_db_connection()\n cursor = connection.cursor()\n cursor.execute(query)\n connection.commit()\n connection.close()\n\n # Fetch data\n df = execute_read_query()\n\n # Set up AgGrid options for editable grid\n gb = GridOptionsBuilder.from_dataframe(df)\n gb.configure_default_column(editable=True, minWidth=150)\n gb.configure_selection('multiple', use_checkbox=True)\n grid_options = gb.build()\n\n # Display the grid\n grid_response = AgGrid(\n df, \n gridOptions=grid_options,\n height=300, \n width='100%',\n data_return_mode='AS_INPUT', \n update_mode='MODEL_CHANGED',\n fit_columns_on_grid_load=True\n )\n\n if 'data' in grid_response:\n updated_df = grid_response['data']\n if not updated_df.equals(df):\n st.session_state['updated_df'] = updated_df\n\n # Store selected rows for deletion\n selected = grid_response['selected_rows']\n if selected:\n st.session_state['selected_for_deletion'] = selected\n st.write(\"Selected rows:\", selected)\n\n # Confirm Update Button\n if st.button('Confirm Update'):\n if 'updated_df' in st.session_state:\n # Truncate the existing table\n truncate_query = \"TRUNCATE TABLE Unit\"\n execute_write_query(truncate_query)\n\n # Prepare and execute the insert query for the updated DataFrame\n for i in st.session_state['updated_df'].index:\n columns = ', '.join(st.session_state['updated_df'].columns)\n values = ', '.join([f\"'{st.session_state['updated_df'].at[i, col]}'\" for col in st.session_state['updated_df'].columns])\n insert_query = f\"INSERT INTO Unit ({columns}) VALUES ({values})\"\n execute_write_query(insert_query)\n\n # Execute deletions\n if 'selected_for_deletion' in st.session_state:\n for row in st.session_state['selected_for_deletion']:\n delete_query = f\"DELETE FROM Unit WHERE Unit_ID = {row['Unit_ID']}\" # Replace 'ID' with your primary key column name\n execute_write_query(delete_query)\n\n st.success(\"Database Updated Successfully\")\n del st.session_state['updated_df'] # Clear the updated data from the session state\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"XenosHu/prop-db-manager","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35158126537","text":"\r\ndef recur(idx):\r\n global answer\r\n\r\n if idx == n:\r\n return 0\r\n if idx > n :\r\n return -999999999999999\r\n if dp[idx] != -1: # 이미 저장되었다면\r\n return dp[idx]\r\n # 상담을 받거나 안받거나 그중에서 더 많은 돈을 버는 경우를 내 dp 테이블에 저장\r\n dp[idx] = max(recur(idx+arr[idx][0])+ arr[idx][1], recur(idx+1))\r\n return dp[idx]\r\nn = int(input())\r\narr = [list(map(int, input().split())) for _ in range(n)]\r\ndp = [-1] * (n+1)\r\nanswer = 0\r\nprint(recur(0))\r\n","repo_name":"yooooonzzzzzang/Algo_seed","sub_path":"백준/Silver/14501. 
퇴사/퇴사.py","file_name":"퇴사.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19600578681","text":"import os\nimport random\nimport paddle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef same_seeds(seed=2021):\n seed = int(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n paddle.seed(seed)\n\n\ndef draw_process(title,train_metric,val_metric,metric_name):\n plt.figure()\n plt.title(title, fontsize=24)\n plt.xlabel(\"Epoch\", fontsize=20)\n plt.ylabel(metric_name, fontsize=20)\n plt.plot(list(range(len(train_metric))), train_metric,label=f'Train {metric_name}')\n plt.plot(list(range(len(val_metric))), val_metric,label=f'Val {metric_name}')\n plt.legend()\n plt.grid()\n save_dir='image0'\n if not os.path.exists(save_dir):os.makedirs(save_dir)\n plt.savefig(os.path.join(save_dir,metric_name))\n # plt.show()\n\n# 带线性预热的指数衰减学习率\ndef ExpDecayWithWarmup(warmup_steps,lr_start,lr_peak,lr_decay):\n ''' warmup and exponential decay'''\n exp_sched = paddle.optimizer.lr.ExponentialDecay(learning_rate=lr_peak, gamma=lr_decay)\n scheduler = paddle.optimizer.lr.LinearWarmup(learning_rate=exp_sched, warmup_steps=warmup_steps,\n start_lr=lr_start, end_lr=lr_peak, verbose=True)\n return scheduler\n\n","repo_name":"jiaohuix/Bayes_backprop_paddle","sub_path":"bayes_elbo_paddle/utils/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15888373337","text":"#encoding:utf-8\nfrom os import path\nimport multiprocessing\nfrom pathlib import Path\n\"\"\"Note:\npytorch BERT 模型包含三个文件:模型、vocab.txt, bert_config.json, 有两种加载方式:\n(1)在线下载。这种方式下,模型和vocab会通过url的方式下载,只需将bert_model设置为 \"bert_model=bert-base-chinese\"\n 另外,还需要设置cache_dir路径,用来存储下载的文件。\n(2)先下载好文件。下载好的文件是tensorflow的ckpt格式的,首先要利用convert_tf_checkpoint_to_pytorch转换成pytorch格式存储\n 这种方式是通过本地文件夹直接加载的,要注意这时的文件命名方式。首先指定bert_model=存储模型的文件夹\n 第二,将vocab.txt和bert_config.json放入该目录下,并在配置文件中指定VOCAB_FILE路径。当然vocab.txt可以不和模型放在一起,\n 但是bert_config.json文件必须和模型文件在一起。具体可见源代码file_utils\n\"\"\"\nBASE_DIR = Path('pybert')\n\nconfigs = {\n\n 'task':'multi label',\n 'data':{\n 'raw_data_path': BASE_DIR / 'dataset/raw/train.csv', # 总的数据,一般是将train和test何在一起构建语料库\n 'train_file_path': BASE_DIR / 'dataset/processed/train.csv',\n 'valid_file_path': BASE_DIR / 'dataset/processed/valid.csv',\n 'test_file_path': BASE_DIR / 'dataset/raw/test.csv'\n },\n 'output':{\n 'log_dir': BASE_DIR / 'output/log', # 模型运行日志\n 'writer_dir': BASE_DIR / \"output/TSboard\",# TSboard信息保存路径\n 'figure_dir': BASE_DIR / \"output/figure\", # 图形保存路径\n 'checkpoint_dir': BASE_DIR / \"output/checkpoints\",# 模型保存路径\n 'cache_dir': BASE_DIR / 'model/',\n 'result': BASE_DIR / \"output/result\",\n },\n 'pretrained':{\n \"bert\":{\n 'vocab_path': BASE_DIR / 'model/pretrain/chinese_L-12_H-768_A-12/vocab.txt',\n 'tf_checkpoint_path': BASE_DIR / 'model/pretrain/chinese_L-12_H-768_A-12/bert_model.ckpt',\n 'bert_config_file': BASE_DIR / 'model/pretrain/chinese_L-12_H-768_A-12/bert_config.json',\n 'pytorch_model_path': BASE_DIR / 'model/pretrain/pytorch_pretrain/pytorch_model.bin',\n 'bert_model_dir': BASE_DIR / 'model/pretrain/pytorch_pretrain',\n },\n 'embedding':{}\n },\n 'train':{\n 'valid_size': 0.2,\n 'max_seq_len': 128,\n 'do_lower_case':True,\n 'batch_size': 16,#24, # how many samples to process at once\n 'epochs': 16, # number of epochs to train\n 
'start_epoch': 1,\n 'warmup_proportion': 0.1,# Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.\n 'gradient_accumulation_steps': 4,# Number of updates steps to accumulate before performing a backward/update pass.\n #按理说batchsize越大训练效果越好,梯度累加则实现了batchsize的变相扩大\n 'learning_rate': 5e-4,\n 'n_gpu': [1,0], # GPU个数,如果只写一个数字,则表示gpu标号从0开始,并且默认使用gpu:0作为controller,\n # 如果以列表形式表示,即[1,3,5],则我们默认list[0]作为controller\n 'num_workers': multiprocessing.cpu_count(), # 线程个数\n 'weight_decay': 1e-5,\n 'seed':2018,\n 'resume':False,\n },\n 'predict':{\n 'batch_size':512\n },\n 'callbacks':{\n 'lr_patience': 5, # number of epochs with no improvement after which learning rate will be reduced.\n 'mode': 'min', # one of {min, max}\n 'monitor': 'valid_loss', # 计算指标\n 'early_patience': 200, # early_stopping\n 'save_best_only': True, # 是否保存最好模型\n 'save_checkpoint_freq': 10 # 保存模型频率,当save_best_only为False时候,指定才有作用\n },\n 'label2id' : { # 标签映射\n # '无': 0, \n # '描述就诊史病史': 1, \n #'描述症状': 2, \n # '咨询联系方式': 3, \n # '咨询价格': 4, \n # '咨询项目': 5, \n # '咨询材料仪器治疗方法': 6, \n # '咨询术后': 7, \n # '咨询地址': 8, \n # '咨询检查': 9, \n # '咨询疗效': 10, \n # '咨询病因': 11, \n # '咨询后果': 12, \n # '确定预约时间': 13, \n # '咨询遗传': 14, \n # '咨询副作用': 15, \n # '咨询药物': 16, \n # '咨询住院': 17, \n # '咨询专家': 18, \n # '咨询疗程': 19, \n # '忌口食物': 20, \n # '宜吃食物': 21, \n # '咨询优惠活动': 22\n '问就诊史': 0, '无': 1, '问症状': 2, '问项目部位': 3, '套电': 4, '问个人信息': 5, '询问治疗意愿': 6, '报价格': 7, '项目介绍': 8\n },\n 'model':{\n 'arch':'bert'\n }\n}\n","repo_name":"Dongcf/Pytorch_Bert_Text_Classification","sub_path":"pybert/config/basic_config.py","file_name":"basic_config.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26403680138","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nimport numpy as np \nimport sys\n\n\nleng=3\ndata = [[i+j for j in range(leng)] for i in range(100)]\ntarget = [[i+j+1 for j in range(leng)] for i in range(1,101)]\ndata = np.array(data, dtype=np.float32)\ntarget = np.array(target, dtype=np.float32)\ndata = data.reshape((100,1,leng))/200\ntarget = target.reshape((100,1,leng))/200\n\n\nx_test = [[i+j for j in range(leng)] for i in range(50,150)]\ny_test = [[i+j+1 for j in range(leng)] for i in range(51,151)]\nx_test = np.array(x_test, dtype=np.float32)\ny_test = np.array(y_test, dtype=np.float32)\nx_test = x_test.reshape((100,1,leng))/200\ny_test = y_test.reshape((100,1,leng))/200\n\n\n\nmodel = Sequential()\nmodel.add(LSTM(leng, input_shape=(1,leng), return_sequences=True,activation='sigmoid'))\nmodel.add(LSTM(leng, input_shape=(1,leng), return_sequences=True,activation='sigmoid'))\nmodel.add(LSTM(leng, input_shape=(1,leng), return_sequences=True,activation='sigmoid'))\nmodel.add(LSTM(leng, input_shape=(1,leng), return_sequences=True,activation='sigmoid'))\nmodel.add(LSTM(leng, input_shape=(1,leng), return_sequences=True,activation='sigmoid'))\nmodel.compile(loss='mean_absolute_error', optimizer='adam',metrics=['accuracy'])\nmodel.fit(data,target,nb_epoch=2500,batch_size=50, verbose=2, validation_data=(x_test,y_test))\n\nmodel.save(\"rnn_sequence.h5\")\ndel model","repo_name":"josearun85/neuralnetclass","sub_path":"code/keras_r2.py","file_name":"keras_r2.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17888291567","text":"from pathlib import Path\n\nimport numpy as 
np\n\nfrom spikeinterface.core import BaseSorting, BaseSortingSegment\nfrom spikeinterface.core.core_tools import define_function_from_class\n\ntry:\n import h5py\n\n HAVE_H5PY = True\nexcept ImportError:\n HAVE_H5PY = False\n\n\nclass CombinatoSortingExtractor(BaseSorting):\n \"\"\"Load Combinato format data as a sorting extractor.\n\n Parameters\n ----------\n folder_path : str or Path\n Path to the Combinato folder.\n sampling_frequency : int, default: 30000\n The sampling frequency.\n user : str\n The username that ran the sorting. Defaults to 'simple'.\n det_sign : {'both', 'pos', 'neg'}\n Which sign was used for detection.\n keep_good_only : bool, default: True\n Whether to only keep good units.\n\n Returns\n -------\n extractor : CombinatoSortingExtractor\n The loaded data.\n \"\"\"\n\n extractor_name = \"CombinatoSortingExtractor\"\n installed = HAVE_H5PY\n installation_mesg = \"To use the CombinatoSortingExtractor install h5py: \\n\\n pip install h5py\\n\\n\"\n name = \"combinato\"\n\n def __init__(self, folder_path, sampling_frequency=None, user=\"simple\", det_sign=\"both\", keep_good_only=True):\n folder_path = Path(folder_path)\n assert folder_path.is_dir(), \"Folder {} doesn't exist\".format(folder_path)\n if sampling_frequency is None:\n h5_path = str(Path(folder_path).absolute()) + \".h5\"\n if Path(h5_path).exists():\n with h5py.File(h5_path, mode=\"r\") as f:\n sampling_frequency = f[\"sr\"][0]\n\n # ~ self.set_sampling_frequency(sampling_frequency)\n det_file = str(folder_path / Path(\"data_\" + folder_path.stem + \".h5\"))\n sort_cat_files = []\n for sign in [\"neg\", \"pos\"]:\n if det_sign in [\"both\", sign]:\n sort_cat_file = folder_path / Path(\"sort_{}_{}/sort_cat.h5\".format(sign, user))\n if sort_cat_file.exists():\n sort_cat_files.append((sign, str(sort_cat_file)))\n\n unit_counter = 0\n spiketrains = {}\n metadata = {}\n unsorted = []\n with h5py.File(det_file, mode=\"r\") as fdet:\n for sign, sfile in sort_cat_files:\n with h5py.File(sfile, mode=\"r\") as f:\n sp_class = f[\"classes\"][()]\n gaux = f[\"groups\"][()]\n groups = {g: gaux[gaux[:, 1] == g, 0] for g in np.unique(gaux[:, 1])} # array of classes per group\n group_type = {group: g_type for group, g_type in f[\"types\"][()]}\n sp_index = f[\"index\"][()]\n\n times_css = fdet[sign][\"times\"][()]\n for gr, cls in groups.items():\n if keep_good_only and (group_type[gr] < 1): # artifact or unsorted\n continue\n spiketrains[unit_counter] = np.rint(\n times_css[sp_index[np.isin(sp_class, cls)]] * (sampling_frequency / 1000)\n )\n metadata[unit_counter] = {\"group_type\": group_type[gr]}\n unit_counter = unit_counter + 1\n unit_ids = np.arange(unit_counter, dtype=\"int64\")\n BaseSorting.__init__(self, sampling_frequency, unit_ids)\n self.add_sorting_segment(CombinatoSortingSegment(spiketrains))\n self.set_property(\"unsorted\", np.array([metadata[u][\"group_type\"] == 0 for u in range(unit_counter)]))\n self.set_property(\"artifact\", np.array([metadata[u][\"group_type\"] == -1 for u in range(unit_counter)]))\n self._kwargs = {\"folder_path\": str(Path(folder_path).absolute()), \"user\": user, \"det_sign\": det_sign}\n\n self.extra_requirements.append(\"h5py\")\n\n\nclass CombinatoSortingSegment(BaseSortingSegment):\n def __init__(self, spiketrains):\n BaseSortingSegment.__init__(self)\n # spiketrains is dict\n self._spiketrains = spiketrains\n\n def get_unit_spike_train(self, unit_id, start_frame, end_frame):\n times = self._spiketrains[unit_id]\n if start_frame is not None:\n times = times[times >= 
start_frame]\n if end_frame is not None:\n times = times[times < end_frame]\n return times\n\n\nread_combinato = define_function_from_class(source_class=CombinatoSortingExtractor, name=\"read_combinato\")\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/extractors/combinatoextractors.py","file_name":"combinatoextractors.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"11907926495","text":"from django.db import models\nfrom django.conf import settings\n\nfrom django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist\n\nif \"jogging\" in settings.INSTALLED_APPS:\n\tfrom jogging import logging\nelse:\n\tlogging = None\n\nfrom machiavelli.signals import game_finished\n\nclass Server(models.Model):\n\t\"\"\" Defines core attributes for the whole site \"\"\"\n\tranking_last_update = models.DateTimeField()\n\tranking_outdated = models.BooleanField(default=False)\n\n\tdef __unicode__(self):\n\t\treturn \"Server %s\" % self.pk\n\ndef outdate_ranking(sender, **kwargs):\n\ttry:\n\t\tserver = Server.objects.get()\n\texcept MultipleObjectsReturned:\n\t\tif logging:\n\t\t\tlogging.error(\"Multiple servers found\")\n\texcept ObjectDoesNotExist:\n\t\tif logging:\n\t\t\tlogging.error(\"No configured server\")\n\telse:\n\t\tserver.ranking_outdated = True\n\t\tserver.save()\n\ngame_finished.connect(outdate_ranking)\n","repo_name":"jantoniomartin/condottieri-old","sub_path":"condottieri_common/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25816133026","text":"import numpy as np\n\nfrom scipy.stats import gaussian_kde\n\ndef kde_support(aData_in, bw_in, iGridsize_in, cut_in, aClip_in):\n \"\"\"\n Establish support for a kernel density estimate.\n\n Args:\n aData_in (numpy.array): Input data\n bw_in (string): bandwidth\n iGridsize_in (int): Gridsize\n cut_in (float): Factor\n aClip_in (numpy.array): Bound\n\n Returns:\n numpy.array: kde\n \"\"\"\n support_min = max(aData_in.min() - bw_in * cut_in, aClip_in[0])\n support_max = min(aData_in.max() + bw_in * cut_in, aClip_in[1])\n return np.linspace(support_min, support_max, iGridsize_in)\n\ndef scipy_bivariate_kde(aX_in, aY_in, bw_in, iGridsize_in, cut_in, aClip_in):\n \"\"\"\n Compute a bivariate kde using scipy.\n\n Args:\n aX_in (numpy.array): X array \n aY_in (numpy.array): Y array\n bw_in (string): bandwidth\n iGridsize_in (int): Gridsize\n cut_in (float): Factor\n aClip_in (numpy.array): Bound\n\n Raises:\n \n\n Returns:\n numpy.array: kde\n \"\"\" \n \n\n data = np.c_[aX_in, aY_in]\n kde = gaussian_kde(data.T, bw_method=bw_in)\n data_std = data.std(axis=0, ddof=1)\n if isinstance(bw_in, str):\n bw_in = \"scotts\" if bw_in == \"scott\" else bw_in\n bw_x = getattr(kde, \"%s_factor\" % bw_in)() * data_std[0]\n bw_y = getattr(kde, \"%s_factor\" % bw_in)() * data_std[1]\n elif np.isscalar(bw_in):\n bw_x, bw_y = bw_in, bw_in\n else:\n msg = (\"Cannot specify a different bandwidth for each dimension \"\n \"with the scipy backend. 
You should install statsmodels.\")\n raise ValueError(msg)\n \n x_support = kde_support(data[:, 0], bw_x, iGridsize_in, cut_in, aClip_in[0])\n y_support = kde_support(data[:, 1], bw_y, iGridsize_in, cut_in, aClip_in[1])\n xx, yy = np.meshgrid(x_support, y_support)\n z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)\n return xx, yy, z","repo_name":"changliao1025/pyearth","sub_path":"pyearth/toolbox/math/stat/scipy_bivariate_kde.py","file_name":"scipy_bivariate_kde.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"25288723605","text":"import argparse\nimport os\nimport glob\nimport numpy as np\nfrom PIL import Image\nfrom keras.models import load_model\nfrom keras.callbacks import EarlyStopping\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\n\nimport split_video\n\nLABEL = [\"ALKALINE\", \"LIION\", \"NIMH\", \"NICD\"]\nIMAGE_SIZE = 224\n\n\ndef main(args):\n\t\"\"\" Make new model.\n\t\tOrigin model become old_model.h5 and new one become model.h5.\n\t\"\"\"\n\tif not os.path.exists(args.model):\n\t\tprint('The model does not exist : ' + args.model)\n\t\texit(1)\n\telif not os.path.exists(args.train):\n\t\tprint('Train data does not exist : ' + args.train)\n\t\texit(1)\n\n\n\tmodel = load_model(args.model)\n\tpath, file = os.path.split(args.model)\n\tmodel.save(path + '/old_' + file)\n\n\tif args.debug:\n\t\tprint('Processing data...')\n\n\tX, Y = process_train_data(args.train)\n\tX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state = 24)\n\n\tif args.debug:\n\t\tprint('Training with new batteries...')\n\tes_cb = EarlyStopping(monitor='val_loss', patience=2, mode='auto', restore_best_weights=True)\n\thistory = model.fit(X_train, y_train, batch_size=32, epochs=50, callbacks=[es_cb], validation_data=(X_test, y_test), verbose=int(args.debug))\n\n\tmodel.save(args.model)\n\n\tmaintain(args.train, args.number)\n\ndef process_train_data(path):\n\t\"\"\" Process data from directory for CNN training.\n\n\n\t\t:param path: path to the training data. There should be data for each LABEL. :type: str\n\t\t:return X: image data for CNN :type: [[[float]]] (data of image)\n\t\t:return Y: label data for CNN :type: [[int]] (label of battery)\n\t\"\"\"\n\tX = []\n\tY = []\n\tfor index, name in enumerate(LABEL):\n\t\tif not os.path.exists(path + '/' + name):\n\t\t\tprint('Does not exist : ' + path + '/' + name)\n\t\t\texit(1)\n\n\t\tfiles = glob.glob(path + '/' + name + '/*.jpg')\n\t\tfor file in files:\n\t\t\timage = Image.open(file)\n\t\t\timage = image.convert(\"RGB\")\n\t\t\timage = image.resize((IMAGE_SIZE, IMAGE_SIZE))\n\t\t\tdata = np.asarray(image)\n\t\t\tX.append(data)\n\t\t\tY.append(index)\n\t\n\tX = np.array(X, dtype=np.float32)\n\tX = X / 255.0\n\n\tY = np_utils.to_categorical(Y, len(LABEL))\n\n\treturn X, Y\n\ndef maintain(train, number):\n\t\"\"\" Maintain the number of training images.\n\n\n\t\t:param train: The path to the train data. :type: str\n\t\t:param number: The training data should be maintained this number. 
:type: int\n\t\t:return X: image data for CNN :type: [[[float]]] (data of image)\n\t\"\"\"\n\tfor folder in glob.glob(train + '/*'):\n\t\tfiles = glob.glob(folder + '/*.jpg')\n\t\tif len(files) > number:\n\t\t\tfor i in range(len(files) - number):\n\t\t\t\tos.remove(files[i])\n\n\nif __name__ == \"__main__\":\n\t# Make parser.\n\tparser = argparse.ArgumentParser(\n\t\t\t\tprog='retraining.py', \n\t\t\t\tusage='Retraining the model.', \n\t\t\t\tdescription='description...',\n\t\t\t\tepilog='end',\n\t\t\t\tadd_help=True,\n\t\t\t\t)\n\tparser.add_argument('-M', '--model', help='Choose model. Default is \"./models/model.h5\".', required=False,\n\t\t\t\t\t\t\tdefault='./Models/model.h5')\n\tparser.add_argument('-T', '--train', help='Path to the train data. Default is ./train_data', required=False, default='./train_data')\n\tparser.add_argument('-N', '--number', help='The number os training data. Default is 1000.', required=False, default=1000, type=int)\n\tparser.add_argument('--debug', help='Debug mode.', action='store_true', required=False, default=False)\n\n\t# parse thearguments.\n\targs = parser.parse_args()\n\tmain(args)\n\n","repo_name":"emi-cd/category_recognize","sub_path":"retraining.py","file_name":"retraining.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32088449172","text":"from ...models import EnterKey\n\n\ndef check_key(key):\n model = EnterKey.objects.filter(key=key).first()\n\n if model:\n success = True\n message = 'Key is matching'\n else:\n success = False\n message = 'Key is invalid or is missing'\n\n return {\n 'success': success,\n 'message': message\n }\n","repo_name":"calinvladth/history","sub_path":"2020/1_vr_station/server/app/views/key_check/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"867744168","text":"#!/usr/bin/env python3\n#\n# ./ocr.py : Perform optical character recognition, usage:\n# ./ocr.py train-image-file.png train-text.txt test-image-file.png\n#\n# Authors: Johny Rufus\n# (based on skeleton code by D. Crandall, Oct 2017)\n#\n\n'''\nSome of the design choices made:\n\n1. Comparing two images by comparing the pixels proved to be a pretty tricky issue,\nas comparing the pixels by just taking into account the matching pixels(using both\nblack and white) would lead a lot of characters to be predicted as ' '(Space).\nJust taking into account the pixels that are ON (black), solved the above problem,\nbut this will end up in not predicting a ' ' correctly, even under slight noise.\nSo the final idea that worked was, using a combination of both strategies based on\naverage pixel count and the current Observation's pixel count and employing different\nstrategies for observations that had very low black pixels VS observations that have\na substantial amount of black pixels.\n\n2. Ended up calculating and using the error/noise level based on the naive bayes\nprediction, but this does not seem to have much of an effect, in fact going by a\nconstant Probability of .99 and .01 seems to perform well in general as well.\n\n3. 
Emission probabilities are normalized on the pixel counts, for HMM-VE every column\nis scaled to prevent underflow and for Viterbi, log scaling is used to prevent underflow\n\nInteresting Observations based on test images provided:\n\nHMM-VE and HMM-MAP outperform Simple in almost every single test image provided.\nWhen looking at HMM-VE vs HMM-MAP, the output is almost the same in terms of the prediction.\nIn fact HMM-VE has a better prediction of the last character, which I believe is due to the\nForward-Backward algorithm taking into consideration the end probabilities, which HMM-MAP\ndoes not consider.\n\n'''\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom naivebayes import NaiveBayes\nfrom collections import defaultdict\nfrom itertools import chain\nfrom math import log\nimport sys\n\nch_width=14\nch_height=25\n\ntrain_letters_ch = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789(),.-!?\\\"' \"\nstart_prob = defaultdict(int)\ntrans_prob = dict()\nprior_prob = dict()\nemm_prob = dict()\nend_prob = defaultdict(int)\n\n\ndef load_letters(fname):\n im = Image.open(fname)\n px = im.load()\n (x_size, y_size) = im.size\n result = []\n for x_beg in range(0, int(x_size / ch_width) * ch_width, ch_width):\n result += [ [ \"\".join([ '*' if px[x, y] < 1 else ' ' for x in range(x_beg, x_beg+ch_width) ]) for y in range(0, ch_height) ], ]\n return result\n\n\ndef load_training_letters(fname):\n letter_images = load_letters(fname)\n return { train_letters_ch[i]: letter_images[i] for i in range(0, len(train_letters_ch) ) }\n\n\n'''\nCalls the NaiveBayes class \nprovided in a separate file naivebayes.py\n'''\ndef simplified_bayes(train_letters, test_letters, prior):\n nb = NaiveBayes(train_letters, prior)\n return ''.join([nb.predict(letter) for letter in test_letters])\n\n\n'''\nVE implementation using forward backward algorithm.\n'''\ndef hmm_ve(test_letters):\n return forward_backward(test_letters, train_letters_ch)\n\n\n'''\nThe forward backward algorithm to implement Variable Elimination,\nthe outer stub is taken from wikipedia - \nhttps://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm\nand modified to fit the current problem.\n'''\ndef forward_backward(observations, states):\n # Calculate the forward probabilities of being in a state Qn given all the\n # observations till On.\n fwd = []\n f_prev = {}\n for i, observation_i in enumerate(observations):\n f_curr = {}\n for st in states:\n if i == 0:\n prev_f_sum = start_prob[st]\n else:\n prev_f_sum = sum(f_prev[k] * trans_prob[k][st] for k in states)\n f_curr[st] = emm_prob[st][i] * prev_f_sum\n\n # Scale the column to sum to 1\n column_sum = sum(f_curr[st] for st in states)\n for st in states:\n f_curr[st] = f_curr[st]/column_sum\n\n fwd.append(f_curr)\n f_prev = f_curr\n\n p_fwd = sum(f_curr[k] * end_prob[k] for k in states)\n\n # Calculate the backward probabilities of the observations On-1...Ok, given the state Qn\n bkw = []\n b_prev = {}\n \n for i in reversed(list(range(len(observations)))[1:] + [0]):\n b_curr = {}\n for st in states:\n if i == 0:\n b_curr[st] = end_prob[st]\n else:\n b_curr[st] = sum(trans_prob[st][l] * emm_prob[l][i] * b_prev[l] for l in states)\n\n # Scale the column to sum to 1\n column_sum = sum(b_curr[st] for st in states)\n for st in states:\n b_curr[st] = b_curr[st]/column_sum\n\n bkw.insert(0, b_curr)\n b_prev = b_curr\n\n posterior = []\n for i in range(len(observations)):\n # Prevent underflow, This should not happen, as we have scaled the values, just in case.\n if p_fwd == 0:\n 
return fwd, bkw, fwd\n else:\n posterior.append({st: fwd[i][st] * bkw[i][st] / p_fwd for st in states})\n\n return fwd, bkw, posterior\n\n\n'''\nHMM-MAP using the Viterbi implementation\n'''\ndef hmm_map(test_letters):\n return viterbi(train_letters_ch, len(test_letters))\n\n\n'''\nThe Viterbi implementation, \nbuilds a dp table with states as rows and observations as columns.\n'''\ndef viterbi(states, num_obs):\n dp = {st: {obs: {} for obs in range(num_obs)} for st in states}\n start_prob_log = {st: log(start_prob[st]) if start_prob[st] > 0 else -100000 for st in states}\n trans_prob_log = {st: {q: log(trans_prob[st][q]) if trans_prob[st][q] > 0 else -100000 for q in states} for st in states}\n emm_prob_log = {st: {obs: log(emm_prob[st][obs]) if emm_prob[st][obs] > 0 else -100000 for obs in range(num_obs)} for st in states}\n\n for st in states:\n dp[st][0]['value'] = start_prob_log[st] + emm_prob_log[st][0]\n dp[st][0]['prev'] = None\n\n for obs in range(1, num_obs):\n for st in states:\n max_prev = max(dp[prev][obs-1]['value'] + trans_prob_log[prev][st] for prev in states)\n for prev in states:\n if dp[prev][obs-1]['value'] + trans_prob_log[prev][st] == max_prev:\n dp[st][obs]['value'] = max_prev + emm_prob_log[st][obs]\n dp[st][obs]['prev'] = prev\n break\n\n last_st = None\n maxv = - float('inf')\n for st in states:\n if dp[st][num_obs-1]['value'] > maxv:\n maxv = dp[st][num_obs-1]['value']\n last_st = st\n res = [last_st]\n\n prev = last_st\n for obs in range(num_obs-1, 0, -1):\n st = dp[prev][obs]['prev']\n res = [st] + res\n prev = st\n return res\n\n\n'''\nCalculates the starting probabilities, \nend probabilities, transition probabilities\n'''\ndef calculate_probabilities(fname):\n train_set = set(train_letters_ch)\n for ch1 in train_letters_ch:\n start_prob[ch1] = 1.0\n prior_prob[ch1] = 1.0\n trans_prob[ch1] = dict()\n for ch2 in train_letters_ch:\n trans_prob[ch1][ch2] = 1.0\n\n with open(fname) as f:\n for para in f.readlines():\n lines = para.split('. 
')\n for i, line in enumerate(lines):\n line = line.lstrip()\n for j, ch in enumerate(line):\n if ch not in train_set: continue\n prior_prob[ch] += 1\n if j == 0:\n start_prob[ch] += 1\n elif j == len(line) - 1:\n if i != len(lines)-1:\n trans_prob[ch]['.'] += 1\n end_prob['.'] += 1\n end_prob[ch] += 1\n elif line[j-1] in train_set:\n trans_prob[line[j - 1]][ch] += 1\n\n initial_total = sum(start_prob.values())\n prior_total = sum(prior_prob.values())\n end_total = sum(end_prob.values())\n for ch1 in train_letters_ch:\n start_prob[ch1] = start_prob[ch1] / initial_total\n prior_prob[ch1] = prior_prob[ch1] / prior_total\n end_prob[ch1] = end_prob[ch1] / end_total\n trans_total = sum(trans_prob[ch1].values())\n for ch2 in train_letters_ch:\n trans_prob[ch1][ch2] = trans_prob[ch1][ch2] / trans_total\n end_prob['.'] = 0.1\n\n\n'''\nCalculates an approximate error/noise level, \nbased on the prediction from Naive Bayes.\n'''\ndef calculate_error(train_letters, test_letters, naive_prediction):\n total_error = 1\n total_valid = 1\n for i, ch in enumerate(naive_prediction):\n for j, pixel in enumerate(train_letters[ch]):\n if test_letters[i][j] == '*':\n total_error += (1 if pixel != test_letters[i][j] else 0)\n total_valid += (1 if pixel == test_letters[i][j] else 0)\n\n error_weight = 0.2 # Otherwise the Observation can get completely ignored, if naive bayes prediction is bad\n error_prob = error_weight * total_error / (total_error + total_valid)\n return error_prob\n\n\n'''\nCalculates the emission probabilities \nbased on pixels and normalize them.\n'''\ndef calculate_emission_prob(train_letters, test_letters, error_prob):\n pixel_count = {}\n for ch in train_letters_ch:\n total_on = 0\n for pix in train_letters[ch]:\n total_on += 1 if pix == '*' else 0\n pixel_count[ch] = total_on\n avg_pixel_count = sum(pixel_count.values())/len(train_letters_ch)\n for ch in train_letters_ch:\n emm_prob[ch] = [1.0] * len(test_letters)\n for i in range(len(test_letters)):\n total_on = 0\n for pix in test_letters[i]:\n total_on += 1 if pix == '*' else 0\n for j, pix in enumerate(test_letters[i]):\n if total_on > avg_pixel_count/5:\n if pix == '*':\n emm_prob[ch][i] *= (1 - error_prob) if pix == train_letters[ch][j] else error_prob\n else:\n if pix == ' ':\n emm_prob[ch][i] *= (1 - error_prob) if pix == train_letters[ch][j] else error_prob\n\n for i in range(len(test_letters)):\n total = 0\n for ch in train_letters_ch:\n total += emm_prob[ch][i]\n for ch in train_letters_ch:\n emm_prob[ch][i] = emm_prob[ch][i]/total\n\n\ndef main():\n #train_img_fname = 'courier-train.png'\n #train_txt_fname = 'DemocracyAndEducation.txt'\n\n if len(sys.argv) < 4:\n print('Usage: ')\n print('./ocr.py train-image-file.png train-text.txt test-image-file.png')\n sys.exit()\n\n (train_img_fname, train_txt_fname, test_img_fname) = sys.argv[1:4]\n\n train_letters = load_training_letters(train_img_fname)\n for ch in train_letters_ch:\n train_letters[ch] = list(chain.from_iterable(train_letters[ch]))\n calculate_probabilities(train_txt_fname)\n\n for i in range(1):\n #test_img_fname = 'test-{}-0.png'.format(i)\n test_letters = load_letters(test_img_fname)\n for i, l in enumerate(test_letters):\n test_letters[i] = list(chain.from_iterable(test_letters[i]))\n\n # Simplified\n simplified_res = simplified_bayes(train_letters, test_letters, prior_prob)\n print(' Simple: {}'.format(simplified_res))\n\n calculate_emission_prob(train_letters, test_letters, calculate_error(train_letters, test_letters, simplified_res))\n\n # HMM VE\n fwd, bkw, 
posterior = hmm_ve(test_letters)\n print(' HMM VE: {}'.format(''.join([max(test_prob, key=test_prob.get) for test_prob in posterior])))\n\n # HMM MAP\n print('HMM MAP: {}'.format(''.join(hmm_map(test_letters))))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"johnyrufus/HiddenMarkovModel-OCR","sub_path":"part2/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":11726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24409408331","text":"number_of_elements=int(input('The number of elements in list\\n'))\n\na=[]\nfor i in range(number_of_elements):\n\ta.append(int(input('enter the list elements')))\n\t\n\n#Use built-in function to calculate the maximum\n\nmax_list=max(a)\n\nprint(\"The maximum element is \",max_list)\n\n#Calculate the number of times the number occur\n\ncount_of_max_num=a.count(max_list)\n\nb=a\n\nif (count_of_max_num == 1):\n\n\tb.remove(max_list)\n\tsecond_max=max(b)\n\tprint(\"The second largest number is\", second_max, \"The new list is\" ,b)\nelse:\n\tfor i in range(count_of_max_num):\n\t\tb.remove(max_list)\n\tprint (\"The second largest is\" , max(b))\n\nprint(\"The original list is \",a)\n","repo_name":"PhoduCoder/PythonPractice","sub_path":"SecondMax.py","file_name":"SecondMax.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72524079527","text":"MENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n}\n\nresources = {\n \"water\": 300,\n \"milk\": 300,\n \"coffee\": 300,\n}\n\nacceptance = ['espresso', 'latte', \"cappuccino\", \"off\", \"report\"]\nprofit = 0\n\n\ndef espresso():\n ingredients = MENU['espresso']['ingredients']\n cost = MENU['espresso']['cost']\n return ingredients, cost\n\n\ndef latte():\n ingredients = MENU['latte']['ingredients']\n cost = MENU['latte']['cost']\n return ingredients, cost\n\n\ndef cappuccino():\n ingredients = MENU['cappuccino']['ingredients']\n cost = MENU['cappuccino']['cost']\n return ingredients, cost\n\n\ndef resources_check(data):\n if data[0]['water'] > resources['water']:\n if data[0]['coffee'] > resources['coffee']:\n print(data[0])\n if 'milk' in data[0].keys() and data[0]['milk'] > resources['milk']:\n return \"water and milk and coffee\"\n else:\n return \"water and coffee\"\n return \"water\"\n elif data[0]['coffee'] > resources['coffee']:\n if 'milk' in data[0].keys() and data[0]['milk'] > resources['milk']:\n return \"milk and coffee\"\n return \"coffee\"\n elif 'milk' not in data[0].keys():\n return False\n else:\n if data[0]['milk'] > resources['milk']:\n return \"milk\"\n else:\n return False\n\n\ndef sub(data):\n for k, v in data[0].items():\n resources[k] = resources[k] - v\n return resources\n\n\ndef money_check(data):\n if money > data[1]:\n return True\n else:\n return False\n\n\ndef output():\n if selection == \"espresso\":\n data = espresso()\n return data\n elif selection == \"latte\":\n data = latte()\n return data\n elif selection == \"cappuccino\":\n data = cappuccino()\n return data\n else:\n return False\n\n\ndef power():\n global power_on\n power_on = False\n\n\ndef report():\n print(f\"Water: {resources['water']}ml\\nMilk: 
{resources['milk']}ml\\nCoffee: {resources['coffee']}g\\nMoney: ${profit}\")\n\n\ndef coins(data):\n total = (quarters * 0.25) + (dimes * 0.10) + (nickels * 0.05) + (pennies * 0.01)\n change = total - data[1]\n return total, change\n\n\npower_on = True\nwhile power_on:\n selection = input(\"What would you like? (espresso/latte/cappuccino) \")\n while selection not in acceptance:\n selection = input(\"What would you like? (espresso/latte/cappuccino) \")\n out = output()\n if out:\n res_check = resources_check(out)\n if not res_check:\n quarters = int(input(\"How many quarters? \"))\n dimes = int(input(\"How many dimes? \"))\n nickels = int(input(\"How many nickels? \"))\n pennies = int(input(\"How many pennies? \"))\n money = coins(out)\n if money[1] >= 0:\n profit += out[1]\n updated_resources = sub(out)\n resources = updated_resources\n print(f\"Here's your {round(money[1], 2)} in change.\")\n print(f\"Here's your {selection} ☕. Enjoy!\")\n else:\n print(f\"Not enough money. {money[0]} refunded\")\n else:\n print(f\"Sorry there is not enough {res_check}\")\n elif selection == \"report\":\n report()\n else:\n power()\n\n\n","repo_name":"liquidnero/CoffeeMachine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17965378829","text":"\n\nfrom openpyxl import load_workbook\n\nclass ExcelHandler():\n\n def __init__(self, file_name, sheet_name):\n\n self.file_name = file_name\n self.sheet_name = sheet_name\n\n def read_data(self):\n '''读取excel数据'''\n cases = [] # 存储所有用例\n titles = [] # 存储用例表头\n workbook = load_workbook(self.file_name) # 获取Excel文件\n worksheet = workbook[self.sheet_name] #获取sheet\n # 遍历所有行\n # 如果是表头,把表头保存到titles列表中,否则把数据添加到cases列表中\n for i, row in enumerate(worksheet.rows):\n if i == 0:\n for cell in row:\n titles.append(cell.value)\n else:\n cases.append(dict(zip(titles, [cell.value for cell in row])))\n return cases\n\n def write_data(self, row, column, value):\n \"\"\"数据写入指定单元格\n args:\n row: 行号\n column: 列号\n value: 要写入的值\n returns:\n None\n \"\"\"\n workbook = load_workbook(self.file_name) # 获取一个WorkBook对象\n worksheet = workbook[self.sheet_name] # 获取一个WorkSheet对象\n worksheet.cell(row, column, value)\n workbook.save(self.file_name)\n return None\n","repo_name":"waitinghub/apitesting","sub_path":"common/handle_excel.py","file_name":"handle_excel.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34131969435","text":"marka = \"Jeep\"\nmodel = \"Cherokee\"\nrocznik = 2020\n\nsam = \"samochód -> marka: {}, model: {}, rocznik: {}.\"\nprint(sam.format(marka,model,rocznik))\n\nsam = \"samochód -> rocznik: {2}, marka: {0}, model: {1}.\"\nprint(sam.format(marka,model,rocznik))\n\n# f-string\nprint(f\"samochód -> marka: {{marka}}, model: \\\"{model}\\\", rocznik: {rocznik}\")\n\nkonkurs = [\n (\"Jan\",56),\n (\"Anna\",67),\n (\"Olga\",34),\n (\"Henryk\",65),\n (\"Olaf\",44),\n (\"Nadia\",68),\n (\"Janusz\",54),\n (\"Dariusz\",32),\n]\n\nprint(\"___________________________________\")\nprint(list(enumerate(konkurs)))\n\nprint(\"__________________zestawienie wyników konkursu_________________\")\nfor i, (imie,punkty) in enumerate(konkurs):\n print('nr %d: %-9s: %.1f punktów' 
%(i+1,imie,punkty))\n","repo_name":"albim72/PYTHON___WARSZTAT_12__","sub_path":"DZIEN_1/pep8_io.py","file_name":"pep8_io.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16611969887","text":"with open('../inputs/input23.txt') as f:\n data = f.readlines()\n\n def process_instructions(part_one=True):\n a = 0 if part_one else 1\n b = 0\n ind = 0\n while 0 <= ind < len(data):\n if 'inc' in data[ind]:\n if 'a' in data[ind]:\n a += 1\n else:\n b += 1\n elif 'tpl' in data[ind]:\n a *= 3\n elif 'hlf' in data[ind]:\n a /= 2\n elif 'jmp' in data[ind]:\n ind += int(data[ind].split()[1])\n continue\n elif 'jie' in data[ind]:\n if a % 2 == 0:\n ind += int(data[ind].split()[2])\n continue\n else:\n if a == 1:\n ind += int(data[ind].split()[2])\n continue\n ind += 1\n return b\n\n # part one\n print(process_instructions())\n\n # part two\n print(process_instructions(False))\n","repo_name":"Xyresic/AoC","sub_path":"2015/solutions/day23.py","file_name":"day23.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73232908969","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 9 21:36:30 2023\n\n@author: HP\n\"\"\"\n\n#from mpl_toolkits import mpolt3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nax = plt.axes(projection='3d')\nz = np.random.randint(1,10,10)\nx = np.random.randint(1,100,10)\ny = np.random.randint(1,50,10)\nplt.xlabel('x axis')\nplt.ylabel('y axis')\n\n#ax.plot3D(x,y,z,'red')\n#ax.set_title('3D line plot')\n#plt.show()\n\n#ax.scatter3D(x,y,z,'red')\n#plt.show()\n\nxlist=np.linspace(-6,6,30)\nylist=np.linspace(-6,6,30)\n\nX,Y = np.meshgrid(xlist,ylist)\nZ = np.sin(np.sqrt(X**2 + Y**2))\n\nax.plot_surface(X,Y,Z,cmap='Accent')\n#ax.contour3D(X,Y,Z,50,cmap='Accent')\nplt.show() ","repo_name":"userarpit/Python","sub_path":"Matplotlib/mplot3d.py","file_name":"mplot3d.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31313064747","text":"import sys, pdb\nsys.path.append(\"/home/ubuntu/workspace/python_for_finance\")\nsys.path.append(\"/usr/local/lib/python2.7/dist-packages\")\n# sys.path.append(\"/usr/local/lib/python3.4/dist-packages\")\n# import matplotlib as mpl\n# mpl.use('Agg')\n# import matplotlib.pyplot as plt\n\nfrom dx import *\nfrom utils.utils import *\n\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\n\ndef cds_valuation(haz, T, rf, recov_rate, freq):\n # probability of surviving through a given year\n surv_rates = [np.exp(-haz*t) for t in range(T+1)]\n # probability of default in a given year\n def_rates = [surv_rates[t] - surv_rates[t+1] for t in range(T)]\n # discounted present value of an expected payment\n pv_exp_payments = sum([surv_rates[t] * np.exp(-rf*(t)) for t in range(T+1)]) - 1\n # assume default happens halway thru period\n pv_if_dflt = sum([def_rates[t] * np.exp(-rf*(t + freq/2)) * (1-recov_rate) for t in range(T)])\n # accrual payments --> if default in middle of period, investor still owed half of coupon\n pv_of_accrual = sum([def_rates[t] * freq/2 * np.exp(-rf * (t + freq/2)) for t in range(T)])\n cds_spread = pv_if_dflt / (pv_exp_payments + pv_of_accrual) * 100\n duration = pv_if_dflt / cds_spread\n return cds_spread, duration\n\n\ndef cds_price(haz, cds_sprd, cpn, recov_rate, freq, T, rf, n_companies_idx, dur, 
prot_per_comp):\n # credit spread = (Upfront premium/Duration) + Fixed coupon\n # should be able to calc duration and hazard rate from other information\n px = 100 - (100 * dur * (cds_sprd - cpn))\n pdb.set_trace()\n pay = prot_per_comp * n_companies_idx * ((px - 100) / 100)\n return pay\n\n\nif __name__ == '__main__':\n # print(cds_valuation(0.02, 5, 0.05, 0.4, 1))\n print(cds_price(0.005717, 0.00345, 0.00406, 0.4, 0.25, 5, 0.04, 125, 4.447, 1))","repo_name":"mccarvik/python_for_finance","sub_path":"books/hull_examples/credit_deriv.py","file_name":"credit_deriv.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"11554199614","text":"# https://www.acmicpc.net/problem/11729\n\nfrom sys import stdin\n\nK = int(stdin.readline())\n\nresult_list = []\n\ndef hanoi(k, bar1, bar2, bar3):\n if k == 1:\n result_list.append(str(bar1) + \" \" + str(bar2))\n\n return\n\n hanoi(k - 1, bar1, bar3, bar2)\n\n result_list.append(str(bar1) + \" \" + str(bar2))\n\n hanoi(k - 1, bar3, bar2, bar1)\n\nhanoi(K, 1, 3, 2)\n\nprint(len(result_list))\nprint(\"\\n\".join(result_list))","repo_name":"Gnoyh/baekjoon-python","sub_path":"baekjoon_11729.py","file_name":"baekjoon_11729.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18107171422","text":"import math\nimport warnings\n\nfrom keystoneauth1 import adapter\nimport keystoneauth1.exceptions.catalog\nfrom keystoneauth1 import session as ks_session\nimport requestsexceptions\nfrom six.moves import urllib\n\nfrom openstack import version as openstack_version\nfrom openstack import _log\nfrom openstack.config import defaults as config_defaults\nfrom openstack import exceptions\n\n\ndef _make_key(key, service_type):\n if not service_type:\n return key\n else:\n service_type = service_type.lower().replace('-', '_')\n return \"_\".join([service_type, key])\n\n\ndef from_session(session, name=None, region_name=None,\n force_ipv4=False,\n app_name=None, app_version=None, **kwargs):\n \"\"\"Construct a CloudRegion from an existing `keystoneauth1.session.Session`\n\n When a Session already exists, we don't actually even need to go through\n the OpenStackConfig.get_one_cloud dance. We have a Session with Auth info.\n The only parameters that are really needed are adapter/catalog related.\n\n :param keystoneauth1.session.session session:\n An existing authenticated Session to use.\n :param str name:\n A name to use for this cloud region in logging. If left empty, the\n hostname of the auth_url found in the Session will be used.\n :param str region_name:\n The region name to connect to.\n :param bool force_ipv4:\n Whether or not to disable IPv6 support. 
Defaults to False.\n :param str app_name:\n Name of the application to be added to User Agent.\n :param str app_version:\n Version of the application to be added to User Agent.\n :param kwargs:\n Config settings for this cloud region.\n \"\"\"\n config_dict = config_defaults.get_defaults()\n config_dict.update(**kwargs)\n return CloudRegion(\n name=name, session=session, config=config_dict,\n region_name=region_name, force_ipv4=force_ipv4,\n app_name=app_name, app_version=app_version)\n\n\nclass CloudRegion(object):\n \"\"\"The configuration for a Region of an OpenStack Cloud.\n\n A CloudRegion encapsulates the config information needed for connections\n to all of the services in a Region of a Cloud.\n \"\"\"\n def __init__(self, name=None, region_name=None, config=None,\n force_ipv4=False, auth_plugin=None,\n openstack_config=None, session_constructor=None,\n app_name=None, app_version=None, session=None,\n discovery_cache=None):\n\n self._name = name\n self.region_name = region_name\n self.config = config\n self.log = _log.setup_logging('openstack.config')\n self._force_ipv4 = force_ipv4\n self._auth = auth_plugin\n self._openstack_config = openstack_config\n self._keystone_session = session\n self._session_constructor = session_constructor or ks_session.Session\n self._app_name = app_name\n self._app_version = app_version\n self._discovery_cache = discovery_cache or None\n\n def __getattr__(self, key):\n \"\"\"Return arbitrary attributes.\"\"\"\n\n if key.startswith('os_'):\n key = key[3:]\n\n if key in [attr.replace('-', '_') for attr in self.config]:\n return self.config[key]\n else:\n return None\n\n def __iter__(self):\n return self.config.__iter__()\n\n def __eq__(self, other):\n return (\n self.name == other.name\n and self.region_name == other.region_name\n and self.config == other.config)\n\n def __ne__(self, other):\n return not self == other\n\n @property\n def name(self):\n if self._name is None:\n try:\n self._name = urllib.parse.urlparse(\n self.get_session().auth.auth_url).hostname\n except Exception:\n self._name = self._app_name or ''\n return self._name\n\n @property\n def full_name(self):\n \"\"\"Return a string that can be used as an identifier.\n\n Always returns a valid string. It will have name and region_name\n or just one of the two if only one is set, or else 'unknown'.\n \"\"\"\n if self.name and self.region_name:\n return \":\".join([self.name, self.region_name])\n elif self.name and not self.region_name:\n return self.name\n elif not self.name and self.region_name:\n return self.region_name\n else:\n return 'unknown'\n\n def set_session_constructor(self, session_constructor):\n \"\"\"Sets the Session constructor.\"\"\"\n self._session_constructor = session_constructor\n\n def get_requests_verify_args(self):\n \"\"\"Return the verify and cert values for the requests library.\"\"\"\n if self.config['verify'] and self.config['cacert']:\n verify = self.config['cacert']\n else:\n verify = self.config['verify']\n if self.config['cacert']:\n warnings.warn(\n \"You are specifying a cacert for the cloud {full_name}\"\n \" but also to ignore the host verification. 
The host SSL\"\n \" cert will not be verified.\".format(\n full_name=self.full_name))\n\n cert = self.config.get('cert', None)\n if cert:\n if self.config['key']:\n cert = (cert, self.config['key'])\n return (verify, cert)\n\n def get_services(self):\n \"\"\"Return a list of service types we know something about.\"\"\"\n services = []\n for key, val in self.config.items():\n if (key.endswith('api_version')\n or key.endswith('service_type')\n or key.endswith('service_name')):\n services.append(\"_\".join(key.split('_')[:-2]))\n return list(set(services))\n\n def get_auth_args(self):\n return self.config.get('auth', {})\n\n def get_interface(self, service_type=None):\n key = _make_key('interface', service_type)\n interface = self.config.get('interface')\n return self.config.get(key, interface)\n\n def get_api_version(self, service_type):\n key = _make_key('api_version', service_type)\n return self.config.get(key, None)\n\n def get_service_type(self, service_type):\n key = _make_key('service_type', service_type)\n # Cinder did an evil thing where they defined a second service\n # type in the catalog. Of course, that's insane, so let's hide this\n # atrocity from the as-yet-unsullied eyes of our users.\n # Of course, if the user requests a volumev2, that structure should\n # still work.\n # What's even more amazing is that they did it AGAIN with cinder v3\n # And then I learned that mistral copied it.\n # TODO(shade) This should get removed when we have os-service-types\n # alias support landed in keystoneauth.\n if service_type in ('volume', 'block-storage'):\n vol_ver = self.get_api_version('volume')\n if vol_ver and vol_ver.startswith('2'):\n service_type = 'volumev2'\n elif vol_ver and vol_ver.startswith('3'):\n service_type = 'volumev3'\n elif service_type == 'workflow':\n wk_ver = self.get_api_version(service_type)\n if wk_ver and wk_ver.startswith('2'):\n service_type = 'workflowv2'\n return self.config.get(key, service_type)\n\n def get_service_name(self, service_type):\n key = _make_key('service_name', service_type)\n return self.config.get(key, None)\n\n def get_endpoint(self, service_type):\n key = _make_key('endpoint_override', service_type)\n old_key = _make_key('endpoint', service_type)\n return self.config.get(key, self.config.get(old_key, None))\n\n @property\n def prefer_ipv6(self):\n return not self._force_ipv4\n\n @property\n def force_ipv4(self):\n return self._force_ipv4\n\n def get_auth(self):\n \"\"\"Return a keystoneauth plugin from the auth credentials.\"\"\"\n return self._auth\n\n def get_session(self):\n \"\"\"Return a keystoneauth session based on the auth credentials.\"\"\"\n if self._keystone_session is None:\n if not self._auth:\n raise exceptions.ConfigException(\n \"Problem with auth parameters\")\n (verify, cert) = self.get_requests_verify_args()\n # Turn off urllib3 warnings about insecure certs if we have\n # explicitly configured requests to tell it we do not want\n # cert verification\n if not verify:\n self.log.debug(\n \"Turning off SSL warnings for {full_name}\"\n \" since verify=False\".format(full_name=self.full_name))\n requestsexceptions.squelch_warnings(insecure_requests=not verify)\n self._keystone_session = self._session_constructor(\n auth=self._auth,\n verify=verify,\n cert=cert,\n timeout=self.config['api_timeout'],\n discovery_cache=self._discovery_cache)\n if hasattr(self._keystone_session, 'additional_user_agent'):\n self._keystone_session.additional_user_agent.append(\n ('openstacksdk', openstack_version.__version__))\n # Using old 
keystoneauth with new os-client-config fails if\n # we pass in app_name and app_version. Those are not essential,\n # nor a reason to bump our minimum, so just test for the session\n # having the attribute post creation and set them then.\n if hasattr(self._keystone_session, 'app_name'):\n self._keystone_session.app_name = self._app_name\n if hasattr(self._keystone_session, 'app_version'):\n self._keystone_session.app_version = self._app_version\n return self._keystone_session\n\n def get_service_catalog(self):\n \"\"\"Helper method to grab the service catalog.\"\"\"\n return self._auth.get_access(self.get_session()).service_catalog\n\n def _get_version_args(self, service_key, version):\n \"\"\"Translate OCC version args to those needed by ksa adapter.\n\n If no version is requested explicitly and we have a configured version,\n set the version parameter and let ksa deal with expanding that to\n min=ver.0, max=ver.latest.\n\n If version is set, pass it through.\n\n If version is not set and we don't have a configured version, default\n to latest.\n \"\"\"\n if version == 'latest':\n return None, None, 'latest'\n if not version:\n version = self.get_api_version(service_key)\n # Octavia doens't have a version discovery document. Hard-code an\n # exception to this logic for now.\n if not version and service_key not in ('load-balancer',):\n return None, None, 'latest'\n return version, None, None\n\n def get_session_client(\n self, service_key, version=None, constructor=adapter.Adapter,\n **kwargs):\n \"\"\"Return a prepped requests adapter for a given service.\n\n This is useful for making direct requests calls against a\n 'mounted' endpoint. That is, if you do:\n\n client = get_session_client('compute')\n\n then you can do:\n\n client.get('/flavors')\n\n and it will work like you think.\n \"\"\"\n (version, min_version, max_version) = self._get_version_args(\n service_key, version)\n\n return constructor(\n session=self.get_session(),\n service_type=self.get_service_type(service_key),\n service_name=self.get_service_name(service_key),\n interface=self.get_interface(service_key),\n region_name=self.region_name,\n version=version,\n min_version=min_version,\n max_version=max_version,\n **kwargs)\n\n def _get_highest_endpoint(self, service_types, kwargs):\n session = self.get_session()\n for service_type in service_types:\n kwargs['service_type'] = service_type\n try:\n # Return the highest version we find that matches\n # the request\n return session.get_endpoint(**kwargs)\n except keystoneauth1.exceptions.catalog.EndpointNotFound:\n pass\n\n def get_session_endpoint(\n self, service_key, min_version=None, max_version=None):\n \"\"\"Return the endpoint from config or the catalog.\n\n If a configuration lists an explicit endpoint for a service,\n return that. 
Otherwise, fetch the service catalog from the\n keystone session and return the appropriate endpoint.\n\n :param service_key: Generic key for service, such as 'compute' or\n 'network'\n\n \"\"\"\n\n override_endpoint = self.get_endpoint(service_key)\n if override_endpoint:\n return override_endpoint\n endpoint = None\n kwargs = {\n 'service_name': self.get_service_name(service_key),\n 'region_name': self.region_name\n }\n kwargs['interface'] = self.get_interface(service_key)\n if service_key == 'volume' and not self.get_api_version('volume'):\n # If we don't have a configured cinder version, we can't know\n # to request a different service_type\n min_version = float(min_version or 1)\n max_version = float(max_version or 3)\n min_major = math.trunc(float(min_version))\n max_major = math.trunc(float(max_version))\n versions = range(int(max_major) + 1, int(min_major), -1)\n service_types = []\n for version in versions:\n if version == 1:\n service_types.append('volume')\n else:\n service_types.append('volumev{v}'.format(v=version))\n else:\n service_types = [self.get_service_type(service_key)]\n endpoint = self._get_highest_endpoint(service_types, kwargs)\n if not endpoint:\n self.log.warning(\n \"Keystone catalog entry not found (\"\n \"service_type=%s,service_name=%s\"\n \"interface=%s,region_name=%s)\",\n service_key,\n kwargs['service_name'],\n kwargs['interface'],\n kwargs['region_name'])\n return endpoint\n\n def get_cache_expiration_time(self):\n if self._openstack_config:\n return self._openstack_config.get_cache_expiration_time()\n return 0\n\n def get_cache_path(self):\n if self._openstack_config:\n return self._openstack_config.get_cache_path()\n\n def get_cache_class(self):\n if self._openstack_config:\n return self._openstack_config.get_cache_class()\n return 'dogpile.cache.null'\n\n def get_cache_arguments(self):\n if self._openstack_config:\n return self._openstack_config.get_cache_arguments()\n\n def get_cache_expiration(self):\n if self._openstack_config:\n return self._openstack_config.get_cache_expiration()\n\n def get_cache_resource_expiration(self, resource, default=None):\n \"\"\"Get expiration time for a resource\n\n :param resource: Name of the resource type\n :param default: Default value to return if not found (optional,\n defaults to None)\n\n :returns: Expiration time for the resource type as float or default\n \"\"\"\n if self._openstack_config:\n expiration = self._openstack_config.get_cache_expiration()\n if resource not in expiration:\n return default\n return float(expiration[resource])\n\n def requires_floating_ip(self):\n \"\"\"Return whether or not this cloud requires floating ips.\n\n\n :returns: True of False if know, None if discovery is needed.\n If requires_floating_ip is not configured but the cloud is\n known to not provide floating ips, will return False.\n \"\"\"\n if self.config['floating_ip_source'] == \"None\":\n return False\n return self.config.get('requires_floating_ip')\n\n def get_external_networks(self):\n \"\"\"Get list of network names for external networks.\"\"\"\n return [\n net['name'] for net in self.config.get('networks', [])\n if net['routes_externally']]\n\n def get_external_ipv4_networks(self):\n \"\"\"Get list of network names for external IPv4 networks.\"\"\"\n return [\n net['name'] for net in self.config.get('networks', [])\n if net['routes_ipv4_externally']]\n\n def get_external_ipv6_networks(self):\n \"\"\"Get list of network names for external IPv6 networks.\"\"\"\n return [\n net['name'] for net in 
self.config.get('networks', [])\n if net['routes_ipv6_externally']]\n\n def get_internal_networks(self):\n \"\"\"Get list of network names for internal networks.\"\"\"\n return [\n net['name'] for net in self.config.get('networks', [])\n if not net['routes_externally']]\n\n def get_internal_ipv4_networks(self):\n \"\"\"Get list of network names for internal IPv4 networks.\"\"\"\n return [\n net['name'] for net in self.config.get('networks', [])\n if not net['routes_ipv4_externally']]\n\n def get_internal_ipv6_networks(self):\n \"\"\"Get list of network names for internal IPv6 networks.\"\"\"\n return [\n net['name'] for net in self.config.get('networks', [])\n if not net['routes_ipv6_externally']]\n\n def get_default_network(self):\n \"\"\"Get network used for default interactions.\"\"\"\n for net in self.config.get('networks', []):\n if net['default_interface']:\n return net['name']\n return None\n\n def get_nat_destination(self):\n \"\"\"Get network used for NAT destination.\"\"\"\n for net in self.config.get('networks', []):\n if net['nat_destination']:\n return net['name']\n return None\n\n def get_nat_source(self):\n \"\"\"Get network used for NAT source.\"\"\"\n for net in self.config.get('networks', []):\n if net.get('nat_source'):\n return net['name']\n return None\n\n def get_client_config(self, name=None, defaults=None):\n \"\"\"Get config settings for a named client.\n\n Settings will also be looked for in a section called 'client'.\n If settings are found in both, they will be merged with the settings\n from the named section winning over the settings from client section,\n and both winning over provided defaults.\n\n :param string name:\n Name of the config section to look for.\n :param dict defaults:\n Default settings to use.\n\n :returns:\n A dict containing merged settings from the named section, the\n client section and the defaults.\n \"\"\"\n if not self._openstack_config:\n return defaults or {}\n return self._openstack_config.get_extra_config(\n name, self._openstack_config.get_extra_config('client', defaults))\n","repo_name":"AntObr/credit-to-customer","sub_path":"env/lib/python2.7/site-packages/openstack/config/cloud_region.py","file_name":"cloud_region.py","file_ext":"py","file_size_in_byte":19324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21847815439","text":"import pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nfrom statistics import mean\nimport math\nfrom scipy.fftpack import fft,ifft,rfft\nfrom sklearn.utils import shuffle\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import KFold, RepeatedKFold\nfrom joblib import dump, load\n\ndef get_meal_data(insulin,cmg,fileIndex):\n mealDataTimestamp = []\n for ind in insulin.index:\n if not math.isnan(insulin['BWZ Carb Input (grams)'][ind]) and insulin['BWZ Carb Input (grams)'][ind] != 0:\n date = insulin['Date'][ind] \n time = insulin['Time'][ind]\n timestamp = datetime.strptime(date+'T'+time,\"%m/%d/%YT%H:%M:%S\") if fileIndex == 1 else datetime.strptime(date.split(' ')[0]+'T'+time,\"%Y-%m-%dT%H:%M:%S\")\n if mealDataTimestamp:\n previous = mealDataTimestamp[-1]\n if (previous + timedelta(hours=2)) > timestamp:\n# print(previous, '::', timestamp, '::', insulin['BWZ Carb Input (grams)'][ind])\n mealDataTimestamp.pop()\n mealDataTimestamp.append(timestamp)\n \n glucoseMatrix = []\n for tm in mealDataTimestamp:\n for ind in cmg.index:\n date = cmg['Date'][ind]\n time = cmg['Time'][ind]\n 
timestamp = datetime.strptime(date+'T'+time,\"%m/%d/%YT%H:%M:%S\") if fileIndex == 1 else datetime.strptime(date.split(' ')[0]+'T'+time,\"%Y-%m-%dT%H:%M:%S\")\n if timestamp > tm:\n diff = (timestamp - tm).seconds / 60\n# print(tm, '::', timestamp)\n if diff > 5:\n# print('Breaking ',tm, '::', timestamp)\n break\n glucose = []\n for i in range(ind+5,ind-25,-1):\n glucose.append(cmg['Sensor Glucose (mg/dL)'][i])\n glucoseMatrix.append(glucose) \n break\n \n meal_list = []\n for l in glucoseMatrix:\n if not np.isnan(l).any():\n meal_list.append(l)\n return pd.DataFrame (meal_list)\n\ndef get_no_meal_data(insulin,cmg,fileIndex):\n noMealDataTimestamp = []\n for ind in insulin.index:\n if math.isnan(insulin['BWZ Carb Input (grams)'][ind]) or insulin['BWZ Carb Input (grams)'][ind] == 0:\n date = insulin['Date'][ind]\n time = insulin['Time'][ind]\n timestamp = datetime.strptime(date+'T'+time,\"%m/%d/%YT%H:%M:%S\") if fileIndex == 1 else datetime.strptime(date.split(' ')[0]+'T'+time,\"%Y-%m-%dT%H:%M:%S\")\n if timestamp not in noMealDataTimestamp:\n noMealDataTimestamp.append(timestamp)\n noMealDataTimestamp \n \n mealDataTimestampNP = []\n\n for ind in insulin.index:\n if not math.isnan(insulin['BWZ Carb Input (grams)'][ind]) and insulin['BWZ Carb Input (grams)'][ind] != 0:\n date = insulin['Date'][ind]\n time = insulin['Time'][ind]\n timestamp = datetime.strptime(date+'T'+time,\"%m/%d/%YT%H:%M:%S\") if fileIndex == 1 else datetime.strptime(date.split(' ')[0]+'T'+time,\"%Y-%m-%dT%H:%M:%S\")\n mealDataTimestampNP.append(timestamp)\n remove = []\n for tm in mealDataTimestampNP:\n for tn in noMealDataTimestamp:\n if tm <= tn <= (tm + timedelta(hours=2)):\n remove.append(tn)\n \n noMealDataTimestamp = [e for e in noMealDataTimestamp if e not in remove]\n\n remove = []\n for i in range(len(noMealDataTimestamp)):\n if noMealDataTimestamp[i] not in remove:\n for j in range(i+1, len(noMealDataTimestamp)):\n if noMealDataTimestamp[i] <= noMealDataTimestamp[j] <= noMealDataTimestamp[i]+ timedelta(hours=2):\n remove.append(noMealDataTimestamp[j])\n\n noMealDataTimestamp = [e for e in noMealDataTimestamp if e not in remove]\n \n glucoseMatrix = []\n \n for tm in noMealDataTimestamp:\n for ind in cmg.index:\n date = cmg['Date'][ind].split(' ')[0]\n time = cmg['Time'][ind]\n timestamp = datetime.strptime(date+'T'+time,\"%m/%d/%YT%H:%M:%S\") if fileIndex == 1 else datetime.strptime(date.split(' ')[0]+'T'+time,\"%Y-%m-%dT%H:%M:%S\")\n\n if timestamp > tm:\n diff = (timestamp - tm).seconds / 60\n # print(tm, '::', timestamp)\n if diff > 5:\n # print('Breaking ',tm, '::', timestamp)\n break\n glucose = []\n for i in range(ind,ind-25,-1):\n if i >= 0:\n glucose.append(cmg['Sensor Glucose (mg/dL)'][i])\n glucoseMatrix.append(glucose) \n break\n no_meal_list = []\n for l in glucoseMatrix:\n if not np.isnan(l).any():\n no_meal_list.append(l)\n \n return pd.DataFrame(no_meal_list)\n\ndef meal_features(meals):\n index = meals.isna().sum(axis=1).replace(0,np.nan).dropna().where(lambda x:x>6).dropna().index\n \n mealData = meals.drop(meals.index[index]).reset_index().drop(columns='index')\n \n indexDrop = mealData.isna().sum(axis=1).replace(0,np.nan).dropna().index\n mealData = mealData.drop(meals.index[indexDrop]).reset_index().drop(columns='index')\n \n mealData = mealData.dropna().reset_index().drop(columns='index')\n \n powerFirstMax = []\n indexFirstMax = []\n powerSecondMax = []\n indexSecondMax = []\n powerThirdMax = []\n \n for i in range(len(mealData)):\n array = 
abs(rfft(mealData.iloc[:,0:30].iloc[i].values.tolist())).tolist()\n \n sortedArray = abs(rfft(mealData.iloc[:,0:30].iloc[i].values.tolist())).tolist()\n sortedArray.sort()\n \n powerFirstMax.append(sortedArray[-2])\n powerSecondMax.append(sortedArray[-3])\n powerThirdMax.append(sortedArray[-4])\n \n indexFirstMax.append(array.index(sortedArray[-2]))\n indexSecondMax.append(array.index(sortedArray[-3]))\n \n mealFeatureMatrix = pd.DataFrame()\n mealFeatureMatrix['power_second_max'] = powerSecondMax\n mealFeatureMatrix['power_third_max'] = powerThirdMax\n \n tm = mealData.iloc[:,22:25].idxmin(axis=1)\n maximum = mealData.iloc[:,5:19].idxmax(axis=1)\n \n secondDifferentialData = []\n standardDeviation = []\n \n for i in range(len(mealData)):\n secondDifferentialData.append(np.diff(np.diff(mealData.iloc[:,maximum[i]:tm[i]].iloc[i].tolist())).max())\n standardDeviation.append(np.std(mealData.iloc[i]))\n \n mealFeatureMatrix['second_differential']=secondDifferentialData\n mealFeatureMatrix['standard_deviation']=standardDeviation\n return mealFeatureMatrix\n\ndef no_meal_features(noMeals):\n index = noMeals.isna().sum(axis=1).replace(0,np.nan).dropna().where(lambda x:x>5).dropna().index\n \n noMealData=noMeals.drop(noMeals.index[index]).reset_index().drop(columns='index')\n \n indexDrop=noMealData.isna().sum(axis=1).replace(0,np.nan).dropna().index\n noMealData=noMealData.drop(noMealData.index[indexDrop]).reset_index().drop(columns='index')\n \n powerFirstMax=[]\n indexFirstMax=[]\n powerSecondMax=[]\n indexSecondMax=[]\n powerThirdMax=[]\n for i in range(len(noMealData)):\n array=abs(rfft(noMealData.iloc[:,0:24].iloc[i].values.tolist())).tolist()\n sortedArray=abs(rfft(noMealData.iloc[:,0:24].iloc[i].values.tolist())).tolist()\n sortedArray.sort()\n powerFirstMax.append(sortedArray[-2])\n powerSecondMax.append(sortedArray[-3])\n powerThirdMax.append(sortedArray[-4])\n indexFirstMax.append(array.index(sortedArray[-2]))\n indexSecondMax.append(array.index(sortedArray[-3]))\n \n noMealFeatureMatrix=pd.DataFrame()\n noMealFeatureMatrix['power_second_max']=powerSecondMax\n noMealFeatureMatrix['power_third_max']=powerThirdMax\n \n secondDifferentialData=[]\n standardDeviation=[]\n for i in range(len(noMealData)):\n secondDifferentialData.append(np.diff(np.diff(noMealData.iloc[:,0:24].iloc[i].tolist())).max())\n standardDeviation.append(np.std(noMealData.iloc[i]))\n \n noMealFeatureMatrix['second_differential']=secondDifferentialData\n noMealFeatureMatrix['standard_deviation']=standardDeviation\n return noMealFeatureMatrix\n\ndef main_function():\n print('Process Stared........')\n \n insulin_data = pd.read_csv('../data/InsulinData.csv',low_memory=False,usecols=['Date','Time','BWZ Carb Input (grams)'])\n insulin_data1 = pd.read_csv('../data/Insulin_patient2.csv',low_memory=False,usecols=['Date','Time','BWZ Carb Input (grams)'])\n\n cgm_data = pd.read_csv('../data/CGMData.csv',low_memory=False,usecols=['Date','Time','Sensor Glucose (mg/dL)'])\n cgm_data1 = pd.read_csv('../data/CGM_patient2.csv',low_memory=False,usecols=['Date','Time','Sensor Glucose (mg/dL)'])\n\n\n insulin_data = insulin_data.reindex(index=insulin_data.index[::-1])\n insulin_data1 = insulin_data1.reindex(index=insulin_data1.index[::-1])\n\n cgm_data = cgm_data.reindex(index=cgm_data.index[::-1])\n cgm_data1 = cgm_data1.reindex(index=cgm_data1.index[::-1])\n\n meal1 = get_meal_data(insulin_data,cgm_data,1)\n meal2 = get_meal_data(insulin_data1,cgm_data1,2)\n meals = pd.concat([meal1, meal2])\n\n print('Meals data extracted...')\n\n 
no_meal1 = get_no_meal_data(insulin_data,cgm_data,1)\n no_meal2 = get_no_meal_data(insulin_data1,cgm_data1,2)\n no_meals = pd.concat([no_meal1, no_meal2])\n\n print('No Meals data extracted...')\n\n mealFeatureMatrix = meal_features(meals)\n noMealFeatureMatrix = no_meal_features(no_meals)\n \n mealFeatureMatrix['label']=1\n noMealFeatureMatrix['label']=0\n \n totalData=pd.concat([mealFeatureMatrix,noMealFeatureMatrix]).reset_index().drop(columns='index')\n \n dataset=shuffle(totalData,random_state=1).reset_index().drop(columns='index')\n \n kfold = KFold(n_splits=10,shuffle=False)\n \n unLabeledData=dataset.drop(columns='label')\n\n scores = []\n model=DecisionTreeClassifier(criterion=\"entropy\")\n \n for train_index, test_index in kfold.split(unLabeledData):\n X_train,X_test,y_train,y_test = unLabeledData.loc[train_index],unLabeledData.loc[test_index],dataset.label.loc[train_index],dataset.label.loc[test_index]\n model.fit(X_train,y_train)\n scores.append(model.score(X_test,y_test))\n\n classifier=DecisionTreeClassifier(criterion='entropy')\n X,y= unLabeledData, dataset['label']\n classifier.fit(X,y)\n dump(classifier, 'trained.pickle')\n print('Process Ended........')\n\nif __name__ == \"__main__\":\n main_function()","repo_name":"vrcoder70/Glucose-Analysis-Meal-Detection-and-Meal-Clustering-and-Classification","sub_path":"Meal Detection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33513328723","text":"\"\"\"\nThe multi-processed data generator for the SBAC project. Ultimately, this version relies heavily on the regular\ndata generator, but attempts to achieve performance improvements related to using the multiprocess library from Python.\n\nCommand line arguments:\n --team TEAM_NAME: Name of team to generate data for (expects sonics or arkanoids)\n --state_name STATE_NAME: Name of state to generate data for (defaults to 'North Carolina')\n --state_code STATE_CODE: Code of state to generate data for (defaults to 'NC')\n --state_type STATE_TYPE_NAME: Name of state type to generate data for (expects devel, typical_1, california)\n --process_count: The number of processes to use to generate data (defaults to 2)\n --pg_out: Output data to a PostgreSQL database\n --star_out: Output data to star schema CSV\n --lz_out: Output data to landing zone CSV and JSON\n\n If using PostgreSQL output:\n --host: Host for PostgreSQL server\n --schema: Schema for PostgreSQL database\n\n@author: nestep\n@date: March 22, 2014\n\"\"\"\n\nimport argparse\nimport datetime\nimport multiprocessing\nimport os\nimport random\nimport traceback\n\nimport generate_data as generate_data\nimport data_generation.config.cfg as sbac_in_config\nimport sbac_data_generation.generators.hierarchy as sbac_hier_gen\n\nfrom sbac_data_generation.util.id_gen import IDGen\n\nDISTRICT_TOTAL_COUNT = 0\nDISTRICT_COMPLETE_COUNT = 0\nTOTAL_STUDENT_AVERAGE = 0\nTOTAL_STUDENT_UNIQUE = 0\nCALLBACK_LOCK = multiprocessing.Lock()\n\n\ndef generate_state_district_hierarchy(id_gen):\n \"\"\"\n Create the the states and districts to generate data for.\n\n @param id_gen: ID generator\n @returns: A list of tuples suitable to be fed into a worker\n \"\"\"\n global DISTRICT_TOTAL_COUNT\n district_tuples = []\n gen_il = generate_data.WRITE_IL\n\n # Start with states\n for state_cfg in generate_data.STATES:\n # Create the state object\n state = sbac_hier_gen.generate_state(state_cfg['type'], state_cfg['name'], 
state_cfg['code'], id_gen)\n print('Created State: %s' % state.name)\n\n # Grab the assessment rates by subjects\n asmt_skip_rates_by_subject = state.config['subject_skip_percentages']\n\n # Create the assessment objects\n assessments = {}\n for year in generate_data.ASMT_YEARS:\n for subject in sbac_in_config.SUBJECTS:\n for grade in generate_data.GRADES_OF_CONCERN:\n # Create the summative assessment\n asmt_key_summ = str(year) + 'summative' + str(grade) + subject\n assessments[asmt_key_summ] = generate_data.create_assessment_object('SUMMATIVE', 'Spring', year,\n subject, id_gen,\n generate_item_level=gen_il)\n\n # Create the interim assessments\n for period in generate_data.INTERIM_ASMT_PERIODS:\n asmt_key_intrm = str(year) + 'interim' + period + str(grade) + subject\n asmt_intrm = generate_data.create_assessment_object('INTERIM COMPREHENSIVE', period, year,\n subject, id_gen, generate_item_level=gen_il)\n assessments[asmt_key_intrm] = asmt_intrm\n\n # Build the districts\n for district_type, dist_type_count in state.config['district_types_and_counts']:\n for _ in range(dist_type_count):\n # Create the district\n district = sbac_hier_gen.generate_district(district_type, state, id_gen)\n district.state = state\n print(' Created District: %s (%s District)' % (district.name, district.type_str))\n district_tuples.append((state, district, assessments, asmt_skip_rates_by_subject))\n DISTRICT_TOTAL_COUNT += 1\n\n # Return the districts\n return district_tuples\n\n\ndef district_pool_worker(state, district, assessments, skip_rates, id_lock, id_mdict, pg_host, pg_schema, passwd=None):\n \"\"\"\n Process a single district. This is basically a wrapper for generate_data.generate_district_date that is designed to\n be called through a multiprocessor.Pool construct.\n\n @param state: The state the district belongs to\n @param district: The district to generate data for\n @param assessments: The assessments to potentially generate\n @param skip_rates: Rates (changes) to skip assessments\n @param id_lock: Monitored lock for ID generator\n @param id_mdict: Monitored dictionary for ID generator\n @param pg_host: PostgreSQL hostname\n @param pg_schema: PostgreSQL schema\n \"\"\"\n id_gen = IDGen(id_lock, id_mdict)\n\n # Note that we are processing\n print('Starting to generate data for district %s (%s District)' % (district.name, district.type_str))\n\n # Connect to Postgres\n if generate_data.WRITE_PG:\n generate_data.DB_CONN = generate_data.connect_to_postgres(pg_host, 5432, 'edware', 'edware', passwd)\n generate_data.DB_SCHEMA = pg_schema\n\n # Start the processing\n dist_tstart = datetime.datetime.now()\n avg_count = 0\n unique_count = 0\n try:\n reg_sys_guid = random.choice(generate_data.REGISTRATION_SYSTEM_GUIDS)\n avg_count, unique_count = generate_data.generate_district_data(state, district, reg_sys_guid, assessments,\n skip_rates, id_gen)\n except Exception as ex:\n print('%s' % ex)\n traceback.print_exc()\n\n # Close the open DB connection\n if generate_data.WRITE_PG:\n generate_data.DB_CONN.commit()\n generate_data.DB_CONN.close()\n\n # Get the run time and report back\n dist_tend = datetime.datetime.now()\n return district.name, avg_count, unique_count, (dist_tend - dist_tstart)\n\n\ndef pool_callback(arg_tpl):\n global DISTRICT_COMPLETE_COUNT, TOTAL_STUDENT_AVERAGE, TOTAL_STUDENT_UNIQUE\n district_name, student_avg_count, student_unique_count, run_time = arg_tpl\n with CALLBACK_LOCK:\n DISTRICT_COMPLETE_COUNT += 1\n TOTAL_STUDENT_AVERAGE += student_avg_count\n TOTAL_STUDENT_UNIQUE += 
student_unique_count\n format_tpl = (district_name, student_avg_count, student_unique_count, run_time, DISTRICT_COMPLETE_COUNT,\n DISTRICT_TOTAL_COUNT)\n print('District %s generated with average of %i students/year and %i unique in %s (%i of %i)' % format_tpl)\n\n\nif __name__ == '__main__':\n # Argument parsing for task-specific arguments\n parser = argparse.ArgumentParser(description='SBAC data generation task.')\n parser.add_argument('-t', '--type', dest='gen_type', action='store', default='regular',\n help='Specify the type of data generation run to perform (regular, udl)',\n required=False)\n parser.add_argument('-sn', '--state_name', dest='state_name', action='store', default='North Carolina',\n help='Specify the name of the state to generate data for (default=North Carolina)',\n required=False)\n parser.add_argument('-sc', '--state_code', dest='state_code', action='store', default='NC',\n help='Specify the code of the state to generate data for (default=NC)',\n required=False)\n parser.add_argument('-st', '--state_type', dest='state_type', action='store', default='devel',\n help='Specify the type of state to generate data for (devel (default), typical_1, california, udl_test)',\n required=False)\n parser.add_argument('-pc', '--process_count', dest='process_count', action='store', default='2',\n help='Specific the number of sub-processes to spawn (default=2)', required=False)\n parser.add_argument('-o', '--out_dir', dest='out_dir', action='store', default='out',\n help='Specify the root directory for writing output files to',\n required=False)\n parser.add_argument('-ho', '--host', dest='pg_host', action='store', default='localhost',\n help='The host for the PostgreSQL server to write data to')\n parser.add_argument('-p', '--passwd', dest='passwd', action='store', default='',\n help='The passwd for the PostgreSQL server to write data to')\n parser.add_argument('-s', '--schema', dest='pg_schema', action='store', default='dg_data',\n help='The schema for the PostgreSQL database to write data to')\n parser.add_argument('-po', '--pg_out', dest='pg_out', action='store_true',\n help='Output data to PostgreSQL database', required=False)\n parser.add_argument('-so', '--star_out', dest='star_out', action='store_true',\n help='Output data to star schema CSV', required=False)\n parser.add_argument('-lo', '--lz_out', dest='lz_out', action='store_true',\n help='Output data to landing zone CSV and JSON', required=False)\n parser.add_argument('-io', '--il_out', dest='il_out', action='store_true', help='Output item-level data',\n required=False)\n args, unknown = parser.parse_known_args()\n\n # Set team-specific configuration options\n generate_data.assign_configuration_options(args.gen_type, args.state_name, args.state_code, args.state_type)\n\n # Save output flags\n generate_data.WRITE_PG = args.pg_out\n generate_data.WRITE_STAR = args.star_out\n generate_data.WRITE_LZ = args.lz_out\n generate_data.WRITE_IL = args.il_out\n\n # Save output directory\n OUT_PATH_ROOT = args.out_dir\n\n # Validate at least one form of output\n if not generate_data.WRITE_PG and not generate_data.WRITE_STAR and not generate_data.WRITE_LZ:\n print('Please specify at least one output format')\n print(' --pg_out Output to PostgreSQL')\n print(' --star_out Output star schema CSV')\n print(' --lz_out Output landing zone CSV and JSON')\n exit()\n\n # Record current (start) time\n tstart = datetime.datetime.now()\n\n # Verify output directory exists\n if not os.path.exists(generate_data.OUT_PATH_ROOT):\n 
os.makedirs(generate_data.OUT_PATH_ROOT)\n\n # Clean output directory\n for file in os.listdir(generate_data.OUT_PATH_ROOT):\n file_path = os.path.join(generate_data.OUT_PATH_ROOT, file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except:\n pass\n\n # Connect to Postgres\n if generate_data.WRITE_PG:\n generate_data.DB_CONN = generate_data.connect_to_postgres(args.pg_host, 5432, 'edware', 'edware', args.passwd)\n generate_data.DB_SCHEMA = args.pg_schema\n\n # Create the ID generator\n manager = multiprocessing.Manager()\n lock = manager.Lock()\n mdict = manager.dict()\n idg = IDGen(lock, mdict)\n\n # Prepare the output files\n generate_data.prepare_output_files()\n\n # Create the registration systems\n generate_data.REGISTRATION_SYSTEMS = generate_data.build_registration_systems(generate_data.YEARS, idg)\n for guid, _ in generate_data.REGISTRATION_SYSTEMS.items():\n generate_data.REGISTRATION_SYSTEM_GUIDS.append(guid)\n\n # Build the states and districts\n districts = generate_state_district_hierarchy(idg)\n\n # Close the open DB connection\n if generate_data.WRITE_PG:\n generate_data.DB_CONN.close()\n\n # Go\n print()\n print('Processing of districts beginning now')\n pool = multiprocessing.Pool(processes=int(args.process_count))\n for tpl in districts:\n pool.apply_async(district_pool_worker, args=(tpl[0], tpl[1], tpl[2], tpl[3], lock, mdict, args.pg_host,\n args.pg_schema), callback=pool_callback)\n pool.close()\n pool.join()\n\n # Record now current (end) time\n tend = datetime.datetime.now()\n\n # Print statistics\n print()\n print('Average students per year: %i' % TOTAL_STUDENT_AVERAGE)\n print('Total unique students: %i' % TOTAL_STUDENT_UNIQUE)\n print()\n print('Run began at: %s' % tstart)\n print('Run ended at: %s' % tend)\n print('Run run took: %s' % (tend - tstart))\n print()\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"data_gen/mp_generate_data.py","file_name":"mp_generate_data.py","file_ext":"py","file_size_in_byte":12358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"17869754712","text":"#!/usr/bin/env python\n#\n# Nova is a generic framework for finding instances\n# using python-novaclient\n\nimport click\nfrom collections import Counter\nfrom collections import defaultdict\nfrom glanceclient import client as glance_client\nimport humanize\nimport itertools\nfrom keystoneclient.v3 import client as keystone_client\nfrom nectarallocationclient import client as allocation_client\nfrom neutronclient.v2_0 import client as neutron_client\nfrom novaclient import client as nova_client\nfrom prettytable import PrettyTable\nimport socket\nfrom ssh2.session import Session\nfrom utils import get_session\nfrom utils import parse_nodes\nfrom utils import try_assign\n# from datetime import datetime, timedelta\n# from IPython import embed\n\n# global INSTAN\n# global nova\n# global glance\n# global keystone\n# global neutron\n# global allocation\n\n\ndef dict_product(dicts):\n \"\"\"Generate a Cartesian product of dictionary of lists.\n\n >>> list(dict_product(dict(number=[1,2], character='ab')))\n [{'character': 'a', 'number': 1},\n {'character': 'a', 'number': 2},\n {'character': 'b', 'number': 1},\n {'character': 'b', 'number': 2}]\n \"\"\"\n return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))\n\n\ndef _find_hosts_in_aggregates(nova, aggregates, hosts):\n \"\"\"Find hosts in aggregates.\"\"\"\n if not hosts:\n return set(host for aggregate in nova.aggregates.list()\n for 
host in aggregate.hosts\n if aggregate.name in aggregates)\n else:\n return set(host for aggregate in nova.aggregates.list()\n for host in aggregate.hosts\n if aggregate.name in aggregates\n if host in hosts)\n\n\ndef _find_hosts_in_azs(nova, zones, hosts):\n \"\"\"Find hosts in the following zones.\"\"\"\n if not hosts:\n return set(host for aggregate in nova.aggregates.list()\n for host in aggregate.hosts\n if u\"availability_zone\" in aggregate.metadata\n and aggregate.metadata[u\"availability_zone\"] in zones)\n else:\n return set(host for aggregate in nova.aggregates.list()\n for host in aggregate.hosts\n if u\"availability_zone\" in aggregate.metadata\n and aggregate.metadata[u\"availability_zone\"] in zones\n and host in hosts)\n\n\ndef _find_projects(allocation, allocation_home):\n \"\"\"Find project IDs with given parameters.\"\"\"\n # Approved allocations\n opts = {'parent_request__isnull': True,\n 'allocation_home': allocation_home}\n allocations = allocation.allocations.list(**opts)\n\n return set(a.project_id\n for a in allocations)\n\n\ndef _find_instances(nova, hosts, statuses, project_ids):\n \"\"\"Find instances from nova client from the zone and statuses.\"\"\"\n opts = {'all_tenants': [True]}\n if project_ids:\n opts['project_id'] = project_ids\n if statuses:\n opts['status'] = statuses\n if hosts:\n opts['host'] = hosts\n # Generate a cartesian product of the search parameters\n queries = dict_product(opts)\n for query in queries:\n for server in nova.servers.list(search_opts=query):\n yield server\n\n\ndef _list_instances(nova, glance, keystone, allocation, aggregate,\n availability_zone, host, status, projects,\n allocation_home, exclude_availability_zones,\n exclude_host, exclude_aggregates):\n \"\"\"List all instances.\"\"\"\n hosts = None if host is None else parse_nodes(host)\n\n if aggregate:\n hosts = _find_hosts_in_aggregates(nova, aggregate, hosts)\n\n if availability_zone:\n hosts = _find_hosts_in_azs(nova, availability_zone, hosts)\n\n if not projects and allocation_home:\n projects = _find_projects(allocation, allocation_home)\n\n exclude_hosts = [] if exclude_host is None else parse_nodes(exclude_host)\n\n if exclude_aggregates:\n exclude_hosts = _find_hosts_in_aggregates(nova, exclude_aggregates,\n exclude_hosts)\n if exclude_availability_zones:\n exclude_hosts = _find_hosts_in_azs(nova,\n exclude_availability_zones,\n exclude_hosts)\n\n output = []\n\n # Caching image and flavor\n USER_CACHE = {}\n PROJECT_CACHE = {}\n IMAGE_CACHE = {}\n FLAVOR_CACHE = {}\n # Augment the instance info with flavor and image\n for i in _find_instances(nova, hosts, status, projects):\n if i._info['OS-EXT-SRV-ATTR:host'] in exclude_hosts:\n continue\n flavor_id = i.flavor['id']\n if flavor_id not in FLAVOR_CACHE:\n FLAVOR_CACHE[flavor_id] = nova.flavors.get(flavor_id)._info\n i._info['flavor'] = FLAVOR_CACHE[flavor_id]\n if i.image:\n if i.image['id'] not in IMAGE_CACHE:\n image = try_assign(glance.images.get, i.image['id'])\n if not image:\n image = {'name': '',\n 'vcpus': '',\n 'ram': '',\n 'swap': '',\n 'disk': '',\n 'rxtx_factor': ''}\n IMAGE_CACHE[i.image['id']] = image\n i._info['image'] = IMAGE_CACHE[i.image['id']]\n else:\n i._info['image'] = {'name': '',\n 'vcpus': '',\n 'ram': '',\n 'swap': '',\n 'disk': '',\n 'rxtx_factor': ''}\n i._info['project_id'] = i._info['tenant_id']\n if i._info['project_id'] not in PROJECT_CACHE:\n PROJECT_CACHE[i._info['project_id']] = keystone.projects.get(\n i._info['project_id'])\n i._info['project'] = 
PROJECT_CACHE[i._info['project_id']]\n if i._info['user_id'] not in USER_CACHE:\n USER_CACHE[i._info['user_id']] = keystone.users.get(\n i._info['user_id'])\n i._info['user'] = USER_CACHE[i._info['user_id']]\n output.append(i)\n return output\n\n\ndef _render_table_instances(instances, columns, sortby):\n \"\"\"Render instances in table format.\"\"\"\n table = PrettyTable()\n fields = [col for col in columns]\n if not columns:\n fields = ['id', 'name', 'status', 'flavor',\n 'OS-EXT-SRV-ATTR:host', 'addresses']\n table.field_names = fields\n table.align = 'l'\n for ins in instances:\n row = []\n for f in fields:\n if 'flavor' in f:\n if f in 'flavor':\n row.append(ins._info['flavor']['name'])\n else:\n attr = f.split(':')[1]\n row.append(ins._info['flavor'][attr])\n elif f in 'project' or f in 'tenant':\n row.append(ins._info['project'].name)\n elif f in 'user':\n row.append(ins._info['user'].email)\n elif f in 'security_groups':\n row.append(', '.join(sc['name']\n for sc in ins._info['security_groups']))\n elif f in 'image':\n row.append(ins._info['image']['name'])\n elif f in 'addresses':\n output = [\"%s=%s\" % (k, ','.join(v))\n for k, v in ins.networks.items()]\n row.append(';'.join(output))\n else:\n row.append(ins._info[f])\n table.add_row(row)\n if sortby:\n table.sortby = sortby\n click.echo(table)\n\n\ndef _render_table_instance(instance):\n \"\"\"Render the instance in table format.\"\"\"\n table = PrettyTable(['Property', 'Value'])\n table.align = 'l'\n fields = ['OS-EXT-AZ:availability_zone', 'OS-EXT-SRV-ATTR:host',\n 'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-STS:task_state',\n 'OS-EXT-STS:vm_state', 'created', 'flavor:name',\n 'flavor:ram', 'flavor:vcpus', 'id', 'image',\n 'key_name', 'metadata', 'name',\n 'os-extended-volumes:volumes_attached', 'status']\n fields.extend(\"%s network\" % az\n for az in instance._info['addresses'].keys())\n fields.extend(['updated', 'user_id'])\n for f in fields:\n table.add_row([f, instance._info[f]])\n click.echo(table)\n\n\ndef _get_sg_protocol_port(rule):\n proto = rule['protocol']\n port_min = rule['port_range_min']\n port_max = rule['port_range_max']\n if proto in ('tcp', 'udp'):\n if (port_min and port_min == port_max):\n protocol_port = '%s/%s' % (port_min, proto)\n elif port_min:\n protocol_port = '%s-%s/%s' % (port_min, port_max, proto)\n else:\n protocol_port = proto\n elif proto == 'icmp':\n icmp_opts = []\n if port_min is not None:\n icmp_opts.append('type:%s' % port_min)\n if port_max is not None:\n icmp_opts.append('code:%s' % port_max)\n\n if icmp_opts:\n protocol_port = 'icmp (%s)' % ', '.join(icmp_opts)\n else:\n protocol_port = 'icmp'\n elif proto is not None:\n # port_range_min/max are not recognized for protocol\n # other than TCP, UDP and ICMP.\n protocol_port = proto\n else:\n protocol_port = None\n return protocol_port\n\n\ndef _format_sg_rule(rule):\n formatted = []\n for field in ['direction',\n 'ethertype',\n ('protocol_port', _get_sg_protocol_port),\n 'remote_ip_prefix',\n 'remote_group_id']:\n if isinstance(field, tuple):\n field, get_method = field\n data = get_method(rule)\n else:\n data = rule[field]\n if not data:\n continue\n if field in ('remote_ip_prefix', 'remote_group_id'):\n data = '%s: %s' % (field, data)\n formatted.append(data)\n return ', '.join(formatted)\n\n\ndef _format_sg_rules(secgroup):\n try:\n return '\\n'.join(sorted([_format_sg_rule(rule) for rule\n in secgroup['security_group_rules']]))\n except Exception:\n return ''\n\n\ndef _format_secgroups(security_groups):\n pt = PrettyTable(['ID', 
'Name', 'Rules'], caching=False)\n pt.align = 'l'\n for sg in security_groups['security_groups']:\n pt.add_row([sg['id'], sg['name'],\n _format_sg_rules(sg)])\n\n output = 'Security Groups:\\n'\n output += pt.get_string()\n return output\n\n\ndef generate_instance_sg_rules_info(neutron, instance_id):\n \"\"\"Generate instance security groups.\"\"\"\n # Security groups\n ports = neutron.list_ports(device_id=instance_id)\n sg_ids = [sg for sgs in (p['security_groups']\n for p in ports['ports']) for sg in sgs]\n security_groups = neutron.list_security_groups(id=sg_ids)\n\n return security_groups\n\n\ndef _recommend_nmap_command(ports, ip):\n \"\"\"Display recommendation for using nmap.\"\"\"\n click.echo(\"Globally opened ports found: %s\" % ports)\n command = \"nmap -sV -sT -sC -p %s %s\" % (','.join(ports),\n ip)\n return command\n\n\n@click.group()\ndef cli():\n \"\"\"Extend nova search functionality.\"\"\"\n pass\n\n\n@cli.group()\ndef security():\n \"\"\"Security related functionality.\"\"\"\n pass\n\n\n@cli.group()\ndef aggregate():\n \"\"\"Aggregate related functionality.\"\"\"\n pass\n\n\n@cli.command()\n@click.option('--host',\n help='Only list instances from HOST (eg. qh2-rcc[10-99])')\n@click.option('--exclude-host',\n help='Exclude instances from HOST(eg. qh2-rcc[10-99])')\n@click.option('-s', '--status', multiple=True,\n help='Only list instances with STATUS')\n@click.option('-az', '--availability-zone', multiple=True,\n help='Only list instances in AVAILABILITY ZONE')\n@click.option('--exclude-availability-zone', multiple=True,\n help='Exclude instances in AVAILABILITY_ZONE')\n@click.option('-ag', '--aggregate', multiple=True,\n help='Only list instances in AGGREGATE')\n@click.option('--exclude-aggregate', multiple=True,\n help='Exclude instances in AGGREGATE')\n@click.option('-lc', '--last-changed',\n help='Only list instances that changed since LAST-CHANGED')\n@click.option('--project', multiple=True,\n help='Only list instances from PROJECT_ID')\n@click.option('-c', '--column', multiple=True,\n help='Include the following columns when rendering table format',\n type=click.Choice(['OS-EXT-STS:task_state', 'addresses', 'links',\n 'image', 'OS-EXT-STS:vm_state',\n 'OS-EXT-SRV-ATTR:instance_name',\n 'OS-SRV-USG:launched_at', 'flavor', 'id',\n 'security_groups', 'user_id',\n 'OS-DCF:diskConfig', 'accessIPv4',\n 'accessIPv6', 'progress',\n 'OS-EXT-STS:power_state',\n 'OS-EXT-AZ:availability_zone',\n 'config_drive', 'status', 'updated', 'hostId',\n 'OS-EXT-SRV-ATTR:host',\n 'OS-SRV-USG:terminated_at',\n 'key_name',\n 'OS-EXT-SRV-ATTR:hypervisor_hostname',\n 'name', 'created', 'tenant_id',\n 'os-extended-volumes:volumes_attached',\n 'metadata', 'project_id', 'user',\n 'project', 'tenant', 'flavor:disk',\n 'flavor:rxtx_factor',\n 'flavor:ram', 'flavor:swap', 'flavor:vcpus']))\n@click.option('--sort-by',\n help='Sort by the selected column',\n type=click.Choice(['OS-EXT-STS:task_state', 'addresses', 'links',\n 'image', 'OS-EXT-STS:vm_state',\n 'OS-EXT-SRV-ATTR:instance_name',\n 'OS-SRV-USG:launched_at', 'flavor', 'id',\n 'security_groups', 'user_id',\n 'OS-DCF:diskConfig', 'accessIPv4',\n 'accessIPv6', 'progress',\n 'OS-EXT-STS:power_state',\n 'OS-EXT-AZ:availability_zone',\n 'config_drive', 'status', 'updated', 'hostId',\n 'OS-EXT-SRV-ATTR:host',\n 'OS-SRV-USG:terminated_at',\n 'key_name',\n 'OS-EXT-SRV-ATTR:hypervisor_hostname',\n 'name', 'created', 'tenant_id',\n 'os-extended-volumes:volumes_attached',\n 'metadata', 'project_id', 'user',\n 'project', 'tenant', 'flavor:disk',\n 
'flavor:rxtx_factor',\n 'flavor:ram', 'flavor:swap', 'flavor:vcpus']))\n@click.option('--allocation-home',\n help='Filter by an ALLOCATION_HOME')\ndef list(host=None, last_changed=None, availability_zone=None, column=None,\n aggregate=None, status=None, sort_by=None, project=None,\n allocation_home=None, exclude_availability_zone=None,\n exclude_host=None, exclude_aggregate=None):\n \"\"\"List all nova instances with given parameters.\"\"\"\n session = get_session()\n nova = nova_client.Client(2, session=session)\n glance = glance_client.Client(2, session=session)\n keystone = keystone_client.Client(session=session)\n allocation = allocation_client.Client(1, session=session)\n instances = _list_instances(nova, glance, keystone, allocation, aggregate,\n availability_zone, host, status, project,\n allocation_home, exclude_availability_zone,\n exclude_host, exclude_aggregate)\n # INSTAN = instances\n # embed()\n if not column:\n sort_by = 'OS-EXT-SRV-ATTR:host'\n _render_table_instances(instances, column, sort_by)\n\n\n@cli.command()\n@click.option('--host',\n help='Only list instances from HOST (eg. qh2-rcc[10-99])')\n@click.option('--exclude-host',\n help='Exclude instances from HOST(eg. qh2-rcc[10-99])')\n@click.option('-s', '--status', multiple=True,\n help='Only list instances with STATUS')\n@click.option('-az', '--availability-zone', multiple=True,\n help='Only list instances in AVAILABILITY ZONE')\n@click.option('--exclude-availability-zone', multiple=True,\n help='Exclude instances in AVAILABILITY_ZONE')\n@click.option('-ag', '--aggregate', multiple=True,\n help='Only list instances in AGGREGATE')\n@click.option('--exclude-aggregate', multiple=True,\n help='Exclude instances in AGGREGATE')\n@click.option('-lc', '--last-changed',\n help='Only target instances that changed since LAST-CHANGED')\n@click.option('--project', multiple=True,\n help='Only list instances from PROJECT_ID')\n@click.option('--allocation-home',\n help='Filter by an ALLOCATION_HOME')\n@click.option('--detail', is_flag=True,\n help='DETAIL the instance statistic by projects')\ndef stat(host=None, last_changed=None, availability_zone=None,\n aggregate=None, status=None, project=None,\n allocation_home=None, exclude_availability_zone=None,\n exclude_host=None, exclude_aggregate=None,\n detail=None):\n \"\"\"Gather statistic of all nova instances with given parameters.\"\"\"\n session = get_session()\n nova = nova_client.Client(2, session=session)\n glance = glance_client.Client(2, session=session)\n keystone = keystone_client.Client(session=session)\n allocation = allocation_client.Client(1, session=session)\n instances = _list_instances(nova, glance, keystone, allocation, aggregate,\n availability_zone, host, status, project,\n allocation_home, exclude_availability_zone,\n exclude_host, exclude_aggregate)\n # Summary table\n table = PrettyTable(['Name', 'Value'])\n data = {'instances': 0,\n 'vcpus': 0,\n 'ram': 0}\n table.align = 'l'\n projects_counter = Counter()\n # Detail table\n dt = PrettyTable(['Project', 'Instances', 'vCPUs', 'RAM'])\n projects = defaultdict(lambda: defaultdict(int))\n dt.align = 'l'\n for ins in instances:\n project_name = ins._info['project'].name\n projects[project_name]['instances'] += 1\n data['instances'] += 1\n projects_counter[ins._info['project'].name] += 1\n if not ins._info['flavor']['vcpus']:\n continue\n data['vcpus'] += int(ins._info['flavor']['vcpus'])\n data['ram'] += int(ins._info['flavor']['ram'])\n projects[project_name]['vcpus'] += int(ins._info['flavor']['vcpus'])\n 
projects[project_name]['ram'] += int(ins._info['flavor']['ram'])\n\n data['common'] = ',\\n'.join('%s: %d' % (k, v)\n for k, v in projects_counter.most_common(3))\n # Convert the data to bytes for humanization\n data['ram'] = data['ram'] * 1024 * 1024\n table.add_row(['Total instances', data['instances']])\n table.add_row(['vCPUs used', data['vcpus']])\n table.add_row(['RAM used', humanize.naturalsize(data['ram'], binary=True)])\n table.add_row(['Total projects affected', len(projects_counter.keys())])\n table.add_row(['Top projects affected', data['common']])\n click.echo(table)\n\n if detail:\n for name, p in projects.items():\n dt.add_row([name,\n p['instances'],\n p['vcpus'],\n humanize.naturalsize(p['ram'] * 1024 * 1024,\n binary=True)])\n click.echo(dt)\n\n\n@security.command(name='investigate')\n@click.argument('ip')\ndef security_investigate(ip):\n \"\"\"Investigate the server at IP address.\n\n Use ssh-agent key to find the VM bridge interface and test its\n SSH authentication method.\n \"\"\"\n session = get_session()\n nova = nova_client.Client(2, session=session)\n glance = glance_client.Client(2, session=session)\n neutron = neutron_client.Client(session=session)\n opts = {'all_tenants': True,\n 'ip': ip}\n instances = nova.servers.list(search_opts=opts, limit=1)\n if not instances:\n return\n instance = instances[0]\n\n target_mac_device = None\n target_host = instance._info['OS-EXT-SRV-ATTR:hypervisor_hostname']\n target_instance_name = instance._info['OS-EXT-SRV-ATTR:instance_name']\n # Augment the retrieved instance info\n if instance.image:\n image = try_assign(glance.images.get, instance.image['id'])\n if image:\n instance._info['image'] = image.name\n instance_flavor = nova.flavors.get(instance.flavor['id'])._info\n instance._info['flavor:name'] = instance_flavor['name']\n instance._info['flavor:vcpus'] = instance_flavor['vcpus']\n instance._info['flavor:ram'] = instance_flavor['ram']\n instance._info['os-extended-volumes:volumes_attached'] = \\\n ', '.join(v['id']\n for v in\n instance._info['os-extended-volumes:volumes_attached'])\n for az in instance._info['addresses'].keys():\n network_name = \"%s network\" % az\n network = instance._info['addresses'][az]\n for net in network:\n # Retrieve the device mac to find out the correct tap interface\n if net['addr'] in ip:\n target_mac_device = net['OS-EXT-IPS-MAC:mac_addr']\n break\n output = ', '.join(\"%s\" % net['addr'] for net in network)\n instance._info[network_name] = output\n # Render instance information\n _render_table_instance(instance)\n security_groups = generate_instance_sg_rules_info(neutron, instance.id)\n click.echo(_format_secgroups(security_groups))\n\n nmap_ports = []\n # Generate recommendation for nmap scan\n for sg in security_groups['security_groups']:\n for rule in sg['security_group_rules']:\n if (rule['direction'] in 'ingress'\n and rule['remote_ip_prefix']\n and rule['remote_ip_prefix'] in '0.0.0.0/0'\n and rule['protocol'] in ['tcp', 'udp']):\n if rule['port_range_min'] == rule['port_range_max']:\n nmap_ports.append(str(rule['port_range_min']))\n else:\n nmap_ports.append(\"%s-%s\" % (rule['port_range_min'],\n rule['port_range_max']))\n nmap_command = _recommend_nmap_command(nmap_ports, ip)\n\n ENABLED_PASSWORD_LOGIN = False\n # Probe the server ssh for password login\n if '22' in nmap_ports:\n vm_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n vm_sock.connect((ip, 22))\n ssh = Session()\n ssh.handshake(vm_sock)\n ssh_authlist = ssh.userauth_list('test')\n click.echo('SSH 
authentication method list: %s' % ssh_authlist)\n if 'password' in ssh_authlist:\n ENABLED_PASSWORD_LOGIN = True\n # Generate tcpdump\n host_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host_sock.connect((target_host, 22))\n\n ssh = Session()\n ssh.handshake(host_sock)\n ssh.agent_auth('root')\n channel = ssh.open_session()\n channel.execute(\"virsh dumpxml %s | grep %s -A3 | grep bridge\" %\n (target_instance_name,\n target_mac_device))\n size, data = channel.read()\n target_bridge_interface = data.split(\"'\")[1]\n\n tcpdump_command = \"ssh %s 'tcpdump -l -q -i %s not arp and not icmp'\" % \\\n (target_host,\n target_bridge_interface)\n\n # Print out all recommendation\n click.echo('RECOMMENDATION:')\n if ENABLED_PASSWORD_LOGIN:\n click.echo('* RED FLAG: VM has password login enabled!')\n click.echo(\"* Discover VM running services by running: %s\"\n % nmap_command)\n click.echo(\"* Discover VM network traffics by running: %s\"\n % tcpdump_command)\n\n\n@aggregate.command(name='move-host')\n@click.argument('aggregate')\n@click.argument('host')\ndef aggregate_move_host(aggregate, host):\n \"\"\"Move HOSTs from an aggregate to another.\n\n Automatically parse availability zone from aggregate prefix.\n \"\"\"\n hosts = parse_nodes(host)\n nova = nova_client.Client(2, session=get_session())\n # Find the hosts original aggregates, this is a roundabout way compare\n # to simply using nova.hypervisors.show(), however, there are significant\n # performance issue with using nova.hypervisors.show() at the moment.\n # This aggregates call ends up being significantly faster.\n aggregates = nova.aggregates.list()\n host_ag_mapping = {host: [] for host in hosts}\n target_ag = {}\n for ag in aggregates:\n if aggregate in ag.name:\n target_ag[ag.name] = ag\n for host in hosts:\n if host in ag.hosts:\n host_ag_mapping[host].append(ag)\n # Add and remove hosts\n for host in hosts:\n if not host_ag_mapping[host]:\n click.echo(\"ERROR: Unable to move host. %s not in any aggregate.\"\n % host)\n continue\n target = \"%s_%s\" % (host_ag_mapping[host][0].name.split('_')[0],\n aggregate)\n if target not in target_ag:\n click.echo(\"ERROR: %s not found.\" % target)\n continue\n for ag in host_ag_mapping[host]:\n nova.aggregates.remove_host(ag, host)\n click.echo(\"%s removed from [%s]\" % (host, ', '.join(ag.name\n for ag in host_ag_mapping[host])))\n click.echo(\"%s added to %s\" % (host, target))\n nova.aggregates.add_host(target_ag[target], host)\n","repo_name":"NeCTAR-RC/hivemind_contrib","sub_path":"melbourne-tools/scripts/nova.py","file_name":"nova.py","file_ext":"py","file_size_in_byte":26312,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"39627010018","text":"import numpy as np\nfrom functools import partial\nfrom paddle.io import DataLoader, BatchSampler\nfrom paddlenlp.data import Vocab, Pad, Stack\nfrom paddlenlp.datasets import load_dataset\nfrom paddlenlp.data.sampler import SamplerHelper\nfrom .indexed_dataset import MMapIndexedDataset\nfrom . 
import apply_bpe\nfrom .iter_dataloader import LanguagePairDataset, BufferedDataloader\nfrom .sampler import DistributedDynamicBatchSampler\nfrom sacremoses import MosesTokenizer, MosesTruecaser\n\ndef read(src_path, tgt_path, is_test=False, has_target=False):\n if is_test and not has_target:\n with open(src_path, 'r', encoding='utf-8') as src_f:\n for sample_id, src_line in enumerate(src_f.readlines()):\n src_line = src_line.strip()\n if not src_line:\n continue\n yield {'id': sample_id, 'src': src_line, 'tgt': ''}\n else:\n with open(src_path, 'r', encoding='utf-8') as src_f, open(tgt_path, 'r', encoding='utf-8') as tgt_f:\n for sample_id, (src_line, tgt_line) in enumerate(zip(src_f.readlines(), tgt_f.readlines())):\n src_line, tgt_line = src_line.strip(), tgt_line.strip()\n # if not src_line or not tgt_line:\n # continue\n yield {'id': sample_id, 'src': src_line, 'tgt': tgt_line}\n\n\ndef read_bin(src_path, tgt_path=None, is_test=False, has_target=False):\n if not has_target:\n src_data = MMapIndexedDataset(tgt_path)\n for sample_id, src_line in enumerate(src_data):\n yield [src_line, [], sample_id]\n\n src_data, tgt_data = MMapIndexedDataset(src_path), MMapIndexedDataset(tgt_path)\n for sample_id, (src_line, tgt_line) in enumerate(zip(src_data, tgt_data)):\n yield [src_line, tgt_line, sample_id]\n\n\ndef merge_pref_lang(pref, is_src, src_lang, tgt_lang, use_binary=False):\n filename = pref\n lang = src_lang.strip() if is_src else tgt_lang.strip()\n # if use_binary:\n # filename=filename+f\".{src_lang.strip()}-{tgt_lang.strip()}\"\n filename = f\"{filename}.{lang}\"\n return filename\n\n\ndef prep_dataset(conf, mode='train'):\n assert mode in ['train', 'dev', 'test']\n data_args = conf.data\n merge_file_fn = partial(merge_pref_lang, src_lang=data_args.src_lang, tgt_lang=data_args.tgt_lang,\n use_binary=data_args.use_binary)\n if mode == 'train':\n src_path = merge_file_fn(data_args.train_pref, is_src=True)\n tgt_path = merge_file_fn(data_args.train_pref, is_src=False)\n elif mode == 'dev':\n src_path = merge_file_fn(data_args.valid_pref, is_src=True)\n tgt_path = merge_file_fn(data_args.valid_pref, is_src=False)\n else:\n src_path = merge_file_fn(data_args.test_pref, is_src=True)\n tgt_path = merge_file_fn(data_args.test_pref, is_src=False)\n if data_args.lazy_load and mode == \"train\":\n dataset = LanguagePairDataset(src_path=src_path, tgt_path=tgt_path)\n else:\n read_fn = read if not data_args.use_binary else read_bin\n dataset = load_dataset(read_fn, src_path=src_path, tgt_path=tgt_path,\n is_test=(mode == 'test'),\n has_target=conf.data.has_target, lazy=False)\n\n return dataset\n\n\ndef prep_vocab(conf):\n data_args = conf.data\n merge_file_fn = partial(merge_pref_lang, src_lang=data_args.src_lang, tgt_lang=data_args.tgt_lang)\n src_vocab_fpath = merge_file_fn(data_args.vocab_pref, is_src=True)\n tgt_vocab_fpath = merge_file_fn(data_args.vocab_pref, is_src=False)\n src_vocab = Vocab.load_vocabulary(\n src_vocab_fpath,\n bos_token=data_args.special_token[0], # 顺序不能颠倒,默认词表顺序排列\n pad_token=data_args.special_token[1],\n eos_token=data_args.special_token[2],\n unk_token=data_args.special_token[3]\n )\n tgt_vocab = Vocab.load_vocabulary(\n tgt_vocab_fpath,\n bos_token=data_args.special_token[0],\n pad_token=data_args.special_token[1],\n eos_token=data_args.special_token[2],\n unk_token=data_args.special_token[3]\n )\n # 是否把vocab词数pad到factor倍数,可以加速训练\n conf.defrost()\n if data_args.pad_vocab:\n padding_vocab = (\n lambda x: (x + data_args.pad_factor - 1) // data_args.pad_factor * 
data_args.pad_factor\n )\n conf.model.src_vocab_size = padding_vocab(len(src_vocab))\n conf.model.tgt_vocab_size = padding_vocab(len(tgt_vocab))\n else:\n conf.model.src_vocab_size = len(src_vocab)\n conf.model.tgt_vocab_size = len(tgt_vocab)\n conf.freeze()\n return src_vocab, tgt_vocab\n\n\ndef convert_samples(sample, src_vocab, tgt_vocab, src_tok=None, tgt_tok=None, truecaser=None, src_bpe=None,\n tgt_bpe=None):\n sample_id = sample[\"id\"]\n source = sample[\"src\"]\n target = sample[\"tgt\"]\n if src_tok is not None: source = \" \".join(src_tok.tokenize(source))\n if tgt_tok is not None: target = \" \".join(tgt_tok.tokenize(target))\n\n if truecaser is not None: # 中文不用\n source = \" \".join(truecaser.truecase(source))\n target = \" \".join(truecaser.tokenize(target))\n\n if src_bpe is not None: source = src_bpe.process_line(source) # text->text\n if tgt_bpe is not None: target = tgt_bpe.process_line(target) # text->text\n\n source = src_vocab.to_indices(source.split())\n target = tgt_vocab.to_indices(target.split())\n return source, target, sample_id # only-src时,target=[]\n\n\n\n# 过滤掉长度 ≤min_len或者≥max_len 的数据\ndef min_max_filer(data, max_len, min_len=0):\n # 1 for special tokens.\n data_min_len = min(len(data[0]), len(data[1])) + 1\n data_max_len = max(len(data[0]), len(data[1])) + 1\n return (data_min_len >= min_len) and (data_max_len <= max_len)\n\n\n# 修改:根据是否是dual,来决定要不要添加bos\ndef batchify(insts, bos_idx, eos_idx, pad_idx, mode=\"train\", has_target=False, with_tag=False,\n lang_embed=False, lang_ids=None):\n \"\"\"\n Put all padded data needed by training into a list.\n # insts是含batch个元素的list,每个batch含src和tgt,和id元素[([],[]),([],[]),...]\n inst:[src,tgt,id,real_read]\n\n # if with tag, replace in prev_tokens with tag: tgt_tokens -> tgt_tokens\n 这里的with_tag会把tgt端的 token替换掉。\n\n \"\"\"\n assert mode in ['train', 'dev', 'test']\n # ★sort by descending source length\n if mode != \"test\":\n neg_src_len = list(map(lambda inst: -len(inst[0]), insts))\n sorted_src_idx = np.argsort(neg_src_len, kind='mergsort')\n insts = np.array(insts)[sorted_src_idx].tolist()\n # bos_idx,eos_idx=0,2\n # pad data to full sentence length\n left_pad = Pad(pad_idx, pad_right=False)\n right_pad = Pad(pad_idx, pad_right=True, dtype='int64')\n if lang_embed:\n src_word = left_pad([inst[0][1:] + [eos_idx] + inst[0][:1] for inst in insts]) # src+ + \n else:\n src_word = left_pad([inst[0] + [eos_idx] for inst in insts]) # src+
    \n\n samples_id = Stack()([inst[2] for inst in insts])\n\n BOS_LS = [bos_idx] if not with_tag else [] # 替换bos为lang,用于dual训练\n if mode != \"test\":\n prev_word = right_pad([BOS_LS + inst[1] for inst in insts]) # +tgt / +tgt\n start = 0 if not with_tag else 1\n tgt_word = np.expand_dims(right_pad([inst[1][start:] + [eos_idx] for inst in insts]),\n axis=2) # tgt+ # pad时候加了bos或eos,导致size突变,*bsz倍\n\n data_inputs = [samples_id, src_word, prev_word, tgt_word]\n # test\n else:\n if not has_target:\n data_inputs = [samples_id, src_word]\n else:\n tgt_word = right_pad([inst[1] for inst in insts])\n data_inputs = [samples_id, src_word, tgt_word]\n\n return data_inputs\n\n\n\ndef get_sampler(conf, dataset, mode='train'):\n assert mode in ['train', 'dev', 'test']\n if mode != 'test':\n args = conf.train\n sampler = SamplerHelper(dataset)\n shuffle_batch = args.shuffle_batch\n if args.sort_type == SortType.GLOBAL:\n src_key = (lambda idx, data_source: len(data_source[idx][0]))\n tgt_key = (lambda idx, data_source: len(data_source[idx][1]))\n # Sort twice\n sampler = sampler.sort(key=tgt_key).sort(key=src_key)\n else: # pool\n if args.shuffle:\n sampler = sampler.shuffle(seed=conf.seed)\n max_key = (lambda idx, data_source: max(\n len(data_source[idx][0]), len(data_source[idx][1])))\n if args.sort_type == SortType.POOL:\n sampler = sampler.sort(key=max_key, buffer_size=args.pool_size)\n # 输入 idx,length(高),size(宽), data_source ,返回新的size,这个size默认是mini batch的句子数,也可以自定义为宽度(最大词数)\n batch_size_fn = lambda idx, count, sofar, data_source: max(sofar, len(data_source[idx][0]),\n len(data_source[idx][1]))\n batch_sampler = sampler.batch(\n batch_size=args.max_tokens,\n drop_last=False,\n batch_size_fn=batch_size_fn, # 返回当前的size(宽度)\n key=lambda size_so_far, minibatch_len: size_so_far * minibatch_len) # 输入宽高,计算token数,和bsz比较\n\n if shuffle_batch:\n batch_sampler = batch_sampler.shuffle(seed=conf.seed)\n if mode == 'train':\n batch_sampler = batch_sampler.shard()\n else:\n batch_sampler = BatchSampler(dataset, batch_size=conf.generate.infer_bsz, drop_last=False)\n return batch_sampler\n\n\ndef prep_loader(conf, dataset, mode='train', multi_process=False):\n assert mode in ['train', 'dev', 'test']\n data_args, model_args, strategy_args, train_args, gen_args = conf.data, conf.model, conf.learning_strategy, conf.train, conf.generate\n # load vocab\n src_vocab, tgt_vocab = prep_vocab(conf)\n lang_ids = None\n if data_args.lang_embed:\n lang_ids = src_vocab.to_indices(data_args.language_token)\n unk_idx = src_vocab.to_indices(src_vocab.unk_token)\n assert unk_idx not in lang_ids, \"language id should not be \"\n batchify_fn = partial(batchify, bos_idx=model_args.eos_idx, eos_idx=model_args.eos_idx,\n pad_idx=model_args.pad_idx, mode=mode, has_target=data_args.has_target,\n with_tag=data_args.with_tag, lang_ids=lang_ids)\n\n if data_args.lazy_load and mode == \"train\": # 训练时懒加载,仅加载buffer_size大小进内存\n assert data_args.use_binary == True, \"current only support binary data for lazy load.\" # TODO:支持text格式文本懒加载(目前只支持二进制)\n assert isinstance(dataset, LanguagePairDataset)\n dataloader = BufferedDataloader(src_data=dataset.src_data,\n tgt_data=dataset.tgt_data,\n buffer_size=train_args.pool_size,\n sort_type=train_args.sort_type,\n max_tokens=train_args.max_tokens,\n seed=conf.seed,\n shuffle=True,\n batchify_fn=batchify_fn)\n\n if conf.train.resume and mode == 'train': # resume应该bool,路径由init来决定\n dataloader.set_epoch(conf.train.last_epoch + 1)\n print(f\"----- Resume Training: set sampler's epoch to {conf.train.last_epoch + 
1} as a random seed\")\n else:\n if not data_args.use_binary:\n src_tok, tgt_tok, truecaser, src_bpe, tgt_bpe = None, None, None, None, None\n if conf.data.use_moses_bpe:\n src_tok = MosesTokenizer(lang=conf.data.src_lang) # text->token\n tgt_tok = MosesTokenizer(lang=conf.data.tgt_lang)\n if conf.data.truecase_path != \"None\":\n truecaser = MosesTruecaser(load_from=conf.data.truecase_path)\n parser = apply_bpe.create_parser()\n src_args = parser.parse_args(args=['-c', conf.data.src_bpe_path])\n tgt_args = parser.parse_args(args=['-c', conf.data.tgt_bpe_path])\n src_bpe = apply_bpe.BPE(src_args.codes, src_args.merges, src_args.separator, None, src_args.glossaries)\n tgt_bpe = apply_bpe.BPE(tgt_args.codes, tgt_args.merges, tgt_args.separator, None, tgt_args.glossaries)\n\n trans_fn = partial(convert_samples, src_vocab=src_vocab,\n tgt_vocab=tgt_vocab, src_tok=src_tok,\n tgt_tok=tgt_tok, truecaser=truecaser,\n src_bpe=src_bpe, tgt_bpe=tgt_bpe)\n dataset = dataset.map(trans_fn, lazy=False)\n if mode != 'test' and not data_args.use_binary:\n filt_fn = partial(min_max_filer, max_len=model_args.max_length)\n dataset = dataset.filter(filt_fn)\n # paddle 自带的sampler\n # batch_sampler = get_sampler(conf, dataset, mode=mode)\n\n # samplerv2\n if mode!=\"test\":\n max_tokens = train_args.max_tokens if mode != 'test' else gen_args.max_tokens\n max_sentences = train_args.max_sentences if mode != 'test' else gen_args.max_sentences\n batch_sampler = DistributedDynamicBatchSampler(dataset,\n mode=mode,\n has_target=data_args.has_target,\n max_tokens=max_tokens,\n max_sentences=eval(str(max_sentences)),\n bsz_factor=train_args.batch_size_factor,\n seed=conf.seed,\n num_replicas=None if multi_process else 1,\n rank=None if multi_process else 0,\n drop_last=False)\n else:\n batch_sampler = BatchSampler(dataset, batch_size=conf.generate.infer_bsz, drop_last=False)\n if conf.train.resume and mode == 'train': # resume应该bool,路径由init来决定\n batch_sampler.set_epoch(conf.train.last_epoch + 1)\n print(f\"----- Resume Training: set sampler's epoch to {conf.train.last_epoch + 1} as a random seed\")\n\n dataloader = DataLoader(\n dataset=dataset,\n batch_sampler=batch_sampler,\n collate_fn=batchify_fn,\n num_workers=train_args.num_workers,\n )\n\n return dataloader\n\n\nclass SortType(object):\n GLOBAL = 'global'\n POOL = 'pool'\n NONE = \"none\"\n\n","repo_name":"jiaohuix/PaddleSeq","sub_path":"paddleseq/reader/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":15015,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"8251164749","text":"# -*- coding: utf-8 -*-\n\n# Движок игры «Быки и коровы»\nfrom random import randint\nfrom termcolor import cprint, colored\n\n\ndef check_user_input(): # проверка числа, введоного пользователем\n while True:\n user_input = input(colored('Введите число : ', color='yellow'))\n user_number_list = list(map(int, user_input))\n if len(user_number_list) != 4:\n cprint('Вы ввели неверное число! Число должно состоять из четырех цифр.', color='red')\n continue\n elif user_number_list[0] == 0 or len(user_number_list) != len(user_number_list):\n cprint('Вы ввели неверное число! 
Число должно состоять из неповторяющихся цифр и'\n ' не должно начинаться с нуля.', color='red')\n continue\n else:\n return user_number_list\n\n\nnumber_list = []\n\n\ndef guess_the_number():\n global number_list\n number_list = []\n x = randint(1, 9)\n number_list += [x]\n while len(number_list) != 4:\n y = randint(0, 9)\n if y not in number_list:\n number_list += [y]\n else:\n continue\n return number_list\n\n\ndef check_the_number(user_number):\n animals = {'bulls': 0, 'cows': 0}\n for siting, value in enumerate(number_list):\n if value == user_number[siting]:\n animals['bulls'] += 1\n elif value in user_number:\n animals['cows'] += 1\n return animals\n","repo_name":"AlexanderKornev/Public","sub_path":"lesson_006/mastermind_engine.py","file_name":"mastermind_engine.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24410581125","text":"import os\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nimport pandas as pd\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.sampler import Sampler\nimport torch\nimport numpy as np\nimport random\n\n# mini-Imagenet dataset\nclass MiniDataset(Dataset):\n def __init__(self, csv_path, data_dir):\n self.data_dir = data_dir\n self.data_df = pd.read_csv(csv_path)\n self.labels = np.array(self.data_df[\"label\"])\n self.filenames = np.array(self.data_df[\"filename\"])\n\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n def __getitem__(self, index):\n filename = self.filenames[index]\n label = self.labels[index]\n image = Image.open(os.path.join(self.data_dir, filename))\n image = self.transform(image)\n return image, label\n \n def __len__(self):\n return len(self.filenames)\n \nclass CategoriesSampler(Sampler):\n \n def __init__(self, labels, classes_per_it, num_per_class, episodes):\n '''\n Args:\n - labels: an iterable containing all the labels for the current dataset\n samples indexes will be infered from this iterable.\n - classes_per_it: number of random classes for each iteration\n - num_per_class: number of samples for each iteration for each class (support + query)\n - episodes: number of iterations (episodes) per epoch\n '''\n self.labels = labels\n self.classes_per_it = classes_per_it\n self.num_per_class = num_per_class\n self.episodes = episodes\n \n class_names = np.unique(labels)\n self.idxs_per_class = [] # e.g. [[0,1,2],[3,4],[5,6,7], ... ]\n for c in class_names:\n index_slice = np.argwhere(labels == c).reshape(-1)\n index_slice = torch.from_numpy(index_slice)\n self.idxs_per_class.append(index_slice)\n \n\n def __iter__(self):\n \n for i_batch in range(self.episodes):\n batch = []\n sample_classes = torch.randperm(len(self.idxs_per_class))[:self.classes_per_it] # e.g. 
[3,6,7,10,1]\n for c in sample_classes:\n sample_class = self.idxs_per_class[c]\n # random permute then select first num_per_class samples\n pos = torch.randperm(len(sample_class))[:self.num_per_class] \n batch.append(sample_class[pos])\n # print(batch)\n batch = torch.stack(batch).t().reshape(-1)\n # print(batch)\n yield batch\n \n\n def __len__(self):\n return self.episodes","repo_name":"yiwei32/NTU_courses","sub_path":"2021_Fall/DLCV/hw4/p1_dataset.py","file_name":"p1_dataset.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16233341630","text":"#!/usr/bin/python3\n\n# stage_1_swarm_gen.py\n# Created: 6/19/20 (pipeline_version_1.1)\n# Last edited:\n# Written by Nikhil Goyal, National Institute of Mental Health, 2019-2020\n\nimport os\nimport sys\n\nfp_sub_list = sys.argv[1] # absolute path to subject list (/data/ABCD_MBDU/goyaln2/abcd_cca_replication/data_prep/data/stage_0/subjects_with_rsfmri.txt)\nabcd_cca_replication = sys.argv[2] # absolute path to abcd_cca_replication folder\nscript_to_call = sys.argv[3] # absolute path to script (/data/ABCD_MBDU/goyaln2/abcd_cca_replication/data_prep/support_scripts/stage_1/subject_classifier.sh)\nswarm_dir = sys.argv[4] # absolute path to directory where to print out the swarm file (/data/ABCD_MBDU/goyaln2/abcd_cca_replication/data_prep/support_scripts/stage_1/)\n\nsubjects = [line.rstrip('\\n') for line in open(fp_sub_list)]\n\nfp = os.path.join(swarm_dir,'stage_1.swarm')\nf_swarm = open(fp, 'w')\n\n# example command\n# /data/ABCD_MBDU/goyaln2/abcd_cca_replication/data_prep/support_scripts/stage_1/subject_classifier.sh sub-NDARINVxxxxxxxx /data/ABCD_MBDU/goyaln2/abcd_cca_replication/\n\nfor subject in subjects:\n cmd = \"{} {} {}\".format(script_to_call, subject, abcd_cca_replication)\n f_swarm.write(cmd+'\\n')\nf_swarm.close()","repo_name":"nih-fmrif/abcd_cca_replication","sub_path":"data_prep/support_scripts/stage_1/stage_1_swarm_gen.py","file_name":"stage_1_swarm_gen.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4344964708","text":"from boggle import Boggle\nfrom flask import Flask, request, render_template, jsonify, session\n\nboggle_game = Boggle()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"majorkey\"\n\n# Make a function to create HTML board\n@app.route('/')\ndef index():\n \"\"\"Show Home Screen\"\"\"\n\n return render_template(\"home.html\")\n\n@app.route('/boggle')\ndef game_board():\n \"\"\"Show New Game Board\"\"\"\n\n new_board = boggle_game.make_board()\n session['game_board'] = new_board\n\n\n\n\n return render_template(\"game_board.html\")\n\n@app.route('/guess')\ndef handle_guess():\n \"\"\" Take post request and pass to guess check function \"\"\"\n \n word = request.args[\"word\"]\n board = session[\"game_board\"]\n response = boggle_game.check_valid_word(board, word)\n\n return jsonify({'result': response})\n\n\n@app.route(\"/post-score\", methods=[\"POST\"])\ndef post_score():\n \"\"\"Receive score, update nplays, update high score if appropriate.\"\"\"\n\n score = request.json[\"score\"]\n highscore = session.get(\"highscore\", 0)\n nplays = session.get(\"nplays\", 0)\n\n session['nplays'] = nplays + 1\n session['highscore'] = max(score, highscore)\n\n return jsonify(brokeRecord=score > 
highscore)\n","repo_name":"herfalerf/flask-boggle","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37080212125","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\nimport seaborn as sns\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\nfrom datetime import datetime\r\nimport missingno as msno\r\nimport plotly.figure_factory as ff\r\nimport plotly.graph_objects as go\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\nfrom scipy.cluster.hierarchy import linkage, dendrogram\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import median_abs_deviation\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import mean_squared_error as mse, r2_score\r\nfrom sklearn.tree import DecisionTreeRegressor\r\n\r\n\r\ndf = pd.read_csv('final_df.csv')\r\n\r\nst.title(\"Disease Mortality Rate App\")\r\nst.sidebar.info(\"Welcome to my Mortality Analysis App\")\r\nst.sidebar.info(\"Created by [Prakyath Mannungal Chandran]\")\r\n\r\ndef main():\r\n option = st.selectbox('Select an option', [\"Statistical Analysis\",\"Visual Representation\",\"Prediction\"])\r\n if option == 'Statistical Analysis':\r\n statistical_analysis()\r\n elif option == 'Visual Representation':\r\n visual_representation()\r\n else:\r\n prediction()\r\n\r\n\r\ndef statistical_analysis():\r\n st.write(\"This section provides insights to the data\")\r\n st.dataframe(df.head(5))\r\n st.dataframe(df.tail(5))\r\n st.write(\"DataFrame shape is\", {df.shape})\r\n\r\n st.write(\"Missing values summary:\")\r\n if st.dataframe(df.isnull().sum() == 0):\r\n st.write(f\"The number of missing values are zero\")\r\n else:\r\n st.write(f\"The dataset includes missing values\")\r\n\r\n st.markdown(\"Note: Certain coins will have lesser rows compared to Bitcoin,\"\r\n \" Since many coins were recently introduced in the market\", unsafe_allow_html=True)\r\n\r\n st.write(\"Columns in the dataset are:\")\r\n st.dataframe(df.columns)\r\n st.write(f\"Summary Statistics of the dataset are:\")\r\n st.dataframe(df.describe())\r\n st.write(\"Outlier Detection\")\r\n st.markdown(\"Note: Lower and Upper bounds identified, \", unsafe_allow_html=True)\r\n\r\n def outliers():\r\n numerical_columns = [\r\n 'Total Deaths',\r\n 'COVID-19 Deaths',\r\n 'Influenza Deaths',\r\n 'Pneumonia Deaths',\r\n 'Pneumonia and COVID-19 Deaths',\r\n 'Pneumonia, Influenza, or COVID-19 Deaths'\r\n ]\r\n\r\n for column in numerical_columns:\r\n IQR = df[column].quantile(0.75) - df[column].quantile(0.25)\r\n\r\n lower_bridge = df[column].quantile(0.25) - (IQR * 3)\r\n upper_bridge = df[column].quantile(0.75) + (IQR * 3)\r\n\r\n st.markdown(f\"{column}\", unsafe_allow_html=True)\r\n\r\n st.write(f\"Lower boundary: {lower_bridge}\")\r\n st.write(f\"Upper boundary: {upper_bridge}\\n\\n\")\r\n\r\n # Call the outliers function after defining it\r\n outliers()\r\n\r\ndef visual_representation():\r\n option = st.radio('Choose an option to see the visualization:', [\r\n 'Percentage of zeros',\r\n 'Missing Values in data',\r\n 'Data-types',\r\n 'Outlier Detection',\r\n 'Months with most mortality',\r\n 'Correlation plots',\r\n 'Hierarchical Clustering of 
Deaths with States',\r\n 'Number of Deaths per state',\r\n 'Top-10 cities with most deaths',\r\n 'Growth/Trend of different deaths with time',\r\n 'Deaths compared to Place of Death',\r\n 'Time Series with highest and lowest death rate'\r\n ])\r\n\r\n def zeros():\r\n # Calculate the percentage of zero values in each column\r\n percentage_zeros = (df == 0).sum() / len(df) * 100\r\n percentage_non_zeros = 100 - percentage_zeros\r\n\r\n # Create subplots with pie charts\r\n num_columns = len(df.columns)\r\n num_rows = (num_columns + 1) // 2\r\n\r\n fig, axs = plt.subplots(num_rows, 2, figsize=(13, num_rows * 2))\r\n\r\n # Custom colors for the column names and pie chart labels\r\n column_name_color = 'blue'\r\n label_colors = ['red', 'green']\r\n\r\n for index, (column, ax) in enumerate(zip(df.columns, axs.flatten())):\r\n ax.pie([percentage_zeros[column], percentage_non_zeros[column]], labels=['Zero Values', 'Non-Zero Values'],\r\n autopct='%1.1f%%', startangle=90, colors=label_colors)\r\n ax.set_title(column, color=column_name_color)\r\n\r\n # Adjust layout and remove unused subplots (if any)\r\n fig.tight_layout()\r\n for unused_ax in axs.flatten()[index + 1:]:\r\n unused_ax.axis('off')\r\n\r\n plt.suptitle('Percentage of Zero Values in Each Column of the Sampled DataFrame', y=1.02, fontsize=16)\r\n st.pyplot(fig)\r\n\r\n\r\n def missing_values():\r\n matplotlib.rcParams['figure.figsize'] = (14, 6)\r\n fig, ax = plt.subplots()\r\n sns.heatmap(df.isnull(), cbar=False, cmap='viridis')\r\n st.pyplot(fig)\r\n\r\n def data_types():\r\n\r\n fig, ax = plt.subplots(figsize=(8,6))\r\n dtype_count = df.dtypes.value_counts()\r\n # bar plot\r\n dtype_count.plot(kind='bar', ax=ax)\r\n ax.set_xlabel('Data Type')\r\n ax.set_ylabel('Number of columns')\r\n ax.set_title('Number of Columns by Data Types')\r\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45)\r\n\r\n # Pass the Matplotlib figure to Streamlit\r\n st.pyplot(fig)\r\n\r\n def outliers():\r\n # box plot\r\n fig1, axes = plt.subplots(3,3,sharex=True)\r\n df.plot(kind='box', subplots=True, layout=(3, 3), sharex=True)\r\n st.pyplot(fig1)\r\n\r\n #density plots\r\n numeric_columns = [\r\n 'Year',\r\n 'Month',\r\n 'COVID-19 Deaths',\r\n 'Total Deaths',\r\n 'Pneumonia Deaths',\r\n 'Pneumonia and COVID-19 Deaths',\r\n 'Influenza Deaths',\r\n 'Pneumonia, Influenza, or COVID-19 Deaths'\r\n ]\r\n fig2, axes = plt.subplots(nrows=1, ncols=len(numeric_columns), figsize=(15, 5))\r\n\r\n for i, column in enumerate(numeric_columns):\r\n sns.kdeplot(data=df[column], ax=axes[i], fill=True)\r\n # axes.set_title(f'Density Plot of {column}')\r\n\r\n plt.tight_layout()\r\n st.pyplot(fig2)\r\n\r\n #histogram plots\r\n fig3 = make_subplots(rows=1, cols=len(numeric_columns), subplot_titles=numeric_columns,\r\n column_widths=[0.3] * len(numeric_columns))\r\n\r\n for i, column in enumerate(numeric_columns):\r\n fig3.add_trace(go.Histogram(x=df[column], nbinsx=30), row=1, col=i + 1)\r\n\r\n fig3.update_layout(\r\n title='Histograms of Numeric Columns',\r\n width=1200,\r\n height=500,\r\n margin=dict(l=10, r=10, t=175, b=10),\r\n showlegend=False\r\n )\r\n\r\n for annotation in fig3['layout']['annotations']:\r\n annotation['textangle'] = 45\r\n st.plotly_chart(fig3)\r\n\r\n def months_mortality():\r\n fig, ax = plt.subplots()\r\n fig = sns.jointplot(x='Total Deaths', y='Month', data=df, kind='scatter')\r\n st.pyplot(fig)\r\n\r\n def corrlation_plots():\r\n fig1, ax = plt.subplots()\r\n sns.heatmap(df.corr(), cmap='coolwarm', annot=True)\r\n st.pyplot(fig1)\r\n\r\n fig2, 
ax = plt.subplots()\r\n sns.heatmap(df.corr(), cmap='magma', linecolor='white', linewidths=1, annot=True)\r\n st.pyplot(fig2)\r\n\r\n\r\n def clustering():\r\n fig, ax = plt.subplots(figsize=(12, 8))\r\n statewise_sum = df.pivot_table(\r\n values=['COVID-19 Deaths', 'Pneumonia Deaths', 'Pneumonia and COVID-19 Deaths', 'Influenza Deaths',\r\n 'Pneumonia, Influenza, or COVID-19 Deaths', 'Total Deaths'], index='State', aggfunc=np.sum)\r\n\r\n scaler = MinMaxScaler()\r\n statewise_normalised = pd.DataFrame(scaler.fit_transform(statewise_sum), index=statewise_sum.index,\r\n columns=statewise_sum.columns)\r\n\r\n # Perform hierarchical clustering\r\n linkage_matrix = linkage(statewise_normalised, method='ward')\r\n\r\n # Create the dendrogram\r\n dendrogram(linkage_matrix, labels=statewise_normalised.index, leaf_rotation=90)\r\n ax.set_xlabel('State')\r\n ax.set_ylabel('Distance (Ward)')\r\n ax.set_title('Hierarchical Clustering Dendrogram for States')\r\n st.pyplot(fig)\r\n\r\n def deaths_state():\r\n statewise = pd.pivot_table(df, values=['COVID-19 Deaths', 'Pneumonia Deaths', 'Pneumonia and COVID-19 Deaths',\r\n 'Influenza Deaths', 'Pneumonia, Influenza, or COVID-19 Deaths',\r\n 'Total Deaths'], index='State', aggfunc=np.sum)\r\n statewise = statewise.sort_values(by=['Total Deaths'], ascending=False)\r\n st.write(statewise.style.background_gradient(cmap='cubehelix'))\r\n\r\n def top_ten():\r\n fig,ax = plt.subplots(figsize=(16, 9))\r\n top_10_cities = df.groupby(by='State').max()[\r\n ['COVID-19 Deaths', 'Pneumonia Deaths', 'Pneumonia and COVID-19 Deaths', 'Influenza Deaths',\r\n 'Pneumonia, Influenza, or COVID-19 Deaths', 'Total Deaths', 'Start Date']].sort_values(by=['Total Deaths'],\r\n ascending=False).reset_index()\r\n plt.title(\"Top 10 states with most mortality\", size=25)\r\n ax = sns.barplot(data=top_10_cities.iloc[:10], y=\"Total Deaths\", x=\"State\", linewidth=2, edgecolor='black')\r\n plt.xlabel('States')\r\n plt.ylabel('Total Mortality')\r\n st.pyplot(fig)\r\n\r\n def trend():\r\n import plotly.graph_objs as go\r\n from plotly.subplots import make_subplots\r\n\r\n df['Start Date'] = pd.to_datetime(df['Start Date'])\r\n df['End Date'] = pd.to_datetime(df['End Date'])\r\n\r\n grouped_start_data = df.groupby('Start Date').agg({\r\n 'COVID-19 Deaths': 'sum',\r\n 'Pneumonia Deaths': 'sum',\r\n 'Influenza Deaths': 'sum'\r\n })\r\n\r\n grouped_end_data = df.groupby('End Date').agg({\r\n 'COVID-19 Deaths': 'sum',\r\n 'Pneumonia Deaths': 'sum',\r\n 'Influenza Deaths': 'sum'\r\n })\r\n\r\n cumulative_df_start = grouped_start_data.cumsum()\r\n cumulative_df_end = grouped_end_data.cumsum()\r\n\r\n cumulative_df_start.reset_index(inplace=True)\r\n cumulative_df_end.reset_index(inplace=True)\r\n\r\n melted_df_start = cumulative_df_start.melt(id_vars='Start Date', var_name='Death Type',\r\n value_name='Cumulative Deaths')\r\n melted_df_end = cumulative_df_end.melt(id_vars='End Date', var_name='Death Type',\r\n value_name='Cumulative Deaths')\r\n\r\n fig = make_subplots(rows=1, cols=2, subplot_titles=('Start Date', 'End Date'))\r\n\r\n for death_type in melted_df_start['Death Type'].unique():\r\n fig.add_trace(\r\n go.Scatter(x=melted_df_start.loc[melted_df_start['Death Type'] == death_type, 'Start Date'],\r\n y=melted_df_start.loc[melted_df_start['Death Type'] == death_type, 'Cumulative Deaths'],\r\n name=death_type + ' (Start Date)'),\r\n row=1, col=1\r\n )\r\n\r\n fig.add_trace(\r\n go.Scatter(x=melted_df_end.loc[melted_df_end['Death Type'] == death_type, 'End Date'],\r\n 
y=melted_df_end.loc[melted_df_end['Death Type'] == death_type, 'Cumulative Deaths'],\r\n name=death_type + ' (End Date)'),\r\n row=1, col=2\r\n )\r\n\r\n # Update the layout\r\n fig.update_layout(title='Growth Trend of Different Types of Deaths Based on Start Date and End Date',\r\n showlegend=True)\r\n\r\n # Show the plot in the Streamlit app\r\n st.plotly_chart(fig)\r\n\r\n def place_death(df):\r\n fig, ax = plt.subplots()\r\n df = df[df['Place of Death'] != 'Total - All Places of Death']\r\n\r\n # Group by Place of Death and calculate the sum of COVID-19 deaths\r\n place_of_death_df = df.groupby('Place of Death').agg({'COVID-19 Deaths': 'sum'}).reset_index()\r\n\r\n # Create a pie chart\r\n fig = px.pie(place_of_death_df, values='COVID-19 Deaths', names='Place of Death',\r\n title='COVID-19 Deaths by Place of Death')\r\n st.plotly_chart(fig)\r\n\r\n def time_series():\r\n state1 = 'Texas'\r\n state2 = 'District of Columbia'\r\n\r\n filtered_df = df[(df['State'] == state1) | (df['State'] == state2)]\r\n filtered_df['Start Date'] = pd.to_datetime(filtered_df['Start Date'])\r\n grouped_df = filtered_df.groupby(['Start Date', 'State']).agg({'COVID-19 Deaths': 'sum'}).reset_index()\r\n\r\n fig, ax = plt.subplots(figsize=(15, 8))\r\n ax.grid(True, linestyle='--', alpha=0.7)\r\n ax.set_facecolor('#eafff5')\r\n for state in [state1, state2]:\r\n state_data = grouped_df[grouped_df['State'] == state]\r\n ax.plot(state_data['Start Date'], state_data['COVID-19 Deaths'], label=state)\r\n\r\n ax.set_title(f'COVID-19 Deaths for {state1} and {state2}')\r\n ax.set_xlabel('Time', fontsize=14)\r\n ax.set_ylabel('COVID-19 Deaths', fontsize=14)\r\n\r\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))\r\n ax.xaxis.set_major_locator(mdates.MonthLocator(interval=3))\r\n\r\n ax.legend()\r\n st.pyplot(fig)\r\n\r\n if option == 'Percentage of zeros':\r\n zeros()\r\n elif option == 'Missing Values in data':\r\n missing_values()\r\n elif option == 'Data-types':\r\n data_types()\r\n elif option == 'Outlier Detection':\r\n outliers()\r\n elif option == 'Months with most mortality':\r\n months_mortality()\r\n elif option == 'Correlation plots':\r\n corrlation_plots()\r\n elif option == 'Hierarchical Clustering of Deaths with States':\r\n clustering()\r\n elif option == 'Number of Deaths per state':\r\n deaths_state()\r\n elif option == 'Top-10 cities with most deaths':\r\n top_ten()\r\n elif option == 'Growth/Trend of different deaths with time':\r\n trend()\r\n elif option == 'Deaths compared to Place of Death':\r\n place_death(df)\r\n elif option == 'Time Series with highest and lowest death rate':\r\n time_series()\r\n\r\n\r\ndef prediction():\r\n fig, ax = plt.subplots(figsize=(15, 8))\r\n X = df[['Year', 'Month', 'Total Deaths', 'Pneumonia Deaths', 'Influenza Deaths']]\r\n y = df['COVID-19 Deaths']\r\n\r\n X_train,X_test, y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42)\r\n model = LinearRegression()\r\n model.fit(X_train,y_train)\r\n\r\n y_pred = model.predict(X_test)\r\n ax.scatter(y_test, y_pred, alpha=0.5)\r\n ax.plot([min(y_test), max(y_test)], [min(y_test), max(y_test)], color='red', linestyle='--', lw=2)\r\n st.pyplot(fig)\r\n\r\n mse_val = mse(y_test,y_pred)\r\n st.write(f\"Mean Squared Error: {mse_val}\")\r\n st.write(f\"R-Squared {r2_score(y_test,y_pred)}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n 
main()","repo_name":"PrakyathMC/Covid-19_Mortality_analysis_app","sub_path":"health_app.py","file_name":"health_app.py","file_ext":"py","file_size_in_byte":15430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24677639058","text":"from scripts.manager import GameManager, Process, logging, get_config_value\n\n\ndef main():\n import pygame as pg\n GameManager().run()\n\n\nif __name__ == '__main__':\n game = GameManager()\n # Debug\n if get_config_value('game', 'duplicate_window'):\n logging.info('Create duplicate window')\n Process(target=main, daemon=True).start()\n game.run()","repo_name":"danuar/platformer_on_pygame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6687680266","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass NeweggItem(scrapy.Item):\n condition = scrapy.Field()\n delivery = scrapy.Field()\n seller = scrapy.Field()\n price_shipping = scrapy.Field()","repo_name":"Scruf/PrivateStash","sub_path":"ScrapyTest/newegg/newegg/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43583677174","text":"# -*- coding=utf-8 -*- #\n\nfrom crawler import Crawler\nimport time, os\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\n\ndef initDict():\n d = {}\n file = './md5.txt'\n try:\n with open(file, mode = 'r') as f:\n line = f.readline()\n if line != '':\n d = eval(str(line)) # 直接把字符串转成字典格式\n\n return d\n except:\n # 如果没有文件,则直接创建文件\n fd = open(file, mode = 'a+', encoding = 'utf-8')\n fd.close()\n\n return d\n\n\ndef deleteFiles():\n filePath = '/root/estar_save/'\n timeStamp = time.time()\n timeArray = time.localtime(timeStamp)\n current = time.strftime(\"%Y-%m-%d\", timeArray)\n name = os.listdir(filePath)\n\n for i in name:\n try:\n fileInfo = os.stat(filePath + i)\n except FileNotFoundError:\n continue\n timeStamp = fileInfo.st_mtime\n timeArray = time.localtime(timeStamp)\n date = time.strftime(\"%Y-%m-%d\", timeArray)\n if current != date:\n os.remove(filePath + i)\n\n\ndef crawl(d):\n c = Crawler(d)\n c.doJob()\n\n\nif __name__ == '__main__':\n crawlItrv = 60 * 10\n deleteItrv = 60 * 60\n d = initDict()\n\n scheduler = BlockingScheduler()\n\n scheduler.add_job(crawl, 'interval', seconds = crawlItrv, id = 'crawl', args = [d])\n scheduler.add_job(deleteFiles, 'interval', seconds = deleteItrv, id = 'deleteFiles')\n\n scheduler.start()\n","repo_name":"tonyhauuk/Non-project","sub_path":"crawl/dianwang/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12259856507","text":"#!/usr/bin/env python3\n\nimport torch\nimport kornia as kn\nimport torch.nn as nn\nimport kornia.feature as kf\nimport torch.nn.functional as F\nimport kornia.geometry.conversions as C\n\nfrom utils import Projector\nfrom utils import Visualizer\nfrom models.featurenet import GridSample\nfrom models.match import ConsecutiveMatch\nfrom models.tool import GlobalStepCounter\n\nclass FeatureNetLoss(nn.Module):\n def __init__(self, beta=[1, 1], K=None, writer=None, viz_start=float('inf'), viz_freq=200, 
counter=None):\n super().__init__()\n self.writer, self.beta, self.counter = writer, beta, counter if counter is not None else GlobalStepCounter()\n self.score_corner = ScoreLoss()\n self.desc_match = DiscriptorMatchLoss(writer=writer, counter=self.counter)\n self.projector = Projector()\n self.viz = Visualizer() if self.writer is None else Visualizer('tensorboard', writer=self.writer)\n self.viz_start, self.viz_freq = viz_start, viz_freq\n\n def forward(self, descriptors, points, scores, score_map, depth_map, poses, K, imgs, env):\n def batch_project(pts):\n return self.projector.cartesian(pts, depth_map, poses, K)\n\n H, W = score_map.size(2), score_map.size(3)\n cornerness = self.beta[0] * self.score_corner(score_map, imgs, batch_project)\n proj_pts, invis_idx = batch_project(points)\n match = self.beta[1] * self.desc_match(descriptors, scores, points.unsqueeze(0), proj_pts, invis_idx, H, W)\n loss = cornerness + match\n\n n_iter = self.counter.steps\n if self.writer is not None:\n self.writer.add_scalars('Loss', {'cornerness': cornerness,\n 'match': match,\n 'all': loss}, n_iter)\n\n if n_iter >= self.viz_start and n_iter % self.viz_freq == 0:\n self.viz.show(imgs, points, 'hot', values=scores.squeeze(-1).detach().cpu().numpy(), name='train', step=n_iter)\n\n self.viz.show(score_map, color='hot', name='score', step=n_iter)\n\n pair = torch.tensor([[0, 1], [0, 3], [0, 5], [0, 7]])\n b_src, b_dst = pair[:, 0], pair[:, 1]\n matched, confidence = ConsecutiveMatch()(descriptors[b_src], descriptors[b_dst], points[b_dst])\n top_conf, top_idx = confidence.topk(50, dim=1)\n top_conf, top_idx = top_conf.detach().cpu().numpy(), top_idx.unsqueeze(-1).repeat(1, 1, 2)\n self.viz.showmatch(imgs[b_src], points[b_src].gather(1, top_idx), imgs[b_dst], matched.gather(1, top_idx), 'hot', top_conf, 0.9, 1, name='match', step=n_iter)\n\n return loss\n\n\nclass ScoreLoss(nn.Module):\n def __init__(self, radius=8, num_corners=500):\n super(ScoreLoss, self).__init__()\n self.bceloss = nn.BCELoss()\n self.corner_det = kf.CornerGFTT()\n self.num_corners = num_corners\n self.pool = nn.MaxPool2d(kernel_size=radius, return_indices=True)\n self.unpool = nn.MaxUnpool2d(kernel_size=radius)\n\n def forward(self, scores_dense, imgs, projector):\n corners = self.get_corners(imgs, projector)\n corners = kn.filters.gaussian_blur2d(corners, kernel_size=(7, 7), sigma=(1, 1))\n lap = kn.filters.laplacian(scores_dense, 5) # smoothness\n\n return self.bceloss(scores_dense, corners) + (scores_dense * torch.exp(-lap)).mean() * 10\n\n def get_corners(self, imgs, projector=None):\n (B, _, H, W), N = imgs.shape, self.num_corners\n corners = kf.nms2d(self.corner_det(kn.rgb_to_grayscale(imgs)), (5, 5))\n\n # only one in patch\n output, indices = self.pool(corners)\n corners = self.unpool(output, indices)\n\n # keep top\n values, idx = corners.view(B, -1).topk(N, dim=1)\n coords = torch.stack([idx % W, idx // W], dim=2) # (x, y), same below\n\n if not projector:\n # keep as-is\n b = torch.arange(0, B).repeat_interleave(N).to(idx)\n h, w = idx // W, idx % W\n values = values.flatten()\n else:\n # combine corners from all images\n coords = kn.normalize_pixel_coordinates(coords, H, W)\n coords, invis_idx = projector(coords)\n coords[tuple(invis_idx)] = -2\n coords_combined = coords.transpose(0, 1).reshape(B, B * N, 2)\n coords_combined = kn.denormalize_pixel_coordinates(coords_combined, H, W).round().to(torch.long)\n b = torch.arange(B).repeat_interleave(B * N).to(coords_combined)\n w, h = coords_combined.reshape(-1, 2).T\n mask = w >= 
0\n b, h, w, values = b[mask], h[mask], w[mask], values.flatten().repeat(B)[mask]\n\n target = torch.zeros_like(corners)\n target[b, 0, h, w] = values\n target = kf.nms2d(target, (5, 5))\n\n return (target > 0).to(target)\n\n\nclass DiscriptorMatchLoss(nn.Module):\n eps = 1e-6\n def __init__(self, radius=1, writer=None, counter=None):\n super(DiscriptorMatchLoss, self).__init__()\n self.radius, self.writer, self.counter = radius, writer, counter if counter is not None else GlobalStepCounter()\n self.cosine = PairwiseCosine(inter_batch=True)\n\n def forward(self, descriptors, scores, pts_src, pts_dst, invis_idx, height, width):\n pts_src = C.denormalize_pixel_coordinates(pts_src.detach(), height, width)\n pts_dst = C.denormalize_pixel_coordinates(pts_dst.detach(), height, width)\n\n dist = torch.cdist(pts_dst, pts_src)\n dist[tuple(invis_idx)] = float('nan')\n pcos = self.cosine(descriptors, descriptors)\n\n match = (dist <= self.radius).triu(diagonal=1)\n miss = (dist > self.radius).triu(diagonal=1)\n\n scores = scores.detach()\n score_ave = (scores[:, None, :, None] + scores[None, :, None, :]).clamp(min=self.eps) / 2\n pcos = self.cosine(descriptors, descriptors)\n\n sig_match = -torch.log(score_ave[match])\n sig_miss = -torch.log(score_ave[miss])\n\n s_match = pcos[match]\n s_miss = pcos[miss]\n\n if self.writer is not None:\n n_iter = self.counter.steps\n self.writer.add_scalars('Misc/DiscriptorMatch/Count', {\n 'n_match': match.sum(),\n 'n_miss': miss.sum(),\n }, n_iter)\n\n if len(sig_match) > 0:\n self.writer.add_histogram('Misc/DiscriptorMatch/Sim/match', s_match, n_iter)\n self.writer.add_histogram('Misc/DiscriptorMatch/Sim/miss', s_miss[:len(s_match)], n_iter)\n\n return self.nll(sig_match, s_match) + self.nll(sig_miss, s_miss, False, match.sum() * 2)\n\n def nll(self, sig, cos, match=True, topk=None):\n # p(x) = exp(-l / sig) * C; l = 1 - x if match else x\n norm_const = torch.log(sig * (1 - torch.exp(-1 / sig)))\n loss = (1 - cos if match else cos) / sig + norm_const\n return (loss if topk is None else loss.topk(topk).values).mean()\n\n\nclass PairwiseCosine(nn.Module):\n def __init__(self, inter_batch=False, dim=-1, eps=1e-8):\n super(PairwiseCosine, self).__init__()\n self.inter_batch, self.dim, self.eps = inter_batch, dim, eps\n self.eqn = 'amd,bnd->abmn' if inter_batch else 'bmd,bnd->bmn'\n\n def forward(self, x, y):\n xx = torch.sum(x**2, dim=self.dim).unsqueeze(-1) # (A, M, 1)\n yy = torch.sum(y**2, dim=self.dim).unsqueeze(-2) # (B, 1, N)\n if self.inter_batch:\n xx, yy = xx.unsqueeze(1), yy.unsqueeze(0) # (A, 1, M, 1), (1, B, 1, N)\n xy = torch.einsum(self.eqn, x, y)\n return xy / (xx * yy).clamp(min=self.eps**2).sqrt()\n","repo_name":"wang-chen/lgl-feature-matching","sub_path":"models/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":7580,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"12508463725","text":"import logging\n\nimport xbmc\nimport xbmcaddon\n\n\nclass KodiLogHandler(logging.StreamHandler):\n def __init__(self) -> None:\n logging.StreamHandler.__init__(self)\n addon_id = xbmcaddon.Addon().getAddonInfo(\"id\")\n format = f\"### [{addon_id}] - [%(filename)s:%(lineno)s - %(funcName)20s() ] %(name)s: %(message)s\"\n formatter = logging.Formatter(format)\n self.setFormatter(formatter)\n\n def emit(self, record: logging.LogRecord) -> None:\n levels = {\n logging.CRITICAL: xbmc.LOGFATAL,\n logging.ERROR: xbmc.LOGERROR,\n logging.WARNING: xbmc.LOGWARNING,\n logging.INFO: xbmc.LOGINFO,\n 
logging.DEBUG: xbmc.LOGDEBUG,\n logging.NOTSET: xbmc.LOGNONE,\n }\n\n xbmc.log(self.format(record), levels[record.levelno])\n\n def flush(self) -> None:\n pass\n\n\ndef config() -> None:\n logger = logging.getLogger()\n logger.addHandler(KodiLogHandler())\n logger.setLevel(logging.DEBUG)\n","repo_name":"maximeh/script.myepisodes","sub_path":"kodilogging.py","file_name":"kodilogging.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"9892067397","text":"import responses\nfrom dagster import job, op\nfrom dagster_census import CensusOutput, census_resource, census_trigger_sync_op\n\nfrom .utils import (\n get_destination_data,\n get_source_data,\n get_sync_data,\n get_sync_run_data,\n get_sync_trigger_data,\n)\n\n\ndef test_census_trigger_sync_op():\n cen_resource = census_resource.configured({\"api_key\": \"foo\"})\n\n @op\n def foo_op():\n pass\n\n @job(\n resource_defs={\"census\": cen_resource},\n config={\n \"ops\": {\n \"census_trigger_sync_op\": {\n \"config\": {\n \"sync_id\": 52,\n \"poll_interval\": 0,\n \"poll_timeout\": 10,\n }\n }\n }\n },\n )\n def census_sync_job():\n census_trigger_sync_op(start_after=foo_op())\n\n with responses.RequestsMock() as rsps:\n rsps.add(\n rsps.GET,\n \"https://app.getcensus.com/api/v1/syncs/52\",\n json=get_sync_data(),\n )\n rsps.add(\n rsps.GET,\n \"https://app.getcensus.com/api/v1/sources/15\",\n json=get_source_data(),\n )\n rsps.add(\n rsps.GET,\n \"https://app.getcensus.com/api/v1/destinations/15\",\n json=get_destination_data(),\n )\n rsps.add(\n rsps.POST,\n \"https://app.getcensus.com/api/v1/syncs/52/trigger\",\n json=get_sync_trigger_data(),\n )\n rsps.add(\n rsps.GET,\n \"https://app.getcensus.com/api/v1/sync_runs/94\",\n json=get_sync_run_data(),\n )\n\n result = census_sync_job.execute_in_process()\n assert result.output_for_node(\"census_trigger_sync_op\") == CensusOutput(\n sync_run=get_sync_run_data()[\"data\"],\n source=get_source_data()[\"data\"],\n destination=get_destination_data()[\"data\"],\n )\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-census/dagster_census_tests/test_op.py","file_name":"test_op.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"70888606887","text":"import os\nimport subprocess\nclass NodeUser:\n\tdef __init__(self,name):\n\t\tself.name=name\n\t\tself.next = None\n\t\tself.prev = None\n\nclass Circular_Double_List:\n\tdef __init__(self):\n\t\tself.first=None\n\t\tself.last=None\n\t\tself.size=0\n\n\tdef addUser(self,name):\n\t\tnuevo = NodeUser(name)\n\t\tif self.first is None:\n\t\t\tself.first=nuevo\n\t\t\tself.first.next=self.first\n\t\t\tnuevo.prev=self.last\n\t\t\tself.last=nuevo\n\t\telse:\n\t\t\tself.last.next=nuevo\n\t\t\tnuevo.next=self.first\n\t\t\tnuevo.prev=self.last\n\t\t\tself.last=nuevo\n\t\t\tself.first.prev=self.last\n\t\tself.size+=1\n\tdef returnFirst(self):\n\t\treturn self.first\n\tdef isFind(self,name2):\n\t\taux = self.first\n\t\tif aux != None:\n\t\t\twhile True:\n\t\t\t\tif aux.name==name2:\n\t\t\t\t\treturn True\n\t\t\t\taux = aux.next\n\t\t\t\tif self.first.name == aux.name:\n\t\t\t\t\treturn False\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\treturn False\n\tdef generateImage(self):\n\t\tCola1 = '\\n digraph{\\n rankdir=LR; \\n node [shape=record];\\n label=\\\"User List\\\";\\n'\n\t\tcompras = '';\n\t\taux = self.first\n\t\twhile 
True:\n\t\t\tcompras += aux.name\n\t\t\tcompras += \" -> \";\n\t\t\tcompras += aux.next.name\n\t\t\tcompras += \";\\n\";\n\t\t\tcompras += aux.next.name\n\t\t\tcompras += \" -> \";\n\t\t\tcompras += aux.name\n\t\t\tcompras += \";\\n\";\n\t\t\taux = aux.next\n\t\t\tif self.first.name == aux.name:\n\t\t\t\tbreak\n\t\twith open(\"usersList.txt\",'w',encoding = 'utf-8') as f:\n\t\t\tf.write(Cola1+compras+'\\n}')\n\t\t\tf.close()\n\t\tcmd='dot -Tpng usersList.txt -o user.png'\n\t\tos.system(cmd)\n\t\t#Sirve para abrir la imagen en windows\n\t\t#Descomenta subprocess para que abra la imagen en tu compu\n\t\tos.system('user.png')\n\t\t#subprocess.check_call(['open','user.png'])\n\tdef printUser(self):\n\t\taux = self.first\n\t\twhile True:\n\t\t\tprint(aux.name)\n\t\t\taux = aux.next\n\t\t\tif self.first.name == aux.name:\n\t\t\t\tbreak\n","repo_name":"EdsonArmando/EDD_1S2019_P1_201701029","sub_path":"Structures/CircularDoubleList.py","file_name":"CircularDoubleList.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29991096524","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom api.views import router\n\nurlpatterns = [\n path(\"\", include(\"ui.urls\")),\n path(\"admin/\", admin.site.urls),\n path(\"api/\", include(router.urls)),\n path(\"api-auth/\", include(\"rest_framework.urls\")),\n path(\"o/\", include(\"oauth2_provider.urls\", namespace=\"oauth2_provider\")),\n path(\"select2/\", include(\"django_select2.urls\")),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n import debug_toolbar\n\n urlpatterns += [\n path(\"__debug__/\", include(debug_toolbar.urls)),\n ]\n","repo_name":"elgohr-update/tinventory","sub_path":"web/tinventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"25049566417","text":"'''\n定义一些功能函数\n'''\nfrom elasticsearch import Elasticsearch\nimport json\nimport random\nimport report_item\nimport re\n\ndef createTable():\n\t'''\n\t用随机数自动填充并生成表格 ok\n\treturn [{'name' : XX, 'value' : XX}{..}..]\n\t'''\n\titem_list = []\n\tname_list = list(set(report_item.item_dict.values()))\n\n\tfor name in name_list:\n\t\titem = dict()\n\t\titem['name'] = name\n\t\titem['value'] = str(round(random.random(), 2))\n\t\titem_list.append(item)\n\n\treturn item_list\n\n\n\ndef search(es, text):\n\t'''\n\t检索相关模板 ok\n\treturn list\n\t'''\n\tdsl = {\n\t\t'query': {\n\t\t 'multi_match': {\n\t\t 'query': text,\n\t\t 'fields':[ 'title', 'content'],\n\t\t 'type': 'best_fields',\n\n\t\t }\n\t\t}\n\t}\n\n\tresult = es.search(index='templets', doc_type='report', body=dsl)\n\tprint(json.dumps(result, indent=2, ensure_ascii=False))\n\treturn result['hits']['hits']\n\n\ndef extract(text, item_list):\n\t'''\n\t抽取关键数值信息\n\treturn {key:value}\n\t'''\n\tinfo_dict = {}\n\n\t# 先加入表格中的信息\n\tfor item in item_list:\n\t\tinfo_dict[item['name']] = item['value']\n\n\tkeyWords = report_item.keyWord_dict.keys()\n\tsentence_list = re.split( '[,,。\\n]', text)\n\tpattern = re.compile(r'[0-9]+\\.?[0-9]*') # 查找数字\n\n\tprint('========= 文本抽取结果 =========')\n\tfor sentence in sentence_list:\n\n\t\t# 最长匹配方式确定每个句子中的类型\n\t\tsentence_type = ''\n\t\tfor keyWord in 
keyWords:\n\t\t\tif keyWord in sentence and len(keyWord) > len(sentence_type):\n\t\t\t\tsentence_type = keyWord\n\t\tif sentence_type != '':\n\t\t\tvalues = pattern.findall(sentence)\n\t\t\tif len(values) >= 1:\n\t\t\t\tinfo_dict[sentence_type] = values[0]\n\t\t\t\tprint(sentence, sentence_type, values[0])\n\n\t\n\treturn info_dict\n\ndef fullfill(text, info_dict):\n\t'''\n\t用抽取值自动填充文本\n\treturn text\n\t'''\n\tprint('========= 填充信息 =========')\n\tprint(info_dict)\n\tfor info in info_dict.keys():\n\t\ttext = text.replace('%s' % info ,info_dict[info])\n\treturn text\n","repo_name":"zhangjiatao/Hospital-Report-Demo","sub_path":"src/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"72038172328","text":"#import namemap\nimport time\nadjacencies = [\n #0 1 2 3 4\n [0,1,0,1,0],#0\n [1,0,1,0,0],#1\n [0,1,0,1,1],#2\n [1,0,1,0,1],#3\n [0,0,1,1,0],#4\n\n]\n\npositions = [\n #x y\n [0,0],#0\n [0,-100],#1\n [50,-80],#2\n [30,0],#3\n [100,20],#3\n]\n\nclass myQueue:\n def __init__(self):\n self.queueRep=[]\n def queue(self,num):\n self.queueRep.append(num)\n def dequeue(self):\n ret = self.queueRep[0]\n self.queueRep.pop(0)\n return ret\n def showQ(self):\n print(self.queueRep)\n def qSize(self):\n return len(self.queueRep)\n def qGet(self,pos):\n return self.queueRep[pos]\n\n# use BFS (shortest path)\n\ndistances = [float('inf'),float('inf'),float('inf'),float('inf'),float('inf')]\n\nstart = 0\nprevDist = 0\n\nq = myQueue()\nq.queue(start)\ndistances[0] = 0\ndone = []\nprevPath = 1\n\n#paths1.append([0])\n\nwhile(q.qSize() != 0):\n curr = q.qGet(0)\n for destID in range(len(adjacencies[curr])):\n adjValue = adjacencies[curr][destID]\n dist = adjValue + prevDist\n if (adjValue>0):\n skip = False\n for adjValue in done:\n if adjValue == destID:\n skip = True\n if not(skip):\n q.queue(destID)\n if (adjValue > 0) & (distances[destID]>dist):\n distances[destID] = dist\n #paths1[destID] = \n q.showQ()\n done.append(q.dequeue())\n q.showQ()\n prevDist = distances[curr]\n\nprint(distances)\n#print(namemap.names)\n#print(len(namemap.names))\n","repo_name":"AtheeshThanaseelan/python-routes","sub_path":"old/BFS_Search.py","file_name":"BFS_Search.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74061615847","text":"t=int(input())\r\nwhile t>0:\r\n t-=1\r\n M,x,y=map(int,input().split())\r\n a=[int(i) for i in input().split()]\r\n h=[0 for i in range(100)]\r\n cst=x*y\r\n for i in a:\r\n i=i-1\r\n r=i\r\n ll=i-1\r\n for j in range(cst+1):\r\n if r=0:\r\n h[ll]=1\r\n ll=ll-1\r\n ct=0\r\n for i in range(100):\r\n if h[i]==0:\r\n ct+=1\r\n print(ct)\r\n","repo_name":"maifujalam/CCDSAP_PythonSolution","sub_path":"COPS.py","file_name":"COPS.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40423492119","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 10 10:34:06 2018\n\n@author: lukas\n\"\"\"\n\nimport os\nworkingDir = os.getcwd()\n \n#-------------------------------------------------------------------------------------------------------------\n\n# Parameters \n\n######################################### MODEL PARAMETERS\n# Models : 'CNN_TPM' , 'DeepMedic'\nmodel = 'DeepMedic'\ndpatch=51\nL2 = 0.0001\n# Loss functions: 'Dice', 'wDice', 
'Multinomial'\nloss_function = 'Dice2'\n\nmy_custom_objects = {'dice_coef_multilabel2':0,\n 'dice_coef_multilabel0':0,\n 'dice_coef_multilabel1':0}\n\nnum_channels = 1\noutput_classes = 6\n\ndropout = [0,0] # dropout for last two fully connected layers\nlearning_rate = 2e-04\noptimizer_decay = 0\n\n","repo_name":"lkshrsch/brst_segmentation","sub_path":"configFiles/model_config_DeepMedic.py","file_name":"model_config_DeepMedic.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20871794712","text":"from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n # Главная страница\n path(\"\", views.index),\n # Список мороженого\n path(\"posts/\", views.group_posts),\n # Подробная информация о постах. Ждем пременную типа slug\n path(\"group//\", views.group_posts),\n]\n","repo_name":"Karina-Rin/yatube_project","sub_path":"yatube/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73576284649","text":"import re\nfrom datetime import datetime\n\n\ndef valid_data(data: dict) -> dict:\n new_data = dict()\n for key, value in data.items():\n new_data[key] = value[0]\n if not _valid_items(key, value[0]):\n continue\n if key == \"date\":\n try:\n new_data[key] = datetime.strptime(value[0], '%Y-%m-%d')\n except ValueError as e:\n new_data[key] = datetime.strptime(value[0], '%d.%m.%Y')\n if key == \"phone\":\n new_data[key] = f'+{value[0].strip()}'\n\n new_data.setdefault('phone', 'str')\n new_data.setdefault('email', 'str')\n new_data.setdefault('date', 'datetime')\n return new_data\n\n\ndef _valid_items(key: str, val: str) -> bool:\n reg = {'email': r'^\\S+@\\w+.\\w{2,4}$',\n 'date': r'(\\d{4}-\\d{2}-\\d{2})|(\\d{2}.\\d{2}.\\d{4})',\n 'phone': r'^\\s7\\s\\d{3}\\s\\d{3}\\s\\d{2}\\s\\d{2}$',\n }\n if re.fullmatch(reg[key], val):\n return True\n\n return False\n\n\ndef type_data(data: dict) -> dict:\n new_data = {}\n for key, val in data.items():\n new_data[key] = type(val[0]).__name__\n if key == \"date\":\n new_data[key] = 'datetime'\n return new_data\n","repo_name":"Andrei00001/djangoProject","sub_path":"app/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72405235048","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis file contains the core code for the object tracker.\n\"\"\"\n\nimport skimage.measure\n\nfrom .labelledobjects import LabelledObjectCollection\n\nclass LabelledObjectTracker(object):\n \"\"\"\n Track labelled objects through a series of images. 
Will modify the inputted\n labelled images.\n \"\"\"\n \n def __init__(self, image_series, labelled_series, **kwargs):\n if image_series.shape != labelled_series.shape:\n raise TypeError(\"The image series and the labelled series must be the same shape\")\n\n self.image_series = image_series\n self.labelled_series = labelled_series\n \n def pre_tracking(self, i1, i2):\n \"\"\"\n Run this code before comparing two frames.\n \n Parameters\n ----------\n i1, i2 : int\n The indexes of the frames being compared\n \"\"\"\n \n def post_tracking(self, i1, i2):\n \"\"\"\n Run this code after comparing two frames.\n \n Parameters\n ----------\n i1, i2 : int\n The indexes of the frames that have been compared\n \"\"\"\n \n def compare_images(self, i1, i2):\n \"\"\"\n The comparison method, modifies the images in place\n \"\"\"\n \n def track_objects(self):\n \"\"\"\n Run the tracking code on the series of images.\n \n Returns\n -------\n labelledobjects : `~sunkit_tracking.LabelledObjectCollection`\n A collection of all object found in the image series\n \"\"\"\n\n self.obj_collection = LabelledObjectCollection()\n for k, (im, label_im) in enumerate(zip(self.image_series[:-1],\n self.labelled_series[:-1])):\n self.pre_tracking(k, k+1)\n\n self.compare_images(k, k+1)\n\n all_rprops = skimage.measure.regionprops(label_im, im)\n self.obj_collection.add_frame(k, all_rprops)\n\n self.post_tracking(k, k+1)\n \n # Add the last frame\n all_rprops = skimage.measure.regionprops(self.labelled_series[-1],\n self.image_series[-1])\n self.obj_collection.add_frame(k, all_rprops)\n \n return self.obj_collection\n\n\nclass OverlapObjectTracker(LabelledObjectTracker):\n \"\"\"\n Track objects through a series of images via their overlap with the \n previous frame\n \"\"\"\n \n def compare_images(self, i1, i2):\n omax = max(self.obj_collection).label if len(self.obj_collection) else 0\n self.labelled_series[i1], self.labelled_series[i2] = \\\n skimage.measure.label_match(self.labelled_series[i1],\n self.labelled_series[i2],\n remove_nonoverlap=omax,\n remove_duplicates=True,\n background=0)","repo_name":"Cadair/sunkit-tracking","sub_path":"sunkit_tracking/objecttracker.py","file_name":"objecttracker.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24682854855","text":"\"\"\"\nComputes multiple statistics for given datasets, such as mean word complexity\nand its standard deviation, Fleiss kappa for annotator agreement, Pearson and\nSpearman correlation. 
If given auxiliary dataset for comparison, finds a\nnumber of common lemmas in both and performs Welch t-test with two-sided\nalternative.\nOptionally, computes all statistic above, except for correlation scores, for\neach frequency range among those used for data sampling.\n\"\"\"\n\nfrom typing import Tuple\n\nimport click\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import pearsonr, spearmanr\nfrom statsmodels.stats.inter_rater import fleiss_kappa\nfrom statsmodels.stats.weightstats import ttest_ind, ttost_paired\n\nfrom src.tools.data_analysis.analysis_utils import (\n aggregate_by_lemma, filter_by_freq_range, load_and_prep_dataframe,\n project_labels_into_discrete, select_common_lemmas)\nfrom src.tools.data_preparation import FREQUENCY_RANGES\n\npd.options.mode.chained_assignment = None\n\n\ndef mean_complexity(dataframe: pd.DataFrame, freq_range=None) -> float:\n \"\"\"\n Computes mean value for word complexities projected into [0,1] range and\n aggregated by lemma. Optionally, does that only for given freq range.\n\n Args:\n dataframe: pandas Dataframe with complexity, contexts, and metadata.\n freq_range: frequency range for filtration.\n\n Returns:\n mean value in range [0,1]\n \"\"\"\n dataframe = aggregate_by_lemma(dataframe, freq_range)\n if freq_range is not None:\n dataframe = filter_by_freq_range(dataframe, freq_range)\n return dataframe[\"OUTPUT:complexity\"].mean()\n\n\ndef std_complexity(dataframe: pd.DataFrame, freq_range=None) -> float:\n \"\"\"\n Computes standard deviation for word complexities projected into [0,1]\n range and aggregated by lemma. Optionally, does that only for given freq\n range.\n\n Args:\n dataframe: pandas Dataframe with complexity, contexts, and metadata.\n freq_range: frequency range for filtration.\n\n Returns:\n standard deviation in range [0,1]\n \"\"\"\n dataframe = aggregate_by_lemma(dataframe, freq_range)\n if freq_range is not None:\n dataframe = filter_by_freq_range(dataframe, freq_range)\n return dataframe[\"OUTPUT:complexity\"].std()\n\n\ndef annotator_agreement(\n dataframe: pd.DataFrame,\n freq_range=None\n ) -> float:\n \"\"\"\n https://stats.stackexchange.com/questions/153225/why-does-fleisss-kappa-decrease-with-increased-response-homogeneity/207640#207640\n\n Computes Fleiss kappa on discrete labels. Optionally, does that only for\n given frequency range.\n\n Args:\n dataframe: pandas Dataframe with complexity, contexts, and metadata.\n freq_range: frequency range for filtration.\n\n Returns:\n Fleiss kappa value in range [-1, 1]. See Wiki for score interpretation\n \"\"\"\n if freq_range is not None:\n dataframe = filter_by_freq_range(dataframe, freq_range)\n if dataframe[\"OUTPUT:complexity\"].dtype == float:\n dataframe = project_labels_into_discrete(dataframe)\n onehot = pd.get_dummies(dataframe[\"OUTPUT:complexity\"])\n dataframe = pd.concat([dataframe, onehot], axis='columns')\n for k in range(1, 6):\n if k not in dataframe.keys():\n dataframe[k] = 0\n scores_mat = dataframe.groupby(\"ASSIGNMENT:task_id\", sort=False).aggregate(\n {k: \"sum\" for k in range(1, 6)}\n ).to_numpy(dtype=int)\n return fleiss_kappa(scores_mat, method='fleiss')\n\n\ndef datasets_intersection(\n l_dataframe: pd.DataFrame,\n r_dataframe: pd.DataFrame,\n freq_range=None\n ) -> int:\n \"\"\"\n Finds how many common lemmas are in both datasets. 
Optionally, does that\n only for given frequency range.\n\n Args:\n l_dataframe: first Dataframe with complexity, contexts, and metadata.\n r_dataframe: second Dataframe with complexity, contexts, and metadata.\n freq_range: frequency range for filtration.\n\n Returns:\n number of common lemmas\n \"\"\"\n l_dataframe = aggregate_by_lemma(l_dataframe, freq_range)\n r_dataframe = aggregate_by_lemma(r_dataframe, freq_range)\n if freq_range:\n l_dataframe = filter_by_freq_range(l_dataframe, freq_range)\n r_dataframe = filter_by_freq_range(r_dataframe, freq_range)\n return len(l_dataframe[l_dataframe.index.isin(r_dataframe.index)])\n\n\ndef correlation_between_intersection(\n l_dataframe: pd.DataFrame,\n r_dataframe: pd.DataFrame\n ) -> Tuple:\n \"\"\"\n Estimates how well complexities for common lemmas are correlated to each\n other. Works on full dataset only.\n\n Args:\n l_dataframe: first Dataframe with complexity, contexts, and metadata.\n r_dataframe: second Dataframe with complexity, contexts, and metadata.\n\n Returns:\n Pearson and Spearman correlation scores.\n \"\"\"\n l_dataframe, r_dataframe = select_common_lemmas(\n l_dataframe,\n r_dataframe\n )\n l_complexity = l_dataframe[\"OUTPUT:complexity\"]\n r_complexity = r_dataframe[\"OUTPUT:complexity\"]\n p_corr = pearsonr(l_complexity,\n r_complexity)\n s_corr = spearmanr(l_complexity,\n r_complexity)\n return p_corr, s_corr\n\n\ndef welch_ttest(\n l_dataframe: pd.DataFrame,\n r_dataframe: pd.DataFrame,\n alternative=\"larger\",\n ttest_value=0,\n freq_range=None\n ) -> Tuple[float, float, float]:\n \"\"\"\n Performs Welch t-test with given alternative to test wheter mean\n complexities for two datasets are statistically different from each other\n or not. Optionally, does that only for given frequency range.\n\n Args:\n l_dataframe: first Dataframe with complexity, contexts, and metadata.\n r_dataframe: second Dataframe with complexity, contexts, and metadata.\n alternative: alternative for ttest - smaller, larger or two-sided.\n ttest_value: difference in means of samples.\n freq_range: frequency range for filtration.\n\n Returns:\n test statistic value, p-value and number of degrees of freedom.\n \"\"\"\n l_dataframe = aggregate_by_lemma(l_dataframe)\n r_dataframe = aggregate_by_lemma(r_dataframe)\n if freq_range:\n l_dataframe = filter_by_freq_range(l_dataframe, freq_range)\n r_dataframe = filter_by_freq_range(r_dataframe, freq_range)\n return ttest_ind(\n l_dataframe[\"OUTPUT:complexity\"],\n r_dataframe[\"OUTPUT:complexity\"],\n alternative=alternative,\n usevar=\"unequal\",\n value=ttest_value\n )[1]\n\n\ndef paired_ttost(\n l_dataframe: pd.DataFrame,\n r_dataframe: pd.DataFrame,\n ttost_range=(0, 0.01),\n freq_range=None\n ) -> Tuple[float, float, float]:\n \"\"\"\n Performs paired t-test with two-sided alternative to test wheter mean\n complexities for two datasets are statistically different from each other\n or not. 
Optionally, does that only for given frequency range.\n\n Args:\n l_dataframe: first Dataframe with complexity, contexts, and metadata.\n r_dataframe: second Dataframe with complexity, contexts, and metadata.\n ttost_range: lower and upper values for tests.\n freq_range: frequency range for filtration.\n\n Returns:\n test statistic value, p-value and number of degrees of freedom.\n \"\"\"\n if freq_range is not None:\n l_dataframe = filter_by_freq_range(l_dataframe, freq_range)\n r_dataframe = filter_by_freq_range(r_dataframe, freq_range)\n l_dataframe, r_dataframe = select_common_lemmas(\n l_dataframe,\n r_dataframe\n )\n l_complexity = l_dataframe[\"OUTPUT:complexity\"]\n r_complexity = r_dataframe[\"OUTPUT:complexity\"]\n ppval, (_, l_pval, _), (_, u_pval, _) = ttost_paired(\n l_complexity,\n r_complexity,\n low=ttost_range[0],\n upp=ttost_range[1],\n )\n return ppval, l_pval, u_pval\n\n\n@click.command()\n@click.argument(\"pools_folder\")\n@click.argument(\"initial_df\")\n@click.option(\"--auxiliary_pools_folder\", default=None)\n@click.option(\"--auxiliary_initial_df\", default=None)\n@click.option(\"--split_by_freq_ranges\", is_flag=True)\n@click.option(\"--ttest_alternative\",\n default=\"larger\",\n type=click.Choice(\n ['larger', 'smaller', 'two_sided'],\n case_sensitive=True))\n@click.option(\"--ttest_value\", default=0, type=float)\n@click.option(\"--ttost_range\", type=(float, float), default=(0, 0.01))\n# @click.option(\"--fast_responses_limit\", default=15)\ndef main( # pylint: disable=too-many-arguments\n pools_folder,\n auxiliary_pools_folder,\n auxiliary_initial_df,\n split_by_freq_ranges,\n initial_df,\n ttest_alternative,\n ttest_value,\n ttost_range\n # fast_responses_limit\n ) -> None:\n \"\"\"\n POOLS_FOLDER: directory with annotation results (tsv) from toloka\n INITIAL_DF: tsv file with all sentences and their data (lemma, freq, word)\n \"\"\"\n dataframe = load_and_prep_dataframe(pools_folder, initial_df)\n if auxiliary_pools_folder is not None:\n auxiliary_dataframe = load_and_prep_dataframe(\n auxiliary_pools_folder,\n auxiliary_initial_df)\n if split_by_freq_ranges:\n for freq_range in sorted(FREQUENCY_RANGES):\n print(f\"For freq range {freq_range[0]}-{freq_range[1]} ipm:\")\n print(\"-\" * 20)\n print(\"Mean and std complexity:\")\n print(\n np.round(mean_complexity(dataframe, freq_range), 3),\n np.round(std_complexity(dataframe, freq_range), 3))\n print(\"Fleiss kappa:\")\n print(np.round(\n annotator_agreement(dataframe, freq_range), 3))\n if auxiliary_pools_folder is not None:\n print(\"Common lemmas between datasets:\")\n print(datasets_intersection(\n dataframe, auxiliary_dataframe, freq_range))\n pval = welch_ttest(\n dataframe, auxiliary_dataframe, ttest_alternative,\n ttest_value, freq_range)\n print(\"Welch ttest between datasets:\")\n print(f\"P-value: {pval}\")\n ppvals = paired_ttost(\n dataframe, auxiliary_dataframe, freq_range, ttost_range)\n print(\"Paired TOST between datasets:\")\n print(f\"P-value: {ppvals[0]}, p-value (lower): {ppvals[1]}, \"\n f\"p-value (upper): {ppvals[2]}\")\n print(\"-\" * 20)\n else:\n print(f\"Mean complexity: {np.round(mean_complexity(dataframe), 3)}\")\n print(f\"STD complexity: {np.round(std_complexity(dataframe), 3)}\")\n print(f\"Fleiss kappa: {np.round(annotator_agreement(dataframe), 3)}\")\n if auxiliary_pools_folder is not None:\n lemma_intersection = datasets_intersection(\n dataframe, auxiliary_dataframe)\n print(f\"Common lemmas between datasets: {lemma_intersection}\")\n if auxiliary_pools_folder is not 
None:\n corrs = correlation_between_intersection(\n dataframe, auxiliary_dataframe)\n print(f\"Pearson corr: {corrs[0]}, Spearman corr: {corrs[1]}\")\n pval = welch_ttest(\n dataframe, auxiliary_dataframe, ttest_alternative,\n ttest_value)\n print(\"Welch ttest between datasets:\")\n print(f\"P-value: {pval}\")\n ppvals = paired_ttost(\n dataframe, auxiliary_dataframe, ttost_range)\n print(\"Paired TOST between datasets:\")\n print(f\"P-value: {ppvals[0]}, p-value (lower): {ppvals[1]}, \"\n f\"p-value (upper): {ppvals[2]}\")\n\n\nif __name__ == '__main__':\n main() # pylint: disable=no-value-for-parameter\n","repo_name":"AbramovAV/Multi-Domain-Russian-Word-Complexity","sub_path":"src/tools/data_analysis/compute_stats.py","file_name":"compute_stats.py","file_ext":"py","file_size_in_byte":12008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34953258760","text":"# Alberto Robledo\r\n# Discord Python Bot\r\n# This is a discord bot that creates logs for certain channels and can be set\r\n# to create logs for them daily.\r\n# It creates a directory for each date and each date will have a directory for\r\n# each channel which will contain a .txt file of the logs\r\n# Creating logs manually is also an option for either a specific date or from a\r\n# starting date to an ending date\r\nimport discord\r\nimport os\r\nimport datetime\r\nimport shutil\r\nimport asyncio\r\nimport re\r\nimport distutils.dir_util\r\nimport sys\r\nfrom discord.ext import commands\r\nfrom datetime import timezone\r\nfrom pathlib import Path\r\nimport time\r\nimport xml\r\nimport xml.etree.ElementTree as ET\r\nimport xml.dom.minidom\r\nimport traceback\r\nimport logging\r\n\r\n\r\nlogging.basicConfig(filename='./console.txt', filemode='a+', level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')\r\n# Current version\r\n# keep this up to date\r\nVersion = '0.10.4'\r\n\r\n#------------------------------------------------Constants-------------------------------------------------------------------------\r\nBotPrefix = []\r\n\r\nTOKEN = None\r\n\r\nLogLimitPerChannel = sys.maxsize\r\nSecondsInDay = 86400\r\nSecondsInHour = 3600\r\n\r\nAutomaticLogging = False\r\n\r\nLogTypeInfo = 0\r\nLogTypeWarning = 1\r\nLogTypeError = 2\r\n\r\nParentDirectory = 'Logs'\r\nConsoleLogs = 'console.txt'\r\nTempFolder = 'temp'\r\nConfigFile = 'config.txt'\r\nConfigXmlFile = 'Config.xml'\r\nURL = 'url'\r\nBadChannelIdOrName = 'Bad Channel ID or Name'\r\nDateFormat = \"%Y-%m-%d\"\r\nDateTimeFormat = \"%Y-%m-%d %H:%M:%S\"\r\n#Possible Log Times\r\n# 24 hour clock\r\n# example for adding minutes\r\n# datetime.time(1,30)\r\n# this will try to log from midnight to 1 am\r\n# it will attempt to log during this time period midnight - 1 am\r\nEarlyLogTime = datetime.time(0)\r\nLateLogTime = datetime.time(1)\r\n\r\nError = '```excel\\n'\r\nWarning = '```http\\n'\r\nLog = '```Elm\\n'\r\n\r\n#XML element names\r\nXMLToken = 'Token' \r\nXMLTokenAttribute = 'id'\r\nXMLChannel = 'Channel'\r\nXMLChannelAttribute = 'info'\r\nXMLPrefix = 'Prefix'\r\nXMLPrefixAttribute = 'character'\r\nXMLRole = 'Role'\r\nXMLRoleAttribute = 'name'\r\n\r\n#probably should make this an enum\r\nMessageTypeLog = 'Log'\r\nMessageTypeError = 'Error'\r\nMessageTypeWarning = 'Warning'\r\n\r\nEndCodeBlock = '```'\r\n\r\nServerID = None\r\n\r\nstart_time = 
time.time()\r\n#---------------------------------------------------Constants\r\n#end------------------------------------------------------------------\r\n\r\n\r\n#---------------------------------------------RegEx----------------------------------------------------------------------------\r\ndate_pattern = re.compile(r'(\\d\\d\\d\\d)\\W(\\d\\d)\\W(\\d\\d)')\r\n\r\nemoji_pattern = re.compile(\"[\"u\"\\U0001F600-\\U0001F64F\"\r\n u\"\\U0001F300-\\U0001F5FF\"\r\n u\"\\U0001F680-\\U0001F6FF\"\r\n u\"\\U0001F1E0-\\U0001F1FF\"\r\n u\"\\U00002702-\\U000027B0\"\r\n u\"\\U000024C2-\\U0001F251\" \"]+\", flags=re.UNICODE)\r\n\r\n#---------------------------------------------RegEx---------------------------------------------------------------------------\r\nclient = commands.Bot(command_prefix=BotPrefix)\r\n\r\n\r\n# if we want to add a channel here manually enter it as a string '543553573'\r\n# entering it as an int does not work\r\n# This should be read from a config file and saved when a new channel is added\r\n# to log or removed\r\nchannelsToLog = []\r\n\r\nparsedChannelInfo = []\r\n\r\ncommandRoles = []\r\n\r\n#------------------------------------------------------check_config_file----------------------------------------------------------\r\ndef check_config_file():\r\n \"\"\"Checks if config file exists if not it creates one\"\"\"\r\n if not os.path.exists(ConfigXmlFile):\r\n create_xml_config_file()\r\n parse_xml_config()\r\n\r\n#-----------------------------------------------------parse_xml_config------------------------------------------------------------\r\ndef parse_xml_config():\r\n \"\"\"Trys to parse xml file into tree. If it fails it creates a default xml file.\r\nParses the data and assigns prefixes, channelsToLog, Token\"\"\"\r\n try:\r\n tree = ET.parse(ConfigXmlFile)\r\n except Exception as e:\r\n print_and_log(LogTypeError, e)\r\n print_and_log(LogTypeInfo, 'Creating new xml file')\r\n create_xml_config_file()\r\n tree = ET.parse(ConfigXmlFile)\r\n root = tree.getroot()\r\n for Element in root:\r\n if Element.tag == XMLChannel:\r\n info = Element.get(XMLChannelAttribute)\r\n print_and_log(LogTypeInfo, 'Adding ' + info + ' to logged channels')\r\n if info in parsedChannelInfo:\r\n print_and_log(LogTypeInfo, info + ' was already in channels to log')\r\n else:\r\n parsedChannelInfo.append(info)\r\n if Element.tag == XMLToken:\r\n global TOKEN\r\n TOKEN = Element.get(XMLTokenAttribute)\r\n print_and_log(LogTypeInfo, 'Using this Token: ' + TOKEN)\r\n if Element.tag == XMLPrefix:\r\n prefix = Element.get(XMLPrefixAttribute)\r\n print_and_log(LogTypeInfo, 'Adding ' + prefix + ' to bot prefixes')\r\n if prefix in BotPrefix:\r\n print_and_log(LogTypeInfo, prefix + ' is already a prefix!')\r\n else:\r\n BotPrefix.append(Element.get(XMLPrefixAttribute))\r\n if Element.tag == XMLRole:\r\n role = Element.get(XMLRoleAttribute)\r\n print_and_log(LogTypeInfo, 'Adding ' + role + ' to command roles')\r\n if role in commandRoles:\r\n print_and_log(LogTypeInfo, role + ' was already in command roles!')\r\n else:\r\n commandRoles.append(role)\r\n\r\n\r\n#-----------------------------------------------------------RemoveFromXML-----------------------------------------------------------\r\ndef RemoveFromXML(Element, attribName, attribValue):\r\n \"\"\"Removes element with an attrivute of certain value from XML file\"\"\"\r\n tree = ET.parse(ConfigXmlFile)\r\n root = tree.getroot()\r\n for element in root.findall(Element):\r\n if attribValue == element.get(attribName):\r\n root.remove(element)\r\n xmlstr = 
xml.dom.minidom.parseString(ET.tostring(root)).toxml()\r\n file = open(ConfigXmlFile, 'w')\r\n file.write(xmlstr)\r\n file.close()\r\n\r\n#-------------------------------------------------------------AddToXML---------------------------------------------------------------\r\ndef AddToXML(element, attribName, attribValue):\r\n \"\"\"Adds an element of type element that has an attribute with the passed in value\"\"\"\r\n tree = ET.parse(ConfigXmlFile)\r\n root = tree.getroot()\r\n ET.SubElement(root, element).set(attribName, attribValue)\r\n xmlstr = xml.dom.minidom.parseString(ET.tostring(root)).toxml()\r\n file = open(ConfigXmlFile, 'w')\r\n file.write(xmlstr)\r\n file.close()\r\n\r\n\r\n\r\n#--------------------------------------------------------create_xml_config_file------------------------------------------------------\r\n# Creates initial XML config file for the bot\r\ndef create_xml_config_file():\r\n file = open(ConfigXmlFile, 'w+')\r\n try:\r\n global TOKEN\r\n if TOKEN is None:\r\n TOKEN = \"Enter Token Here\"\r\n file.write('''\r\n\r\n\t\r\n\t\r\n \r\n''')\r\n except UnicodeEncodeError as e:\r\n print_and_log(LogTypeI, 'Failed to write XML file')\r\n print_and_log(LogTypeError, e)\r\n file.close()\r\n\r\n#----------------------------------------------------------prefixes---------------------------------------------------------------\r\n@client.command(name='Prefixes',\r\n description='Lists the command prefixes that have been set',\r\n brief='Lists command prefixes',\r\n aliases=['prefixes'],\r\n pass_context=True)\r\nasync def prefixes(context):\r\n \"\"\"Sends client all prefixes in BotPrefixes\"\"\"\r\n if BotPrefix == 0:\r\n await context.send(format_message(MessageTypeError, 'Error: no prefixes assigned. Either add a prefix in xml or delete config file and run again'))\r\n return\r\n await context.send(format_message(MessageTypeLog, 'Here are all the command prefixes'))\r\n for prefix in BotPrefix:\r\n await context.send(format_message(MessageTypeLog, prefix))\r\n await context.send(format_message(MessageTypeLog, 'Done!'))\r\n\r\n#---------------------------------------------------create_logs_for_yesterday---------------------------------------------------------\r\nasync def create_logs_for_yesterday():\r\n \"\"\"Logs everything that happened yesterday in every channel in ChannelsToLog\"\"\"\r\n info = await yesterday_log_loop()\r\n if info is not None:\r\n print(info)\r\n\r\n#------------------------------------------------------LogChannel-------------------------------------------------------------------\r\nasync def LogChannel(channel, startTime, command=None):\r\n \"\"\"Logs a certain channel from a startTime to 24 hours from then\r\nCreates a folder with the logged date as the name and a folder \r\nwith the channels name\r\nInside of the folder with the channel's name it creates a .txt file that\r\ncontains each message along with its author's name\r\nand the time it was sent at. 
It also adds the URL of any attachment that \r\nthe author included\"\"\"\r\n midnight = startTime + datetime.timedelta(days=1)\r\n print_and_log(LogTypeInfo, 'Getting logs for ' + channel.name + ' from ' + startTime.strftime(DateTimeFormat) + ' to ' + midnight.strftime(DateTimeFormat))\r\n channelNameFix = channel.name.replace('/', '-')\r\n channelNameFix = channelNameFix.replace(r'\\\\', '-')\r\n mainDirectory = channelNameFix\r\n fileName = channelNameFix + ' ' + startTime.strftime(DateFormat)\r\n path = ParentDirectory + '/' + mainDirectory\r\n os.makedirs(path, exist_ok=True)\r\n file = open(path + '/' + fileName + '.txt', 'w+')\r\n# For each message in a channel from startTime to midnight ---- From a start\r\n# time to 24 hours after\r\n test = channel.history(limit=LogLimitPerChannel, before=midnight, after=startTime)\r\n channelMessages = []\r\n try:\r\n async for message in channel.history(limit=LogLimitPerChannel, before=midnight, after=startTime):\r\n messageAuthor = \"null\"\r\n # if the author has set a nickname use that else use their discord\r\n # account name\r\n if hasattr(message.author, 'nick'):\r\n if message.author.nick is not None:\r\n messageAuthor = message.author.nick\r\n else:\r\n messageAuthor = message.author.name\r\n # Get the message's timestamp and convert it to local time\r\n # using GetTimeoffset\r\n timeZoneTime = message.created_at - datetime.timedelta(hours=GetTimeOffset())\r\n # save the date, the message's author and the message content\r\n # without emojis\r\n newMessage = {}\r\n newMessage['data'] = timeZoneTime.strftime(DateTimeFormat) + \" \" + kill_emojis(messageAuthor) + \":\" + kill_emojis(message.content) + '\\n'\r\n newMessage['id'] = message.id\r\n # append the newMessage to the channelMessages list\r\n channelMessages.append(newMessage)\r\n\r\n # if the message has attachments append the attachment url to\r\n # channelMessages\r\n if hasattr(message, 'attachments'):\r\n if message.attachments:\r\n attachments = message.attachments\r\n for attachment in attachments:\r\n message_url = {'data': attachment.url + '\\n', 'id': 0}\r\n channelMessages.append(message_url)\r\n #try to write each message in channelMessages else print the error message\r\n except Exception as e:\r\n if command is not None:\r\n print_and_log(LogTypeInfo, 'Command: ' + command)\r\n print_and_log(LogTypeError, e)\r\n for message in reversed(channelMessages):\r\n try:\r\n file.write(message['data'])\r\n except UnicodeEncodeError as e:\r\n print(\"UnicodeEncodeError for message with id of:\" + str(message['id']))\r\n print(e)\r\n file.close()\r\n\r\n#-----------------------------------------------------check_time-------------------------------------------------------------------\r\nasync def check_time():\r\n \"\"\"Checks if the currentTime is in between the times that the bot can \r\nlog at for automatic logging\r\nIf the bot is within that time then it createsLogs for yesterday and waits a\r\nfull 24 hours before logging again\r\nIf it was not in the correct time to log then it checks every hour until it\r\nhits the goal time.\"\"\"\r\n while True:\r\n if EarlyLogTime <= datetime.datetime.now().time() <= LateLogTime:\r\n print(\"Logging these channels: \")\r\n await create_logs_for_yesterday()\r\n print_and_log(LogTypeInfo, 'Logs created. Sleeping for 24 hours until it is time to log again')\r\n await asyncio.sleep(SecondsInDay)\r\n else:\r\n print(\"Not correct time to log. 
Will wait an hour and try again\")\r\n await asyncio.sleep(SecondsInHour)\r\n\r\n#---------------------------------------------------------print_and_log---------------------------------------------------------------\r\ndef print_and_log(log_type, string):\r\n \"\"\"prints string to console and writes string to log file\"\"\"\r\n print(string)\r\n if log_type == LogTypeInfo:\r\n logging.info(string, exc_info=True)\r\n elif log_type == LogTypeWarning:\r\n logging.warning(string, exc_info=True)\r\n elif log_type == LogTypeError:\r\n logging.error(string, exc_info=True)\r\n else:\r\n logging.log(string, exc_info=True)\r\n \r\n#-------------------------------------------------------on_ready------------------------------------------------------------------\r\n@client.event\r\nasync def on_ready():\r\n \"\"\"This is called when the bot comes is ready\r\n if channelsToLog is not empty it then calls check_time() which can start the\r\n autolog process\r\n printing early log time and late logtime along with timezone\"\"\"\r\n if len(client.guilds) > 1:\r\n print_and_log(LogTypeWarning,'Connected to more than one server!')\r\n for server in client.guilds:\r\n print_and_log(LogTypeInfo,'Connected to: ' + server.name)\r\n global ServerID\r\n ServerID = server.id\r\n break\r\n \r\n commands = client.commands\r\n print_and_log(LogTypeInfo, 'Running version: ' + Version)\r\n print('Connected and ready to log!\\nLogging sometime between these times')\r\n print(EarlyLogTime)\r\n print(LateLogTime)\r\n print(time.tzname)\r\n \r\n for channelInfo in parsedChannelInfo:\r\n channel = get_channel_by_name(channelInfo)\r\n if channel is None:\r\n channel = client.get_channel(channelInfo)\r\n if channel is not None:\r\n channelsToLog.append(channel.id)\r\n if len(channelsToLog) > 0:\r\n global AutomaticLogging\r\n AutomaticLogging = True\r\n await check_time()\r\n else:\r\n print('Channels to log is empty.\\nAdd channels to log then call StartLogging Command')\r\n\r\n#-----------------------------------------------------version----------------------------------------------------------------------\r\n@client.command(name='Version',\r\n description='Sends client version of the bot',\r\n brief='Version number',\r\n aliases=['version', 'v'],\r\n pass_context=True)\r\nasync def version(context):\r\n \"\"\"sends user bot's version number\"\"\"\r\n async with context.typing():\r\n await context.send(format_message(MessageTypeLog,'Version: ' + Version))\r\n\r\n#-------------------------------------------------------on_message----------------------------------------------------------------\r\n@client.event\r\nasync def on_message(message):\r\n \"\"\"takes each message that the bot can access and makes it lower case\r\n this is used so commands can be non case sensitive\"\"\"\r\n if message.author.bot == True:\r\n return\r\n for prefix in BotPrefix:\r\n if message.content[0].lower() == prefix:\r\n break\r\n else:\r\n return\r\n global ServerID\r\n server = client.get_guild(ServerID)\r\n serverMember = server.get_member(message.author.id)\r\n if serverMember == None:\r\n print_and_log(LogTypeInfo, 'User: ' + kill_emojis(message.author.name) + ' is not connected to the server')\r\n else:\r\n roles = serverMember.roles\r\n doesBotListen = False\r\n for role in commandRoles:\r\n for r in roles:\r\n if role == r.name:\r\n doesBotListen = True\r\n break\r\n if doesBotListen:\r\n message.content = message.content.lower()\r\n message.author = serverMember\r\n await client.process_commands(message)\r\n else:\r\n 
print_and_log(LogTypeInfo, 'User with no permissions trying to run command!')\r\n if serverMember.nick is not None:\r\n print_and_log(LogTypeInfo, 'Username: ' + kill_emojis(message.author.name))\r\n print_and_log(LogTypeInfo, 'Nickname: ' + kill_emojis(serverMember.nick))\r\n else:\r\n print_and_log(LogTypeInfo, 'Username: ' + kill_emojis(message.author.name))\r\n print_and_log(LogTypeInfo, 'Message: ' + kill_emojis(message.content))\r\n\r\n#--------------------------------------------------------get_channels-------------------------------------------------------------\r\n@client.command(name='GetChannelIDs',\r\n\t\t\t\tdescription='Gets text channels that the bot can access and prints their names and ids',\r\n\t\t\t\tbrief='Prints Channel names and ids',\r\n\t\t\t\taliases=['channels', 'listchannelids', 'getchannelids'],\r\n\t\t\t\tpass_context=True)\r\nasync def get_channels(context):\r\n \"\"\"Shows the user the current text channels along with the channel's ID that are\r\n available to the bot\"\"\"\r\n channels = client.get_guild(ServerID).text_channels\r\n await context.send('Getting Available Channels...')\r\n async with context.typing():\r\n for channel in channels: \r\n await context.send(format_message(MessageTypeLog, channel.name + \" \" + str(channel.id)))\r\n await context.send('All available channels listed.')\r\n\r\n\r\n#----------------------------------------------------add_all_channels--------------------------------------------------------------\r\n# adds all channels the bot can access to the logged channels list\r\n@client.command(name='AddAllChannels',\r\n description='Adds all text channels to logged channels list',\r\n brief='Adds all channels to log list',\r\n aliases=['addallchannels', 'aac'],\r\n pass_context=True)\r\nasync def add_all_channels(context):\r\n async with context.typing():\r\n channels = client.get_guild(ServerID).text_channels\r\n for channel in channels:\r\n if channel.id not in channelsToLog:\r\n channelsToLog.append(channel.id)\r\n AddToXML(XMLChannel, XMLChannelAttribute, channel.name)\r\n LogCommand(context.message)\r\n await context.send('Added all text channels to be logged')\r\n\r\n#--------------------------------------------------remove_all_channels-------------------------------------------------------------\r\n@client.command(name='RemoveAllChannels',\r\n description='Removes all channels that are currently set to be logged.',\r\n brief='Removes all channels from logging',\r\n aliases=['removeallchannels'],\r\n pass_context=True)\r\nasync def remove_all_channels(context):\r\n \"\"\"removes all the channels currently in channels to log list\"\"\"\r\n async with context.typing():\r\n for channelID in channelsToLog:\r\n channel = client.get_channel(channelID)\r\n RemoveFromXML(XMLChannel, XMLChannelAttribute, channel.name)\r\n LogCommand(context.message)\r\n channelsToLog.clear()\r\n await context.send('Removed all channels from logging!')\r\n\r\n#-------------------------------------------------------log_date--------------------------------------------------------------------\r\n@client.command(name='LogDate',\r\n description='Logs certain dates depending on dates submitted. \\\r\nTo log one date enter date in format yyyy/mm/dd. To log a range of dates enter second date in same format. 
LogDate yyyy/mm/dd or LogDate yyyy/mm/dd yyyy/mm/dd',\r\n brief='Logs certain dates depending on date(s) submitted.',\r\n aliases=['LogDates', 'logdate', 'logdates'],\r\n pass_context=True)\r\nasync def log_date(context, date=None, endDate=None):\r\n \"\"\"Creates a log for a specific date or from startDate to endDate for each channel in channelsToLog\"\"\"\r\n if len(channelsToLog) == 0:\r\n await context.send(format_message(MessageTypeWarning, 'Channels to log is empty'))\r\n return\r\n #if no date was passed in then return\r\n if date is None:\r\n return\r\n # the first date is set to the checked date and adjusted to local time\r\n firstDate = check_date(date)\r\n # if firstDate was an incorrect format let the client know then return\r\n if firstDate is None:\r\n await context.send(format_message(MessageTypeWarning, 'Date has an incorrect format'))\r\n return\r\n firstDate = firstDate - datetime.timedelta(hours=-GetTimeOffset())\r\n if endDate is not None:\r\n # if a second date was passed in set the date to the checked date plus\r\n # a day and convert to local time\r\n secondDate = check_date(endDate)\r\n # if the secondDate is None then return because it was in a bad format\r\n # or an invalid date\r\n if secondDate is None:\r\n await context.send(format_message(MessageTypeWarning, 'Second date has an incorrect format'))\r\n return\r\n else:\r\n secondDate = secondDate - datetime.timedelta(days=-1,hours=-GetTimeOffset())\r\n initialDate = firstDate\r\n lastDate = secondDate - datetime.timedelta(days=1)\r\n date_message = await context.send(format_message(MessageTypeLog, \"Currently Logging: \" + firstDate.strftime(DateFormat)))\r\n # if the second date is a correct format Log each channel in\r\n # ChannelsToLog and increment the day until\r\n # firstdate is equal or greater than the secondDate\r\n while firstDate < secondDate:\r\n async with context.typing():\r\n for channelID in channelsToLog:\r\n channel = client.get_channel(channelID)\r\n if(channel.type is discord.ChannelType.text):\r\n await LogChannel(channel, firstDate, 'LogDate')\r\n else:\r\n await context.send(\"Channel: \" + channel.name + \" is not a TextChannel!\")\r\n firstDate = firstDate + datetime.timedelta(days=1)\r\n await date_message.edit(content=format_message(MessageTypeLog, \"Currently Logging: \" +firstDate.strftime(DateFormat)))\r\n await date_message.edit(content=format_message(MessageTypeLog, 'Logs for created from:' + initialDate.strftime(DateFormat) + ' to ' + lastDate.strftime(DateFormat)))\r\n return\r\n # if no second date was entered\r\n # create logs for the first date for each channel in ChannelsToLog\r\n async with context.typing():\r\n for channelID in channelsToLog:\r\n channel = client.get_channel(channelID)\r\n if(channel.type is discord.ChannelType.text):\r\n await LogChannel(channel, firstDate, 'LogDate')\r\n else:\r\n await context.send(\"Channel: \" + channel.name + \" is not a TextChannel!\")\r\n await context.send('Done!')\r\n\r\n#--------------------------------------------------log_yesterday---------------------------------------------------------------------\r\n# Creates logs for each channel in ChannelsToLog for yesterday's date\r\n@client.command(name='LogYesterday',\r\n\t\t\t\tdescription='Logs yesterdays messages',\r\n\t\t\t\tbrief='Logs yesterdays messages',\r\n\t\t\t\taliases=['ly', 'logy', 'logyes', 'logyesterday'],\r\n pass_context=True)\r\nasync def log_yesterday(context):\r\n async with context.typing():\r\n info = await yesterday_log_loop('LogYesterday')\r\n if info is not 
None:\r\n await context.send(info)\r\n await context.send(format_message(MessageTypeLog, 'Finished logging yesterday'))\r\n\r\n#--------------------------------------------------roles----------------------------------------------------------------------------\r\n@client.command(name='Roles',\r\n description='Sends client the list of roles that the bot will take commands from.',\r\n brief='Sends client roles bot listens to',\r\n aliases=['roles'],\r\n pass_context=True)\r\nasync def roles(context):\r\n \"\"\"Sends client each role in commandRoles\"\"\"\r\n if len(commandRoles) == 0:\r\n await context.send(format_message(MessageTypeError, 'Error: No roles have been added. Use AddRole to add a role.'))\r\n return\r\n async with context.typing():\r\n await context.send(format_message(MessageTypeLog, 'These are the roles that the bot will take commands from:'))\r\n for role in commandRoles:\r\n await context.send(format_message(MessageTypeLog, role))\r\n await context.send(format_message(MessageTypeLog, 'Done listing roles.'))\r\n\r\n#------------------------------------------------------add_role----------------------------------------------------------------------\r\n@client.command(name='AddRole',\r\n description='Adds a server role that the bot will take commands from.',\r\n brief='Add a role that the bot will listen to.',\r\n aliases=['addrole'],\r\n pass_context=True)\r\nasync def add_role(context, role=None):\r\n \"\"\"Adds a role to the list of roles the bot will listen to. Adds a new xml element for the role.\"\"\"\r\n if role is None:\r\n await context.send(format_message(MessageTypeWarning, 'Role not entered'))\r\n return\r\n async with context.typing():\r\n role = role.replace(\"'\", '')\r\n server = client.get_guild(ServerID)\r\n added_role = None\r\n for servRole in server.roles:\r\n if role.lower() == servRole.name.lower():\r\n added_role = servRole.name\r\n break\r\n else:\r\n await context.send(format_message(MessageTypeWarning, 'Server does not have this role'))\r\n return\r\n LogCommand(context.message)\r\n commandRoles.append(added_role)\r\n AddToXML(XMLRole, XMLRoleAttribute, added_role)\r\n await context.send(format_message(MessageTypeLog, added_role + ' has been added'))\r\n\r\n#----------------------------------------------------------remove_role---------------------------------------------------------------\r\n@client.command(name='RemoveRole',\r\n description='Removes a server role that the bot takes commands from.',\r\n brief='Remove a role that the bot listens to',\r\n aliases=['removerole'],\r\n pass_context=True)\r\nasync def remove_role(context, role=None):\r\n \"\"\"Removes specified role from list of roles the bot listens to and removes it from the xml config file.\"\"\"\r\n if role == None:\r\n await context.send(format_message(MessageTypeWarning, 'Role not entered'))\r\n role = role.replace(\"'\", '')\r\n removeRole = None\r\n for servRole in commandRoles:\r\n if servRole.lower() == role:\r\n removeRole = servRole\r\n break\r\n else:\r\n await context.send(format_message(MessageTypeWarning, 'Role was not in Command Roles'))\r\n return\r\n if len(commandRoles) == 1:\r\n await context.send(format_message(MessageTypeWarning, 'Only one Role can give the bot commands. 
Add another role to be able to remove this one.'))\r\n return\r\n commandRoles.remove(servRole)\r\n LogCommand(context.message)\r\n RemoveFromXML(XMLRole, XMLRoleAttribute, servRole)\r\n await context.send(format_message(MessageTypeLog, servRole + ' has been removed'))\r\n \r\n\r\n#-------------------------------------------------------add_prefix----------------------------------------------------------\r\n@client.command(name='AddPrefix',\r\n description='Adds a new character as a command prefix. Can use this character to issue a command.',\r\n brief='Add new character as command prefix',\r\n aliases=['addprefix', 'addprfx'],\r\n pass_context=True)\r\nasync def add_prefix(context, prefix=None):\r\n \"\"\"Adds a prefix to list of BotPrefixes used for calling commands\"\"\"\r\n if prefix is None:\r\n await context.send(format_message(MessageTypeWarning, 'Prefix not entered'))\r\n return\r\n# name and nick\r\n if len(prefix) == 1:\r\n BotPrefix.append(prefix)\r\n AddToXML(XMLPrefix, XMLPrefixAttribute, prefix)\r\n await context.send(format_message(MessageTypeLog, prefix + ' has been added as a prefix'))\r\n LogCommand(context.message)\r\n else:\r\n await context.send(format_message(MessageTypeWarning, 'Prefix should be one character'))\r\n\r\n\r\n#-----------------------------------------------------LogCommand--------------------------------------------------------------------\r\ndef LogCommand(message):\r\n \"\"\"username, nick, and message and printed in console and written to file\"\"\"\r\n print_and_log(LogTypeInfo, 'Username: ' + kill_emojis(message.author.name))\r\n if message.author.nick is not None:\r\n print_and_log(LogTypeInfo, 'Nickname: ' + kill_emojis(message.author.nick))\r\n print_and_log(LogTypeInfo, 'Message: ' + kill_emojis(message.content))\r\n\r\n#-------------------------------------------------------remove_prefix----------------------------------------------------------\r\n@client.command(name='RemovePrefix',\r\n description='Removes a prefix from command prefixes.',\r\n brief='Remove a character from command prefixes',\r\n aliases=['removeprefix', 'rmvprfx'],\r\n pass_context=True)\r\nasync def remove_prefix(context, prefix=None):\r\n \"\"\"Removes a prefix from BotPrefixes. Prefixes are used to call commands\"\"\"\r\n if prefix is None:\r\n await context.send(format_message(MessageTypeWarning, 'No prefix entered'))\r\n return\r\n if prefix in BotPrefix and len(prefix) == 1:\r\n if len(BotPrefix) == 1:\r\n await context.send(format_message(MessageTypeWarning, 'Only one prefix exists. 
Add another to be able to remove this one'))\r\n return\r\n BotPrefix.remove(prefix)\r\n RemoveFromXML(XMLPrefix, XMLPrefixAttribute, prefix)\r\n await context.send(format_message(MessageTypeLog, prefix + ' has been removed from prefixes'))\r\n LogCommand(context.message)\r\n else:\r\n await context.send(format_message(MessageTypeWarning, prefix + ' was not a prefix'))\r\n\r\n#---------------------------------------------------add_channel_to_log----------------------------------------------------------------\r\n@client.command(name='AddChannelToLog',\r\n\t\t\t\tdescription='Adds a channel to list of channels to log',\r\n\t\t\t\tbrief='Adds channel to channel list',\r\n\t\t\t\taliases=['addchannel', 'addchn', 'addchanneltolog'],\r\n\t\t\t\tpass_context=True)\r\nasync def add_channel_to_log(context, newChannel):\r\n \"\"\"Adds Channels to Logging\"\"\"\r\n newChannel = newChannel.replace(\"'\", '')\r\n channel = get_channel_by_name(newChannel)\r\n if channel is None:\r\n channel = client.get_channel(newChannel)\r\n if channel is None:\r\n await context.send(format_message(MessageTypeError, BadChannelIdOrName))\r\n return\r\n \r\n for channelID in channelsToLog:\r\n if channelID == channel.id:\r\n await context.send(format_message(MessageTypeWarning, 'Channel: ' + channel.name + '\\nID:' + str(channel.id) + '\\nHas already been added.'))\r\n return\r\n if(channel.type is not discord.ChannelType.text):\r\n await context.send(format_message(MessageTypeLog, 'Channel: ' + channel.name + '\\nID:' + str(channel.id) + '\\nis not a TextChannel and has not been added.'))\r\n return\r\n channelsToLog.append(channel.id)\r\n AddToXML(XMLChannel, XMLChannelAttribute, channel.name)\r\n LogCommand(context.message)\r\n await context.send(format_message(MessageTypeLog, 'Channel: ' + channel.name + '\\nID:' + str(channel.id) + '\\nHas been added to channels to log.'))\r\n\r\n#----------------------------------------------------remove_channel_from_log--------------------------------------------------------\r\n@client.command(name='RemoveChannelFromLog',\r\n\t\t\t\tdescription='Removes channel from list of channels to log',\r\n\t\t\t\tbrief='Removes channel to channel list',\r\n\t\t\t\taliases=['removechannel', 'rmchn', 'removechannelfromlog'],\r\n\t\t\t\tpass_context=True)\r\nasync def remove_channel_from_log(context, new_channel):\r\n \"\"\"Removes channels from logging\"\"\"\r\n new_channel = new_channel.replace(\"'\", '')\r\n channel = client.get_channel(new_channel)\r\n if channel is None:\r\n channel = get_channel_by_name(new_channel)\r\n if channel is None:\r\n await context.send(format_message(MessageTypeError, BadChannelIdOrName))\r\n return\r\n if channel.id in channelsToLog:\r\n channelsToLog.remove(channel.id)\r\n RemoveFromXML(XMLChannel, XMLChannelAttribute, channel.name)\r\n await context.send(format_message(MessageTypeLog, 'Channel: ' + channel.name + '\\nID:' + str(channel.id) + '\\nwas removed from Channels To Log'))\r\n LogCommand(context.message)\r\n else:\r\n await context.send(format_message(MessageTypeWarning, 'Channel: ' + channel.name + '\\nID:' + str(channel.id) + '\\nwas not in Channels To Log'))\r\n\r\n\r\n#----------------------------------------------------format_date-------------------------------------------------------------\r\ndef format_date(date, date_number):\r\n if date is not None:\r\n newDate = check_date(date)\r\n if newDate is None:\r\n print('Error: ' + date_number + ' is not valid')\r\n return None\r\n else:\r\n return 
newDate\r\n\r\n#-----------------------------------------------------get_logs----------------------------------------------------------------\r\n@client.command(name='GetLogs',\r\n description='Gets all logs and sends them to user',\r\n brief='Gets all logs',\r\n aliases=['getlogs', 'gl', 'getlog'],\r\n pass_context=True)\r\nasync def get_logs(context):\r\n \"\"\"Gets all logs and sends them to user\"\"\"\r\n if os.path.exists(ParentDirectory):\r\n await context.send('Getting all logs...')\r\n log_name = 'LogCat_logs_All_Logs'\r\n shutil.make_archive(log_name, 'zip', ParentDirectory)\r\n try:\r\n await context.send(file=discord.File(log_name + '.zip', log_name + '.zip'))\r\n await context.send('Done!')\r\n except Exception as e:\r\n print_and_log(LogTypeInfo, 'Command: ' + 'GetLogs')\r\n print_and_log(LogTypeError, e)\r\n await context.send(e)\r\n os.remove(log_name + '.zip')\r\n else:\r\n await context.send(format_message(MessageTypeError, 'No Logs Exist'))\r\n\r\n#----------------------------------------------get_channel_logs-------------------------------------------------------------------\r\n\r\n@client.command(name='GetChannelLogs',\r\n description='Gets logs from specificed channel name or ID. Can also pass in dates formatted yyyy/mm/dd. Can enter\\\r\n dates in any order. Enter a single date for logs for that day or 2 dates for all logs during that period. GetChannelLogs general yyyy/mm/dd yyyy/mm/dd',\r\n brief='Gets the logs from the specified channel name',\r\n aliases=['getchannellogs', 'getchannellog', 'chnlog', 'chnlogs'],\r\n pass_context=True)\r\nasync def get_channel_logs(context, param1=None, param2=None, param3=None):\r\n \"\"\"Gets logs from a specific channel that the user passes in. If no date is\r\nentered all logs are returned for that specific channel.\r\nif a single date is entered then the logs for that channel on that date are returned.\r\nif two dates and a channel are entered then the logs from the earlier date to\r\nthe later date are returned from that channel\"\"\"\r\n directoryToCopy = ParentDirectory\r\n channel = None\r\n date1 = None\r\n date2 = None\r\n filesToCopy = []\r\n\r\n #tries to see what parameter is a channel\r\n if param1 is not None:\r\n channel = get_channel_by_name(param1)\r\n if channel is None and param2 is not None:\r\n channel = get_channel_by_name(param2)\r\n if channel is None and param3 is not None:\r\n channel = get_channel_by_name(param3)\r\n \r\n permission_required = await has_permissions(channel)\r\n\r\n if permission_required == False:\r\n await context.send(format_message(MessageTypeError, \"Error: read_channel_history permission is required.\"))\r\n return\r\n \r\n\r\n # if no channel wa found return and send error message\r\n if channel is None:\r\n await context.send(format_message(MessageTypeError, 'Error: could not find channel name')) \r\n return\r\n\r\n # if the channel's name has any slashes replace them with -\r\n channelNamePath = channel.name.replace('/', '-')\r\n channelNamePath = channelNamePath.replace(r'\\\\', '-')\r\n \r\n # if we found a channel set the directoryToCopy to the parent directory +\r\n # the channels name\r\n if channel is not None:\r\n directoryToCopy = ParentDirectory + '/' + channelNamePath\r\n #check the if another parameter was added and check if it's a date\r\n if param1 is not channel and param1 is not None:\r\n date1 = check_date(param1)\r\n if param2 is not channel and param2 is not None:\r\n if date1 is None:\r\n date1 = check_date(param2)\r\n else:\r\n date2 = check_date(param2)\r\n 
if param3 is not channel and param3 is not None:\r\n if date1 is None:\r\n date1 = check_date(param3)\r\n else:\r\n if date2 is None:\r\n date2 = check_date(param3)\r\n \r\n if channel is not None:\r\n # if channel is none create a temp folder with the channel's name\r\n await context.send(format_message(MessageTypeLog, 'Attempting to get logs for ' + channel.name))\r\n path = TempFolder + '/' + channelNamePath\r\n os.makedirs(path, exist_ok=True)\r\n if os.path.exists(ParentDirectory + '/' + channelNamePath):\r\n print('path exists')\r\n else:\r\n await context.send(format_message(MessageTypeError,'Error: Logs do not exist'))\r\n return\r\n # check if a first date was entered\r\n if date1 is not None:\r\n #if second date was entered compare dates and set earlier date to\r\n #startDate\r\n # and later date to endDate\r\n if date2 is not None:\r\n if date1 >= date2:\r\n startDate = date2\r\n endDate = date1\r\n else:\r\n startDate = date1\r\n endDate = date2\r\n interDate = startDate\r\n #copy filenames to filesToCopy for each day until we reach the\r\n #end date\r\n while interDate <= endDate:\r\n await copy_files(interDate, directoryToCopy, filesToCopy, channel.name)\r\n interDate = interDate + datetime.timedelta(days=1)\r\n # if only a first date was entered copy the filenames to\r\n # filesToCopy for that day\r\n else:\r\n await copy_files(date1, directoryToCopy, filesToCopy, channel.name)\r\n\r\n if len(filesToCopy) > 0:\r\n for file in filesToCopy:\r\n sourcePath = ParentDirectory + '/' + channel.name + '/' + file\r\n shutil.copyfile(sourcePath, path + '/' + file)\r\n else:\r\n await context.send(format_message(MessageTypeWarning, 'No Logs available. Sending empty folder'))\r\n\r\n fileDate = date1.strftime(DateFormat) \r\n if date2 is not None:\r\n fileDate = startDate.strftime(DateFormat) + ' ' + endDate.strftime(DateFormat)\r\n \r\n LogName = 'LogCat_logs_' + channel.name + '_' + fileDate\r\n shutil.make_archive(LogName, 'zip', TempFolder)\r\n\r\n # if no date was entered send the channel's entire logs\r\n if date1 is None and date2 is None:\r\n source_path = ParentDirectory + '/' + channelNamePath\r\n if os.path.isdir(source_path):\r\n distutils.dir_util.copy_tree(source_path, path)\r\n LogName = 'LogCat_logs_' + channelNamePath\r\n shutil.make_archive(LogName, 'zip', ParentDirectory + '/' + channelNamePath)\r\n else:\r\n await context.send(format_message(MessageTypeError, 'No Logs available. 
Sending empty folder'))\r\n # send the logs with the channel's name\r\n try:\r\n await context.send(file=discord.File(LogName + '.zip', LogName + '.zip'))\r\n await context.send('Logs Sent!')\r\n except Exception as e:\r\n print_and_log(LogTypeInfo, 'Command: ' + 'get_channel_logs')\r\n print_and_log(LogTypeError, e)\r\n await context.send(e)\r\n\r\n # delete the temp folder\r\n if os.path.exists(TempFolder):\r\n distutils.dir_util.remove_tree(TempFolder)\r\n if os.path.isfile(LogName + '.zip'):\r\n os.remove(LogName + '.zip')\r\n\r\n#------------------------------------------------------copy_files--------------------------------------------------------------------\r\nasync def copy_files(date, directoryToCopy, filesToCopy, channelName):\r\n \"\"\"Checks if a file exists and appends its path to filesToCopy\r\nif it does not exist then it attempts to create a log for that day\"\"\"\r\n file = channelName + ' ' + date.strftime(DateFormat) + '.txt'\r\n if os.path.exists(directoryToCopy + '/' + file):\r\n print('Found file')\r\n print(file)\r\n else:\r\n channel = get_channel_by_name(channelName)\r\n check = GetTimeOffset()\r\n if(channel.type is discord.ChannelType.text):\r\n await LogChannel(channel, date + datetime.timedelta(hours=GetTimeOffset()), 'get_channel_logs')\r\n else:\r\n print_and_log(LogTypeError, \"Channel: \" + channel.name + \" is not a TextChannel!\")\r\n filesToCopy.append(file)\r\n\r\n#------------------------------------------get_channel_by_name-------------------------------------------------------------------\r\ndef get_channel_by_name(channelName):\r\n \"\"\"Loops through all channels and returns channel if the channelName was equal to the channel's name\"\"\"\r\n channels = client.get_all_channels()\r\n for channel in channels:\r\n if channel.name.lower() == channelName.lower() and channel.type == discord.ChannelType.text:\r\n return channel\r\n\r\n#------------------------------------------------logged_channels------------------------------------------------------------------\r\n@client.command(name='LoggedChannels',\r\n description='Tells what channels are set to be logged.',\r\n brief='What channels are set to be logged',\r\n aliases=['LoggedChan', 'loggedchannels', 'lc'],\r\n pass_context=True)\r\nasync def logged_channels(context):\r\n \"\"\"Tells the client what channels are in channelsToLog list\"\"\"\r\n if len(channelsToLog) == 0:\r\n await context.send(format_message(MessageTypeWarning,'No channels have been added'))\r\n else:\r\n await context.send('These are the channels that are currently set to be logged:')\r\n async with context.typing():\r\n for channelID in channelsToLog:\r\n await context.send(format_message('Log', client.get_channel(channelID).name))\r\n await context.send('Done listing channels set to be logged.')\r\n\r\n#-----------------------------------------------start_logging---------------------------------------------------------------\r\n@client.command(name='StartLogging',\r\n description='Starts the automated daily logging',\r\n brief='Starts auto logging',\r\n aliases=['startlogging', 'strtlog'],\r\n pass_context=True)\r\nasync def start_logging(context):\r\n \"\"\"Starts automatic logging\r\n checks if channelsToLog's size is greater than 0 and if it is then calls\r\n check_time, and sets AutomaticLogging to True\"\"\"\r\n global AutomaticLogging\r\n if AutomaticLogging == False:\r\n if len(channelsToLog) > 0:\r\n AutomaticLogging = True\r\n await context.send('Starting AutoLogging')\r\n await check_time()\r\n else:\r\n await 
context.send(format_message(MessageTypeError, 'Error : No channels added to log list'))\r\n else:\r\n await context.send(format_message(MessageTypeWarning, 'Warning : Auto logging is already started'))\r\n\r\n#----------------------------------------------------auto_logging----------------------------------------------------------------\r\n@client.command(name=\"AutoLogging\",\r\n description='Says state of Automatic Logging',\r\n brief='State of Auto Logging',\r\n aliases=['autologging','autolog'],\r\n pass_context=True)\r\nasync def auto_logging():\r\n \"\"\"Tells client the current state of AutomaticLogging\"\"\"\r\n await context.send('Automatic logging is currently set to: ' + str(AutomaticLogging))\r\n\r\n#------------------------------------------------kill_emojis----------------------------------------------------------------------\r\ndef kill_emojis(string):\r\n \"\"\"Removes emojis from a string. Writing strings to a file that have emojis\r\ncauses an error in write. So we just remove them\"\"\"\r\n return emoji_pattern.sub(r'', string)\r\n\r\n\r\n#--------------------------------------------------yesterday_log_loop------------------------------------------------------------\r\nasync def yesterday_log_loop(command=None):\r\n \"\"\"Loops through each channel ID in channels to log\r\ngets each channel and creates logs for yesterday\"\"\"\r\n for channelID in channelsToLog:\r\n channel = client.get_channel(channelID)\r\n if channel is None:\r\n return('Incorrect Channel ID')\r\n yesterday = datetime.datetime.now() - datetime.timedelta(days=1, hours=datetime.datetime.now().hour - GetTimeOffset(), minutes=datetime.datetime.now().minute, seconds=datetime.datetime.now().second)\r\n if(channel.type is discord.ChannelType.text):\r\n await LogChannel(channel, yesterday, command)\r\n else:\r\n print_and_log(LogTypeError ,\"Channel: \" + channel.name + \" is not a TextChannel!\")\r\n\r\n#--------------------------------------------------------check_date---------------------------------------------------------------\r\ndef check_date(date):\r\n \"\"\"Checks if a string the user entered can be converted into a datetime object\r\nif it can then a datetime object is made and returned\"\"\"\r\n found_date = date_pattern.match(date)\r\n if found_date is None:\r\n return None\r\n the_date = found_date.group(1) + '-' + found_date.group(2) + '-' + found_date.group(3)\r\n print(the_date)\r\n newDateTime = datetime.datetime.strptime(found_date.group(1) + ' ' + found_date.group(2) + ' ' + found_date.group(3),'%Y %m %d')\r\n return newDateTime\r\n\r\n#-------------------------------------------------------GetTimeOffset-------------------------------------------------------------\r\ndef GetTimeOffset():\r\n \"\"\"Gets the hour difference between local time and GMT\r\nIs used to convert from GMT to Local time\r\nthis won't work for some timezones because some change by half an hour during\r\ndst. 
This works fine for pacific time\"\"\"\r\n hourOffset = time.gmtime().tm_hour - time.localtime().tm_hour\r\n return hourOffset % 24\r\n\r\n#------------------------------------------------------format_message---------------------------------------------------------------\r\ndef format_message(message_type, message):\r\n \"\"\"Formats a message by adding a color and making them into a code block in discord\"\"\"\r\n formatted_message = None\r\n if message_type == MessageTypeError:\r\n formatted_message = Error + message\r\n elif message_type == MessageTypeLog:\r\n formatted_message = Log + message\r\n elif message_type == MessageTypeWarning:\r\n formatted_message = Warning + message\r\n formatted_message += EndCodeBlock\r\n return formatted_message\r\n\r\n#----------------------------------------------check_if_roles_assigned----------------------------------------------------------------\r\ndef check_if_roles_assigned():\r\n global commandRoles\r\n if len(commandRoles) == 0:\r\n print_and_log(LogTypeError, 'No roles assigned. Either generate a new file or add\\n in config.xml')\r\n\r\n#-----------------------------------------------get_console_logs------------------------------------------------------------------\r\n@client.command(name='GetConsoleLogs',\r\n description='Sends the debug text file to the client. console.txt contains a list of the add and remove commands that were sent to the bot along with the sender info',\r\n brief='Sends console.txt to client',\r\n aliases=['getconsolelogs'],\r\n pass_context=True)\r\nasync def get_console_logs(context):\r\n \"\"\"Sends the console.txt file to the client\"\"\"\r\n try:\r\n await context.send(file=discord.File(ConsoleLogs, ConsoleLogs))\r\n await context.send('Done sending console logs')\r\n except Exception as e:\r\n print_and_log(LogTypeInfo, 'Command: ' + 'GetConsoleLogs')\r\n print_and_log(LogTypeError, e)\r\n await context.send(e)\r\n\r\n#-----------------------------------------------get_runtime-----------------------------------------------------------------------\r\n@client.command(name='GetRuntime',\r\n description='Gets the runtime of the bot',\r\n brief='Returns runtime',\r\n aliases=['getruntime', 'runtime'],\r\n pass_context=True)\r\nasync def get_runtime(context):\r\n \"\"\"Returns the runtime of the script\"\"\"\r\n total_time = time.time() - start_time\r\n seconds = int(total_time)\r\n minutes = 0\r\n hours = 0\r\n days = 0\r\n if total_time >= 60:\r\n minutes = int(seconds / 60)\r\n seconds = seconds % 60\r\n if minutes >= 60:\r\n hours = int(minutes / 60)\r\n minutes = minutes % 60\r\n if hours > 24:\r\n days = int(hours/24)\r\n hours = hours % 24\r\n await context.send(str(days) + \" Days, \" + str(hours) + ' Hours, ' + str(minutes) + ' Minutes, ' + str(seconds) + \" Seconds\")\r\n#------------------------------------------------on_error--------------------------------------------------------------------------\r\n@client.event\r\nasync def on_error(event, *args, **kwargs):\r\n print_and_log(LogTypeError, sys.exc_info())\r\n\r\n#------------------------------------------------has_permissions----------------------------------------------------------------\r\nasync def has_permissions(channel):\r\n \"\"\"Returns read message history permissions for the specified channel\"\"\"\r\n server = client.get_guild(ServerID)\r\n permissions = channel.permissions_for(server.me)\r\n return permissions.read_message_history\r\n 
\r\n\r\n#-------------------------------------------------run_client-----------------------------------------------------------------------\r\ndef run_client(client):\r\n \"\"\"Try to run the bot and if it fails wait a minute and try again\"\"\"\r\n bot_loop = asyncio.get_event_loop()\r\n while True:\r\n try:\r\n #Check config file and apply settings\r\n check_config_file()\r\n check_if_roles_assigned()\r\n global TOKEN\r\n bot_loop.run_until_complete(client.start(TOKEN))\r\n except Exception as e:\r\n if e is not IOError:\r\n print_and_log(LogTypeError, e)\r\n print_and_log(LogTypeInfo, 'Waiting 60 seconds then restarting')\r\n time.sleep(60)\r\n\r\nprint_and_log(LogTypeInfo, \"Discord Api Version: \" + discord.__version__)\r\nrun_client(client)\r\n","repo_name":"Arobledo707/LogCat","sub_path":"DiscordBot.py","file_name":"DiscordBot.py","file_ext":"py","file_size_in_byte":52445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21390988919","text":"\"\"\"Build src file list.\"\"\"\nimport re\nfrom pathlib import Path\nfrom typing import List\n\nfrom click import echo\nfrom colorama import Fore\n\nfrom .settings import Config\n\n\ndef get_src(src: List[Path], config: Config) -> List[Path]:\n \"\"\"Get source files.\"\"\"\n paths = []\n for item in src:\n # normalize path\n\n normalized_item = item.resolve()\n\n if (\n Path.is_file(normalized_item)\n and no_pragma(config, normalized_item)\n and (\n (\n config.use_gitignore\n and not config.gitignore.match_file(normalized_item)\n )\n or not config.use_gitignore\n )\n ):\n paths.append(normalized_item)\n continue\n\n # remove leading . from extension\n extension = str(config.extension)\n extension = extension[1:] if extension.startswith(\".\") else extension\n\n paths.extend(\n filter(\n lambda x: not re.search(config.exclude, x.as_posix(), re.VERBOSE)\n and no_pragma(config, x)\n and (\n (config.use_gitignore and not config.gitignore.match_file(x))\n or not config.use_gitignore\n ),\n list(normalized_item.glob(f\"**/*.{extension}\")),\n )\n )\n\n if len(paths) == 0:\n echo(Fore.BLUE + \"No files to check! 
😢\")\n\n return paths\n\n\nhtml_patterns = [re.compile(r\"\")]\ndjango_jinja_patterns = [\n re.compile(r\"\\{#\\s*djlint\\:on\\s*#\\}\"),\n re.compile(r\"\\{%\\s*comment\\s*%\\}\\s*djlint\\:on\\s*\\{%\\s*endcomment\\s*%\\}\"),\n]\nnunjucks_patterns = [re.compile(r\"\\{#\\s*djlint\\:on\\s*#\\}\")]\nhandlebars_patterns = [re.compile(r\"\\{\\{!--\\s*djlint\\:on\\s*--\\}\\}\")]\ngolang_patterns = [re.compile(r\"\\{\\{-?\\s*/\\*\\s*djlint\\:on\\s*\\*/\\s*-?\\}\\}\")]\n\n\ndef no_pragma(config: Config, this_file: Path) -> bool:\n \"\"\"Verify there is no pragma present.\"\"\"\n if config.require_pragma is False:\n return True\n\n with this_file.open(encoding=\"utf8\") as open_file:\n first_line = open_file.readline()\n\n pragma_patterns = {\n \"html\": html_patterns,\n \"django\": django_jinja_patterns + html_patterns,\n \"jinja\": django_jinja_patterns + html_patterns,\n \"nunjucks\": nunjucks_patterns + html_patterns,\n \"handlebars\": handlebars_patterns + html_patterns,\n \"golang\": golang_patterns + html_patterns,\n \"angular\": html_patterns,\n \"all\": django_jinja_patterns\n + nunjucks_patterns\n + handlebars_patterns\n + golang_patterns\n + html_patterns,\n }\n\n return any(\n re.match(pattern, first_line) for pattern in pragma_patterns[config.profile]\n )\n","repo_name":"Riverside-Healthcare/djLint","sub_path":"src/djlint/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"53"} +{"seq_id":"2099541005","text":"import logging\nimport os\nimport time\nimport health_checker\nfrom multiprocessing import Pool\n\nlogger = logging.getLogger(__file__)\n\ndef _health_check_and_result(service):\n res = health_checker.check_health(service)\n service['service_state'] = res[0]\n service['service_info'] = res[1]\n service['service_time'] = res[2]\n return res\n\nclass ConsecutiveWorker(object):\n\n class JobExecutor(object):\n \n def __init__(self, exporter):\n self.exporter = exporter\n \n def execute(self, service):\n res = _health_check_and_result(service)\n self.exporter.export(service)\n\n def __init__(self, services_list, exporter, **kwargs):\n self.exporter = exporter\n self.services_list = services_list\n self.job_executor = self.JobExecutor(exporter)\n \n # if the exporter is able to callback\n set_worker_callback = getattr(self.exporter, \"set_worker_callback\", None)\n if set_worker_callback:\n if callable(set_worker_callback):\n logger.debug(\"found callback in exporter {}\".format(str(type(self.exporter))))\n set_worker_callback(self.work_single)\n\n def work_single(self, service_name):\n logger.debug(\"requesting an out-of-order processing for service: {}\".format(service_name))\n for service in self.services_list:\n #find the one with 'service_name' == service_name\n if service['service_name'] == service_name:\n self.job_executor.execute(service)\n break\n \n \n\n def work_all(self, workers):\n \n if workers:\n logger.debug(\"running with {} workers\".format(workers))\n pool = Pool(processes=workers)\n while True:\n start_time = time.time()\n pool.map(self.job_executor.execute, self.services_list)\n end_time = time.time()\n logger.debug(\"needed {}s to process a full cycle\".format((end_time - start_time)))\n \n else:\n logger.debug(\"running without workers\")\n while True:\n start_time = time.time()\n for service in self.services_list:\n self.job_executor.execute(service)\n end_time = time.time()\n logger.debug(\"needed {}s to process a full cycle\".format((end_time - 
start_time)))\n sleep_time = 60*5\n logger.info(\"now waiting {}s for the next cycle.\".format(sleep_time))\n time.sleep(sleep_time)\n \n # will never come...\n\ndef _as_job(service_data):\n res = _health_check_and_result(service_data)\n return service_data\n\nclass OnceWorker(object):\n\n def __init__(self, services_list, exporter, **kwargs):\n self.exporter = exporter\n self.services_list = services_list\n \n def work_all(self, workers):\n \n # calculate health\n results = None\n \n start_time = time.time()\n if workers:\n logger.debug(\"running with {} workers\".format(workers))\n procs = workers\n p = Pool(processes=procs)\n results = p.map(_as_job, self.services_list)\n else:\n logger.debug(\"running without workers\")\n for service in self.services_list:\n _health_check_and_result(service)\n results = self.services_list\n end_time = time.time()\n logger.debug(\"needed {}s to process\".format((end_time - start_time)))\n \n self.exporter.export(results)\n # wait a little\n time.sleep(2)\n os._exit(0)\n","repo_name":"derHeinz/service_monitor","sub_path":"workers.py","file_name":"workers.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28255575778","text":"\"\"\"\nThis is meant to serve as an example of slightly more complex parsing of\norientation measurements.\n\nAngelier, 1979's seminal paper on paleostress determination includes a table\nof slickenslide measurements from normal faults.\n\nHowever, some of the measurements are rakes, while others are strike/dip and an\nazimuth of the slickenslides (\"Rake\" measurements without a direction letter\nare actually azimuthal measurements.).\n\nFurthermore, the measurements do not follow the right-hand-rule for indicating\ndip direction of a plane and they indicate rake direction using a directional\nletter.\n\nTo unify the measurements for plotting, etc, we need to parse all of the\nmeasurements, and convert the azimuth measurements to rakes.\n\"\"\"\nimport os\nimport matplotlib.pyplot as plt\nimport mplstereonet\n\ndef main():\n strike, dip, rake = load()\n\n # Plot the data.\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='stereonet')\n ax.rake(strike, dip, rake, 'ro')\n plt.show()\n\ndef load():\n \"\"\"Read data from a text file on disk.\"\"\"\n # Get the data file relative to this file's location...\n datadir = os.path.dirname(__file__)\n filename = os.path.join(datadir, 'angelier_data.txt')\n\n data = []\n with open(filename, 'r') as infile:\n for line in infile:\n # Skip comments\n if line.startswith('#'):\n continue\n\n # First column: strike, second: dip, third: rake.\n strike, dip, rake = line.strip().split()\n\n if rake[-1].isalpha():\n # If there's a directional letter on the rake column, parse it\n # normally.\n strike, dip, rake = mplstereonet.parse_rake(strike, dip, rake)\n else:\n # Otherwise, it's actually an azimuthal measurement of the\n # slickenslide directions, so we need to convert it to a rake.\n strike, dip = mplstereonet.parse_strike_dip(strike, dip)\n azimuth = float(rake)\n rake = mplstereonet.azimuth2rake(strike, dip, azimuth)\n\n data.append([strike, dip, rake])\n\n # Separate the columns back out\n strike, dip, rake = zip(*data)\n return strike, dip, rake\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"joferkington/mplstereonet","sub_path":"examples/parse_angelier_data.py","file_name":"parse_angelier_data.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"53"} +{"seq_id":"23079550296","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/7/7 上午9:58\n# @Author : Hou Rong\n# @Site : \n# @File : my_logger.py\n# @Software: PyCharm\nimport logging.handlers\n\nhandler_multi = logging.handlers.WatchedFileHandler(\n '/tmp/multi_matched_log',\n mode='w'\n)\n\nhandler_none = logging.handlers.WatchedFileHandler(\n '/tmp/none_matched_log',\n mode='w'\n)\nstream_handler = logging.StreamHandler()\n\nlogger_multi = logging.getLogger(\"CtripSuggestionMultiMatched\")\nlogger_none = logging.getLogger(\"CtripSuggestionNoneMatched\")\n\nlogger_multi.setLevel(level=logging.DEBUG)\nlogger_none.setLevel(level=logging.DEBUG)\n\nlogger_multi.addHandler(handler_multi)\nlogger_none.addHandler(handler_none)\n\n# logger_multi.addHandler(stream_handler)\n# logger_none.addHandler(stream_handler)\n","repo_name":"20113261/p_m","sub_path":"Suggestion/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7879078571","text":"#ala ma kota to 11 znakow\ntext = 'Ala ma kota'\n# # to rozwiazanie lepsze\n# for char in text:\n# print(char)\n# # to gorsze\nlenght = len(text)\n# for idx in range (lenght):\n# print(text[idx])\n\nsome_range = range (lenght)\nprint(some_range)\n#triczek\nfor value in some_range:\n print(value)\n#pomaga ocenic czy petla sie juz zakonczyla, wazne jesli mamy program zalezny od zakonczenia innych procesow\nelse:\n print('Helloo')\n\n","repo_name":"aksa1/infoshare_2","sub_path":"forloops.py","file_name":"forloops.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69863812968","text":"import librosa as lb\nfrom numpy import *\nimport matplotlib.pyplot as plt\nfrom constants import *\n\nfrom time import time\n\nclass MonoMelody :\n\n def __init__(self, file, signal, duration, sr = SAMPLE_RATE, hop_length = HOP_LENGTH):\n \n \"\"\"constructor : \n - feature.chroma_stft is a function of librosa library which compute a chromagram using the song spectrogram\n \"\"\"\n \n self.__file = file\n self.__y, self.__sr = (signal, sr)\n if (duration == None):\n self.__duration = lb.get_duration(y = self.__y, sr = sr, hop_length = hop_length)\n else :\n self.__duration = duration\n self.__hop_length = hop_length\n S = abs(lb.stft(self.__y, hop_length = self.__hop_length))\n self.__C = lb.feature.chroma_stft(S=S, sr=self.__sr)\n self.filter_chroma()\n\n def show_chroma(self):\n \n \"\"\"print the chromagram of the track:\n - display.specshow is a function of librosa library which useg with 'time' argument for x_axis and 'chroma' argument for y_axis allows to plot the chromagram\n \"\"\"\n \n lb.display.specshow(self.__C, x_axis='time', y_axis='chroma', hop_length = self.__hop_length)\n plt.title('Chromagram')\n plt.show()\n\n def filter_chroma(self):\n \n \"\"\"for each column, keep the note with the highest coefficient\n Note : this filter is efficient only when a single note is played at the same time.\n \"\"\"\n \n l, c = shape( self.__C)\n\n for i in range(0, c):\n max = self.__C[0][i]\n \n for j in range(0, l):\n if( self.__C[j][i] > max ):\n max = 
self.__C[j][i]\n\n for j in range(0, l):\n if( self.__C[j][i] < max ):\n self.__C[j][i] = 0\n\n\n def filter_components(self, min):\n \n #for each column, keep the frequencies with a coefficient superior to min\n \n l, c = shape( self.__comps )\n\n for i in range(0, c):\n for j in range(0, l):\n if( self.__comps[j][i] < min ):\n self.__comps[j][i] = 0\n\n \n def tab_notes(self):\n\n \"\"\"returns the list of the note played. \n The time between to notes is related to the sample rate\n If no note is played the character is empty\n \"\"\"\n\n\n notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']\n \n l, c = shape( self.__C )\n\n tab = ['']*c\n \n for i in range( 0, c):\n for j in range( 0, l):\n if( self.__C[j][i] > 0 ):\n tab[i] = notes[j]\n\n char = tab[0]\n if (char != tab[2]):\n char = tab[2]\n tab[0] = tab[2]\n for i in range(0,c-2):\n if (char != tab[i]):\n if(char != tab[i+2]):\n tab[i] = tab[i+2]\n char = tab[i+2]\n else:\n char = tab[i]\n \n return tab\n\n\n def tab_time(self,tab):\n\n \"\"\"Takes in parameter the list of notes returned by the function tab_notes\n Returns a list of pairs, containing the note and the time when the note begins\n \"\"\"\n \n l, c = shape(self.__C)\n #print c\n pas = int(self.__duration) * 1.0 / c\n \n tabTime = []\n \n char = tab[0]\n tabTime.append([char,0.0])\n\n for i in range(0,len(tab)):\n if (tab[i] != char):\n char = tab[i]\n tabTime.append([char,i*pas])\n \n return tabTime\n\n\n def belongs(self, tab_time, notes):\n nb_notes = len(notes)\n l = len(tab_time)\n\n result = [False]*l\n \n for i in range(0, l):\n for j in range(0, nb_notes):\n if (tab_time[i][0] == notes[j]):\n result[i] = True\n\n return result\n\n \n \n\n \n","repo_name":"Jedyle/music-analysis","sub_path":"src/monomelody.py","file_name":"monomelody.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12604421295","text":"from iterstrat.ml_stratifiers import MultilabelStratifiedKFold\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom sklearn.metrics import log_loss\n\nfrom config import Config\nfrom fe_cluster import fe_cluster_all\nfrom pca_selection import _pca, _pca_select\nfrom rankgauss import rankGauss\nfrom train import train_test\nfrom utils import seed_everything, process_data, sub_clip, process\n\nfrom pdb import set_trace\n\ndef main():\n\n seed_everything(seed_value=42)\n cfg = Config()\n\n data_dir = '../../data'\n save_path = './'\n load_path = './'\n runty = 'traineval'\n assert runty == 'traineval' or runty == 'eval', \\\n \"Run type is wrong. 
Should be 'traineval' or 'eval'\"\n\n train_features = pd.read_csv(os.path.join(data_dir, 'train_features.csv'))\n train_targets_scored = pd.read_csv(os.path.join(data_dir, 'train_targets_scored.csv'))\n train_targets_nonscored = pd.read_csv(os.path.join(data_dir, 'train_targets_nonscored.csv'))\n test_features = pd.read_csv(os.path.join(data_dir, 'test_features.csv'))\n submission = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))\n \n train_features2 = train_features.copy()\n test_features2 = test_features.copy()\n \n if (runty == 'traineval'):\n test_features_private = test_features.copy()\n elif (runty == 'eval'):\n test_features_private = pd.read_csv(os.path.join(data_dir, 'test_features_private_fake.csv'))\n\n test_features_private2 = test_features_private.copy()\n\n train_featurs, test_features, test_features_private = \\\n rankGauss(train_features=train_features, test_features=test_features,\n test_features_p=test_features_private, runty=runty)\n\n train_features, test_features, test_features_private, train_pca, test_pca, test_pca_p = \\\n _pca(train_features=train_features, test_features=test_features,\n runty=runty, test_features_private=test_features_private,\n ncomp_g=cfg.ncomp_g, ncomp_c=cfg.ncomp_c)\n \n train_features, test_features, test_features_private = \\\n _pca_select(train_features, test_features, test_features_private)\n \n train_features, test_features, test_features_private = \\\n fe_cluster_all(train_features=train_features, test_features=test_features,\n test_features_private=test_features_private,\n train_features2=train_features2, test_features2=test_features2,\n test_features_private2=test_features_private2,\n train_pca=train_pca, test_pca=test_pca, test_pca_p=test_pca_p)\n \n if (runty == 'traineval'):\n train, test, target = process(train_features, test_features, train_targets_scored)\n elif (runty == 'eval'):\n train, test, target = process(train_features, test_features_private, train_targets_scored)\n\n folds = train.copy()\n\n target_cols = target.drop('sig_id', axis=1).columns.values.tolist()\n\n oof = np.zeros((len(train), len(target_cols)))\n predictions = np.zeros((len(test), len(target_cols)))\n\n for seed in cfg.seeds:\n mskf = MultilabelStratifiedKFold(n_splits=cfg.nfolds, shuffle=True, random_state=seed)\n for fold, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)):\n folds.loc[v_idx, 'kfold'] = int(fold)\n folds['kfold'] = folds['kfold'].astype(int)\n \n trte = train_test(folds, test, target, save_path, load_path, runty=runty)\n \n if (runty == 'train'):\n oof_ = trte.run_k_fold(seed)\n oof += oof_ / len(cfg.seeds)\n elif (runty == 'eval'):\n predictions_ = trte.run_k_fold(seed)\n predictions += predictions_ / len(cfg.seeds)\n elif (runty == 'traineval'):\n oof_, predictions_ = trte.run_k_fold(seed)\n oof += oof_ / len(cfg.seeds)\n predictions += predictions_ / len(cfg.seeds)\n\n # oof_, predictions_ = trte.run_k_fold(seed)\n # oof += oof_ / len(cfg.seed)\n # predictions += predictions_ / len(cfg.seed)\n\n if (runty == 'train'):\n train[target_cols] = oof\n valid_results = train_targets_scored.drop(columns=target_cols).merge(\n train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)\n\n y_true = train_targets_scored[target_cols].values\n y_pred = valid_results[target_cols].values\n\n score = 0\n for i in range(len(target_cols)):\n score_ = log_loss(y_true[:, i], y_pred[:, i])\n score += score_ / (target.shape[1]-1)\n\n print(\"CV log_loss: \", score)\n\n elif (runty == 'eval'):\n test[target_cols] = predictions\n\n 
sub = submission.drop(columns=target_cols).merge(\n test[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)\n\n # clip the submission\n # sub_c = sub_clip(sub, test_features)\n # sub_c.to_csv('submission.csv', index=False)\n\n sub.to_csv('submission.csv', index=False)\n \n elif (runty == 'traineval'):\n train[target_cols] = oof\n valid_results = train_targets_scored.drop(columns=target_cols).merge(\n train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)\n\n y_true = train_targets_scored[target_cols].values\n y_pred = valid_results[target_cols].values\n\n score = 0\n for i in range(len(target_cols)):\n score_ = log_loss(y_true[:, i], y_pred[:, i])\n score += score_ / (target.shape[1]-1)\n\n print(\"CV log_loss: \", score)\n \n test[target_cols] = predictions\n\n sub = submission.drop(columns=target_cols).merge(\n test[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)\n\n # clip the submission\n # sub_c = sub_clip(sub, test_features)\n # sub_c.to_csv('submission.csv', index=False)\n\n sub.to_csv('submission.csv', index=False)\n\n # train[target_cols] = oof\n # test[target_cols] = predictions\n\n # valid_results = train_targets_scored.drop(columns=target_cols).merge(\n # train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)\n\n # y_true = train_targets_scored[target_cols].values\n # y_pred = valid_results[target_cols].values\n\n # score = 0\n # for i in range(len(target_cols)):\n # score_ = log_loss(y_true[:, i], y_pred[:, i])\n # score += score_ / target.shape[1]\n\n # print(\"CV log_loss: \", score)\n\n # sub = submission.drop(columns=target_cols).merge(\n # test[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)\n\n # # clip the submission\n # sub_c = sub_clip(sub, test_features)\n # sub_c.to_csv('submission.csv', index=False)\n\nif __name__ == '__main__':\n main()\n","repo_name":"XStargate/MoA_prediction","sub_path":"src_knn_cluster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4445293638","text":"from MPR import MP_Recommender\r\n\r\nwith open('MP_api_key.ini','r') as content_file:\r\n api_key = content_file.read()\r\n\r\nmpr = MP_Recommender('user_table.p',\r\n 'route_table.p',\r\n api_key,\r\n verbatim=True)\r\n\r\nuser_id = 200128311 #mine, trad, alpine, intermediate\r\n#user_id = 110596403 #boulder-er\r\n#user_id = 200272475 #boulder-er, advanced\r\n#200128311 boulder-er\r\n#user_id = 200077815 #michaels, trad, alpine, intermediate\r\n#user_id = 106540415 #mixed climber, alpine climber, advanced\r\n\r\n#mpr.load_prev_colab_results(user_id)\r\n#mpr.print_recs(user_id)\r\n\r\nmpr.load_prev_content_results('similarity_results.p')\r\nmpr.get_user_recs(user_id)","repo_name":"glabaria/MountainProject_Recommender","sub_path":"MPR_helper.py","file_name":"MPR_helper.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23396849356","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\n\n# Removes all characters that can not be in a file path\ndef clean(toClean):\n cleaned = toClean.replace(':', '')\n cleaned = cleaned.replace('/', '')\n cleaned = cleaned.replace('\\\\', '')\n cleaned = cleaned.replace('?', '')\n cleaned = cleaned.replace('', '')\n cleaned = cleaned.replace('\"', '')\n cleaned = ' '.join(cleaned.split())\n return cleaned\n\n# Gets a full url if only part is given\ndef 
getFullUrl(url):\n domain = 'http://www.marineband.marines.mil'\n if domain in url:\n return url\n else:\n return domain + url\n\n# Asks the user what file location they would like to save the files in\n# Folder must be on the same drive as the script being run\n# Edit 'C:' to the drive you are running the script on if you need to\nprint('')\nprint('Enter the file path you would like to save your files in ')\nsaveLocation = input();\nsaveLocation = saveLocation.replace('C:', '')\nsaveLocation = saveLocation.replace('\\\\', '/')\n\ncurrentPage = 'https://www.marineband.marines.mil/Audio-Resources/The-Complete-Marches-of-John-Philip-Sousa/'\nmainPage = requests.get(currentPage).text\nmainPage = BeautifulSoup(mainPage, 'html.parser')\n\n# Loops through each volume\nfor volume in mainPage.find_all('tbody'):\n print(volume.find('strong').text)\n volumeTitle = volume.find('strong').text\n volumeTitle = clean(volumeTitle)\n \n # Loops through each song in a volume\n for song in volume.find_all('a'):\n songPage = requests.get(getFullUrl(song['href'])).text\n songPage = BeautifulSoup(songPage, 'html.parser')\n \n # Finds and names the .mp3 file on a song page\n mp3 = None\n score = None\n image = None\n mp3filetype = 'none'\n for link in songPage.find_all('a', href=True):\n linkcheck = link['href'].split('/')[-1].split('?')[0]\n if (linkcheck.endswith('.mp3')):\n mp3 = getFullUrl(link['href'])\n linkcheck = linkcheck.replace('%20', ' ')\n mp3filetype = linkcheck\n songnumber = mp3filetype.split('_', 1)\n if len(songnumber) > 1 and 72 < int(songnumber[0]) < 77 and songnumber[1] != 'Liberty_Loan.mp3':\n mp3filetype = str(int(songnumber[0]) - 1) + '_' + songnumber[1]\n \n break\n \n # Skips pages with no downloads\n if mp3filetype == 'none':\n print('no download available') \n continue\n \n # Finds the pdf score and png file on a song page\n for link in songPage.find('tbody').find_all('a', href=True, attrs={'target': '_blank'}):\n linkcheck = link['href'].split('/')[-1].split('?')[0]\n print (linkcheck)\n if (linkcheck.endswith('.pdf')):\n if linkcheck.startswith(mp3filetype.split(' ')[0]) or linkcheck.startswith('Vol1_' + mp3filetype.split(' ')[0]) or linkcheck.startswith(mp3filetype.split('_')[0]):\n score = getFullUrl(link['href'])\n try:\n image = getFullUrl(link.find('img')['src'])\n break\n except:\n continue\n \n # Gets file and folder names\n songTitle = mp3filetype.rsplit('.', 1)[0].replace('_', ' ')\n savepath = saveLocation + '/' + volumeTitle + '/' + songTitle + '/'\n \n if not os.path.exists(savepath):\n os.makedirs(savepath)\n \n mp3fileSaveLocation = savepath + songTitle + '.mp3'\n scorefileSaveLocation = savepath + songTitle + '.pdf'\n imagefileSaveLocation = savepath + songTitle + '.png'\n \n # Downloads the files if they are not present in the folder\n try:\n with open(mp3fileSaveLocation) as f:\n print(songTitle, 'mp3 has already been downloaded')\n f.close()\n except FileNotFoundError:\n r2 = requests.get(mp3)\n with open(mp3fileSaveLocation, 'wb') as f:\n f.write(r2.content)\n f.close()\n \n try:\n with open(scorefileSaveLocation) as f:\n print(songTitle, 'score has already been downloaded')\n f.close()\n except FileNotFoundError:\n r2 = requests.get(score)\n with open(scorefileSaveLocation, 'wb') as f:\n f.write(r2.content)\n f.close()\n \n try:\n with open(imagefileSaveLocation) as f:\n print(songTitle, 'image has already been downloaded')\n f.close()\n except FileNotFoundError:\n r2 = requests.get(image)\n with open(imagefileSaveLocation, 'wb') as f:\n f.write(r2.content)\n 
f.close()\n \n print('Successfully downloaded ', songTitle)\n \nprint('Finished downloading all songs')\nexit","repo_name":"lazybookwyrm/AlmostCompleteSousaDownloader","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74716469927","text":"# https://sites.google.com/view/nivel1-desafios/funciones\n# 150 años es el tiempo que tarda una bolsa de plástico común en degradarse\n# y una botella de PET puede tardar 1.000 años en desaparecer. \n# Por otro lado los Envases de tetrabrik pueden tardar hasta 30 años en\n# degradarse.\n# Un trozo de chicle tarda 5 años en degradarse. \n\n# Se solicita una función para que dado el ingreso de un elemento,\n# se solicite tipo: Bolsa de plástico, botella PET, envase tetrabrik\n# o chicle, e imprima la cantidad de años que tarda en degradarse.\nfrom def_desafio01 import prints_descomposition_time\n\ndict_type_ = {1:[\"la bolsa de plastico\", 150],\n 2: [\"la botella PET\", 1000],\n 3: [\"el envase tetrabrik\", 30],\n 4: [\"el chicle\", 5]\n }\n\nwhile(True):\n name_ = input(\"Ingrese nombre de elemento:\\n\\t\")\n print(\"Ingrese tipo de elemento:\")\n for key, value in dict_type_.items():\n print(f\"\\\"{key}\\\" para {value[0]}\", end=\", \")\n print(\"\\\"0\\\" para salir.\")\n type_ = int(input(\"\\n\\t\"))\n if type_ == 0:\n print(\"FIN DE PROGRAMA\")\n break\n elif type_ not in dict_type_.keys():\n print(\"Valor ingresado invalido - reiniciando consulta.\")\n continue\n print(f\"{name_}: \", end=\"\")\n prints_descomposition_time(type_, dict_type_)\n\n ","repo_name":"AlexisRmnk/practicaInformatorio2022","sub_path":"prog_web/01_python/practicas_01_informatorio/ejercicios_desafios/03_funciones/desafio01.py","file_name":"desafio01.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29300533110","text":"#!/usr/bin/env python\n\nimport struct\nimport math\nfrom StringIO import StringIO\nfrom PIL import Image\n\ndef im2bmp(im):\n \"\"\"Convert an image into a 32-bit BMP.\n Return the BMP data.\n Assume that sizes are powers of 2.\n \"\"\"\n w, h = im.size\n data = struct.pack(' 256 or h > 256:\n raise ValueError(\"image is too big\")\n data = im2bmp(im)\n fout.write(struct.pack(IMAGE_FMT,\n w if w!=256 else 0, h if h!=256 else 0,\n 0, 0, 1, 32, len(data), offset)\n )\n offset += len(data)\n datas.append(data)\n\n # image data\n for d in datas:\n fout.write(d)\n\n\ndef main():\n from optparse import OptionParser\n parser = OptionParser(\n description=\"Assemble images into a .ICO file.\",\n usage=\"%prog OUTPUT FILES\",\n )\n (opts, args) = parser.parse_args()\n\n if len(args) == 0:\n parser.error(\"no output file\")\n if len(args) < 2:\n parser.error(\"no input file\")\n images2ico(args[0], args[1:])\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"benoitryder/panettopon","sub_path":"res/images2ico.py","file_name":"images2ico.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6824977819","text":"#!/usr/bin/env python3\n# pip3 install cfscrape\nimport json\nimport sys\nimport cfscrape # type: ignore\n\nfrom http import HTTPStatus\n\ndef check(email):\n scraper = cfscrape.create_scraper()\n query = \"https://haveibeenpwned.com/api/v2/breachedaccount/\" + email + \"?includeUnverified=true\"\n 
check = scraper.get(\n query,\n verify=True,\n )\n if check.status_code == 200:\n return check.json()\n elif check.status_code == 404:\n return []\n else:\n raise RuntimeError(f\"Bad status code {check.status_code}; query {query}\")\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description=\"Verify if email address has been pwned\")\n parser.add_argument(\"email\", help=\"Address to be checked\")\n args = parser.parse_args()\n\n j = check(args.email)\n json.dump(j, sys.stdout, ensure_ascii=False, indent=1, sort_keys=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"karlicoss/haveibeenpwned","sub_path":"haveibeenpwned.py","file_name":"haveibeenpwned.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5868653274","text":"from concurrent.futures import Future\nimport datetime\nimport hashlib\nimport io\nimport logging\nimport os\nimport pathlib\nimport re\nfrom typing import Dict, List\n\nfrom rst_lsp.database.main import DocutilsCache\nfrom rst_lsp.sphinx_ext.main import (\n assess_source,\n create_sphinx_app,\n find_all_files,\n retrieve_namespace,\n SourceAssessResult,\n SphinxAppEnv,\n)\nfrom . import uri_utils as uris\nfrom .cache import create_default_cache_path, remove_default_cache_path\nfrom .constants import MessageType\nfrom .utils import find_parents\nfrom .datatypes import Position, TextDocument, TextEdit\nfrom .plugin_manager import create_manager\n\nlogger = logging.getLogger(__name__)\n\n# TODO: this is not the best e.g. we capture numbers\nRE_START_WORD = re.compile(\"[A-Za-z_0-9]*$\")\nRE_END_WORD = re.compile(\"^[A-Za-z_0-9]*\")\n\n\nclass Config:\n \"\"\"Store configuration settings.\"\"\"\n\n def __init__(self, root_uri, init_opts, process_id, capabilities):\n self._root_path = uris.to_fs_path(root_uri)\n self._root_uri = root_uri\n self._init_opts = init_opts\n self._process_id = process_id\n self._capabilities = capabilities\n self._settings = {}\n self._plugin_manager = create_manager(logger)\n # TODO extract settings from plugin manager\n self._update_disabled_plugins()\n\n def _update_disabled_plugins(self):\n # All plugins default to enabled\n self._disabled_plugins = [\n plugin\n for name, plugin in self.plugin_manager.list_name_plugin()\n if not self.settings.get(\"plugins\", {}).get(name, {}).get(\"enabled\", True)\n ]\n logger.info(\"Disabled plugins: %s\", self._disabled_plugins)\n\n @property\n def init_opts(self):\n return self._init_opts\n\n @property\n def root_uri(self):\n return self._root_uri\n\n @property\n def process_id(self):\n return self._process_id\n\n @property\n def capabilities(self):\n return self._capabilities\n\n @property\n def settings(self):\n return self._settings\n\n @property\n def plugin_manager(self):\n return self._plugin_manager\n\n @property\n def disabled_plugins(self):\n return self._disabled_plugins\n\n def update(self, settings: dict):\n \"\"\"Recursively merge the given settings into the current settings.\"\"\"\n self._settings = settings\n\n\nclass Workspace(object):\n \"\"\"Store an in-memory representation of the open workspace files.\"\"\"\n\n def __init__(self, root_uri: str, server, config: Config):\n self._config = config\n self._root_uri = root_uri\n self._server = server\n self._root_uri_scheme = uris.urlparse(self._root_uri)[0]\n self._root_path = uris.to_fs_path(self._root_uri)\n self._open_docs = {}\n\n self._root_uri_hash = 
hashlib.md5(root_uri.encode(\"utf-8\")).hexdigest()\n # TODO persist cache?\n remove_default_cache_path(self._root_uri_hash)\n path = create_default_cache_path(self._root_uri_hash, \"database\")\n self._db = DocutilsCache(str(path), echo=False)\n\n self._update_env()\n\n def _update_env(self):\n \"\"\"Update the sphinx application.\"\"\"\n # TODO how to watch conf.py for changes? (or at least have command to update)\n # TODO use self.source_roots to find conf path?\n # TODO allow source directory to be different to conf path\n conf_path = self._config.settings.get(\"conf_path\", None)\n logger.debug(f\"Settings: {self._config.settings}\")\n if conf_path and not os.path.exists(conf_path):\n self.server.show_message(\n f\"The path set in `rst_lsp.conf_path` does not exist: {conf_path}\",\n msg_type=MessageType.Error,\n )\n conf_path = None\n elif conf_path:\n conf_path = os.path.realpath(conf_path)\n logger.debug(f\"Using conf dir: {conf_path}\")\n try:\n self._app_env = create_sphinx_app(\n conf_dir=os.path.dirname(conf_path) if conf_path else None,\n doctree_dir=create_default_cache_path(self._root_uri_hash, \"doctrees\"),\n output_dir=create_default_cache_path(self._root_uri_hash, \"outputs\"),\n )\n except Exception as err:\n self.server.show_message(\n (\n \"An error occurred creating a sphinx application from \"\n f\"`rst_lsp.conf_path`: {conf_path}.\\n\\n\"\n f\"{err}\"\n ),\n msg_type=MessageType.Error,\n )\n conf_path = None\n self._app_env = create_sphinx_app(\n conf_dir=None,\n doctree_dir=create_default_cache_path(self._root_uri_hash, \"doctrees\"),\n output_dir=create_default_cache_path(self._root_uri_hash, \"outputs\"),\n )\n roles, directives = retrieve_namespace(self._app_env)\n self._db.update_conf_file(\n conf_path, datetime.datetime.utcnow(), roles, directives\n )\n # TODO if local, use os.path.getmtime?\n # TODO when to remove roles and directives with 'removed' status?\n\n def close(self):\n # TODO persist cache?\n remove_default_cache_path(self._root_uri_hash)\n\n @property\n def documents(self) -> dict:\n return self._open_docs\n\n @property\n def database(self) -> DocutilsCache:\n \"\"\"Return the workspace database.\n\n If any document's source text hasn't been parsed/assessed, since its last change\n (or config update), then that will be done, and the database updated,\n before returning.\n \"\"\"\n for doc in self._open_docs.values():\n result = doc.get_assessment() # type: SourceAssessResult\n self._db.update_doc(\n doc.uri,\n doc.mtime,\n doc_symbols=result.doc_symbols,\n positions=result.positions,\n targets=result.targets,\n references=result.references,\n lints=result.linting,\n )\n return self._db\n\n @property\n def app_env(self) -> SphinxAppEnv:\n return self._app_env\n\n @property\n def root_path(self) -> str:\n return self._root_path\n\n @property\n def root_uri(self) -> str:\n return self._root_uri\n\n @property\n def server(self):\n return self._server\n\n @property\n def config(self):\n return self._config\n\n def get_document(self, doc_uri: str):\n \"\"\"Return a managed document if-present, else create one pointing at disk.\n\n See https://github.com/Microsoft/language-server-protocol/issues/177\n \"\"\"\n doc = self._open_docs.get(doc_uri, None)\n if doc is None:\n doc = self._create_document({\"uri\": doc_uri})\n return doc\n\n def put_document(self, document: TextDocument):\n self._open_docs[document[\"uri\"]] = self._create_document(document)\n\n def rm_document(self, doc_uri):\n # TODO remove from database? 
or get notification when rst are deleted\n # see also m_workspace__did_change_watched_files\n self._open_docs.pop(doc_uri)\n\n def update_document(self, doc_uri, change: TextEdit, version=None):\n self._open_docs[doc_uri].apply_change(change)\n self._open_docs[doc_uri].version = version\n\n def update_config(self, config):\n self._config = config\n self._update_env()\n for doc_uri in self.documents:\n self.get_document(doc_uri).update_config(config)\n\n # TODO configuration option, whether to read all files\n\n conf_file = self._db.query_conf_file()\n if not conf_file or not os.path.exists(conf_file.uri):\n return\n exclude_patterns = (\n self.app_env.app.config.exclude_patterns\n + self.app_env.app.config.templates_path\n + [uris.to_fs_path(uri) for uri in self.documents]\n # TODO current doc exclude doesn't appear to be working\n )\n all_paths = find_all_files(\n os.path.dirname(conf_file.uri), exclude_patterns=exclude_patterns\n )\n self.server.log_message(f\"parsing {len(all_paths)} closed files\")\n\n # start in separate thread, so the request can be returned\n future = self._server._endpoint._executor_service.submit(\n self.parse_closed_files, paths=all_paths\n )\n future.add_done_callback(self.notify_files)\n\n def notify_files(self, future: Future):\n if future.cancelled():\n self.server.log_message(\"cancelled parsing closed files\")\n self.server.log_message(f\"finished parsing {future.result()} closed files\")\n\n def parse_closed_files(self, paths):\n # TODO send progress to client (will require next LSP version 3.15.0)\n passed = 0\n for path in paths:\n try:\n with open(path) as handle:\n source = handle.read()\n # TODO check doc not in database with same mtime\n result = assess_source(source, self.app_env, doc_uri=path)\n self._db.update_doc(\n uri=uris.from_fs_path(path),\n # TODO use os.path.getmtime(path)?\n mtime=datetime.datetime.utcnow(),\n doc_symbols=result.doc_symbols,\n positions=result.positions,\n targets=result.targets,\n references=result.references,\n lints=result.linting,\n )\n self.server.log_message(f\"file parsed: {uris.from_fs_path(path)}\")\n passed += 1\n except Exception as err:\n self.server.log_message(\n f\"file parse failed: {path}: {err}\", MessageType.Error\n )\n # TODO now remove removed roles/directives from database\n return passed\n\n @property\n def is_local(self):\n \"\"\"Test if the directory is local (i.e. 
can be accessed by ``os``).\"\"\"\n return (\n self._root_uri_scheme == \"\" or self._root_uri_scheme == \"file\"\n ) and os.path.exists(self._root_path)\n\n def source_roots(self, document_path: str, filename: str = \"conf.py\"):\n \"\"\"Return the source roots for the given document.\"\"\"\n if not self.is_local:\n return None\n files = find_parents(self._root_path, document_path, [filename]) or []\n return list(set((os.path.dirname(project_file) for project_file in files))) or [\n self._root_path\n ]\n\n def _create_document(self, document: TextDocument):\n return Document(\n document[\"uri\"],\n source=document.get(\"text\", None),\n version=document.get(\"version\", None),\n config=self._config,\n workspace=self,\n )\n\n\nclass Document:\n \"\"\"Store an in-memory representation of a source file.\n\n The documents source text is kept in-sync with the clients,\n by applying ``TextEdit`` changes, on notification by the client.\n\n docutils/sphinx parsing of the source text is done lazily,\n whenever ``doc.get_assessment()`` is called,\n and the source text/configuration has changed.\n \"\"\"\n\n def __init__(\n self, uri, source=None, version=None, local=True, config=None, workspace=None\n ):\n self.uri = uri\n self.version = version\n self.path = uris.to_fs_path(uri)\n self.filename = os.path.basename(self.path)\n\n self._config = config\n self._workspace = workspace\n self._local = local\n self._source = source\n self._assessment = None\n self._mtime = datetime.datetime.utcnow()\n\n @property\n def workspace(self) -> Workspace:\n return self._workspace\n\n def __str__(self):\n return str(self.uri)\n\n @property\n def mtime(self) -> datetime.datetime:\n return self._mtime\n\n @property\n def lines(self) -> List[str]:\n return self.source.splitlines(True)\n\n @property\n def source(self) -> str:\n if self._source is None:\n with open(self.path, \"r\", encoding=\"utf-8\") as f:\n return f.read()\n return self._source\n\n def get_assessment(self) -> SourceAssessResult:\n if self._assessment is None:\n # TODO partial reassessment of source, given applied changes\n self._assessment = assess_source(\n self.source, self.workspace.app_env, doc_uri=self.uri\n )\n # TODO if local, use os.path.getmtime?\n self._mtime = datetime.datetime.utcnow()\n return self._assessment\n\n def update_config(self, config: Config):\n self._config = config\n self._assessment = None\n\n def apply_change(self, change: TextEdit):\n \"\"\"Apply a change to the document.\"\"\"\n text = change[\"text\"]\n change_range = change[\"range\"]\n\n if not change_range:\n # The whole file has changed\n self._source = text\n return\n\n start_line = change_range[\"start\"][\"line\"]\n start_col = change_range[\"start\"][\"character\"]\n end_line = change_range[\"end\"][\"line\"]\n end_col = change_range[\"end\"][\"character\"]\n\n # Check for an edit occuring at the very end of the file\n if start_line == len(self.lines):\n self._source = self.source + text\n return\n\n new = io.StringIO()\n\n # Iterate over the existing document until we hit the edit range,\n # at which point we write the new text, then loop until we hit\n # the end of the range and continue writing.\n for i, line in enumerate(self.lines):\n if i < start_line:\n new.write(line)\n continue\n\n if i > end_line:\n new.write(line)\n continue\n\n if i == start_line:\n new.write(line[:start_col])\n new.write(text)\n\n if i == end_line:\n new.write(line[end_col:])\n\n self._source = new.getvalue()\n self._assessment = None\n\n def get_line(self, position: Position) -> 
str:\n \"\"\"Return the position's line.\"\"\"\n return self.lines[position[\"line\"]]\n\n def get_line_before(self, position: Position) -> str:\n \"\"\"Return the section of the position's line before the position.\"\"\"\n return self.lines[position[\"line\"]][: position[\"character\"]]\n\n def offset_at_position(self, position: Position):\n \"\"\"Return the byte-offset pointed at by the given position.\"\"\"\n return position[\"character\"] + len(\"\".join(self.lines[: position[\"line\"]]))\n\n def word_at_position(self, position: Position, start_regex=None, end_regex=None):\n \"\"\"Get the word under the cursor returning the start and end positions.\"\"\"\n if position[\"line\"] >= len(self.lines):\n return \"\"\n\n line = self.lines[position[\"line\"]]\n i = position[\"character\"]\n # Split word in two\n start = line[:i]\n end = line[i:]\n\n # Take end of start and start of end to find word\n # These are guaranteed to match, even if they match the empty string\n start_regex = start_regex or RE_START_WORD\n end_regex = end_regex or RE_END_WORD\n m_start = start_regex.findall(start)\n m_end = end_regex.findall(end)\n\n return m_start[0] + m_end[-1]\n\n\ndef match_uri_to_workspace(\n uri: str, workspaces: Dict[str, Workspace], default: Workspace\n) -> Workspace:\n \"\"\"Find the workspace containing the URI.\"\"\"\n if uri is None:\n return None\n max_len, chosen_workspace = -1, None\n path = pathlib.Path(uri).parts\n for workspace in workspaces:\n workspace_parts = pathlib.Path(workspace).parts\n if len(workspace_parts) > len(path):\n continue\n match_len = 0\n for workspace_part, path_part in zip(workspace_parts, path):\n if workspace_part == path_part:\n match_len += 1\n if match_len > 0:\n if match_len > max_len:\n max_len = match_len\n chosen_workspace = workspace\n return workspaces.get(chosen_workspace, default)\n","repo_name":"chrisjsewell/rst-language-server","sub_path":"rst_lsp/server/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":16215,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"9596767273","text":"#!/usr/bin/env python3.7\n# -*- coding: utf8 -*-\n\nimport matplotlib as mat\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nsns.set(rc={\"figure.figsize\":(8,4)})\nsns.set_context('paper',font_scale=1.5,rc={'lines.linewidth':1.5})\nsns.set_style('ticks')\nmat.rc('text',usetex=True)\nmat.rc('text.latex',preamble=r'\\usepackage[utf8]{inputenc}\\usepackage[T1]{fontenc}\\usepackage[english]{babel}\\usepackage{mathpazo}\\usepackage[euler-digits,euler-hat-accent]{eulervm}\\usepackage{amsmath,amsfonts,amssymb}\\usepackage{siunitx}')\n\nebins=np.ravel(np.outer(10**np.arange(1,5),np.arange(1,10,0.5)))\nebins=ebins[0:60]\np=np.loadtxt('september-attenuation.dat',delimiter=' ',comments='#')\nn0,n1=p[0:10,:].reshape(60),p[10:20,:].reshape(60)\ng0,g1=p[20:30,:].reshape(60),p[30:40,:].reshape(60)\nc0=sns.cubehelix_palette(10,start=0.2,rot=-.3,dark=0.1,reverse=True)\nsns.set_palette(c0)\nfig,ax=plt.subplots(nrows=1,ncols=2,sharex=False,sharey=False)\nax[0].loglog(ebins,n0)\nax[0].loglog(ebins,n1)\nax[1].loglog(ebins,g0)\nax[1].loglog(ebins,g1)\n#ax.set_xlabel(r'Energía cinética 
$\\left[\\si{\\mega\\electronvolt}\\right]$',x=0.9,ha='right')\n#ax.set_ylabel(r'Atenuación')\n#ax.set_xlim(5e1,1.5e3)\n#ax.set_ylim(1e-6,1e0)\nplt.tight_layout(pad=1.0)\n#plt.savefig('neutron-at.pdf')\nplt.show()\n","repo_name":"anzorenam/tesis","sub_path":"images/september-attenuation.py","file_name":"september-attenuation.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"633851356","text":"import repoUsuarios as repo\nfrom Usuario import Usuario\nimport RSAKeyHandle as rsa\nimport AesPlayground as aes\n\n\nusuarios = [\n Usuario(\"ale\", rsa.getKeyPair(rsa.KEY_SIZE)),\n Usuario(\"pedro\", rsa.getKeyPair(rsa.KEY_SIZE)),\n Usuario(\"carlos\", rsa.getKeyPair(rsa.KEY_SIZE)),\n Usuario(\"alice\", rsa.getKeyPair(rsa.KEY_SIZE)),\n Usuario(\"bob\", rsa.getKeyPair(rsa.KEY_SIZE))\n]\nrepo.agregarUsuarios(usuarios)\n\n# rsa.exportKeyToFile(rsa.encryptRsaKey(usuarios[0].pubkey),\"ale.pem\")\n# print(repo.getUsernameList())\n# print(repo.getPubKeyFromUsername(\"ale\"))\n\n# Ale le envía un mensaje a Pedro\nmensaje = \"Un mensaje de prueba.\".encode(\"utf-8\")\ndestination_pubkey = repo.getPubKeyFromUsername(\"pedro\")\nsession_key = rsa.get_random_bytes(16)\ncipher_rsa = rsa.getNewRsaCipher(destination_pubkey)\nenc_session_key = cipher_rsa.encrypt(session_key) # lista para mandar a Pedro\nencrypted_data = aes.encryptBytesWithAes(mensaje, session_key) # listo para mandar a Pedro\n\n# Pedro: (tiene: enc_session_key y encrypted_data)\npedro_private = usuarios[1].key\ncipher_rsa_pedro = rsa.getNewRsaCipher(pedro_private)\nsession_key_pedro = cipher_rsa_pedro.decrypt(enc_session_key)\n\ndata = aes.decryptBytesWithAes(encrypted_data, session_key_pedro)\n\nprint(data)\n","repo_name":"alete89/playground","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1614028288","text":"import json\nimport cv2\nimport numpy as np\nimport os\n\n# Load your JSON data\nwith open('.json', 'r') as json_file:\n data = json.load(json_file)\n\n# Path to the directory containing your images\nimage_dir = ' '\n\n# Create an output directory for the masked images\noutput_dir = ' '\nos.makedirs(output_dir, exist_ok=True)\n\n# Iterate through each image and its regions\nfor image_filename, image_info in data[\"_via_img_metadata\"].items():\n # Load the image\n image_path = os.path.join(image_dir, image_info[\"filename\"])\n image = cv2.imread(image_path)\n\n # Create a mask\n mask = np.zeros(image.shape, dtype=np.uint8)\n alpha_mask = np.zeros((image.shape[0], image.shape[1], 4 ), dtype=np.uint8)\n for region in image_info[\"regions\"]:\n points_x = region[\"shape_attributes\"][\"all_points_x\"]\n points_y = region[\"shape_attributes\"][\"all_points_y\"]\n points = np.array(list(zip(points_x, points_y)), np.int32)\n points = points.reshape((-1, 1, 2))\n cv2.fillPoly(mask, [points], (255, 255, 255, 0)) # RGB = White, 0 = alpha \n\n # Apply the mask to the image\n masked_image = cv2.bitwise_and(image, mask)\n\n # Save the masked image\n output_filename = os.path.join(output_dir, image_info[\"filename\"])\n cv2.imwrite(output_filename, masked_image)\n\nprint(\"Masking completed. 
Masked images are saved in the output directory.\")\n","repo_name":"OutAway/VIA2Mask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3283808016","text":"from livewires import games\nimport random\n\ngames.init(screen_width = 640, screen_height = 480, fps = 50)\n\nclass Asteroid(games.Sprite):\n SMALL = 1\n MEDIUM = 2\n LARGE = 3\n images = {SMALL : games.load_image('asteroid_small.bmp'),\n MEDIUM : games.load_image('asteroid_med.bmp'),\n LARGE : games.load_image('asteroid_big.bmp')}\n SPEED = 2\n def __init__(self, x, y, size):\n super().__init__(\n image = Asteroid.images[size],\n x = x, y = y,\n dx = random.choice([1, -1]) * Asteroid.SPEED * random.random() / size,\n dy = random.choice([1, -1]) * Asteroid.SPEED * random.random() / size)\n self.size = size\n def update(self):\n '''огибание экрана'''\n if self.bottom < 0:\n self.top = games.screen.height\n if self.top > games.screen.height:\n self.bottom = 0\n if self.left > games.screen.width:\n self.right = 0\n if self.right < 0:\n self.left = games.screen.width\n\nclass Ship(games.Sprite):\n image = games.load_image('ship.bmp')\n image2 = games.load_image('ship_turbine.bmp')\n ROTATION_STEP = 3\n def update(self):\n if games.keyboard.is_pressed(games.K_LEFT):\n self.angle -= Ship.ROTATION_STEP\n self.image = Ship.image2\n if games.keyboard.is_pressed(games.K_RIGHT):\n self.angle += Ship.ROTATION_STEP\n self.image = Ship.image2\ndef main():\n nebula_image = games.load_image('nebula.jpg')\n games.screen.background = nebula_image\n for i in range(8):\n x = random.randrange(games.screen.width)\n y = random.randrange(games.screen.height)\n size = random.choice([Asteroid.SMALL, Asteroid.MEDIUM, Asteroid.LARGE])\n new_asteroid = Asteroid(x, y, size)\n games.screen.add(new_asteroid)\n new_ship = Ship(image = Ship.image,\n x = games.screen.width / 2,\n y = games.screen.height / 2)\n games.screen.add(new_ship)\n games.screen.mainloop()\nmain()\n","repo_name":"aomay/python_M.Douson","sub_path":"12/pewpew2.py","file_name":"pewpew2.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15867041600","text":"'''\n测试用例标题:采购询价测试\n测试场景:采购询价业务流程测试\n创建者:Tom\n创建日期:2018-7-25\n最后修改日期:2018-7-25\n输入数据:供应商:搭瓦家具公司,审批流程各个角色账号\n输出数据:无\n\n'''\n\n\n\nimport unittest\nfrom cgitb import text\nimport selenium.webdriver.support.ui as ui\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport unittest, time, re\nimport time,unittest,configparser\n\n\n'''\n加载配置选项\n'''\ncfg = configparser.ConfigParser()\ncfg.read('../../../../core/config.ini')\n\n\nclass ProductQuotation(unittest.TestCase):\n base_url = cfg.get(\"project\", \"base_url\")\n project_path = cfg.get(\"project\", \"project_path\")\n log_path = cfg.get(\"webdriver\", \"log\") + '/' + cfg.get(\"webdriver\", \"logfile\") + '-%s.log' % time.strftime(\n \"%Y-%m-%d %H_%M_%S\")\n def setUp(self):\n # 脚本标识-标题\n self.script_name = '采购询价'\n # 
脚本标识-ID\n self.script_id = 'test_flow_product_quotation'\n self.target_url = self.base_url + self.project_path\n if (cfg.get(\"webdriver\", \"enabled\") == \"off\"):\n # 如果使用最新firefox需要屏蔽下面这句\n self.dr = webdriver.Firefox()\n else:\n # 如果使用最新firefox需要使用下面这句\n self.dr = webdriver.Firefox(log_path=self.log_path)\n\n\n self.dr.implicitly_wait(15)\n self.dr.maximize_window()\n self.verificationErrors = []\n self.accept_next_alert = True\n\n # 定义登录方法\n def login(self, username, password):\n self.dr.get('http://192.168.1.108:880/') # 登录页面\n self.dr.find_element_by_id('account-inputEl').send_keys(username)\n self.dr.find_element_by_id('password-inputEl').send_keys(password)\n self.dr.find_element_by_id('button-1013-btnIconEl').click()\n\n\n def test_PurchaseContract(self):\n self.login('Vic_cn','123')\n sleep(5)\n\n\n self.dr.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").click()#关闭弹出框\n\n self.dr.find_element_by_xpath(\"//*[@id='appNavTabPanel']//span[contains(@class,'fa-code-fork')]\").click()#定位到申请单据\n\n self.dr.find_element_by_xpath(\"//*[@id='west-panel-targetEl']//span[contains(text(), '采购询价')]\").click() # 定位到采购询价\n\n self.dr.find_element_by_xpath(\"//*[@id='FlowProductQuotationView']//span[contains(@class,'fa-plus')]\").click()#定位到采购询价新建\n\n self.dr.find_element_by_xpath( \"//*[@id='FlowProductQuotationViewFormPanelID-body']//input[@name='main.vendorName']\").click() ## 选择供应商\n\n self.dr.find_element_by_xpath(\"//*[@id='VendorDialogWinSearchPanelID-body']//input[@name='keywords']\").send_keys('搭') ## 定位到关键字\n\n self.dr.find_element_by_xpath(\"//*[@id='VendorDialogWinSearchPanelID-innerCt']//span[contains(@class,'fa-search')]\").click() # 点击搜索\n\n\n\n\n\n _elementFirst = self.dr.find_element_by_xpath(\"//*[@id='VendorDialogWinGridPanelID-body']//div[contains(text(), '1')]\") # 定位采购第一条记录\n\n ActionChains(self.dr).double_click(_elementFirst).perform() # 在此元素上双击\n\n _elementSecond = self.dr.find_element_by_xpath(\"//*[@id='FlowProductQuotationViewFormGridPanelID_header-targetEl']//img[contains(@class,'x-tool-plus')]\") # 定位添加SKU按钮'''\n\n ActionChains(self.dr).double_click(_elementSecond).perform()\n\n _elementThird = self.dr.find_element_by_xpath(\"//*[@id='ProductDialogWinGridPanelID-body']//div[contains(text(), '1')]\") # 定位SKU第一条记录\n\n ActionChains(self.dr).double_click(_elementThird).perform() # 在此元素上双击\n\n self.dr.find_element_by_xpath(\"//*[@id='ProductDialogWinID']//span[contains(@class,'fa-check')]\").click() #点击确认\n\n self.dr.find_element_by_xpath(\"//div[@id='FlowProductQuotationViewFormGridPanelID-normal-body']/div/table/tbody/tr/td[8]\").click() # 定位到aud框\n\n self.dr.find_element_by_xpath(\"//*[@id='FlowProductQuotationViewFormGridPanelID']//input[@name='priceAud']\").send_keys('10') ## 定位到AUD输入\n\n self.dr.find_element_by_xpath(\"//*[@id='FlowProductQuotationForm']//span[contains(@class,'fa-play')]\").click() # 点击发启\n\n\n\n self.dr.find_element_by_link_text('注销').click() # 点击注销\n\n\n self.dr.find_element_by_link_text('是').click()\n\n alert = self.dr.switch_to_alert()\n\n alert.accept() # 退出页面\n\n sleep(5)\n\n self.login('Vic_cn', '123')\n\n sleep(5)\n\n self.dr.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").click() # 关闭弹出框\n\n self.dr.find_element_by_xpath(\"//*[@id='appNavTabPanel']//span[contains(@class,'fa-desktop')]\").click() # 定位到工作面板\n\n self.dr.find_element_by_xpath(\"//*[@id='west-panel-targetEl']//span[contains(text(), '待办事项')]\").click() # 定位到待办事项\n\n 
self.dr.find_element_by_xpath(\"//*[@id='EventsGridPanelID-body']//div[contains(text(), '1')]\").click() #定位到待办事项第一条记录\n\n self.dr.find_element_by_xpath(\"//*[@id='EventsFormPanelID-body']//span[contains(@class, 'x-btn-icon-el')]\").click() # 点击马上处理\n\n self.dr.find_element_by_xpath(\"//*[@id='FlowProductQuotationForm']//span[contains(@class, 'fa-check-square')]\").click() # 点击通过\n\n sleep(3)\n\n def is_element_present(self, how, what):\n try:\n self.driver.find_element(by=how, value=what)\n except NoSuchElementException as e:\n return False\n return True\n\n def is_alert_present(self):\n try:\n self.driver.switch_to_alert()\n except NoAlertPresentException as e:\n return False\n return True\n\n def close_alert_and_get_its_text(self):\n try:\n alert = self.driver.switch_to_alert()\n alert_text = alert.text\n if self.accept_next_alert:\n alert.accept()\n else:\n alert.dismiss()\n return alert_text\n finally:\n self.accept_next_alert = True\n\n\n def tearDown(self):\n self.dr.quit()\n self.assertEqual([], self.verificationErrors)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"tian848-tim/trunk","sub_path":"case/Wokflow/PurchasingApplication/PurchaseQuptationApplication/test_flow_product_quotation.py","file_name":"test_flow_product_quotation.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17479610566","text":"from configparser import SafeConfigParser\n\nimport json\nimport yaml\nimport os\nimport os.path\nimport pystache\nimport pytumblr\nimport re\nimport requests\nimport sys\n\n\ndef auth_tumblr(config):\n # Authenticate via OAuth\n parser = SafeConfigParser()\n parser.read(\"tumblr_jekyll.ini\")\n\n CONSUMER_KEY = parser.get('tumblr_api', 'tumblr_consumer_key')\n CONSUMER_SECRET = parser.get('tumblr_api', 'tumblr_consumer_secret')\n OAUTH_TOKEN = parser.get('tumblr_api', 'tumblr_oauth_token')\n OAUTH_SECRET = parser.get('tumblr_api', 'tumblr_oauth_secret')\n\n client = pytumblr.TumblrRestClient(\n CONSUMER_KEY,\n CONSUMER_SECRET,\n OAUTH_TOKEN,\n OAUTH_SECRET,\n )\n\n return client\n\n\ndef write_out_json(thing, directory, fn):\n \"\"\"\n Write out a `thing` as json.\n\n thing: some object to write out as json\n directory: location to write to\n fn: filename to write to\n \"\"\"\n results_path = \"{}/json\".format(directory)\n results_json = \"{}.json\".format(fn)\n fullpath = \"{}/{}\".format(results_path, results_json)\n\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n print(\".......... Created {}\".format(results_path))\n\n try:\n loaded_json = json.dumps(thing)\n with open(fullpath, \"w\") as json_results_file:\n json_results_file.write(loaded_json)\n print(\".......... Wrote out {}\".format(results_json))\n except ValueError as e:\n print(\".......... ValueError {}\".format(e))\n print(\"!!!!!!!!!! {}\".format(thing))\n\n\ndef write_out_yaml(thing, directory, fn):\n \"\"\"\n Write a 'thing' out as as yaml to a file.\n\n thing: some object to write out as json\n directory: location to write to\n fn: filename to use\n \"\"\"\n results_path = \"{}/yaml\".format(directory)\n results_yaml = \"{}.yaml\".format(fn)\n fullpath = \"{}/{}\".format(results_path, results_yaml)\n\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n print(\".......... Created {}\".format(results_path))\n\n try:\n dumped_yaml = yaml.dump(thing)\n with open(fullpath, \"w\") as yaml_results_file:\n yaml_results_file.write(dumped_yaml)\n print(\".......... 
Wrote out {}\".format(results_yaml))\n except:\n print(\".......... Unexpected error:\", sys.exc_info()[0])\n\n\ndef write_out_template(dictionary, path, fn, template):\n \"\"\"\n Render the dictionary using the given template.\n\n path: the location to write to\n fn: filename to use\n template: which mustache template use when rendering\n \"\"\"\n html_path = \"{}/{}\".format(path, fn)\n results_template = open(\"../_templates/{}\".format(template)).read()\n html_results = pystache.render(results_template, dictionary)\n # need to encode to pass to write()\n html_results_encoded = html_results.encode(\n encoding='UTF-8', errors='strict'\n )\n\n with open(html_path, \"w\") as html_file:\n html_file.write(html_results_encoded.decode('utf-8'))\n print(\".......... Wrote out {}\".format(html_path))\n\n\ndef sort_nicely(l):\n \"\"\"\n Sort the given list in the way that humans expect.\n\n Cribbed from http://nedbatchelder.com/blog/200712.html#e20071211T054956\n \"\"\"\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n l.sort(key=alphanum_key)\n\n return l\n\n\ndef split_on_sep(seperator, thing):\n return thing.split(seperator, 1)[0]\n\n\ndef get_img_from_url(image_path, url):\n if os.path.isfile(image_path):\n print(\"---------- Already downloaded {}\".format(url))\n else:\n print(\"---------- Downloading {}\".format(url))\n with open(image_path, 'wb') as f:\n f.write(requests.get(url).content)\n\n\ndef clean_string(dirty_string):\n import string\n import re\n\n chars = re.escape(string.punctuation)\n clean_string = re.sub(r'['+chars+']', '', dirty_string)\n clean_string.lstrip().rstrip()\n return clean_string\n","repo_name":"wholewheattoast/wwtJekyl","sub_path":"_scripts/toast_tools.py","file_name":"toast_tools.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6459909955","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 14 12:16:31 2017\n\n@author: Biagio Brattoli\n\"\"\"\nimport os, sys, numpy as np\nimport argparse\nfrom time import time\nfrom tqdm import tqdm\nimport math\n\nimport tensorflow # needs to call tensorflow before torch, otherwise crush\nsys.path.append('Utils')\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport random\nfrom PIL import Image\nsys.path.append('Dataset')\nfrom JigsawNetwork import Network,Network2,loss2\nimport torchvision.transforms as transforms\nfrom Utils.TrainingUtils import adjust_learning_rate, compute_accuracy\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport shutil\n\nparser = argparse.ArgumentParser(description='Train JigsawPuzzleSolver on Imagenet')\nparser.add_argument('--data', type=str, help='Path to Imagenet folder')\nparser.add_argument('--model', default=None, type=str, help='Path to pretrained model')\nparser.add_argument('--classes', default=600, type=int, help='Number of permutation to use')\nparser.add_argument('--gpu', default=0, type=int, help='gpu id')\nparser.add_argument('--epochs', default=20000, type=int, help='number of total epochs for training')\nparser.add_argument('--iter_start', default=0, type=int, help='Starting iteration count')\nparser.add_argument('--batch', default=71, type=int, help='batch size')\nparser.add_argument('--kuai', default=3, type=int, help='kuai * kuai')\nparser.add_argument('--checkpoint', default='checkpoints/', type=str, help='checkpoint 
folder')\nparser.add_argument('--lr', default=0.0005, type=float, help='learning rate for SGD optimizer')\nparser.add_argument('--cores', default=6, type=int, help='number of CPU core for loading')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set, No training')\nargs = parser.parse_args()\n\n#from ImageDataLoader import DataLoader\nfrom Dataset.JigsawImageLoader import mydataset,testdataset,manudataset\n\nbest_recall=0.0\nbest_recall2 = 0.0\nrandom.seed(2022)\n\ndef main():\n\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n \n print('Process number: %d'%(os.getpid()))\n \n ## DataLoader initialize ILSVRC2012_train_processed\n datapath = '/data/tszhe/jiaguwen/pic/all'\n manupath = '/data/tszhe/jiaguwen/pic/cut5'\n #cp_path = '/data/tszhe/jiaguwen/pic/cp'\n cp_path='/data/rhji/oracle_cut/exp/crops/jiagu'\n fileslist = os.listdir(manupath)\n files_len = int(len(fileslist)/5)\n numkey = list(range(files_len))\n random.shuffle(numkey)\n train_len = int(files_len*0.8)\n test_len = files_len - train_len\n train_key = numkey[:train_len]\n test_key = numkey[train_len:]\n\n\n train_data = manudataset(data_path=manupath,\n kuai=3,\n key=train_key) #这个叫dataloader的其实是Dataset的子类\n train_loader = torch.utils.data.DataLoader(dataset=train_data,\n batch_size=args.batch, #包含batch*5张图片\n shuffle=True,\n pin_memory = True,\n num_workers=args.cores)\n \n\n\n val_data1 = manudataset(data_path=manupath,\n kuai=3)\n\n val_loader1 = torch.utils.data.DataLoader(dataset=val_data1,\n batch_size=val_data1.__len__(),\n shuffle=False,\n pin_memory=True,\n num_workers=args.cores)\n val_data2 = manudataset(data_path=manupath,\n kuai=3,\n key=test_key) #这个叫dataloader的其实是Dataset的子类\n val_loader2 = torch.utils.data.DataLoader(dataset=val_data2,\n batch_size=val_data2.__len__(),\n shuffle=False,\n pin_memory=True,\n num_workers=args.cores)\n cp_data = manudataset(data_path=cp_path,\n kuai=3)\n\n cp_loader = torch.utils.data.DataLoader(dataset=cp_data,\n batch_size=int(cp_data.__len__()/5),\n shuffle=False,\n pin_memory=True,\n num_workers=args.cores)\n\n # for i, (cpimages, cpnames) in enumerate(cp_loader):\n # cpimages = Variable(cpimages)\n # if args.gpu is not None:\n # cpimages = cpimages.cuda()\n\n\n # Network initialize\n net = Network(args.classes).cuda()\n## net.load_state_dict(torch.load(\"/home/hxxiang/jiagu_model_1113/model_save80.55554962158203_epoch310\"))\n net.load_state_dict(torch.load(\"/home/tszhe/CodePj/jiagu/JigsawPuzzle/model_save_600_83.33332824707031\"))\n## net.load_state_dict(torch.load(\"/home/rhji/jiagu/JigsawPuzzlePytorch-3/model_save_net_0.867231638418079\"))\n net.zero_grad()\n '''acc = test(net, val_loader, 0)\n test1(net, val_loader1, val_data1.datalen,val_data1)'''\n net2 = Network2().cuda()\n ## net2.load_state_dict(torch.load(\"/home/rhji/jiagu/JigsawPuzzlePytorch-3/model_save_net1_0.4166666666666667\"))\n net2.zero_grad()\n\n net.eval()\n net2.eval()\n save_topk(net, net2, val_loader2, val_data2.datalen,cp_loader)\n## save_emb(net, net2, val_loader1, cp_loader)\n # image_to_find = '/data/tszhe/jiaguwen/pic/cut5/100_3.jpg'\n # find_pic(image_to_find,net,net2,val_loader1, int(val_data1.datalen / 5),cp_loader)\n # find_lowest_pic(net, val_loader1, int(val_data1.datalen / 5))\n # test1(net, net2, val_loader1, int(val_data1.datalen / 5),cp_loader)\n test2(net, net2, val_loader2, val_data2.datalen,cp_loader)\n\n\ndef find_lowest_pic(net,val_loader,item_num):\n print('find_lowest_pic')\n net.eval()\n with torch.no_grad():\n 
for i, (images,names) in enumerate(val_loader): # 这个for其实没有意义,一次会把整个batch都取完。\n images = Variable(images)\n if args.gpu is not None:\n images = images.cuda()\n outputs = net(images, 0)\n## outputs = net2(outputs)\n outputs = outputs.cpu().numpy()\n simi = cosine_similarity(outputs)\n\n lowest_pos_num = np.zeros(5,dtype=int)\n for j in range(item_num):\n lowest_sim = 1\n randpicpos = 2\n picnum = j*5+randpicpos\n for bias in range(-2,3):\n cossim = simi[picnum][picnum+bias]\n if cossim < lowest_sim:\n lowest_sim = cossim\n lowest_pos = 2 + bias\n lowest_pos_num[lowest_pos] += 1\n # print(\"pic:{:d},lowest_pos:{:d}\".format(j, lowest_pos))\n for j in range(5):\n print(\"lowest_pos:{:d},sum:{:d},\".format(j,lowest_pos_num[j]))\n\n\ndef test1(net,net2,val_loader,item_num,cp_loader): #这个item_num是有多少个整图,不要乘9\n global best_recall\n print('Evaluating network.......')\n print('Dataset: 170*5 + 1w')\n accuracy = []\n net.eval()\n net2.eval()\n with torch.no_grad():\n for i, (images,names) in enumerate(val_loader): # 这个for其实没有意义,一次会把整个batch都取完。\n images = Variable(images)\n if args.gpu is not None:\n images = images.cuda()\n outputs = net(images, 0)\n outputs = net2(outputs)\n outputs = outputs.cpu().numpy()\n\n for j, (cpimages, cpnames) in enumerate(cp_loader):\n cpimages = Variable(cpimages)\n if args.gpu is not None:\n cpimages = cpimages.cuda()\n cpoutputs = net(cpimages, 0)\n cpoutputs = net2(cpoutputs)\n cpoutputs = cpoutputs.cpu().numpy()\n outputs = np.append(outputs, cpoutputs, axis=0)\n\n simi = cosine_similarity(outputs)\n\n\n fpall = np.zeros(5,dtype=int)\n fnall = np.zeros(5,dtype=int)\n topn = (10, 20)\n ##topn = (10,20,50,200,500,1000,2000,3000,4000,5000,6000,7000,8000)\n tpall = np.zeros(len(topn),dtype=int)\n for j in range(item_num):\n\n #randpicpos = random.randint(0,args.kuai * args.kuai-1) #中间的块编号为4\n randpicpos = 2\n picnum = j*5+randpicpos\n idx = np.argsort(simi[picnum]).astype(np.int32)[::-1]\n delidx = np.where(idx == picnum)\n idx = np.delete(idx, delidx)\n #print(val_data.files[j]+','+str(randpicpos)+': ',end=\"\")\n for ii,n in enumerate(topn):\n tp = fp = fn = tn = 0\n for k in range(n):\n '''if ii == 1:\n print(val_data.files[int(idx[k].item()/9)] + ',' + str(idx[k].item()%9)+' ',end=\"\")'''\n if int(idx[k]/5)==j:\n tp = tp + 1\n #print()\n tpall[ii]+=tp\n print()\n tpall = tpall/item_num\n for j in range(len(topn)):\n precesion = tpall[j] / topn[j]\n recall = tpall[j] / 4\n f1 = 2.0*precesion*recall/(precesion+recall)\n print(\"top{:d} : precision:{:04f} recall:{:04f} f1:{:04f}\".format(topn[j],precesion, recall, f1))\n if j==2 and recall>best_recall:\n best_recall = recall\n print(\"best recall:\",best_recall)\n\n\n\n net.train()\n net2.train()\n\n\ndef test2(net,net2,val_loader,item_num,cp_loader): #这个item_num是有多少张完整的图(不乘9,也不包括后面的残片)\n global best_recall2\n print('Evaluating test2.......')\n print('Dataset: 170*5*0.2 + 1w')\n\n accuracy = []\n net.eval()\n net2.eval()\n with torch.no_grad():\n for i, (images,names) in enumerate(val_loader): # 这个for其实没有意义,一次会把整个batch都取完。\n images = Variable(images)\n if args.gpu is not None:\n images = images.cuda()\n outputs = net(images, 2)\n## outputs = net2(outputs)\n outputs = outputs.cpu().numpy()\n\n for j, (cpimages, cpnames) in enumerate(cp_loader):\n cpimages = Variable(cpimages)\n if args.gpu is not None:\n cpimages = cpimages.cuda()\n cpoutputs = net(cpimages, 0)\n## cpoutputs = net2(cpoutputs)\n cpoutputs = cpoutputs.cpu().numpy()\n outputs = np.append(outputs, cpoutputs, axis=0)\n\n simi = 
cosine_similarity(outputs)\n\n\n fpall = np.zeros(5,dtype=int)\n fnall = np.zeros(5,dtype=int)\n## topn = (10,20)\n topn = (10, 20, 50, 200, 500, 1000, 2000, 3000, 4000, 5000, 6000)\n tpall = np.zeros(len(topn), dtype=int)\n for j in range(item_num):\n\n #randpicpos = random.randint(0,args.kuai * args.kuai-1) #中间的块编号为4\n randpicpos = 2\n picnum = j*5+randpicpos\n #_, idx = simi[picnum].sort(descending=True)\n idx = np.argsort(simi[picnum]).astype(np.int32)[::-1]\n delidx = np.where(idx==picnum)\n idx = np.delete(idx,delidx)\n #print(val_data.files[j]+','+str(randpicpos)+': ',end=\"\")\n for ii,n in enumerate(topn):\n tp = fp = fn = tn = 0\n for k in range(n):\n '''if ii == 1:\n print(val_data.files[int(idx[k].item()/9)] + ',' + str(idx[k].item()%9)+' ',end=\"\")'''\n if int(idx[k]/5)==j:\n tp = tp + 1\n #print()\n tpall[ii]+=tp\n if ii==5 and tp>4:\n print(j,'error')\n print()\n tpall = tpall/item_num\n for j in range(len(topn)):\n precesion = tpall[j] / topn[j]\n recall = tpall[j] / 4\n f1 = 2.0*precesion*recall/(precesion+recall)\n print(\"top{:d} : precision:{:04f} recall:{:04f} f1:{:04f}\".format(topn[j],precesion, recall, f1))\n if j==2 and recall>best_recall2:\n best_recall2 = recall\n print(\"best recall2:\",best_recall2)\n\n\n\n net.train()\n net2.train()\n\n\ndef save_emb(net,net2,val_loader,cp_loader):\n print('Saving feature.......')\n net.eval()\n net2.eval()\n with torch.no_grad():\n for i, (images, names) in enumerate(val_loader): # 这个for其实没有意义,一次会把整个batch都取完。\n images = Variable(images)\n if args.gpu is not None:\n images = images.cuda()\n outputs = net(images, 0)\n outputs = net2(outputs)\n outputs = outputs.cpu().numpy()\n\n for j, (cpimages, cpnames) in enumerate(cp_loader):\n cpimages = Variable(cpimages)\n if args.gpu is not None:\n cpimages = cpimages.cuda()\n cpoutputs = net(cpimages, 0)\n cpoutputs = net2(cpoutputs)\n cpoutputs = cpoutputs.cpu().numpy()\n outputs = np.append(outputs, cpoutputs, axis=0)\n names = np.append(names, cpnames)\n\n np.save('/home/hxxiang/feature.npy', outputs)\n np.save('/home/hxxiang/path.npy', names)\n print('saving end')\n\n\ndef save_topk(net,net2,val_loader,item_num,cp_loader):\n print('Dataset: 170*5*0.2 + 1w')\n\n net.eval()\n net2.eval()\n with torch.no_grad():\n for i, (images, names) in enumerate(val_loader): # 这个for其实没有意义,一次会把整个batch都取完。\n images = Variable(images)\n if args.gpu is not None:\n images = images.cuda()\n outputs = net(images, 2)\n ## outputs = net2(outputs)\n outputs = outputs.cpu().numpy()\n\n for j, (cpimages, cpnames) in enumerate(cp_loader):\n cpimages = Variable(cpimages)\n if args.gpu is not None:\n cpimages = cpimages.cuda()\n cpoutputs = net(cpimages, 0)\n ## cpoutputs = net2(cpoutputs)\n cpoutputs = cpoutputs.cpu().numpy()\n outputs = np.append(outputs, cpoutputs, axis=0)\n\n simi = cosine_similarity(outputs)\n\n tpall = np.zeros(6000, dtype=int)\n for j in range(item_num):\n\n # randpicpos = random.randint(0,args.kuai * args.kuai-1) #中间的块编号为4\n randpicpos = 2\n picnum = j * 5 + randpicpos\n # _, idx = simi[picnum].sort(descending=True)\n idx = np.argsort(simi[picnum]).astype(np.int32)[::-1]\n delidx = np.where(idx == picnum)\n idx = np.delete(idx, delidx)\n # print(val_data.files[j]+','+str(randpicpos)+': ',end=\"\")\n tp = 0\n for k in range(6000):\n '''if ii == 1:\n print(val_data.files[int(idx[k].item()/9)] + ',' + str(idx[k].item()%9)+' ',end=\"\")'''\n if int(idx[k] / 5) == j:\n tp = tp + 1\n tpall[k] += tp\n\n print()\n tpall = tpall / item_num\n recall = tpall / 4\n 
np.save('CFN_topk_recall_2022.npy', recall)\n\n net.train()\n net2.train()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PennykkLu/join_oracle","sub_path":"JigsawVal.py","file_name":"JigsawVal.py","file_ext":"py","file_size_in_byte":15866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37655035541","text":"from courses.data_storage_and_processing.exercise2_1.Candle import PriceInfo, Candle, CandleDate\n\n\nclass StockExchangeData:\n\n def __init__(self, csv_stock_data_file) -> None:\n self.__csv_stock_data_file = csv_stock_data_file\n super().__init__()\n\n # method assumes that there is NO structure description in first string like the following one:\n # ;;;