diff --git "a/5464.jsonl" "b/5464.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5464.jsonl"
@@ -0,0 +1,963 @@
+{"seq_id":"38794950841","text":"#################################\n#\t\t Button LED\t\t\t#\n#################################\n#\t\t\t\t\t\t\t\t#\n# @Author: Aaron Earl\t\t\t#\n#\t\t\t\t\t\t\t\t#\n# From Freenove tutorials for\t#\n# RPi. Button w/LED.\t\t\t#\n#\t\t\t\t\t\t\t\t#\n# Task: Turn on an LED w/a\t\t#\n# button press.\t\t\t\t\t#\n#################################\n\nimport RPi.GPIO as GPIO\n\n# Note RPi uses physical pin locations\nledPin = 11\nbuttonPin = 12\n\ndef setup():\n\n\tprint(\"Prgogram starting...\")\n\tGPIO.setmode(GPIO.BOARD)\t\t# Numbers the GPIOs physical location\n\tGPIO.setup(ledPin, GPIO.OUT)\t# Set LEDPin's mode to output\n\tGPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set buttonPin's mode to input,\n\t# and pull up to high level (3.3V)\n\ndef loop():\n\n\twhile True:\n\t\tif GPIO.input(buttonPin) == GPIO.LOW:\n\t\t\tGPIO.output(ledPin, GPIO.HIGH)\n\t\t\tprint(\"LED On...\")\n\t\telse:\n\t\t\tGPIO.output(ledPin, GPIO.LOW)\n\t\t\tprint(\"LED Off...\")\n\ndef destroy():\n\n\tGPIO.output(ledPin, GPIO.LOW) # led off\n\tGPIO.cleanup() #r realease resources\n\nif __name__ == \"__main__\":\n\n\tsetup()\n\ttry:\n\t\tloop()\n\texcept KeyboardInterrupt: # ctrl-c pressed program exits and destroy gets called\n\t\tdestroy()\n","repo_name":"aearl16/Python","sub_path":"src/Python 3/RaspberryPi/ButtonLED.py","file_name":"ButtonLED.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"7877766794","text":"from tqdm import tqdm\nimport numpy as np\nimport math\nimport time\nimport statistics\n\nfrom ..test import customplot\nfrom ..data import ChipData, CustomWord, TestPulse\nfrom .threshold import ThresholdScan\n\nclass TimewalkScan(ScanTest):\n pixels = {}\n th = 1\n sections = []\n\n def pre_main(self):\n super().pre_main()\n self.daq.injection_digital(0xffff)\n self.daq.read_enable(0xffff)\n self.daq.clock_enable(0xffff)\n self.daq.injection_enable(0xffff)\n\n self.analysis.file.seek(0, 2)\n self.daq.send_tp(1)\n time.sleep(0.1)\n self.analysis.cleanup(); self.analysis.analyze()\n\n self.daq.send_tp(1)\n time.sleep(0.1)\n self.analysis.cleanup(); self.analysis.analyze()\n\n self.pixels = {}\n\n print(\"Starting scan on the following pixels:\")\n counter = 0\n for packet in self.analysis.packets:\n if(type(packet) != ChipData):\n continue\n\n for p in packet.pixels:\n p.injected = {}\n p.noise = [0] * 64\n self.pixels[(p.row,p.col)] = p\n print(\"\\t%3d) %s\" % (counter, p.to_string()))\n counter += 1\n\n if(packet.sec not in self.sections):\n self.sections.append(packet.sec)\n\n self.range = range(0,64)\n\n def pre_loop(self):\n for section in self.sections:\n self.daq.write_gcrpar('BIAS%1d_VCASN' % section, 1)\n return\n\n def loop_body(self, iteration):\n th = iteration\n\n for section in self.sections:\n self.daq.write_gcrpar('BIAS%1d_VCASN' % section, th)\n \n self.daq.custom_word(0xDEAFABBA, iteration)\n self.daq.read_enable(self.sections)\n self.daq.injection_digital(self.sections)\n self.daq.send_tp(2)\n time.sleep(0.01)\n\n self.daq.custom_word(0xBEEFBEEF, iteration)\n self.daq.injection_analog(self.sections)\n self.daq.send_tp(100)\n time.sleep(0.01)\n\n self.daq.custom_word(0xDEADBEEF, iteration)\n for i in range(0,100):\n self.daq.injection_analog(self.sections)\n self.daq.injection_digital(self.sections)\n\n time.sleep(0.01)\n\n self.daq.read_disable(self.sections)\n self.daq.custom_word(0xCAFECAFE, iteration)\n\n def post_main(self):\n super().post_main()\n print(\"Now analysing results...\")\n self.analysis.analyze()\n\n iterator = iter(self.analysis.packets)\n packet = next(p for p in iterator if type(p) == CustomWord and p.word == 0xDEAFABBA);\n counter = 0\n with tqdm(total=len(self.analysis.packets), desc='Data analysis') as bar:\n while True:\n th = packet.payload\n\n # Initialize arrays\n for i in self.pixels:\n self.pixels[i].injected[th] = []\n\n bar.update(counter)\n counter = 0;\n\n # Check Digital injections\n dig_injs = []\n tps = 0\n ts = time.time()\n c=0\n while True:\n try:\n packet = next(iterator); counter += 1\n if(type(packet) == PixelData):\n dig_injs.append(packet)\n elif(type(packet) == TestPulse):\n tps += 1\n else:\n break\n except StopIteration:\n break\n\n c += 1\n\n for section in self.sections:\n packets = list(filter(lambda x:type(x) == ChipData and x.sec == section, dig_injs))\n num = len(packets)\n\n if(num < tps):\n raise RuntimeError(\"TH:%u - Section %u didn't receive the digitally injected packets.\" % (th, section))\n\n #print(\"\\t\\tElapsed for digital: %3d (%d packets)\" % ((time.time() - ts)*1000, c))\n\n # Go on to injected packets\n if(type(packet) != CustomWord or packet.word != 0xBEEFBEEF or packet.payload != th):\n raise RuntimeError('Unexpected packet here %s ' % packet.to_string())\n\n ts = time.time()\n c=0\n while True:\n try:\n packet = next(iterator); counter += 1\n if(type(packet) == ChipData):\n for pix in packet.pixels:\n try:\n 
self.pixels[(pix.row,pix.col)].injected[th].append(packet.ts_ext - packet.last_tp)\n except KeyError:\n self.logger.warning(\"Unexpected pixel in this run: %s\" % pix.to_string())\n\n elif(type(packet) == TestPulse):\n continue\n else:\n break\n except StopIteration:\n break\n \n c += 1\n\n #print(\"\\t\\tElapsed for injected: %3d (%d packets)\" % ((time.time() - ts)*1000, c))\n\n # Go on to noisy packets\n if(type(packet) != CustomWord or packet.word != 0xDEADBEEF or packet.payload != th):\n raise RuntimeError('Unexpected packet here: %s ' % packet.to_string())\n\n ts = time.time()\n c = 0\n while True:\n try:\n packet = next(iterator); counter += 1\n if(type(packet) == ChipData):\n for pix in packet.pixels:\n try:\n self.pixels[(pix.row,pix.col)].noise[th] += 1\n except KeyError:\n self.logger.warning(\"Unexpected pixel in this run: %s\" % pix.to_string())\n elif(type(packet) == TestPulse):\n continue\n else:\n break\n except StopIteration:\n break\n\n c += 1\n\n #print(\"\\t\\tElapsed for noise: %3d (%d packets)\" % ((time.time() - ts)*1000, c))\n\n if(type(packet) != CustomWord or packet.word != 0xCAFECAFE or packet.payload != th):\n raise RuntimeError('Unexpected packet here %s ' % packet.to_string())\n \n if(th > 63):\n break\n\n try:\n packet = next(iterator); counter += 1\n except StopIteration:\n break\n\n if(type(packet) != CustomWord or packet.word != 0xDEAFABBA):\n raise RuntimeError('Unexpected packet here: %s' % packet.to_string())\n\n @customplot(('VCASN (#)', 'Timewalk (us)'), 'Timewalk distribution')\n def singleplot(self, pix, show=True, saveas=None, ax=None):\n inj = []\n err = []\n for injected in self.pixels[pix].injected.values():\n c = len(injected)\n if(c>1):\n inj.append(statistics.mean (injected)*1E6/self.analysis.ts_hz)\n err.append(statistics.stdev(injected)*1E6/self.analysis.ts_hz)\n elif(c==1):\n inj.append(injected[0]*1E6/self.analysis.ts_hz)\n err.append(0)\n else:\n inj.append(0)\n err.append(0)\n\n ax.errorbar(list(self.range), inj, yerr=err, fmt='-o', label='Test Pulses')\n\n def plot(self, show=True, saveas=None):\n for pixel in self.pixels:\n pix_saveas = None if (saveas == None) else f\"{saveas}_{pixel[0]}_{pixel[1]}\"\n self.singleplot(pixel, show=show, saveas=pix_saveas)\n","repo_name":"apatern0/arcadia_daq","sub_path":"pyarcadia/tests/timewalk.py","file_name":"timewalk.py","file_ext":"py","file_size_in_byte":7925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"29561574178","text":"from datetime import timedelta,datetime\nfrom sqlalchemy.dialects.mysql import insert\nfrom sqlalchemy.orm import load_only\nfrom sqlalchemy.sql import text\nfrom statistics import median\nimport MySQLdb\n\nfrom app import db\nfrom app.common.helpers.common import is_string\nfrom app.common.helpers.youtube_api import YouTubeApi\nfrom app.channel.models import Channel\nfrom app.video.models import Video, Statistic\nfrom app.tags.models import Tag\n\n\nclass YoutubeChannelScrapeJob:\n \"\"\"\n Cronjob for scraping videos and their statistics\n Scan time every 20 min because of youtube api limitation(*/20 * * * *) for channel with 2000\n videos\n \"\"\"\n\n def __init__(self, app_config, range_=20):\n \"\"\"Pass Flask app configs for cronjob\"\"\"\n self.app_config = app_config\n self.range = range_\n\n @staticmethod\n def _chunks(_list: list, n: int) -> list:\n \"\"\"\n Split given list l to chunks of n items\n \"\"\"\n for i in range(0, len(_list), n):\n yield _list[i : i + n]\n\n def _insert_tags(self, tags):\n \"\"\"Method for inserting videos tags to database table\"\"\"\n for tag in tags:\n query = insert(Tag).prefix_with('IGNORE').values(\n name=tag\n )\n db.session.execute(query)\n db.session.commit()\n\n @staticmethod\n def _insert_videos(videos, channel_id):\n \"\"\"Methods for inserting video to \"\"\"\n for video in videos:\n query = insert(Video).prefix_with('IGNORE').values(\n id=video['youtube_id'],\n name=video['name'],\n published_at=video['published'],\n channel_id=channel_id\n )\n db.session.execute(query)\n db.session.commit()\n\n @staticmethod\n def _insert_statistics(videos):\n \"\"\"Method for add video statistics records\"\"\"\n values = ','.join(\n '({0}, {1}, {2}, {3}, {4}, \"{5}\")'.format(\n video['views'],\n video['likes'],\n video['dislikes'],\n video['favorites'],\n video['comments'],\n video['youtube_id'],\n ) for video in videos\n )\n names = '(views, likes, dislikes, favorites, comments, video_id)'\n sql = 'INSERT INTO %s %s VALUES %s' % (Statistic.__tablename__, names, values)\n db.session.execute(text(sql))\n db.session.commit()\n\n @staticmethod\n def _count_first_hour_view(videos, range_):\n \"\"\"Method for adding first_hour_views in video using liner interpolation\"\"\"\n for video in videos:\n now = datetime.now()\n hour = now - timedelta(hours=1)\n hour_range = now - timedelta(hours=1, minutes=range_)\n\n if hour > video['published'] > hour_range:\n # get statics and count and video\n statistic = Statistic.query.filter(\n Statistic.video_id == video['youtube_id']\n ).order_by(Statistic.time_created.desc()).first()\n update = db.session.query(Video).filter(Video.id == video['youtube_id'])\n\n if statistic:\n # liner interpolation method for getting first hour views\n interval = (video['published'] + timedelta(hours=1)) - statistic.time_created\n interval = int((interval.total_seconds() % 3600) // 60)\n\n views = video['views'] - statistic.views\n interpolated_views = statistic.views + int(round(views * interval/range_))\n update.update({'first_hour_views': interpolated_views})\n\n else:\n update.update({'first_hour_views': video['views']})\n db.session.commit()\n\n @staticmethod\n def _video_tag_relation(videos):\n \"\"\"Method for relationship for videos tag\"\"\"\n for video in videos:\n if video.get('tags'):\n tags = Tag.query.options(load_only('id')).filter(Tag.name.in_(video['tags'])).all()\n\n tags_ids = ','.join('({0}, \"{1}\")'.format(tag.id, video['youtube_id']) for tag in tags)\n sql = 'INSERT IGNORE INTO tags 
(tag_id, video_id) VALUES %s' % tags_ids\n db.session.execute(text(sql))\n db.session.commit()\n\n @staticmethod\n def _count_channel_median(channel_id):\n \"\"\"Method for counting median of all chanel videos first_hours_views\"\"\"\n videos = Video.query.options(load_only('first_hour_views'))\\\n .filter(Video.first_hour_views != None).all()\n\n if videos:\n first_views = [video.first_hour_views for video in videos]\n\n db.session.query(Channel).filter(\n Channel.id == channel_id\n ).update({'views_median': median(first_views)})\n\n def run(self):\n \"\"\"Method for running crontjob of youtube channel videos scraping\"\"\"\n channels = Channel.query.all()\n for channel in channels:\n youtube = YouTubeApi(self.app_config['YOUTUBE_API_KEY'])\n\n while youtube.next_channel_page:\n video_ids = youtube.find_channel_videos(\n channel.id,\n page_token=is_string(youtube.next_channel_page)\n )\n if video_ids:\n videos = youtube.get_videos_stats(video_ids)\n self._insert_videos(videos, channel.id)\n\n tags = []\n for video in videos:\n if video.get('tags'):\n tags.extend(video.get('tags'))\n\n if tags:\n self._insert_tags(list(set(tags)))\n self._video_tag_relation(videos)\n\n self._count_first_hour_view(videos, self.range)\n self._insert_statistics(videos)\n\n self._count_channel_median(channel.id)\n db.session.commit()\n","repo_name":"KestutisKazlauskas/youtuber","sub_path":"api/app/common/crons/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":5952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"2905172592","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torchvision import transforms, datasets\nfrom model import shufflenet, efficientnet\nfrom sklearn.metrics import f1_score\nfrom src.utils import loss, macs\nfrom tqdm import tqdm\nimport os\nimport copy\nimport time\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=10):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_f1 = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0.0\n\n labels_total = []\n preds_total = []\n\n for inputs, labels in tqdm(dataloaders[phase]):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n labels_total.extend(labels.cpu().numpy().tolist())\n preds_total.extend(preds.cpu().numpy().tolist())\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n epoch_f1 = f1_score(labels_total,preds_total, average='macro')\n\n print('{} Loss: {:.4f} Acc: {:.4f} F1: {:.4f}\\n'.format(\n phase, epoch_loss, epoch_acc, epoch_f1))\n\n if phase == 'val':\n scheduler.step()\n\n if phase == 'val' and epoch_f1 > best_f1:\n best_f1 = epoch_f1\n best_model_wts = copy.deepcopy(model.state_dict())\n torch.save(model.state_dict(), \"./save/model.pt\")\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val F1: {:4f}'.format(best_f1))\n\n model.load_state_dict(best_model_wts)\n return model\n\nclass Efficientnet(nn.Module):\n def __init__(self):\n super(Efficientnet, self).__init__()\n self.net = efficientnet.efficientnet_b0(pretrained=True)\n self.linear = nn.Linear(1000,9)\n\n def forward(self,x):\n x = self.net(x)\n x = self.linear(x)\n return x\n\nif __name__ == \"__main__\":\n\n # https://pytorch.org/vision/stable/transforms.html\n\n img_size = 32\n\n data_transforms = {\n 'train': transforms.Compose([\n transforms.Resize([img_size,img_size]),\n # transforms.RandomHorizontalFlip(0.5),\n # transforms.RandomVerticalFlip(0.5),\n # transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize((0.4991, 0.4795, 0.4673), (0.2048, 0.2043, 0.2123))\n ]),\n 'val': transforms.Compose([\n transforms.Resize([img_size,img_size]),\n # transforms.RandomHorizontalFlip(0.5),\n # transforms.RandomVerticalFlip(0.5),\n # transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize((0.4966, 0.4769, 0.4646), (0.2057, 0.2053, 0.2126))\n ]),\n }\n\n data_dir = '/opt/ml/input/data'\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']}\n\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=256,\n shuffle=True,\n num_workers=4)\n for x in ['train', 'val']}\n\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n class_names = image_datasets['train'].classes\n\n device = torch.device(\"cuda:0\" 
if torch.cuda.is_available() else \"cpu\")\n\n # https://pytorch.org/vision/stable/models.html\n\n model = shufflenet.shufflenet_v2_x0_5(pretrained=True)\n model.conv5 = nn.Sequential(\n nn.Conv2d(192, 128, kernel_size=(1, 1), stride=(1, 1), bias=False),\n nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),\n nn.ReLU(inplace=True)\n )\n model.fc = nn.Linear(in_features=128, out_features=9, bias=True)\n # model.load_state_dict(torch.load('/opt/ml/code/save/model.pt'))\n model = model.to(device)\n\n criterion = loss.LabelSmoothingLoss()\n\n optimizer = optim.AdamW(model.parameters(), lr=1e-3)\n\n scheduler = lr_scheduler.StepLR(optimizer, step_size=5)\n\n print('MACs :', macs.calc_macs(model, (3, img_size, img_size)))\n\n # train_model(model, criterion, optimizer, scheduler, num_epochs=50)\n","repo_name":"skaurl/P4-Model-Optimization","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20839182009","text":"\n#__Print Functions__##\n\n\n\ndef DisplayCards(cards):\n\n\tCard='''\n ____ \n|x |\n| |\n| x|\n ---- \n '''\n\tl_Card=''\n\tcard_list=[]\n\tIndividual_lines=[]\n\tcards_print=''\n\n\t#_replace x with Card Values\n\n\tfor r in range(0,len(cards)):\n\t\tif cards[r]==10:\n\t\t\tl_Card=(Card.replace('x ','10'))\n\t\t\tl_Card=(l_Card.replace(' x','10'))\n\t\telse:\t\n\t\t\tl_Card=(Card.replace('x',str(cards[r])))\n\t\tl_Card=l_Card.split('\\n')\n\t\tcard_list.append(l_Card)\n\n\n\t#formatting cards side by side\n\n\tfor r in range(0,7):\n\t\tfor r2 in range(0,len(card_list)):\n\t\t\n\t\t\tIndividual_lines = card_list[r2]\n\t\t\n\t\t\tcards_print+=Individual_lines[r] +' '\n\t\tcards_print=cards_print +'\\n'\n\n\t#cards_print=cards_print[:-6]\n\t\n\treturn cards_print\n\n\n\n\n\n\ndef DisplayDealerHidden(cards):\n\n\tCard='''\n ____ \n|x |___\n| | |\n| x| |\n ----\t |\n ----\n '''\n\tl_Card=''\n\t\n\t#_replace x with Card Values\n\tif cards[0]==10:\n\t\tl_Card=(Card.replace('x ','10'))\n\t\tl_Card=(l_Card.replace(' x','10'))\n\telse:\n\t\tl_Card=(Card.replace('x',str(cards[0])))\n\treturn print(l_Card)\n","repo_name":"CME-GIT/BlackJack","sub_path":"print_functions.py","file_name":"print_functions.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"29369518936","text":"\"\"\"\r\nhttp://incompleteideas.net/sutton/MountainCar/MountainCar1.cp\r\npermalink: https://perma.cc/6Z2N-PFWC\r\n\"\"\"\r\n\r\nimport math\r\nimport numpy as np\r\nfrom gym import spaces\r\nfrom gym.utils import seeding\r\n\r\nclass MountainCarEnv():\r\n def __init__(self):\r\n self.min_position = -1.2 # 最低点\r\n self.max_position = 0.6 # 最高点\r\n self.max_speed = 0.07 # 最大速度\r\n self.goal_position = -0.2 # 目标高度\r\n self.goal_velocity = 0 # 目标速度 \r\n self.force=0.001 # 推力\r\n self.gravity=0.0025 # 重量\r\n self.time = None # 一个回合持续时间步\r\n\r\n self.low = np.array([self.min_position,\r\n -self.max_speed],dtype=np.float32)\r\n self.high = np.array([self.max_position,\r\n self.max_speed],dtype=np.float32)\r\n self.action_space = spaces.Discrete(3)\r\n self.observation_space = spaces.Box(self.low,\r\n self.high,dtype=np.float32)\r\n \r\n self.seed()\r\n \r\n def seed(self, seed=None):\r\n self.np_random,seed = seeding.np_random(seed)\r\n return seed\r\n\r\n def step(self, action):\r\n position,velocity = self.state\r\n velocity += (action-1)*self.force+math.cos(3*position)*(-self.gravity)\r\n velocity = np.clip(velocity,-self.max_speed,self.max_speed)\r\n position += velocity\r\n position = np.clip(position, self.min_position, self.max_position)\r\n if (position==self.min_position and velocity<0): \r\n velocity = 0\r\n \r\n self.state = [position, velocity]\r\n self.time += 1\r\n \r\n if position>=self.goal_position and velocity>=self.goal_velocity:\r\n done = True\r\n reward = 0\r\n info = 'Goal Obtained'\r\n elif self.time > 1000:\r\n done = True\r\n reward = -1\r\n info = 'Maximum Timesteps'\r\n else:\r\n done = False\r\n reward = -1\r\n info = 'Goal Obtained'\r\n\r\n return self.state, reward, done, info\r\n\r\n def reset(self):\r\n self.state = [self.np_random.uniform(low=-0.6, high=-0.4), 0]\r\n self.time = 0\r\n \r\n return self.state\r\n \r\nif __name__ == '__main__':\r\n env = MountainCarEnv()\r\n s = env.reset()\r\n for i in range(2000):\r\n prob = np.random.rand(3,)\r\n prob = prob/np.sum(prob)\r\n a = np.random.choice(np.arange(3),p=prob)\r\n s_,r,end,info = env.step(a)\r\n print(i,s,a,s_,end,info)\r\n \r\n if end:\r\n break","repo_name":"QiangLong2017/Deep-Reiforcement-Learning","sub_path":"LectureNotes/Code7/MountainCar.py","file_name":"MountainCar.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"}
+{"seq_id":"25987707512","text":"# https://www.codewars.com/kata/5a26ca51e1ce0e987b0000ee/train/python\ndef branch(n):\n if n == 1: return 0\n r = 1\n # 边长每圈增加 2, 确定在第几圈\n while r ** 2 < n:\n r += 2\n\n L = (r - 2) ** 2\n res = 3\n # 从最大的开始, 倒着减掉边长-1, 判断在 3, 2, 1, 0.\n for i in range(r ** 2, L, 1 - r):\n if n > i:\n return res + 1\n res -= 1\n return 0\n\n\nprint(branch(6), 0)\n","repo_name":"IsolatedRain/code_wars","sub_path":"completed/Progressive Spiral Number Branch.py","file_name":"Progressive Spiral Number Branch.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"7784412864","text":"from distutils.core import setup\n\nfiles = ['decapp2.py','decrypy.py','decgui2.py','decgui2.ui','README','setup.py']\n\nsetup(name = \"decrypy\",\n version = \"2.0\",\n description = \"A tool to decrypt Yahoo chat archives\",\n author = \"Akhil Wali\",\n author_email = \"green.transistor@gmail.com\",\n url = \"http://code.google.com/p/decrypy/\",\n #Name the folder where your packages live:\n #(If you have other packages (dirs) or modules (py files) then\n #put them into the package directory - they will be found \n #recursively.)\n packages = ['.'],\n #'package' package must contain files (see list above)\n #I called the package 'package' thus cleverly confusing the whole issue...\n #This dict maps the package name =to=> directories\n #It says, package *needs* these files.\n package_data = {'.' : files } \n )\n\n","repo_name":"darth10/decrypy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"31346206253","text":"#!/usr/bin/python\n\nimport sys\n\nstoredUsers= open('names', 'a')\n\nname = raw_input('Hi, please enter your name: ')\nage = raw_input('How old are you?: ')\nusername = raw_input('Please enter your username: ')\n\nsys.stdout.write('your name is ' + name + ', you are ' + str(age) + ' years old, and your username is ' + str(username) + '\\n')\n\nstoredUsers.write(name + '\\t' + age + '\\t'+username+'\\n')\n\nstoredUsers.close()\n","repo_name":"Yeorj/Easy1","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12167485412","text":"import glob\nimport os\nimport pathlib\nimport setuptools\n\npackage_data = dict()\npackage_data['ambit'] = []\nfor path, paths, files in os.walk('ambit/resources/', followlinks=True):\n for f in files:\n include = os.path.join(path, f)[6:]\n package_data['ambit'].append(include)\n\nsetuptools.setup(\n name='ambit',\n version='0.3.6',\n description='Take control of your Palette.',\n long_description=(\n '**ambit** is a Python library for interacting with PaletteGear '\n 'and MonogramCC devices, a graphical simulator for device-free '\n 'development, and an accompanying set of configurable end user '\n 'tools and demos.'\n ),\n long_description_content_type='text/markdown',\n keywords=['monogramcc', 'palettegear', 'demoscene', 'pygame', 'linux', 'pyusb'],\n url='https://github.com/khimaros/ambit',\n author='khimaros',\n packages=setuptools.find_packages(),\n package_data=package_data,\n scripts=glob.glob('./bin/*'),\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: POSIX',\n 'Topic :: System :: Hardware',\n ],\n python_requires='>=3.6',\n)\n\n","repo_name":"khimaros/ambit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"}
+{"seq_id":"41178963338","text":"print('======================================')\nprint('Mahasiswa Dengan Nilai Akhir Tertinggi')\nprint('======================================')\nnilaiMhs = [{'nim' : 'A01', 'nama' : 'Amir', 'mid' : 50, 'uas' : 80},\n {'nim' : 'A02', 'nama' : 'Budi', 'mid' : 40, 'uas' : 90}, \n {'nim' : 'A03', 'nama' : 'Cici', 'mid' : 50, 'uas' : 50}, \n {'nim' : 'A04', 'nama' : 'Dedi', 'mid' : 20, 'uas' : 30},\n {'nim' : 'A05', 'nama' : 'Fifi', 'mid' : 70, 'uas' : 40}]\ndef nilaiTertinggi(nilaiMhs):\n nilai = 0\n for i in nilaiMhs:\n nilaimid = i.get('mid')\n nilaiuas = i.get('uas')\n nilaiakhir = (nilaimid + 2*nilaiuas)/3\n if (nilaiakhir > nilai):\n nilai = nilaiakhir\n data = {}\n data['nim'] = i.get('nim')\n data['nama'] = i.get('nama')\n return data\nnilaitinggi = nilaiTertinggi(nilaiMhs)\nprint('NIM Mahasiswa',nilaitinggi['nim'])\nprint('Nama Mahasiswa',nilaitinggi['nama'])\n","repo_name":"DestriAbdiSaputro/Python-Projects-Protek","sub_path":"Praktikum 08/python project/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"17506002924","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 14 23:36:06 2021\n\n@author: Sai Krishna\n\"\"\"\n\n#Setting directory for download location\nimport os\nos.chdir('C:\\\\Data\\\\github\\\\python_projects\\\\Youtube video downloader')\n\n#Importing pytube (use pip install pytube)\nfrom pytube import YouTube\n\n#Video link as a string\nlink = \"https://www.youtube.com/watch?v=8b0ubLO2MUE\"\n\nvideo = YouTube(link)\n\n#Using the best resolution option\nstream = video.streams.get_highest_resolution()\n\n#File name of the video about to be downloaded\nstream.default_filename\n\n#Video Resolution\nstream.resolution\n\n#Checking the lowest resolution \nvideo.streams.get_lowest_resolution().resolution\n\n#Title of the video\nstream.title\n\n#Download command\nstream.download()\n","repo_name":"krishnasai321/Python_projects","sub_path":"Youtube video downloader/yt_video_downloader.py","file_name":"yt_video_downloader.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"1148669925","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom operators.models import QuoteRequest, TourOperator, Itinerary, ItineraryType\nfrom users.models import UserProfile\nimport MySQLdb\nfrom django.db.models import Count\nfrom django.contrib.auth.models import User\nfrom places.models import Park, CountryIndex\nfrom photos.models import Photo\nfrom blog.models import Article\nfrom reviews.models import ParkReview, KilimanjaroParkReview, TourOperatorReview\nfrom analytics.models import Analytic\n\n\nclass Command(BaseCommand):\n help = ''\n\n def handle(self, *args, **options):\n\n # update tour operators\n tour_operators = TourOperator.objects.all()\n #tour_operators = tour_operators.filter(slug='africaventure')\n for tour_operator in tour_operators:\n tour_operator.update_reviews_count()\n tour_operator.update_average_rating()\n tour_operator.update_parks_count()\n tour_operator.update_packages_count()\n tour_operator.update_quote_request_count()\n tour_operator.update_photos_count()\n tour_operator.update_yas_score()\n tour_operator.update_vehicle_rating()\n tour_operator.update_meet_and_greet_rating()\n tour_operator.update_responsiveness()\n tour_operator.update_safari_quality()\n tour_operator.update_itinerary_quality()\n tour_operator.update_packages_count()\n for country in tour_operator.country_indexes.all():\n tour_operator.update_yas_score(country)\n print('Updated', tour_operators.count(), 'tour_operators')\n\n\n #activity_level\n itineraries = Itinerary.objects.filter(date_deleted=None) \n for itinerary in itineraries:\n itinerary.activity_level = itinerary.calc_max_activity_level()\n itinerary.activity_level_name = itinerary.calc_activity_level_string()\n itinerary.save()\n print('Updated', itineraries.count(), 'itineraries')\n \n # update country\n countries = CountryIndex.objects.all()\n for country in countries:\n country.update_packages_count()\n country.update_photos_count()\n country.update_parks_count()\n country.update_operators_count()\n print('Updated', countries.count(), 'countries')\n\n # update articles\n articles = Article.objects.all()\n for article in articles:\n article.update_kudu_count()\n article.update_visit_count()\n article.update_comments_count()\n print('Updated', articles.count(), 'articles')\n\n #parks\n parks = Park.objects.all()\n for park in parks:\n park.update_reviews_count()\n park.update_tour_operators_count()\n park.update_average_rating()\n park.update_packages_count()\n park.update_photos_count()\n print('Updated', parks.count(), 'parks')\n\n # update park reviews\n reviews = ParkReview.objects.all()\n for review in reviews:\n review.update_views_count()\n review.update_kudu_count()\n print('Updated', reviews.count(), 'park reviews')\n\n # update tour operator reviews\n reviews = TourOperatorReview.objects.all()\n for review in reviews:\n review.update_views_count()\n review.update_kudu_count()\n \n print('Updated', reviews.count(), 'tour op reviews')\n\n # update kilimanjaro reviews\n reviews = KilimanjaroParkReview.objects.all()\n for review in reviews:\n review.update_views_count()\n review.update_kudu_count()\n print('Updated', reviews.count(), 'kilimanjaro park reviews visit counts')\n\n objs = Itinerary.objects.all()\n for obj in objs:\n obj.update_visit_count()\n print('Updated', objs.count(), 'itinerary views')\n\n objs = UserProfile.objects.all()\n for obj in objs:\n obj.update_review_count()\n obj.update_kudus_count()\n print('Updated', 
objs.count(), 'users reviews and kudus')\n\n objs = Photo.objects.filter(date_deleted__isnull=False)\n for obj in objs:\n obj.update_kudu_count()\n print('Updated', objs.count(), 'photos')\n\n self.stdout.write(self.style.SUCCESS(\"DONE\"))\n","repo_name":"montenegrop/djangotravelportal","sub_path":"extras/management/commands/updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71291563465","text":"# Fibonacci Series using Dynamic Programming \ndef fibonacci(n): \n # check for n is zero or less than two \n \n if n == 0:\n return 0\n elif n <= 2:\n return 1\n\n # List of zeros with length of given value n\n seq = [0] * n\n seq[0] = seq[1] = 1\n\n # Algorithm Calculating Fibonacci Numbers\n for i in range(2,n):\n seq[i]= seq[i-1]+seq[i-2]\n \n return seq[n-1]\n\nprint(fibonacci(9))","repo_name":"harkaranbrar7/Algorithm-Python","sub_path":"Dynamic Programming/fibonacciSeries.py","file_name":"fibonacciSeries.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28562330808","text":"\"\"\"\nsearch(here,d) : percentage of finding at here after d days\n\"\"\"\n\n\nn,d,p = map(int,input().split())\nconnected = []\nmemo = [[-1]*(d+1) for _ in range(n+1)]\n\nfor i in range(n):\n connected.append(list(map(int,input().split())))\nt = int(input())\nq = list(map(int,input().split()))\n\ndeg = [0]*(n+1)\n\nfor i in range(n):\n for j in range(n):\n if connected[i][j] == 1:\n deg[j] += 1\n\ndef search(here,days):\n if days == 0:\n if here == p:\n return 1.0\n else:\n return 0.0\n if memo[here][days] > -0.5:\n return memo[here][days]\n \n memo[here][days] = 0.0\n \n for there in range(n):\n if connected[here][there] == 1:\n memo[here][days] += search(there,days - 1)/deg[there]\n \n return memo[here][days]\n\n\nfor i in q:\n print(search(i,d))","repo_name":"kjh000/Algorithm","sub_path":"algo/두니발 박사의 탈옥.py","file_name":"두니발 박사의 탈옥.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"73046577225","text":"# coding:utf-8\nimport math\nimport stemming.porter2 as porter\nimport numpy as np\nimport copy\nfrom collections import Counter\n\n\ndef position_rank(sentence, tokenizer, alpha=0.85, window_size=6, num_keyphrase=10, lang=\"en\"):\n \"\"\"Compute position rank score.\n\n Position rank is a method for extracting keyphrase from sentence.\n This method is allowed any language if you provide tokenizer\n that tokenize language you wanna apply position rank.\n In the oroginal paper, authors used 'title' and 'abstract' of scholarly documents.\n Original paper is here: http://aclweb.org/anthology/P/P17/P17-1102.pdf\n\n Args:\n sentence: Text concatenated title and abstract.\n tokenizer: Object that tokenize sentence.\n Tokenizer must has tokenize(), tokenize() receive sentence and return token list\n and phrase list. See tokenizer.py and class of tokenizer for detail.\n alpha: Damping parameter. This allows 'teleport' operation into another node in the graph.\n window_size: Size of woindow for co-occurece word.\n num_keyphrase: Number of keyphrase which method will return.\n lang: Target language.\n\n Returns:\n Keyphrase list. Length of list is decided by 'num_keyphrase' param.\n\n \"\"\"\n if lang == \"en\":\n stem = porter.stem\n else:\n stem = lambda word: word\n # origial words(=no stemming) and phrase list\n original_words, phrases = tokenizer.tokenize(sentence)\n # stemmed words\n stemmed_word = [stem(word) for word in original_words]\n unique_word_list = set([word for word in stemmed_word])\n n = len(unique_word_list)\n\n adjancency_matrix = np.zeros((n, n))\n word2idx = {w: i for i, w in enumerate(unique_word_list)}\n p_vec = np.zeros(n)\n # store co-occurence words\n co_occ_dict = {w: [] for w in unique_word_list}\n\n # 1. initialize probability vector\n for i, w in enumerate(stemmed_word):\n # add position score\n p_vec[word2idx[w]] += float(1 / (i+1))\n for window_idx in range(1, math.ceil(window_size / 2)+1):\n if i - window_idx >= 0:\n co_list = co_occ_dict[w]\n co_list.append(stemmed_word[i - window_idx])\n co_occ_dict[w] = co_list\n\n if i + window_idx < len(stemmed_word):\n co_list = co_occ_dict[w]\n co_list.append(stemmed_word[i + window_idx])\n co_occ_dict[w] = co_list\n\n # 2. create adjancency matrix from co-occurence word\n for w, co_list in co_occ_dict.items():\n cnt = Counter(co_list)\n for co_word, freq in cnt.most_common():\n adjancency_matrix[word2idx[w]][word2idx[co_word]] = freq\n\n adjancency_matrix = adjancency_matrix / adjancency_matrix.sum(axis=0)\n p_vec = p_vec / p_vec.sum()\n # principal eigenvector s\n s_vec = np.ones(n) / n\n\n # threshold\n lambda_val = 1.0\n loop = 0\n # compute final principal eigenvector\n while lambda_val > 0.001:\n next_s_vec = copy.deepcopy(s_vec)\n for i, (p, s) in enumerate(zip(p_vec, s_vec)):\n next_s = (1 - alpha) * p + alpha * (weight_total(adjancency_matrix, i, s_vec))\n next_s_vec[i] = next_s\n lambda_val = np.linalg.norm(next_s_vec - s_vec)\n s_vec = next_s_vec\n loop += 1\n if loop > 100:\n break\n\n # score original words and phrases\n word_with_score_list = [(word, s_vec[word2idx[stem(word)]]) for word in original_words]\n for phrase in phrases:\n total_score = sum([s_vec[word2idx[stem(word)]] for word in phrase.split(\"_\")])\n word_with_score_list.append((phrase, total_score))\n\n sort_list = np.argsort([t[1] for t in word_with_score_list])\n keyphrase_list = []\n # if not check stemmed keyphrase, there are similar phrases in keyphrase list\n # i.e. 
\"neural network\" and \"neural networks\" in list\n stemmed_keyphrase_list = []\n for idx in reversed(sort_list):\n keyphrase = word_with_score_list[idx][0]\n stemmed_keyphrase = \" \".join([stem(word) for word in keyphrase.split(\"_\")])\n if not stemmed_keyphrase in stemmed_keyphrase_list:\n keyphrase_list.append(keyphrase)\n stemmed_keyphrase_list.append(stemmed_keyphrase)\n if len(keyphrase_list) >= num_keyphrase:\n break\n return keyphrase_list\n\n\ndef weight_total(matrix, idx, s_vec):\n \"\"\"Sum weights of adjacent nodes.\n\n Choose 'j'th nodes which is adjacent to 'i'th node.\n Sum weight in 'j'th column, then devide wij(weight of index i,j).\n This calculation is applied to all adjacent node, and finally return sum of them.\n\n \"\"\"\n return sum([(wij / matrix.sum(axis=0)[j]) * s_vec[j] for j, wij in enumerate(matrix[idx]) if not wij == 0])\n","repo_name":"ymym3412/position-rank","sub_path":"position_rank.py","file_name":"position_rank.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"81"}
+{"seq_id":"25398228976","text":"from . import usage\n\ndef solution():\n # PyPy ~ 18 ms\n\n tot = 0\n for n in range(1, 10_000):\n s = n\n i = 0\n while i < 50:\n s += int(str(s)[::-1])\n ss = str(s)\n if ss == ss[::-1]:\n break\n i += 1\n if i == 50:\n tot += 1\n return tot\n\nif __name__ == '__main__':\n usage.usage(solution, n=30)","repo_name":"ykw1793/project-euler","sub_path":"project_euler/problems/55.py","file_name":"55.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"43373311925","text":"def combinations(s):\n #mapper work\n if len(s) < 2:\n return s\n res = []\n# 문자데이터 할때 위를 수정 sep = ' ' 힌트 -> 단어가 다 잘라진 상태의 배열을 만들고 비교함수\n\n # reduce work\n for i, c in enumerate(s):\n res.append(c) #추가되는 부분 -> [ 'a', 'b', 'c', 'd']\n # print(i, c, res)\n print(i, c, res, end = ' : ')\n #자기를 뺀 나머지를 비교\n for j in combinations(s[:i] + s[i+1:]): # 재귀\n res.append(c+j)\n print(s[:i], end=' / ')\n print(s[i+1:], end= ' / ')\n print(s[:i] + s[i+1:], end= ' | ')\n print(i, c, j, res)\n return res\n\n\nif __name__ == \"__main__\":\n result = combinations('1234')\n print(result)\n \n \n#빅오 O(N^2) -> 2중 for문에 if문 포함\n# O(2N) -> 2중 for문","repo_name":"MethodFunc/KSA_edu_Daejeon","sub_path":"20200609.py","file_name":"20200609.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"40745862664","text":"import time\n\n\nclass ReplicatedMixin(object):\n\n scalable_attr = \"replicas\"\n\n @property\n def replicas(self):\n return self.obj[\"spec\"][\"replicas\"]\n\n @replicas.setter\n def replicas(self, value):\n self.obj[\"spec\"][\"replicas\"] = value\n\n\nclass ScalableMixin(object):\n\n @property\n def scalable(self):\n return getattr(self, self.scalable_attr)\n\n @scalable.setter\n def scalable(self, value):\n setattr(self, self.scalable_attr, value)\n\n def scale(self, replicas=None):\n count = self.scalable if replicas is None else replicas\n self.exists(ensure=True)\n if self.scalable != count:\n self.scalable = count\n self.update()\n while True:\n self.reload()\n if self.scalable == count:\n break\n time.sleep(1)\n","repo_name":"kelproject/pykube","sub_path":"pykube/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":351,"dataset":"github-code","pt":"81"}
+{"seq_id":"4994422878","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 25 13:04:25 2020\n\n@author: elif.ayvali\n\"\"\"\nfrom ddpg_agent import DDPGAgent\n\nimport torch\nimport numpy as np\nimport random\nfrom collections import namedtuple, deque\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n#Parameters from OpenAI Baselines\nBUFFER_SIZE = int(1e6) # replay buffer size\nBATCH_SIZE = 256 # minibatch size\nUPDATE_EVERY = 3 # Number of episodes that should elapse between gradient descent updates\nGAMMA = 0.995 # discount factor\nTAU = 1e-3 # for soft update of target parameters\n\nLR_ACTOR = 1e-4 # learning rate of the actor \nLR_CRITIC =5e-4 # learning rate of the critic\nWEIGHT_DECAY = 0 # L2 weight decay\nEPSILON = 1.0 # Exploration noise coefficient\nEPSILON_DECAY = 0 # Decay rate for exploration noise\nLEARN_TIMES= 2 # Number of times to backprop with the batch\nWARM_UP= 0 # Number of steps for uniform-random action selection, before running real policy. Helps exploration.\nCLIP_NORM=True\n\n\n\n\nclass MADDPG:\n def __init__(self, num_agents, state_size, action_size, random_seed):\n super(MADDPG, self).__init__()\n \n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n self.random_seed=random_seed\n self.num_agents=num_agents\n self.episode_num=0\n self.total_steps=0\n\n # Initialize centralized memory buffer\n self.memory = ReplayBuffer(self.action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)\n # Initialize decentralized actors/critics\n self.maddpg_agents=self.get_agents() \n \n\n def get_agents(self):\n \"\"\"get all the agents in the MADDPG object\"\"\"\n agents = [DDPGAgent(self,idx) for idx in range(self.num_agents)]\n return agents\n \n \n def reset_agents(self):\n for agent in self.maddpg_agents:\n agent.reset()\n \n def act(self, states, add_noise=True):\n \"\"\"get actions from all agents in the MADDPG object\"\"\"\n actions=[agent.act(state,self.episode_num, add_noise) for agent, state in zip(self.maddpg_agents, states)] \n return actions\n\n def encode_buffer(self,obs):\n #Concatenate states/actions of all agents\n #Reference : github@fsasilva59\n# print('before encode,obs',obs)\n# print('after encode,obs', np.array(obs).reshape(1,-1).squeeze())\n return np.array(obs).reshape(1,-1).squeeze()\n \n def decode_buffer(self,size, agent_idx, obs):\n #Reference : github@fsasilva59\n list_idx = torch.tensor([ idx for idx in range(agent_idx * size, agent_idx * size + size) ]).to(device) \n# print('before decode,obs',obs)\n# print('after decode,obs', obs.index_select(1, list_idx))\n return obs.index_select(1, list_idx)\n \n def step(self, states, actions, rewards, next_states, dones,episode):\n \"\"\"Concatanate experience of all agents in replay memory, and use random sample from buffer to learn.\"\"\"\n self.episode_num=episode \n self.memory.add(self.encode_buffer(states), self.encode_buffer(actions), self.encode_buffer(rewards), self.encode_buffer(next_states), self.encode_buffer(dones)) \n # Learn periodically, if enough samples are available in memory\n if len(self.memory) > BATCH_SIZE and self.episode_num % UPDATE_EVERY==0:\n for _ in range(LEARN_TIMES):\n for idx in range(self.num_agents): \n experiences = self.memory.sample() \n self.learn(experiences,self.maddpg_agents,idx, GAMMA) \n self.total_steps+=1\n \n def save_checkpoint(self):\n for agent in self.maddpg_agents:\n torch.save(agent.actor_local.state_dict(), 
'agent_'+str(agent.agent_idx)+'_checkpoint_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'agent_'+str(agent.agent_idx)+'_checkpoint_critic.pth')\n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model: PyTorch model (weights will be copied from)\n target_model: PyTorch model (weights will be copied to)\n tau (float): interpolation parameter \n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n \n def learn(self, experiences,meta_agent, agent_idx, gamma):\n \"\"\"Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n where:\n actor_target(state) -> action\n critic_target(state, action) -> Q-value\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n \"\"\"\n #all experiences\n #states: batch_sizex56\n #actions:batch_sizex4\n #rewards, batch_sizex2\n states, actions, rewards, next_states, dones = experiences\n# print('states',states)\n# print('actions',actions)\n# print('rewards',rewards)\n #get agent's experiences \n agent=meta_agent[agent_idx]\n# print('agent object',agent)\n agent_states = self.decode_buffer(self.state_size, agent_idx, states)\n agent_actions = self.decode_buffer(self.action_size, agent_idx, actions)\n agent_next_states = self.decode_buffer(self.state_size, agent_idx, next_states) \n agent_rewards=self.decode_buffer(1, agent_idx, rewards) \n agent_dones=self.decode_buffer(1, agent_idx, dones) \n# print('agent_rewards',agent_rewards)\n# print('agent_dones',agent_dones)\n# print('agent_states',agent_states)\n\n #get other agents' experiences\n other_agent_idx=np.delete(range(self.num_agents),agent_idx).squeeze()\n other_agent=meta_agent[other_agent_idx]\n# print('other_agent object',other_agent)\n other_agent_states = self.decode_buffer(self.state_size, other_agent_idx, states)\n other_agent_actions = self.decode_buffer(self.action_size, other_agent_idx, actions)\n other_agent_next_states = self.decode_buffer(self.state_size, other_agent_idx, next_states) \n other_agent_rewards=self.decode_buffer(1, other_agent_idx, rewards) \n #All agents ->torch for centralized critic\n all_states=torch.cat((agent_states, other_agent_states), dim=1).to(device)\n all_actions=torch.cat((agent_actions, other_agent_actions), dim=1).to(device)\n all_next_states=torch.cat((agent_next_states, other_agent_next_states), dim=1).to(device)\n \n# print('other agent_states',other_agent_states)\n\n # ---------------------------- update critic ---------------------------- # \n # Get predicted next-state actions and Q values from target model\n agent_next_actions=agent.actor_target(agent_states)\n other_agent_next_actions=other_agent.actor_target(other_agent_states) #should be agent 2\n# print(' agent_nex_actions',agent_next_actions)\n# print('other_agent_next_actions',other_agent_next_actions)\n\n #Next actions-> torch\n all_next_actions=torch.cat((agent_next_actions,other_agent_next_actions), dim=1).to(device) \n \n Q_targets_next = agent.critic_target(all_next_states, all_next_actions) #batch_sizex1\n# print('Q_targets_next',Q_targets_next)\n # Compute Q targets for current states (y_i)\n \n Q_targets = agent_rewards + (gamma * Q_targets_next * (1 - agent_dones))\n \n # Compute 
critic loss\n Q_expected = agent.critic_local(all_states, all_actions) #batch_sizex1\n# print('Q_expected',Q_expected)\n\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n\n # Minimize the loss\n agent.critic_optimizer.zero_grad()\n critic_loss.backward()\n if CLIP_NORM is True:\n torch.nn.utils.clip_grad_norm_(agent.critic_local.parameters(), 1) # clip gradient to max 1\n agent.critic_optimizer.step()\n\n # ---------------------------- update actor ---------------------------- #\n # Compute next action predictions for all agents \n agent_action_predictions=agent.actor_local(agent_states)\n #Predictions-> torch, Only backprop agent idx, detach other agents\n other_agent_action_predictions=other_agent.actor_local(other_agent_states)#.detach()\n all_actions_pred = torch.cat((agent_action_predictions, other_agent_action_predictions), dim = 1).to(device) \n #Detach other agent states ,only backprop agent states\n actor_loss = -agent.critic_local(all_states, all_actions_pred).mean()\n\n # Minimize the loss\n agent.actor_optimizer.zero_grad()\n actor_loss.backward()\n agent.actor_optimizer.step()\n\n # ----------------------- update target networks ----------------------- #\n self.soft_update(agent.critic_local, agent.critic_target, TAU)\n self.soft_update(agent.actor_local, agent.actor_target, TAU) \n\n # ---------------------------- update noise ---------------------------- #\n agent.epsilon -= EPSILON_DECAY\n agent.noise.reset()\n \nclass ReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self,action_size, buffer_size, batch_size, seed):\n \"\"\"Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n \"\"\"\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n# print('Replay seed:',seed)\n \n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n# print('new_experience :state',state)\n# print('new_experience :reward',reward)\n\n self.memory.append(e)\n \n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n experiences = random.sample(self.memory, k=self.batch_size)\n \n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)\n\n return (states, actions, rewards, next_states, dones)\n\n \n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n \n","repo_name":"eayvali/DeepRL","sub_path":"Multi-agent DDPG/multi_agent_ddpg.py","file_name":"multi_agent_ddpg.py","file_ext":"py","file_size_in_byte":11529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"15904142982","text":"\"\"\"urls.py implements api urls.\"\"\"\n\nfrom nautobot.core.api import OrderedDefaultRouter\nfrom . import views\n\n\nrouter = OrderedDefaultRouter()\nrouter.APIRootView = views.VCSRootView\n\n# Sites\nrouter.register(\"branches\", views.BranchViewSet)\nrouter.register(\"commits\", views.CommitViewSet)\nrouter.register(\"pull_requests\", views.PullRequestViewSet)\nrouter.register(\"pull_requests_reviews\", views.PullRequestReviewViewSet)\n\napp_name = \"nautobot_version_control-api\"\nurlpatterns = router.urls\n","repo_name":"nautobot/nautobot-plugin-version-control","sub_path":"nautobot_version_control/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"}
+{"seq_id":"6789043095","text":"# -*- coding: cp1252 -*-\r\nfrom typing import List\r\nimport os\r\nimport glob\r\nimport re\r\n\r\n#Leitura do arquivo de Entrada\r\nhome = os.path.expanduser('~')\r\ncaminhoEntrada = os.path.join(home, 'data\\in')\r\nos.chdir(caminhoEntrada)\r\nfor procura in glob.glob(\"*.dat\"):\r\n arquivoEntrada = procura\r\nlerArquivo = open(arquivoEntrada, 'r')\r\n\r\n#Listas utilizadas\r\nlistaVendedor = list()\r\nlistaCliente = list()\r\nlistaVendedorDaVenda = list()\r\nlistaNomeVendedorPorVenda = list()\r\nlistaItemVenda = list()\r\nlistaIdVendedoresDaVenda = list()\r\n\r\n#Lendo o arquivo\r\nfor linha in lerArquivo:\r\n\r\n #Limitador de linhas\r\n colunasLinhas = linha.split('c')\r\n\r\n #Montagem de lista de Vendedores\r\n if float(colunasLinhas[0]) == 1:\r\n listaVendedor.append(float(colunasLinhas[0]))\r\n\r\n #Montagem de Lista de Cliente\r\n elif float(colunasLinhas[0]) == 2:\r\n listaCliente.append(float(colunasLinhas[0]))\r\n\r\n #Montagem de Lista de Vendas\r\n elif float(colunasLinhas[0]) == 3:\r\n\r\n listaIdVendedoresDaVenda.append(int(colunasLinhas[1]))\r\n listaItemVenda.append(str(colunasLinhas[2]))\r\n\r\n #Nome dos vendedores e quantidade de venda listaVendedorDaVenda\r\n listaVendedorDaVenda.append(str(colunasLinhas[3]))\r\n\r\n listaNomeVendedorPorVenda.append({\r\n\r\n x:listaVendedorDaVenda.count(x)\r\n\r\n for x in set(listaVendedorDaVenda)\r\n\r\n })\r\n\r\n#Vendedor que menos vendeu\r\nindice = len(listaNomeVendedorPorVenda)-1\r\nultimo: dict = listaNomeVendedorPorVenda[indice]\r\n\r\n\r\n#Impressões a serem gravadas no arquivo de saída\r\nimprimirTotalDeVendedores = len(listaVendedor)\r\nimprimirTotalDeClientes = len(listaCliente)\r\nimprimirTotalDeVendas = len(listaVendedorDaVenda)\r\nimprimirVendasPorVendedor = ultimo\r\nimprimirVendedorMenosVendeu = min(ultimo)\r\nimprimirVendas = listaItemVenda\r\nimprimirIdVendas = listaIdVendedoresDaVenda\r\n\r\n\r\n#Saída via arquivo\r\nhome = os.path.expanduser('~')\r\ncaminhoSaida = os.path.join(home, 'data\\out')\r\nos.chdir(caminhoSaida)\r\n\r\n#Gravando no arquivo de saída\r\narquivo = open('home-data-out.dat', 'w')\r\narquivo.write('Abaixo os resultados gerados pela arquivo de entrada: ')\r\narquivo.write('\\n\\nTotal de Vendedores: {}'.format(str(imprimirTotalDeVendedores)))\r\narquivo.write('\\nTotal de Cliente: {}'.format(str(imprimirTotalDeClientes)))\r\narquivo.write('\\nTotal de Vendas: {}'.format(str(imprimirTotalDeVendas)))\r\narquivo.write('\\nVendas Por Vendedor: {}'.format(str(imprimirVendasPorVendedor)))\r\narquivo.write('\\nVendedor que Menos Vendeu: {}'.format(str(imprimirVendedorMenosVendeu)))\r\narquivo.write('\\nListas de Vendas: {}'.format(str(imprimirVendas)))\r\narquivo.write('\\n\\nListas de IDs Vendas: {}'.format(str(listaIdVendedoresDaVenda)))","repo_name":"jonatasghizi/lerArquivo","sub_path":"lerArquivo.py","file_name":"lerArquivo.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"16837204362","text":"print('Importing model')\nimport ora41_PG_model\nprint('Model imported')\n\nfrom sqlalchemy import func, Column, LargeBinary\nfrom aconn import AConnector\n\nimport json\n\ndef get_count(q):\n count_q = q.statement.with_only_columns([func.count()]).order_by(None)\n count = q.session.execute(count_q).scalar()\n return count\n\n\ndef analyze_tables_sizes(src_connector: AConnector, source_tables_classes: list):\n \"\"\"\n\n :param conn:\n :param source_tables_classes:\n :return: dict {table_name: size}\n \"\"\"\n result = {}\n for source_class in source_tables_classes:\n tablename = source_class.__tablename__\n src_connector.status('Calculating table %s' % tablename)\n result[tablename] = get_count(src_connector.con.query(source_class))\n return result\n\n\ndef split_to_tasks(connector: AConnector, tab_class, total_size, chunk_size):\n has_blobs = False\n # table_name = tab_class.__tablename__\n pk_col = list(tab_class.__table__.primary_key)[0] # type: Column\n for col in tab_class.__table__.columns: # type: Column\n if isinstance(col.type, LargeBinary):\n has_blobs = True\n if has_blobs:\n return None\n\n chunk_start_ids = []\n chunks_count = total_size // chunk_size + 1\n for j in range(chunks_count):\n offset = j * chunk_size\n res = connector.con.query(pk_col).order_by(pk_col).offset(offset).limit(1).all()\n if len(res) > 0:\n chunk_start_ids.append(res[0][0])\n\n return chunk_start_ids\n\n\ndef make_transfer_tasks(connector: AConnector, model, output_filename, max_size, chunk_size):\n print('Making tasks')\n task_pattern = '{{\"task\":\"{task}\", \"table\": \"{table}\", \"pk\": \"{pk}\", \"size\": \"{size}\", ' \\\n '\"full_load\": \"{full_load}\", \"start_id\": \"{start_id}\", \"limit\": \"{limit}\" }}'\n\n tasks = []\n def _add_task(_table_class, _size, _full_load, _start_id, _limit):\n task_no = len(tasks) + 1\n _table = _table_class.__tablename__\n pk = list(_table_class.__table__.primary_key)[0]\n task = task_pattern.format(task=task_no, table=_table, pk=pk, size=_size,\n full_load=_full_load, start_id=_start_id, limit=_limit)\n tasks.append(task)\n\n for class_name, tab_class in model.SCHEMA_TABLES.items():\n tablename = tab_class.__tablename__\n size = get_count(connector.con.query(tab_class))\n if size == 0:\n print('Table %s skip - empty' % tablename)\n continue\n full_load = size <= max_size\n offset = None\n limit = None\n if not full_load:\n print('Table %s size %s splitting to chunks' % (tablename, size))\n chunks = split_to_tasks(connector, tab_class, size, chunk_size)\n print(' chunks count: %s' % len(chunks))\n for start_id in chunks[:-1]:\n _add_task(tab_class, size, False, start_id, chunk_size)\n _add_task(tab_class, size, False, chunks[-1], None) # rest records\n else:\n print('Table %s size %s full load' % (tablename, size))\n _add_task(tab_class, size, True, None, None)\n\n\n file_text = '[\\n' + ',\\n'.join([task for task in tasks]) + '\\n]'\n with open(output_filename, 'w') as f:\n f.write(file_text)\n\n\nif __name__ == '__main__':\n src_params = {\n 'name': 'ora41',\n 'conn': 'oracle://maximo:maximo@192.168.10.41:1521/max75',\n 'dbtype': 'ORA'\n }\n\n src = AConnector(src_params['name'], src_params['conn'], encoding='utf8', echo=False)\n make_transfer_tasks(src, ora41_PG_model, 'ora41_task.json', 300000, 100000)\n # sizes = analyze_tables_sizes(src, SCHEMA_TABLES.values())\n #split_to_tasks(src, T_WORKORDER, 734509, 
734508)\n\n\n\n\n","repo_name":"Seyojik/axi-code-renamer","sub_path":"db_schema_creator/utils/tables_analyzer.py","file_name":"tables_analyzer.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28434929624","text":"\"\"\"\nTest for issue 85:\nhttps://github.com/pandas-profiling/pandas-profiling/issues/85\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom pandas_profiling import ProfileReport\nfrom pandas_profiling.model.base import Variable\n\n\ndef test_issue85():\n data = {\n \"booleans_type\": [False, True, True],\n \"booleans_type_nan\": [False, True, np.nan],\n \"integers\": [1, 0, 0],\n \"integers_nan\": [1, 0, np.nan],\n \"str_yes_no\": [\"Y\", \"N\", \"Y\"],\n \"str_yes_no_mixed\": [\"Y\", \"n\", \"y\"],\n \"str_yes_no_nana\": [\"Y\", \"N\", np.nan],\n \"str_true_false\": [\"True\", \"False\", \"False\"],\n \"str_true_false_nan\": [\"True\", \"False\", np.nan],\n }\n\n df = pd.DataFrame(data)\n\n report = ProfileReport(\n df,\n pool_size=1,\n title=\"Dataset with Boolean Variables\",\n samples={\"head\": 20},\n )\n for col, variable_stats in report.get_description()[\"variables\"].items():\n assert (\n variable_stats[\"type\"] == Variable.TYPE_BOOL\n ), \"Variable should be boolean\"\n","repo_name":"ElijahSeeley/Springboard","sub_path":"pandas-profiling-master/pandas-profiling-master/tests/issues/test_issue85.py","file_name":"test_issue85.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"7061483517","text":"import warnings\r\nimport wandb\r\nimport numpy as np\r\nimport FinanceDataReader as fdr\r\nimport matplotlib\r\nfrom spillover import SpillOver\r\n\r\nif __name__ == '__main__':\r\n warnings.simplefilter('ignore')\r\n matplotlib.use('Agg')\r\n\r\n start = '2016-01-01'\r\n sp_total = fdr.DataReader('US500', start=start)\r\n bt_total = fdr.DataReader('BTC/USD', start=start)\r\n final_date = sp_total.index[-1]\r\n tdelta = 365\r\n fevd_horizon = 10\r\n fevd_target = [3, 6, 9]\r\n\r\n wandb.init(\r\n entity='db22',\r\n project='SpillOver',\r\n name=f'vol {tdelta}',\r\n config={\r\n 'start': start,\r\n 'period': tdelta,\r\n 'fevd_horizon': fevd_horizon\r\n })\r\n\r\n modes = ['vol', 'var05', 'var01']\r\n names = ['S&P500', 'BTC_USD']\r\n mu_params = {\r\n 'p': [0, 1, 2],\r\n 'd': [0, 1],\r\n 'q': [0, 1, 2],\r\n 'P': [0],\r\n 'D': [0],\r\n 'Q': [0],\r\n 'S': [0]\r\n }\r\n resid_params = {\r\n 'p': [1, 2],\r\n 'o': [0, 1],\r\n 'q': [1, 2],\r\n 'dist': ['normal', 'studentsT', 'T', 'skewT']\r\n }\r\n model = SpillOver(sp_total, bt_total, start, tdelta)\r\n step = 0\r\n while True:\r\n eda_result = model.modeling(names, mu_params, resid_params)\r\n tmp_ls = []\r\n for mode in modes:\r\n try:\r\n # AIC : low model-size penalty\r\n # BIC : increases the penalty as the sample size increases\r\n net_df, pvalue1, pvalue2 = model.run(mode, names, VAR_maxlags=None, VAR_ic='bic',\r\n fevd_target=fevd_target,\r\n fevd_horizon=fevd_horizon)\r\n # For wandb\r\n log_dict = {\r\n f'{mode} GC BTC/USD -> S&P500': pvalue1,\r\n f'{mode} GC S&P500 -> BTC/USD': pvalue2,\r\n f'{mode} VAR lags': model.VAR_lags[-1][-1]\r\n }\r\n log_dict.update({f'{mode} S&P500 NET {fevd_target[i]}th horizon': net_df.iloc[i, 0] for i in\r\n range(len(fevd_target))})\r\n log_dict.update({f'{mode} BTC/USD NET {fevd_target[i]}th horizon': net_df.iloc[i, 1] for i in\r\n range(len(fevd_target))})\r\n wandb.log(log_dict, step=step)\r\n\r\n tmp_ls.append(net_df.values)\r\n except Exception as e:\r\n print(\"*********Failures!*********\")\r\n print(model.period, mode)\r\n print(e)\r\n print(\"***************************\")\r\n\r\n model.net_spillover_ls.append(tmp_ls)\r\n print('=' * 20)\r\n print(model.period, 'Done!')\r\n print('=' * 20)\r\n if model.end == final_date:\r\n break\r\n model.update_date()\r\n step += 1\r\n\r\n net_spillover_ls = np.array(model.net_spillover_ls)\r\n np.save('result/net_spillover.npy', net_spillover_ls)\r\n\r\n","repo_name":"hobinkwak/Volatility-Spillover","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"43981362214","text":"import libtmux\nimport os\n\nsession_name=\"NFVi\"\nserver=\"\"\nsession=\"\"\nwindow=\"NFViMarks\"\nsutPanel= None\ntesterPanel= None\ncontrollerPanel = None\ndef attachDashboard():\n global sutPanel\n global testerPanel \n global controllerPanel\n server = libtmux.Server()\n session = server.find_where({ \"session_name\": \"NFVi\" })\n #attach to window\n window_base_index = int(session.attached_window.get('window_index'))\n window = session.select_window(window_base_index)\n testerPanel = window.select_pane(0)\n sutPanel = window.select_pane(1)\n controllerPanel = window.select_pane(2)\ndef runCmdOnSutPannel(cmd):\n global sutPanel\n return sutPanel.send_keys(cmd)\n\ndef runCmdOnTesterPannel(cmd):\n global testerPanel \n return testerPanel.send_keys(cmd)\ndef runCmdOnControllerPannel(cmd):\n global controllerPanel \n return controllerPanel.send_keys(cmd)\n","repo_name":"toanddt/CDATS","sub_path":"dats/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"3140622825","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time :2022/3/14 17:18\n# @Author :qh\n# @File : do_excel.py\n# @Software : PyCharm\n\n\nfrom openpyxl import load_workbook\n\n\nclass DeExcel:\n def __init__(self, file_name, sheet_name):\n self.file_name = file_name\n self.sheet_name = sheet_name\n\n def get_data(self):\n wb = load_workbook(self.file_name, read_only=True)\n sheet = wb[self.sheet_name]\n test_data = []\n for i in range(1, sheet.max_row): # 一直变化的是行,所以要获取最大行\n test_datas = {\"任务编号\": sheet.cell(i + 1, 1).value}\n test_data.append(test_datas)\n return test_data\n\n\ntest = DeExcel(\"../../data/data1219/布控任务.xlsx\", \"布控任务\").get_data()\n\n\ndef test_key():\n res = []\n for i in test:\n res.append(i[\"任务编号\"])\n return res\n\ntest_key()\n\n\nif __name__ == '__main__':\n pass\n # 获取数据\n print(test_key())\n print(len(test_key()))\n\n\n\n\n\n\n","repo_name":"awangqinghua/tamps","sub_path":"test/smai_非机动车事件统计/do_excel.py","file_name":"do_excel.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"9266059484","text":"\n\nimport time\nfrom tqdm import tqdm\n\n\n\nclass SampleSubmodul:\n \n # initialize the class with the root directory of the dataset \n def __init__(self,logging):\n \n self.log= logging\n self.log_inf=logging.InfoLogger(logging,__name__)\n self.log_error=logging.ErrorLogger(logging,__name__)\n self.log_warnlog=logging.WarningLogger(logging,__name__)\n self.log_debug=logging.DebugLogger(logging,__name__)\n \n \n def sampleFunc(self):\n \n self.log_inf.info(\"ich bin ein sample Log von sampleSubmodul\")\n return ( \"Hallo Welt\")","repo_name":"Andybabic/datalib","sub_path":"bandyDatalib/_0003_Code/_0001_Modules/_000_Template/master/submodul/sampleSubmodul.py","file_name":"sampleSubmodul.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"13514971146","text":"import random\n\nsuits = [\" of Hearts\", \" of Diamonds\", \" of Clubs\", \" of Spades\"]\ncardValues = [\"A\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"J\",\"Q\",\"K\"]\ntenPointCards = [\"10\", \"J\", \"Q\", \"K\"]\n\n\ndef makeDeck():\n deck = []\n for suit in suits:\n for card in cardValues:\n deck.append(card+suit)\n return(deck)\n\ndef shuffleDeck():\n deck = makeDeck()\n random.shuffle(deck)\n return(deck)\n\ndef dealCards(deck):\n playerHand = []\n dealerHand = []\n\n playerHand.append(deck.pop(0))\n playerHand.append(deck.pop(0))\n\n dealerHand.append(deck.pop(0))\n dealerHand.append(deck.pop(0))\n\n return(playerHand, dealerHand, deck)\n\ndef hitMe(hand, deck):\n hand.append(deck.pop(0))\n return(hand, deck)\n\ndef checkPointsInHand(hand):\n pointValue = 0\n aceCount = 0\n for card in hand:\n cardValue = card.split(\" \")[0]\n if cardValue in tenPointCards:\n pointValue += 10\n elif cardValue == \"A\":\n aceCount += 1\n pointValue += 11\n else:\n pointValue += int(cardValue)\n for x in range(aceCount):\n if pointValue > 21:\n pointValue -= 10\n return(pointValue)\n\n\ndef checkWinner(dealerPoints, playerPoints):\n if dealerPoints >= playerPoints:\n winner = \"dealer\"\n else:\n winner = \"player\"\n return(winner)\n\ndef main():\n print(\"Welcome to the Black Jack table.\")\n deck = shuffleDeck()\n playerHand, dealerHand, deck = dealCards(deck)\n dealerPoints = checkPointsInHand(dealerHand)\n while True:\n playerPoints = checkPointsInHand(playerHand)\n print(\"You are showing:\", playerHand,f\"({playerPoints})\")\n print(\"Dealer is showing:\", dealerHand,f\"({dealerPoints})\")\n if playerPoints == 21:\n print(\"Blackjack baby! You win!\")\n break\n if dealerPoints == 21:\n print(\"Blackjack for the dealer. Dealer wins.\")\n break\n if playerPoints > 21:\n print(\"Bummer, you busted. Dealer wins\")\n break\n print(\"Do you want to hit or stand?\")\n playerChoice = input().lower()\n if playerChoice == 'hit':\n playerHand, deck = hitMe(playerHand,deck)\n print('hit it chewie')\n elif playerChoice == 'stand':\n print('red leader standing by')\n while dealerPoints < 17:\n print(\"Dealer hits!\")\n dealerHand, deck = hitMe(dealerHand,deck)\n dealerPoints = checkPointsInHand(dealerHand)\n print(\"Dealer is showing:\", dealerHand,f\"({dealerPoints})\")\n if dealerPoints > 21:\n print(\"Dealer busted! You Win!\")\n break\n else:\n winner = checkWinner(dealerPoints,playerPoints)\n print(\"aaaaaand the winner is the\", winner)\n break\n\nmain()","repo_name":"captain-afk/blackJack","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38286575897","text":"#########################################CONFIG \nimport os\nimport yaml\nimport torch \nimport logging\nlogging.basicConfig(level=logging.INFO)\nfrom neural_ar.model import *\nfrom utils import *\nfrom data.dataset import *\nimport tqdm\nwith open(\"config.yaml\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\nwith open(\"config/train/train_conf.yaml\") as f:\n train_conf = yaml.load(f, Loader=yaml.FullLoader)\ntemp = train_conf[\"temp\"]\nn_train = train_conf[\"per_train\"]\n#k = train_conf[\"k\"]\n\nbase_T = 300\nres_T = 10 \nNTEMPS = 11\nNCHAINS = 100 \nNBEADS = 100\nNMODES = 100 \nPROJECTPATH = config[\"paths\"][\"PROJECTPATH\"]\nDATAPATH = config[\"paths\"][\"DATAPATH\"]\nSAVEPATH = config[\"paths\"][\"SAVEPATH\"]\nDEVICE = config[\"paths\"][\"DEVICE\"]\n\nmodes_corr = {}\nopen_log = tqdm.tqdm(total=0, position=0, bar_format='{desc}')\nopen_log.set_description_str(\n f'[Simulation Boxes Analysis: '+str(base_T)+\"K - \"+str(base_T + int(res_T*(NTEMPS-1)))+\"K]\"\n )\nboxes_bar = tqdm.tqdm(total=NTEMPS, desc=\"Sim Boxes\", position=1)\nfor i in range(1,NTEMPS+1):\n # data load \n temp = base_T + int(res_T*(i-1))\n filepath = os.path.join(DATAPATH, \"modesdata_T\"+str(temp)+\"_.pt\")\n modes = torch.load(filepath, map_location=DEVICE).swapaxes(0,1)\n \n \n ###################################################################################MSD\n modes_bar = tqdm.tqdm(total=NMODES, desc=\"MSD, T=\"+str(temp), position=2)\n msd_box = torch.zeros(NMODES, modes.size(1)).to(modes)\n for nmod in range(NMODES):\n msd_box[nmod,:] = msd_fft(modes[:,:n_train,nmod,:].swapaxes(1,2).contiguous().view(modes.size(0)*3,-1)).mean(dim=0)\n modes_bar.update(1)\n \n box_msd_savepath = os.path.join(DATAPATH, \"md_baselines\")\n torch.save(msd_box, os.path.join(box_msd_savepath, \"modes_nacfs_T\"+str(temp)+\"_ntrain\"+str(n_train)+\".pt\"))\n del msd_box\n \n \n\n\n \n ","repo_name":"Gian-Michele-Cherchi/ml-gle","sub_path":"src/msd_.py","file_name":"msd_.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28574844511","text":"#examples from hackerrank that need to be tested\ngame_one_length = 7\ngame_one_colors = \"RBY_YBR\"\ngame_two_length = 6\ngame_two_colors = \"X_Y__X\"\ngame_three_length = 2\ngame_three_colors =\"__\"\ngame_four_length = 6\ngame_four_colors =\"B_RRBR\"\n#function that should put all together\ndef Happy_Ladybugs(length, colors):\n return check_happiness(length, colors)\n#checking ladybug happiness\ndef check_happiness(s,t):\n if happybug(s,t):\n return \"YES\"\n else:\n return \"NO\"\n#sorting the values and characters [length/color] \ndef dict(t):\n d={}\n for item in t:\n if item != '_':\n if item not in d:\n d[item]=1\n else:\n d[item] += 1\n return d\n#determining happiness\ndef happybug(s,t):\n if s < 3:\n if s == 0:\n return False\n elif s == 1 and t[0] == '_':\n return True\n elif s == 2 and t[0] == t[1]:\n return True\n else:\n return False\n else:\n values = dict(t).values()\n if min(values) < 2:\n return False\n return True\nprint(Happy_Ladybugs(game_one_length,game_one_colors))\nprint(Happy_Ladybugs(game_two_length,game_two_colors))\nprint(Happy_Ladybugs(game_three_length,game_three_colors))\nprint(Happy_Ladybugs(game_four_length,game_four_colors))","repo_name":"shanjidakamal/csci127-assignments","sub_path":"lab_04/lady.py","file_name":"lady.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"14837864550","text":"\"\"\"\nCombinations, Permutations e Product - Itertools\n\nCombinação - Ordem não importa\nPermutação - Ordem importa\nProduto - Ordem importa e repete valores únicos\n\"\"\"\n# Combinação - Ordem não importa\nfrom itertools import combinations\npessoas = ['Luiz', 'Andre', 'Eduardo', 'Leticia', 'Fabricio', 'Joshuel']\nfor grupo in combinations(pessoas, 2):\n print(grupo)\n\n# Permutação - Ordem importa\nfrom itertools import permutations\npessoas = ['Luiz', 'Andre', 'Eduardo', 'Leticia', 'Fabricio', 'Joshuel']\nfor grupo in permutations(pessoas, 2):\n print(grupo)\n\n# Produto - Ordem importa e repete valores únicos\nfrom itertools import product\npessoas = ['Luiz', 'Andre', 'Eduardo', 'Leticia', 'Fabricio', 'Joshuel']\nfor grupo in product(pessoas, repeat=2):\n print(grupo)","repo_name":"JoshuelNobre/learning-py","sub_path":"python-intermediario/21-combinations-permutations-product.py","file_name":"21-combinations-permutations-product.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"19966702029","text":"from pathlib import Path\n\nimport more_itertools as mitt\nimport pandas as pd\nimport plotly.express as px\n\nfrom .. import common_shapes, flat\n\n\nRESULTS_PATH = Path(\"./data/lines_3d\")\n\n\ndef _plot(esum: flat.Esum, path, name):\n rows = []\n all_hses = list(mitt.unique_everseen(hs for term in esum.eterms for hs in term.hses))\n\n for hs_i, hs in enumerate(all_hses):\n x1, y1 = hs.p1.position2d\n x2, y2 = hs.p2.position2d\n\n rows.extend(\n [\n {\n \"x\": x1,\n \"y\": y1,\n \"z\": hs_i / len(all_hses),\n \"hs_name\": hs.debug_name,\n \"point_name\": hs.p1.debug_name,\n },\n {\n \"x\": x2,\n \"y\": y2,\n \"z\": hs_i / len(all_hses),\n \"hs_name\": hs.debug_name,\n \"point_name\": hs.p2.debug_name,\n },\n ]\n )\n\n df = pd.DataFrame.from_records(rows)\n\n fig = px.line_3d(\n df,\n x=\"x\",\n y=\"y\",\n z=\"z\",\n color=\"hs_name\",\n hover_name=\"point_name\",\n )\n fig.write_html(path)\n\n\ndef main():\n RESULTS_PATH.mkdir(exist_ok=True, parents=True)\n\n for shape_i, esum_start in enumerate([common_shapes.letter_c()]):\n esum = flat.named_esum(esum_start)\n\n _plot(\n esum=esum,\n path=RESULTS_PATH / f\"shape_{shape_i}.html\",\n name=esum.name,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alexjuda/halfplane","sub_path":"src/halfplane/run/show_lines_3d.py","file_name":"show_lines_3d.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"32937465167","text":"import psycopg2 as psycopg2\nfrom PyQt5.QAxContainer import QAxWidget\nfrom PyQt5.QtCore import QEventLoop, QTimer\nfrom PyQt5.QtTest import QTest\nimport sys\nimport os\nimport datetime\n\n\nclass Kiwoom(QAxWidget):\n\n def __init__(self):\n super().__init__()\n print(\">>> class[Kiwoom] start.\")\n\n # 계좌 관련 변수들\n self.account_number = None\n self.password = \"u23i4523\"\n self.cert_password = \"u23i4523R2\"\n self.deposit = 0\n self.withdraw = 0\n\n # 스크린번호\n self.screen_number_market = \"1000\" # 장 상태 체크 스크린번호\n self.screen_number_account = \"2000\" # 계좌 관련 스크린번호\n self.screen_number_real = \"3000\" # 실시간 스크린번호\n self.screen_number_anal = \"4000\" # 분석용 스크린번호\n self.screen_number_stock = \"5000\" # 종목별 스크린번호\n self.screen_number_order = \"6000\" # 주문용 스크린번호\n\n # 종목 리스트\n self.all_stock_dict = {}\n self.account_stock_dict = {}\n self.outstanding_stock_dict = {}\n self.portfolio_stock_dict = {}\n self.balance_stock_dict = {} # 잔고\n\n # event loop를 실행하기 위한 변수들\n self.event_loop_login = QEventLoop()\n self.event_loop_tr_data = QEventLoop()\n self.event_loop_real_data = QEventLoop()\n\n # ocx\n self.get_ocx_instance()\n\n # event slot\n self.event_slots()\n self.event_real_slots()\n self.event_condition_slots()\n\n # signal(로그인, 예수금, 잔고(종목), 미체결)\n self.signal_login()\n self.signal_deposit_info()\n self.signal_balance_info()\n self.signal_condition_info()\n\n QTimer.singleShot(1000, self.signal_outstanding_info) # 5초 후 실시간미체결요청(opt10075)\n\n # 파일에서 분석된 종목 가져오기\n # self.get_analyzed_stocks()\n\n # QTest.qWait(10000)\n QTimer.singleShot(1000, self.set_screen_number)\n\n # QTest.qWait(5000)\n QTimer.singleShot(1000, self.signal_market_status) # 장 상태 체크\n\n # 대상 종목(보유,미체결,포트폴리오) 실시간 등록\n QTimer.singleShot(1000, self.all_stock_real_reg)\n\n def get_ocx_instance(self):\n self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\")\n\n # 로그인, TR 이벤트 슬롯\n def event_slots(self):\n self.OnEventConnect.connect(self.slot_login)\n self.OnReceiveTrData.connect(self.slot_tr_data)\n self.OnReceiveMsg.connect(self.slot_msg)\n\n # 실시간 이벤트 슬롯\n def event_real_slots(self):\n self.OnReceiveRealData.connect(self.slot_real_data)\n self.OnReceiveChejanData.connect(self.slot_chejan_data)\n\n # 조건검색 이벤트 슬롯\n def event_condition_slots(self):\n self.OnReceiveConditionVer.connect(self.slot_condition)\n self.OnReceiveTrCondition.connect(self.slot_tr_condition)\n self.OnReceiveRealCondition.connect(self.slot_real_condition)\n\n def slot_condition(self, ret, msg):\n print(\"\")\n print(\">>>>>>>>>>>>>>>>>>>>>>>> 조건검색식[S] >>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\">> ret: %s\" % ret)\n # print(\">> msg: %s\" % msg)\n\n condition_name_list = self.dynamicCall(\"GetConditionNameList()\").split(\";\")[:-1]\n for condition in condition_name_list:\n index = int(condition.split(\"^\")[0])\n condition_name = condition.split(\"^\")[1]\n print(\">> [%s]:[%s]\" % (index, condition_name))\n\n ok = self.dynamicCall(\"SendCondition(QString, QString, int, int)\", \"0156\", condition_name, index, 1)\n\n print(\">>>>>>>>>>>>>>>>>>>>>>>> 조건검색식[E] >>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n print(\"\")\n\n # 나의 조건식 받기\n def slot_tr_condition(self, screen_number, code_list, condition_name, index, prev_next):\n print(\"\")\n # print(\">> screen_number: %s\" % screen_number)\n # print(\">> code_list: %s\" % code_list)\n print(\">> [%s]:%s\" % (index, condition_name))\n # print(\">> index: %s\" % index)\n # print(\">> prev_next: %s\" % prev_next)\n\n code_list = code_list.split(\";\")[:-1]\n for code in code_list:\n code_name = 
self.dynamicCall(\"GetMasterCodeName(QString)\", code).strip()\n print(\">> %s: %s\" % (code, code_name))\n\n def slot_real_condition(self, stock_code, event_type, condition_name, index):\n print(\"\")\n\n stock_name = self.dynamicCall(\"GetMasterCodeName(QString)\", stock_code)\n if event_type == \"I\":\n print(\">> [%s]조건검색명: %s, 종목코드: %s, 종목명: %s, 종목편입: %s\" % (\n index, condition_name, stock_code, stock_name, event_type))\n\n ## 최초 발생하여 편입된 종목을 시초가(2호가 위)에 강제 매수처리 : 매수 종목 리스트 관리\n\n elif event_type == \"D\":\n print(\">> [%s]조건검색명: %s, 종목코드: %s, 종목명: %s, 종목이탈: %s\" % (\n index, condition_name, stock_code, stock_name, event_type))\n\n # HTS 조건식 로딩 (OnReceiveConditionVer() 호출)\n def signal_condition_info(self):\n self.dynamicCall(\"GetConditionLoad()\")\n\n def signal_login(self):\n self.dynamicCall(\"CommConnect()\")\n self.event_loop_login.exec_()\n\n def slot_login(self, error_code):\n print(\"\")\n print(\">>> function[slot_login] start >>>\")\n print(\">> 로그인(0:성공,-10:실패,-100:사용자정보교환실패,-101:서버접속실패): %s\" % error_code)\n accounts = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\")\n self.account_number = accounts.split(\";\")[0]\n print(\">> 계좌번호(%s자리): %s\" % (len(self.account_number), self.account_number))\n self.event_loop_login.exit()\n print(\">>> function[slot_login] end <<<\")\n\n def signal_market_status(self):\n print(\"\")\n print(\"\")\n\n # [장시작시간][장운영구분] (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감)\n value = self.dynamicCall(\"SetRealReg(QString, QString, int, QString)\", self.screen_number_market, ' ', 215, \"0\")\n print(\">> value: %s\" % value)\n print(\"\")\n self.event_loop_real_data.exec_()\n\n # opw00001: 예수금상세현황요청\n # INPUT: 계좌번호, 비밀번호, 비밀번호입력매체구분, 조회구분\n # OUTPUT[S]: 예수금상세현황\n def signal_deposit_info(self, prev_next=\"0\"):\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_number)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", self.password)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\")\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\")\n self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", prev_next,\n self.screen_number_account)\n self.event_loop_tr_data.exec_()\n\n # opw00018: 계좌평가잔고내역요청\n # INPUT: 계좌번호, 비밀번호, 비밀번호입력매체구분, 조회구분\n # OUTPUT[S]: 계좌평가결과\n # OUTPUT[M]: 계좌평가잔고합산\n def signal_balance_info(self, prev_next=\"0\"):\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_number)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", self.password)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\")\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\")\n self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", prev_next,\n self.screen_number_account)\n self.event_loop_tr_data.exec_()\n\n # opt10075: 실시간미체결요청\n # INPUT: 계좌번호, 전체종목구분(0:전체,1:종목), 매매구분(0:전체,1:매도,:매수), 종목코드, 체결구분(0:전체,1:미체결,2:체결)\n # OUTPUT[S]: 계좌평가결과\n # OUTPUT[M]: 계좌평가잔고합산\n def signal_outstanding_info(self, prev_next=\"0\"):\n print(\"\")\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_number)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\")\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\")\n self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", prev_next,\n self.screen_number_account)\n 
self.event_loop_tr_data.exec_()\n\n def get_analyzed_stocks(self):\n print(\"\")\n if os.path.exists(\"files/analysis.txt\"):\n f = open(\"files/analysis.txt\", \"r\", encoding=\"utf8\")\n\n lines = f.readlines()\n for line in lines:\n if line != \"\":\n ls = line.split(\"\\t\")\n stock_code = ls[0]\n stock_name = ls[1]\n\n self.portfolio_stock_dict.update({stock_code: {\"종목명\": stock_name, \"현재가\": 0}})\n print(\">> 종목코드: %s, 종목명: %s\" % (stock_code, stock_name))\n\n # print(\">> self.portfolio_stock_dict: %s\" % self.portfolio_stock_dict)\n print(\"\")\n f.close()\n\n def slot_tr_data(self, screen_number, rq_name, tr_code, record_name, prev_next):\n print(\"\")\n print(\">>> function[slot_tr_data] start >>>\")\n print(\"> p1.screen_number: %s\" % screen_number)\n print(\"> p2.rq_name: %s\" % rq_name)\n print(\"> p3.tr_code: %s\" % tr_code)\n print(\"> p4.record_name: %s\" % record_name)\n print(\"> p5.prev_next: %s\" % prev_next)\n\n # opw00001: 예수금상세현황요청\n if tr_code == \"opw00001\":\n deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, 0,\n \"예수금\").strip()\n withdraw = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, 0,\n \"출금가능금액\").strip()\n\n self.deposit = int(deposit)\n self.withdraw = int(withdraw)\n\n print(\"\")\n print(\">> 예수금: %s\" % format(self.deposit, \",\"))\n print(\">> 출금가능금액: %s\" % format(self.withdraw, \",\"))\n\n # opw00018: 계좌평가잔고내역요청\n elif tr_code == \"opw00018\":\n # single (총매입금액,총평가손익금액,총수익률(%),조회건수)\n\n total_purchase_amount = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, 0,\n \"총매입금액\").strip()\n total_profit_loss_amount = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name,\n 0, \"총평가손익금액\").strip()\n total_earning_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, 0,\n \"총수익률(%)\").strip()\n hit_number = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, 0,\n \"조회건수\").strip()\n\n print(\"\")\n print(\">> 총매입금액: %s\" % format(int(total_purchase_amount), \",\"))\n print(\">> 총평가손익금액: %s\" % format(int(total_profit_loss_amount), \",\"))\n print(\">> 총수익률(%%): %s\" % float(total_earning_rate))\n print(\">> 조회건수: %s\" % int(hit_number))\n print(\"\")\n\n # multi\n rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", tr_code, rq_name)\n print(\">> 계좌평가잔고내역요청.보유 종목 건수: %s\" % rows)\n\n # 종목번호, 종목명, 보유수량, 매입가, 매입금액, 매매가능수량, 수익률(%), 현재가, 전일종가\n for i in range(rows):\n stock_code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"종목번호\").strip()[1:]\n stock_name = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"종목명\").strip()\n retention_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name,\n i, \"보유수량\").strip()\n purchase_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"매입가\").strip()\n purchase_amount = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"매입금액\").strip()\n tradable_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"매매가능수량\").strip()\n earning_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"수익률(%)\").strip()\n close_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n 
\"현재가\").strip()\n last_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"전일종가\").strip()\n tax = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i, \"세금\").strip()\n\n if stock_code in self.account_stock_dict:\n pass\n else:\n self.account_stock_dict[stock_code] = {}\n\n self.account_stock_dict[stock_code].update({\"종목명\": stock_name})\n self.account_stock_dict[stock_code].update({\"보유수량\": int(retention_quantity)})\n self.account_stock_dict[stock_code].update({\"매입가\": int(purchase_price)})\n self.account_stock_dict[stock_code].update({\"수익률(%)\": float(earning_rate)})\n self.account_stock_dict[stock_code].update({\"현재가\": int(close_price)})\n self.account_stock_dict[stock_code].update({\"매입금액\": int(purchase_amount)})\n self.account_stock_dict[stock_code].update({\"매매가능수량\": int(tradable_quantity)})\n\n print(\">> self.account_stock_dict: %s\" % self.account_stock_dict)\n\n # opt10075: 실시간미체결요청\n elif tr_code == \"opt10075\":\n rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", tr_code, rq_name)\n print(\"\")\n print(\">> 미체결 종목 건수: %s\" % rows)\n # 종목코드, 종목명, 주문번호, 주문상태, 주문수량, 주문가격, 주문구분, 미체결수량, 체결량\n for i in range(rows):\n order_number = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"주문번호\").strip()\n stock_code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"종목코드\").strip()\n stock_name = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"종목명\").strip()\n order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"주문상태\").strip() # 접수,확인,체결\n order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"주문수량\").strip()\n order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"주문가격\").strip()\n order_type = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name, i,\n \"주문구분\").strip().lstrip(\"+\").lstrip(\"-\")\n outstanding_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name,\n i, \"미체결수량\").strip()\n conclusion_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", tr_code, rq_name,\n i, \"체결량\").strip()\n\n if order_number in self.outstanding_stock_dict:\n pass\n else:\n self.outstanding_stock_dict[order_number] = {}\n\n self.outstanding_stock_dict[order_number].update({\"종목코드\": stock_code})\n self.outstanding_stock_dict[order_number].update({\"종목명\": stock_name})\n self.outstanding_stock_dict[order_number].update({\"주문번호\": order_number})\n self.outstanding_stock_dict[order_number].update({\"주문상태\": order_status})\n self.outstanding_stock_dict[order_number].update({\"주문수량\": order_quantity})\n self.outstanding_stock_dict[order_number].update({\"주문가격\": order_price})\n self.outstanding_stock_dict[order_number].update({\"주문구분\": order_type})\n self.outstanding_stock_dict[order_number].update({\"미체결수량\": outstanding_quantity})\n self.outstanding_stock_dict[order_number].update({\"체결량\": conclusion_quantity})\n\n print(\">> self.outstanding_stock_dict: %s\" % self.outstanding_stock_dict)\n\n self.disconnect_screen_number(self.screen_number_account)\n self.event_loop_tr_data.exit()\n print(\">>> function[slot_tr_data] end <<<\")\n\n def slot_real_data(self, stock_code, real_type, real_data):\n # print(\"\")\n # print(\">>> 
function[slot_real_data] start >>>\")\n # print(\">> stock_code: %s\" % stock_code)\n # print(\">> real_type: %s\" % real_type)\n # print(\">> real_data %s\" % real_data)\n\n if real_type == \"장시작시간\":\n\n # 215:장운영구분, 20:체결시간, 214:장시작예상잔여시간\n market_op_type = self.dynamicCall(\"GetCommRealData(QString, int)\", stock_code, 215)\n conclusion_time = self.dynamicCall(\"GetCommRealData(QString, int)\", stock_code, 20)\n market_open_remain_time = self.dynamicCall(\"GetCommRealData(QString, int)\", stock_code, 214)\n\n if market_op_type == \"0\":\n print(\">> [%s]장 시작 전\" % market_op_type)\n elif market_op_type == \"3\":\n print(\">> [%s]장 시작\" % market_op_type)\n elif market_op_type == \"2\":\n print(\">> [%s]장 종료, 동시호가로 넘어감\" % market_op_type)\n elif market_op_type == \"4\":\n print(\">> [%s]15시 30분 장 종료\" % market_op_type)\n\n # 실시간 연결 모두 끊기\n for code in self.portfolio_stock_dict.keys():\n self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code][\"스크린번호\"], code)\n print(\">> 종목코드[%s] 실시간 연결 종료.\" % code)\n\n # 다음 날을 위한 종목분석 시작\n QTest.qWait(5000)\n # self.file_delete() # 초기화(포트폴리오) 파일삭제 또는 디비 삭제\n # self.calculate_fnc() # 종목분석(알고리즘 적용) 후 파일 또는 디비 저장\n\n # 프로그램 종료\n sys.exit()\n\n print(\">> market_op_type: %s\" % market_op_type)\n print(\">> conclusion_time: %s\" % str(datetime.timedelta(conclusion_time)))\n print(\">> market_open_remain_time %s\" % str(datetime.timedelta(market_open_remain_time)))\n\n elif real_type == \"주식체결\":\n\n \"\"\"\n [20] = 체결시간\n [10] = 현재가\n [11] = 전일대비\n [12] = 등락율\n [27] = (최우선)매도호가\n [28] = (최우선)매수호가\n [15] = 거래량\n [13] = 누적거래량\n [14] = 누적거래대금\n [16] = 시가\n [17] = 고가\n [18] = 저가\n [25] = 전일대비기호\n [26] = 전일거래량대비(계약,주)\n [29] = 거래대금증감\n [30] = 전일거래량대비(비율)\n [31] = 거래회전율\n [32] = 거래비용\n [228] = 체결강도\n [311] = 시가총액(억)\n [290] = 장구분\n [691] = KO접근도\n [567] = 상한가발생시간\n [568] = 하한가발생시간 \n \"\"\"\n\n conclusion_time = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 20).strip()\n close_price = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 10).strip()\n net_change = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 11).strip()\n fluctuation_rate = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 12).strip()\n first_ask_price = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 27).strip()\n first_bid_price = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 28).strip()\n volume = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 15).strip()\n cumulative_volume = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 13).strip()\n high_price = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 17).strip()\n open_price = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 16).strip()\n low_price = self.dynamicCall(\"GetCommRealData(QString, in)\", stock_code, 18).strip()\n\n close_price = abs(int(close_price))\n net_change = abs(int(net_change))\n fluctuation_rate = float(fluctuation_rate)\n first_ask_price = abs(int(first_ask_price))\n first_bid_price = abs(int(first_bid_price))\n volume = abs(int(volume))\n cumulative_volume = abs(int(cumulative_volume))\n high_price = abs(int(high_price))\n open_price = abs(int(open_price))\n low_price = abs(int(low_price))\n\n print(\">> 체결시간: %s\" % conclusion_time)\n print(\">> 현재가: %s\" % format(close_price, \",\"))\n print(\">> 전일대비: %s\" % format(net_change, \",\"))\n print(\">> 등락율: %s\" % fluctuation_rate)\n print(\">> (최우선)매도호가: %s\" % format(first_ask_price, \",\"))\n 
print(\">> (최우선)매수호가: %s\" % format(first_bid_price, \",\"))\n print(\">> 거래량: %s\" % format(volume, \",\"))\n print(\">> 누적거래량: %s\" % format(cumulative_volume, \",\"))\n print(\">> 고가: %s\" % format(high_price, \",\"))\n print(\">> 시가: %s\" % format(open_price, \",\"))\n print(\">> 저가: %s\" % format(low_price, \",\"))\n\n # 스크리닝 종목 데이터를 포트폴리오 딕셔너리에 업데이트\n if stock_code not in self.portfolio_stock_dict:\n self.portfolio_stock_dict.update({stock_code: {}})\n\n self.portfolio_stock_dict[stock_code].update({\"체결시간\": conclusion_time})\n self.portfolio_stock_dict[stock_code].update({\"현재가\": close_price})\n self.portfolio_stock_dict[stock_code].update({\"전일대비\": net_change})\n self.portfolio_stock_dict[stock_code].update({\"등락율\": fluctuation_rate})\n self.portfolio_stock_dict[stock_code].update({\"(최우선)매도호가\": first_ask_price})\n self.portfolio_stock_dict[stock_code].update({\"(최우선)매수호가\": first_bid_price})\n self.portfolio_stock_dict[stock_code].update({\"거래량\": volume})\n self.portfolio_stock_dict[stock_code].update({\"누적거래량\": cumulative_volume})\n self.portfolio_stock_dict[stock_code].update({\"고가\": high_price})\n self.portfolio_stock_dict[stock_code].update({\"시가\": open_price})\n self.portfolio_stock_dict[stock_code].update({\"저가\": low_price})\n\n # (매도)계좌평가잔고내역의 보유 종목\n # 실시간 반영되지 않기 때문에 일회성으로 사용하고 더 이상 사용하지 않는다.\n # 종목이 딕셔너리에 있는지 확인하고 매매가능수량을 체크해서 매도주문한다.\n # 실시간 매수가 없어야 하므로 self.balance_stock_dict에 미존재여야 한다.\n if stock_code in self.account_stock_dict.keys() and stock_code not in self.balance_stock_dict.keys():\n account = self.account_stock_dict[stock_code]\n profit_loss_rate = (close_price - account[\"매입가\"]) / account[\"매입가\"] * 100\n\n if account[\"매매가능수량\"] > 0 and profit_loss_rate > 2:\n order_success = self.dynamicCall(\n \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",\n [\"신규매도\", self.portfolio_stock_dict[stock_code][\"주문용스크린번호\"],\n self.account_number, 2, stock_code, account[\"매매가능수량\"], 0, \"03\", \"\"\n ]\n )\n\n if order_success == 0:\n print(\">> [계좌평가잔고내역]매도주문 전달 성공[%s]\" % order_success)\n del self.account_stock_dict[stock_code]\n print(\">> self.account_stock_dict: %s\" % self.account_stock_dict)\n else:\n print(\">> [계좌평가잔고내역]매도주문 전달 실패[%s]\" % order_success)\n\n # (매도) 실시간 매수 종목\n # 수익률: (현재가-매입단가)/매입단가*100 (10000-9500)/9500*100\n elif stock_code in self.balance_stock_dict.keys():\n balance = self.balance_stock_dict[stock_code]\n\n # 수익률 계산\n profit_loss_rate = (close_price - balance[\"매입단가\"]) / balance[\"매입단가\"] * 100\n\n # 매도조건: 주문가능수량이 있고 수익률이 +-5 범위라면 익절 또는 손절한다.\n if balance[\"주문가능수량\"] > 0 and (profit_loss_rate > 2 or profit_loss_rate < -2):\n order_success = self.dynamicCall(\n \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",\n [\"신규매도\", self.portfolio_stock_dict[stock_code][\"주문용스크린번호\"],\n self.account_number, 2, stock_code, balance[\"주문가능수량\"], 0, \"03\", \"\"\n ]\n )\n\n if order_success == 0:\n print(\">> [실시간매수종목]매도주문 전달 성공[%s]\" % order_success)\n else:\n print(\">> [실시간매수종목]매도주문 전달 실패[%s]\" % order_success)\n\n # 종목 정보를 실시간으로 받는 도중 조건에 일치하면 매수 매도 주문을 넣는다.\n # 등락율이 2.0 이상이면 매수하도록 구성. 실시간 계좌 데이터 딕셔너리 미존재.\n # balance_stock_dict : 매수된 종목 관리, 매수된 종목은 매도해야한다. 매도주문.\n elif fluctuation_rate > 8.0 and stock_code not in self.balance_stock_dict:\n print(\">> 매수조건 통과: %s\" % stock_code)\n print(\">> 예수금 체크: %s\" % self.deposit)\n\n # 예수금의 10%만 사용하도록 한다. 
비율은 추후 조정.\n result = (self.deposit * 0.1) / first_ask_price\n quantity = int(result)\n\n print(\">> 매수 가능 주식수: %s\" % quantity)\n\n if quantity < 10:\n pass\n\n # 매수 요청 (사용자구분명, 화면번호, 계좌번호, 주문유형, 종목코드, 주문수량, 주문가격, 거래구분, 원주문번호)\n order_success = self.dynamicCall(\n \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",\n [\"신규매수\",\n self.portfolio_stock_dict[stock_code][\"주문용스크린번호\"],\n self.account_number,\n 1, stock_code, quantity, first_ask_price, \"00\", \"\"]\n )\n\n if order_success == 0:\n print(\">> 매수주문 전달 [%s]성공: %s\" % (order_success, stock_code))\n else:\n print(\">> 매수주문 전달 [%s]실패: %s\" % (order_success, stock_code))\n\n # 미체결 수량 매수 취소 (주문번호만 뽑아 리스트로 만든다)\n outstanding_list = list(self.outstanding_stock_dict)\n print(\">> 미체결 주문 %s건 존재.\" % len(outstanding_list))\n for order_number in outstanding_list:\n print(\">> order_number: %s\" % order_number)\n\n # 종목코드, 주문가격, 미체결수량, 주문구분\n a = self.outstanding_stock_dict[order_number][\"종목코드\"]\n b = int(self.outstanding_stock_dict[order_number][\"주문가격\"])\n c = int(self.outstanding_stock_dict[order_number][\"미체결수량\"])\n d = self.outstanding_stock_dict[order_number][\"주문구분\"].lstrip(\"+\").lstrip(\"-\")\n\n print(\"\")\n print(\">> 종목코드(a): [%s]\" % a)\n print(\">> 주문가격(b): [%s]\" % b)\n print(\">> 미체결수량(c): [%s]\" % c)\n print(\">> 주문구분(d): [%s]\" % d)\n\n if d == \"매수\" and c > 0 and first_ask_price > b:\n order_success = self.dynamicCall(\n \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",\n [\"매수취소\", self.portfolio_stock_dict[stock_code][\"주문용스크린번호\"],\n self.account_number, 3, a, 0, 0, \"00\", order_number\n ]\n )\n\n if order_success == 0:\n print(\">> 매수취소 전달 [%s]성공: %s\" % (order_success, a))\n else:\n print(\">> 매수취소 전달 [%s]실패: %s\" % (order_success, a))\n\n elif c == 0:\n del self.outstanding_stock_dict[order_number]\n print(\">> 체결된(미체결수량:0) 주문번호[%s] 삭제\" % order_number)\n\n self.disconnect_screen_number(self.screen_number_market)\n self.event_loop_real_data.exit()\n\n \"\"\"\n [OnReceiveChejanData() 이벤트]\n\n OnReceiveChejanData(\n BSTR sGubun, // 체결구분: 접수와 체결시 '0'값, 국내주식 잔고전달은 '1'값, 파생잔고 전달은 '4'\n LONG nItemCnt, (미사용)\n BSTR sFIdList (미사용)\n )\n\n 주문요청후 주문접수, 체결통보, 잔고통보를 수신할 때 마다 호출되며 GetChejanData()함수를 이용해서 상세한 정보를 얻을수 있습니다. 
\n \"\"\"\n\n def slot_chejan_data(self, conclusion_type, item_cnt, fid_list):\n # print(\"\")\n # print(\">> 체결구분: %s\" % conclusion_type)\n # print(\">> 항목건수: %s\" % item_cnt)\n # print(\">> FID 목록: %s\" % fid_list)\n\n conclusion_type = int(conclusion_type)\n\n # 0:주문체결, 1:잔고\n \"\"\"\n Real Type : 주문체결\n [9201] = 계좌번호\n [9203] = 주문번호\n [9205] = 관리자사번\n [9001] = 종목코드,업종코드\n [912] = 주문업무분류\n [913] = 주문상태\n [302] = 종목명\n [900] = 주문수량\n [901] = 주문가격\n [902] = 미체결수량\n [903] = 체결누계금액\n [904] = 원주문번호\n [905] = 주문구분\n [906] = 매매구분\n [907] = 매도수구분\n [908] = 주문/체결시간\n [909] = 체결번호\n [910] = 체결가\n [911] = 체결량\n [10] = 현재가\n [27] = (최우선)매도호가\n [28] = (최우선)매수호가\n [914] = 단위체결가\n [915] = 단위체결량\n [938] = 당일매매수수료\n [939] = 당일매매세금\n [919] = 거부사유\n [920] = 화면번호\n [921] = 터미널번호\n [922] = 신용구분(실시간 체결용)\n [923] = 대출일(실시간 체결용) \n \"\"\"\n if conclusion_type == 0: # 주문체결\n account_number = self.dynamicCall(\"GetChejanData(int)\", 9201)\n stock_code = self.dynamicCall(\"GetChejanData(int)\", 9001)[1:]\n stock_name = self.dynamicCall(\"GetChejanData(int)\", 302).strip()\n origin_order_number = self.dynamicCall(\"GetChejanData(int)\", 904)\n order_number = self.dynamicCall(\"GetChejanData(int)\", 9203)\n order_status = self.dynamicCall(\"GetChejanData(int)\", 913)\n order_quantity = int(self.dynamicCall(\"GetChejanData(int)\", 900))\n order_price = int(self.dynamicCall(\"GetChejanData(int)\", 901))\n outstanding_quantity = int(self.dynamicCall(\"GetChejanData(int)\", 902))\n order_type = self.dynamicCall(\"GetChejanData(int)\", 905).strip().lstrip(\"+\").lstrip(\"-\")\n order_conclusion_time = self.dynamicCall(\"GetChejanData(int)\", 908)\n conclusion_price = self.dynamicCall(\"GetChejanData(int)\", 910).strip()\n conclusion_quantity = self.dynamicCall(\"GetChejanData(int)\", 911).strip()\n close_price = int(self.dynamicCall(\"GetChejanData(int)\", 10))\n first_ask_price = int(self.dynamicCall(\"GetChejanData(int)\", 27))\n first_bid_price = int(self.dynamicCall(\"GetChejanData(int)\", 28))\n\n if conclusion_price == '': conclusion_price = \"0\"\n if conclusion_quantity == '': conclusion_quantity = \"0\"\n\n conclusion_price = int(conclusion_price)\n conclusion_quantity = int(conclusion_quantity)\n\n # 신규 주문이면 미체결 딕셔너리에 주문번호 할당\n if order_number not in self.outstanding_stock_dict.keys():\n self.outstanding_stock_dict.update({order_number: {}})\n\n self.outstanding_stock_dict[order_number].update({\"종목코드\": stock_code})\n self.outstanding_stock_dict[order_number].update({\"주문번호\": order_number})\n self.outstanding_stock_dict[order_number].update({\"종목명\": stock_name})\n self.outstanding_stock_dict[order_number].update({\"주문상태\": order_status})\n self.outstanding_stock_dict[order_number].update({\"주문수량\": order_quantity})\n self.outstanding_stock_dict[order_number].update({\"주문가격\": order_price})\n self.outstanding_stock_dict[order_number].update({\"미체결수량\": outstanding_quantity})\n self.outstanding_stock_dict[order_number].update({\"원주문번호\": origin_order_number})\n self.outstanding_stock_dict[order_number].update({\"주문구분\": order_type})\n self.outstanding_stock_dict[order_number].update({\"주문/체결시간\": order_conclusion_time})\n self.outstanding_stock_dict[order_number].update({\"체결가\": conclusion_price})\n self.outstanding_stock_dict[order_number].update({\"체결량\": conclusion_quantity})\n self.outstanding_stock_dict[order_number].update({\"현재가\": close_price})\n self.outstanding_stock_dict[order_number].update({\"(최우선)매도호가\": first_ask_price})\n self.outstanding_stock_dict[order_number].update({\"(최우선)매수호가\": 
first_bid_price})\n print(\">> self.outstanding_stock_dict: %s\" % self.outstanding_stock_dict)\n\n elif conclusion_type == 1: # 잔고\n account_number = self.dynamicCall(\"GetChejanData(int)\", 9201)\n stock_code = self.dynamicCall(\"GetChejanData(int)\", 9001)[1:]\n stock_name = self.dynamicCall(\"GetChejanData(int)\", 302).strip()\n close_price = abs(int(self.dynamicCall(\"GetChejanData(int)\", 10)))\n retention_quantity = int(self.dynamicCall(\"GetChejanData(int)\", 930))\n orderable_quantity = int(self.dynamicCall(\"GetChejanData(int)\", 933))\n purchase_price = int(self.dynamicCall(\"GetChejanData(int)\", 931))\n total_purchase_price = int(self.dynamicCall(\"GetChejanData(int)\", 932))\n sell_buy_type = self.dynamicCall(\"GetChejanData(int)\", 946).strip()\n first_ask_price = int(self.dynamicCall(\"GetChejanData(int)\", 27))\n first_bid_price = int(self.dynamicCall(\"GetChejanData(int)\", 28))\n\n if stock_code not in self.balance_stock_dict.keys():\n self.balance_stock_dict.update({stock_code: {}})\n\n self.balance_stock_dict[stock_code].update({\"현재가\": close_price})\n self.balance_stock_dict[stock_code].update({\"종목코드\": stock_code})\n self.balance_stock_dict[stock_code].update({\"종목명\": stock_name})\n self.balance_stock_dict[stock_code].update({\"보유수량\": retention_quantity})\n self.balance_stock_dict[stock_code].update({\"주문가능수량\": orderable_quantity})\n self.balance_stock_dict[stock_code].update({\"매입단가\": purchase_price})\n self.balance_stock_dict[stock_code].update({\"총매입가\": total_purchase_price})\n self.balance_stock_dict[stock_code].update({\"매도/매수구분\": sell_buy_type})\n self.balance_stock_dict[stock_code].update({\"(최우선)매도호가\": first_ask_price})\n self.balance_stock_dict[stock_code].update({\"(최우선)매수호가\": first_bid_price})\n print(\">> self.balance_stock_dict: %s\" % self.balance_stock_dict)\n\n if retention_quantity == 0:\n del self.balance_stock_dict[stock_code]\n print(\">> after del > self.balance_stock_dict: %s\" % self.balance_stock_dict)\n\n def merge_stock_dict(self):\n print(\"\")\n self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict})\n self.all_stock_dict.update({\"미체결종목\": self.outstanding_stock_dict})\n self.all_stock_dict.update({\"포트폴리오종목\": self.portfolio_stock_dict})\n print(\">> all_stock_dict{}: %s\" % self.all_stock_dict)\n print(\"\")\n\n def set_screen_number(self):\n\n self.merge_stock_dict()\n screen_overwrite = []\n\n # 계좌평가잔고내역 종목\n for stock_code in self.account_stock_dict.keys():\n if stock_code not in screen_overwrite:\n screen_overwrite.append(stock_code)\n\n # 미체결종목\n for order_number in self.outstanding_stock_dict.keys():\n stock_code = self.outstanding_stock_dict[order_number][\"종목코드\"]\n if stock_code not in screen_overwrite:\n screen_overwrite.append(stock_code)\n\n # 포트폴리오 종목\n for stock_code in self.portfolio_stock_dict.keys():\n if stock_code not in screen_overwrite:\n screen_overwrite.append(stock_code)\n\n # print(\">> 스크린번호 리스트에 종목코드 %s건 통합 완료\" % len(screen_overwrite))\n # print(\">> 스크린번호 할당 시작\")\n\n cnt = 0\n for stock_code in screen_overwrite:\n screen_number_stock = int(self.screen_number_stock)\n screen_number_order = int(self.screen_number_order)\n\n # print(\">> 주식용 스크린번호: %s\" % screen_number_stock)\n # print(\">> 주문용 스크린번호: %s\" % screen_number_order)\n\n if (cnt % 50) == 0:\n screen_number_stock += 1\n screen_number_order += 1\n self.screen_number_stock = str(screen_number_stock)\n self.screen_number_order = str(screen_number_order)\n\n # 모든 스크린 번호는 포트폴리오 딕셔너리에 추가. 
변하는 종목의 데이터 보과 및 업데이트 용도.\n if stock_code in self.portfolio_stock_dict.keys():\n self.portfolio_stock_dict[stock_code].update({\"스크린번호\": self.screen_number_stock})\n self.portfolio_stock_dict[stock_code].update({\"주문용스크린번호\": self.screen_number_order})\n elif stock_code not in self.portfolio_stock_dict.keys():\n print(\">> stock_code[%s] not in self.portfolio_stock_dict: \" % stock_code)\n self.portfolio_stock_dict.update(\n {stock_code: {\"스크린번호\": self.screen_number_stock, \"주문용스크린번호\": self.screen_number_order}})\n\n cnt += 1\n\n # print(\">> portfolio_stock_dict{}: %s\" % self.portfolio_stock_dict)\n print(\">> 스크린번호 %s건 할당 완료\" % cnt)\n\n # 대상 종목(보유,미체결,포트폴리오) 실시간 등록\n def all_stock_real_reg(self):\n for stock_code in self.portfolio_stock_dict.keys():\n screen_number = self.portfolio_stock_dict[stock_code][\"주문용스크린번호\"]\n fids = \"20;10;12;15;16;17;18\"\n self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_number, stock_code, fids, \"1\")\n\n def slot_msg(self, screen_number, rq_name, tr_code, msg):\n # print(\"\")\n print(\"> screen_number: %s, rq_name: %s, tr_code: %s, msg: %s,\" % (screen_number, rq_name, tr_code, msg))\n\n def disconnect_screen_number(self, screen_number=None):\n self.dynamicCall(\"DisconnectRealData(QString)\", screen_number)\n# print(\">>> The number[%s] of screen disconnected.\" % screen_number)\n\n","repo_name":"Geenie-Lee/week4","sub_path":"kiwoom/kiwoom.py","file_name":"kiwoom.py","file_ext":"py","file_size_in_byte":43501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"25653466937","text":"t = int(input())\n\nfor _ in range(t):\n n = int(input())\n nums = list(map(int, input().split()))\n\n for i, num in enumerate(nums):\n x = 0\n for j in range(n):\n if i == j:\n continue\n x ^= nums[j]\n \n if x == num:\n ans = x\n \n print(ans)","repo_name":"NaolB02/A2SV_Contests","sub_path":"CodeForces Tenth Contest/A_XOR_Mixup.py","file_name":"A_XOR_Mixup.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"30848505228","text":"from flask import Flask\nfrom flask import jsonify\n\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef base_url():\n \"\"\"Base url to test API.\"\"\"\n\n response = {\n 'response': 'Hello world!'\n }\n\n return jsonify(response)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"RodolfoFerro/docker-flask-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"}
+{"seq_id":"17571692813","text":"import json\nfrom flask import Flask, request\napp = Flask(__name__)\n\nyaks = [\n {'text': 'Hello world', 'votes': 5},\n {'text': 'This is a yak', 'votes': -2}\n ]\n\n\n@app.route(\"/\", methods=['GET'])\ndef mainpage():\n return app.send_static_file('index.html')\n\n\n@app.route(\"/yak\", methods=['POST'])\ndef addyak():\n yaks.insert(0, request.get_json())\n return \"okay\"\n\n\n@app.route(\"/yaks\", methods=['GET'])\ndef getyaks():\n return json.dumps(yaks)\n\n\n@app.route(\"/upvote\", methods=['POST'])\ndef upvote():\n yak = request.get_json()['id']\n yaks[yak]['votes'] += 1\n return \"okay\"\n\n\n@app.route(\"/downvote\", methods=['POST'])\ndef downvote():\n yak = request.get_json()['id']\n yaks[yak]['votes'] -= 1\n return \"okay\"\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"HackBinghamtonArchives/yikyak","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72406824266","text":"import pandas as pd\nimport tensorflow as tf\nimport os\nfrom pathlib import Path\nimport numpy as np\nfrom math import e\nfrom statistics import mean\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_absolute_error\n\nmodel = '/multi_eq_1.0.h5'\nbase = os.getcwd()\nmodel_path = base+model\nload_model = tf.keras.models.load_model(model_path)\n\nprint(load_model.summary())\ndatabase = str(Path(os.getcwd()).parent)\ndatatail = '/new_fit/damped/damped_results_all_dir.csv'\nraw_data = pd.read_csv(database+datatail, sep=',')\n#print(raw_data.isna().sum())\n# raw_data = raw_data[(raw_data['Length (m)'] >= 2) | (raw_data['Heading'] != -90)]\nraw_data = raw_data[(raw_data['Length'] >= 2)]\nprint(raw_data.head())\nraw_data.dropna(axis=0, inplace=True)\ncolumn1 = raw_data['Length']\ncolumn2 = raw_data['Beam']\ncolumn3 = raw_data['Draft']\ncolumn4 = raw_data['Heading']\n\nraw_data.pop('R2surge')\nraw_data.pop('R2sway')\nraw_data.pop('R2heave')\nraw_data.pop('R2roll')\nraw_data.pop('R2pitch')\nraw_data.pop('R2yaw')\nraw_data.pop('MAEsurge')\nraw_data.pop('MAEsway')\nraw_data.pop('MAEheave')\nraw_data.pop('MAEroll')\nraw_data.pop('MAEpitch')\nraw_data.pop('MAEyaw')\n\n\ntrain_dataset = raw_data.sample(frac=0.8, random_state=0)\ntest_dataset = raw_data.drop(train_dataset.index)\n\n\ntrain_features = train_dataset.copy()\ntest_features = test_dataset.copy()\n\norder = 2\ntrain_labels = np.asarray(train_features.drop(train_features.columns[list(range(4,6*(order+1)+4))], axis=1, inplace=False))\ntest_labels = np.asarray(test_features.drop(test_features.columns[list(range(4,6*(order+1)+4))], axis=1, inplace=False))\n\ntrain_features = train_features.drop(train_features.columns[list(range(0,4))], axis=1, inplace=False)\ntest_features = test_features.drop(test_features.columns[list(range(0,4))], axis=1, inplace=False)\n\n# baseline = np.asarray(raw_data.sample(n=1))[0]\nbaseline = np.asarray(raw_data.loc[1400])\n# baseline = np.asarray(raw_data.loc[757])\nbaseline_input = baseline[0:4]\nbaseline_prediction = baseline[4:]\n\nnew_input = [baseline_input.tolist()]\n# new_input = [[35, 18, 16, 180]]\nnew_pred = load_model.predict(new_input)[0]\norig_x = []\norig_y = []\norig_z = []\norig_rx = []\norig_ry = []\norig_rz = []\npred_x = []\npred_y = []\npred_z = []\npred_rx = []\npred_ry = []\npred_rz = []\n\n\ndef damped_func(x, a, b, c):\n # Motion of a critically-damped harmonic motion system\n # Change this function to change the shape of the initial data, to better fit it.\n y = c * e**-(a*x) + b*x*e**-(a*x)\n return y\n\n\ndef gauss_func(x, a, b, c):\n # Motion of a critically-damped harmonic motion system\n # Change this function to change the shape of the initial data, to better fit it.\n y = a * e**-((x-b)**2/c)\n return y\n\n\ndef arctan_func(x, a, b, c):\n # Motion of a critically-damped harmonic motion system\n # Change this function to change the shape of the initial data, to better fit it.\n y = a * np.arctan((x * b + c)) + 0.5\n return y\n\n\norder = order+1\nprint(baseline_input)\nprint('\\n\\n')\n\nx_axis = np.linspace(0.1, 2.5, 60)\nfor i in x_axis:\n orig_x.append(damped_func(i, *baseline_prediction[0*order:0*order+order]))\n orig_y.append(damped_func(i, *baseline_prediction[1*order:1*order+order]))\n orig_z.append(arctan_func(i, *baseline_prediction[2*order:2*order+order]))\n orig_rx.append(gauss_func(i, *baseline_prediction[3*order:3*order+order]))\n orig_ry.append(gauss_func(i, *baseline_prediction[4*order:4*order+order]))\n 
orig_rz.append(gauss_func(i, *baseline_prediction[5*order:5*order+order]))\n pred_x.append(damped_func(i, *new_pred[0*order:0*order+order]))\n pred_y.append(damped_func(i, *new_pred[1*order:1*order+order]))\n pred_z.append(arctan_func(i, *new_pred[2*order:2*order+order]))\n pred_rx.append(gauss_func(i, *new_pred[3*order:3*order+order]))\n pred_ry.append(gauss_func(i, *new_pred[4*order:4*order+order]))\n pred_rz.append(gauss_func(i, *new_pred[5*order:5*order+order]))\n\n\nx_err_rpd = abs(round(mean(200*np.subtract(orig_x, pred_x)/(np.add(orig_x, pred_x))), 3))\ny_err_rpd = abs(round(mean(200*np.subtract(orig_y, pred_y)/(np.add(orig_y, pred_y))), 3))\nz_err_rpd = abs(round(mean(200*np.subtract(orig_z, pred_z)/(np.add(orig_z, pred_z))), 3))\nrx_err_rpd = abs(round(mean(200*np.subtract(orig_rx, pred_rx)/(np.add(orig_rx, pred_rx))), 3))\nry_err_rpd = abs(round(mean(200*np.subtract(orig_ry, pred_ry)/(np.add(orig_ry, pred_ry))), 3))\nrz_err_rpd = abs(round(mean(200*np.subtract(orig_rz, pred_rz)/(np.add(orig_rz, pred_rz))), 3))\n\nx_err_raw = round(mean_absolute_error(orig_x, pred_x), 3)\ny_err_raw = round(mean_absolute_error(orig_y, pred_y), 3)\nz_err_raw = round(mean_absolute_error(orig_z, pred_z), 3)\nrx_err_raw = round(mean_absolute_error(orig_rx, pred_rx), 3)\nry_err_raw = round(mean_absolute_error(orig_ry, pred_ry), 3)\nrz_err_raw = round(mean_absolute_error(orig_rz, pred_rz), 3)\n\n# plt.subplot(2, 3, 1)\nplt.rc('axes', titlesize=25)\nplt.rc('legend',fontsize=25)\nplt.rc('font', size=25)\n# title = 'Barge Dimensions ' + str(baseline_input[0]) + ' m Length, ' + str(baseline_input[1]) + ' m Beam, ' + \\\n # str(abs(baseline_input[2])) + ' m Draft - Waves Heading of: ' + str(baseline_input[3])\n# plt.suptitle(title)\nplt.plot(x_axis, pred_x, color='red', label='Predicted RAO')\nplt.plot(x_axis, orig_x, color='blue', linestyle='-.', label='True RAO')\nplt.title('Surge RAO')\nplt.ylabel('Response (m/m)')\nplt.text(mean(x_axis), (mean(orig_x)+mean(pred_x))/2, \"Relative % diff: \"+ str(x_err_rpd)+'\\n'+\"MAE: \" + str(x_err_raw))\nplt.grid()\n# plt.ylim([-0.5, 1.5])\nplt.legend()\nplt.show()\n# plt.show()\n# plt.subplot(2, 3, 2)\n# plt.rc('font', size=25)\nplt.plot(x_axis, pred_y, color='red', label='Predicted RAO')\nplt.plot(x_axis, orig_y, color='blue', linestyle='-.', label='True RAO')\nplt.title('Sway RAO')\nplt.ylabel('Response (m/m)')\n\nplt.text(mean(x_axis), (mean(orig_y)+mean(pred_y))/2, \"Relative % diff: \"+ str(y_err_rpd)+'\\n'+\"MAE: \" + str(y_err_raw))\n\nplt.grid()\nplt.legend()\nplt.show()\n# plt.rc('font', size=10)\n# plt.ylim([-0.5, 1.5])\n# plt.show()\nplt.subplot(2, 3, 3)\nplt.plot(x_axis, pred_z, color='red', label='Predicted RAO')\nplt.plot(x_axis, orig_z, color='blue', linestyle='-.', label='True RAO')\nplt.title('Heave RAO')\nplt.text(mean(x_axis), (mean(orig_z)+mean(pred_z))/2, \"Relative % diff: \"+ str(z_err_rpd)+'\\n'+\"MAE: \" + str(z_err_raw))\nplt.grid()\n# plt.ylim([-0.5, 1.5])\n\nplt.subplot(2, 3, 4)\nplt.plot(x_axis, pred_rx, color='red', label='Predicted RAO')\nplt.plot(x_axis, orig_rx, color='blue', linestyle='-.', label='True RAO')\nplt.title('Roll RAO')\nplt.text(mean(x_axis), (mean(orig_rx)+mean(pred_rx))/2, \"Relative % diff: \"+ str(rx_err_rpd)+'\\n'+\"MAE: \" + str(rx_err_raw))\n# plt.ylim([-0.5, 1.5])\nplt.ylabel('Response (Deg/m)')\nplt.xlabel('Wave Frequency (rad/s)')\nplt.grid()\n\nplt.subplot(2, 3, 5)\nplt.plot(x_axis, pred_ry, color='red', label='Predicted RAO')\nplt.plot(x_axis, orig_ry, color='blue', linestyle='-.', label='True 
RAO')\nplt.title('Pitch RAO')\nplt.text(mean(x_axis), (mean(orig_ry)+mean(pred_ry))/2, \"Relative % diff: \"+ str(ry_err_rpd)+'\\n'+\"MAE: \" + str(ry_err_raw))\n# plt.ylim([-0.5, 50])\nplt.grid()\nplt.xlabel('Wave Frequency (rad/s)')\n\nplt.subplot(2, 3, 6)\nplt.plot(x_axis, pred_rz, color='red', label='Predicted RAO')\nplt.plot(x_axis, orig_rz, color='blue', linestyle='-.', label='True RAO')\nplt.title('Yaw RAO')\nplt.text(mean(x_axis), (mean(orig_rz)+mean(pred_rz))/2, \"Relative % diff: \"+ str(rz_err_rpd)+'\\n'+\"MAE: \" + str(rz_err_raw))\n# plt.ylim([-0.5, 1.5])\nplt.grid()\nplt.xlabel('Wave Frequency (rad/s)')\n \n# plt.legend()\n# plt.get_current_fig_manager().full_screen_toggle()\nplt.show()","repo_name":"jafrizzell/RAO-Research","sub_path":"parameter_fit_data_with_code/damped_model_visualizer.py","file_name":"damped_model_visualizer.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"14698129988","text":"# Nama : Andrew Devito Aryo\n# NPM : 2306152494\n# Kelas : C\n# Kode Asdos : GAN\n\n# Fungsi untuk menampilkan Index Line dan Konten line\ndef lineNumber(line, text):\n return f\"{line:03d}. {text}\"\n\n# Fungsi Main\ndef main():\n try:\n print(\"Lab 04 -- DDP1 -- 2023\")\n counter_char = 0 # Membuat Variable Counter Character\n contain = \"\" # Variable untuk menampung konten yang akan di write\n input_file = input(\"Input File Name: \") # Meminta input nama file input dari User\n #my_dir = f\"File Kuliah\\\\\" # Melacak Directory dari File yang diingikan\n try:\n data = open(input_file, \"r\") # Membuka File yang diinginkan di read\n except:\n print(\"File Not Found!\") # Jika file tidak ditemukan, maka keluarkan message error\n quit()\n\n output_file = input(\"Output File Name: \") # Meminta input nama file output dari User\n \n for index, line in enumerate(data): # Mengiterasi setiap line yang ada di File # Menambah Counter\n contain += lineNumber(index + 1, line) # Meng-append setiap line kedalam Contain\n for char in line: # Mengiterasi setiap Char dalam Line\n if char.isalnum(): # Menambah Counter apabila Character bukan whitespace\n counter_char+=1\n\n with open(output_file, \"w\") as writer: # Membuka file output\n writer.write(contain + f\"\\n\\nThe total number of letters in the file {input_file} is {counter_char}\") # Menuliskan hasil iterasi kedalam file output\n\n print(\"-- End Of Program -- \")\n except KeyboardInterrupt:\n print(\"\\n-- Program di Terminate--\")\n\nif __name__ == '__main__': \n main() # Memanggil Fungsi Main","repo_name":"Andrew4Coding/ddp1-workspace","sub_path":"Lab/LAB4/GAN_AndrewDevitoAryo_2306152494_lab04.py","file_name":"GAN_AndrewDevitoAryo_2306152494_lab04.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"11569853581","text":"\"\"\"Function to run a bubble sort on a given list of numbers.\"\"\"\n\n\ndef bubble_sort(a_list):\n \"\"\"Bubble sort function.\"\"\"\n global loop_count\n loop_count = 1\n for i in range(len(a_list) - loop_count):\n if a_list[i] > a_list[i + 1]:\n a_list[i], a_list[i + 1] = a_list[i + 1], a_list[i]\n else:\n continue # pragma: no cover\n loop_count += 1\n bubble_sort(a_list)\n return a_list\n\nif __name__ == '__main__': # pragma: no cover\n import timeit\n from random import randint\n short_list = [randint(1, 50) for _ in range(10)]\n # long_list = [randint(1, 50) for _ in range(100)]\n small_numbers = [randint(1, 25) for _ in range(15)]\n big_numbers = [randint(101, 250) for _ in range(15)]\n mix_list = [x for x in zip(small_numbers, big_numbers)]\n\n short_list = timeit.timeit(\"bubble_sort(short_list)\", setup=\"from __main__ import short_list, bubble_sort\")\n # long_list = timeit.timeit(\"bubble_sort(long_list)\", setup=\"from __main__ import long_list, bubble_sort\")\n small_numbers = timeit.timeit(\"bubble_sort(small_numbers)\", setup=\"from __main__ import small_numbers, bubble_sort\")\n big_numbers = timeit.timeit(\"bubble_sort(big_numbers)\", setup=\"from __main__ import big_numbers, bubble_sort\")\n mix_list = timeit.timeit(\"bubble_sort(mix_list)\", setup=\"from __main__ import mix_list, bubble_sort\")\n print('Short list time: ', short_list)\n # print('Long list time: ', long_list)\n print('Small number list time: ', small_numbers)\n print('Big numbers list time:', big_numbers)\n print('Mix numbers list time: ', mix_list)\n","repo_name":"hcodydibble/data-structures","sub_path":"src/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"36916507614","text":"def sum_of_div(n):\n \"\"\"This function calculates and returns the sum of the divisors, including 1.\"\"\"\n s = 1\n for i in range(2, n - 1):\n if n % i == 0:\n s += i\n return s\n\ndef is_perfect(n):\n \"\"\"The function checks if the number is perfect or not.\n It returns True if the number is perfect and False otherwise.\"\"\"\n if n == sum_of_div(n):\n return True\n else:\n return False\n return True\n\ndef larger_perf(n):\n \"\"\"Generates the first perfect number larger that a given number n.\"\"\"\n i = 0\n while True:\n n += 1\n if is_perfect(n):\n i += 1\n if i > 49:\n print(\"there is no such a number\")\n else:\n return n\n \n \n\nn = int(input(\"Enter the number: \"))\n\nprint(int(sum_of_div(n)))\n\nif is_perfect(n):\n print(\"The number is perfect\")\nelse:\n print(\"The number is not perfect\")\n \nprint(larger_perf(n))\n","repo_name":"TeoMoisi/python-projects","sub_path":"lab1/ex15.py","file_name":"ex15.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"40586837848","text":"import cv2\nimport os \nimport numpy as np\n\ndef load_images_from_folder(folder):\n count = 0\n v = sorted(sorted(os.listdir(folder)), key=len)\n\n for filename in v:\n count+=1\n img = cv2.imread(os.path.join(folder,filename))\n\n v = np.zeros(img.shape)\n\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n if (img[i][j] == np.array([56, 56, 56])).all():\n v[i][j] = np.array([0, 0, 0])\n elif (img[i][j] == np.array([0, 0, 0])).all() or (img[i][j] == np.array([1, 1, 1])).all():\n v[i][j] = np.array([255, 255, 255])\n else:\n v[i][j] = np.array([0, 255, 0])\n\n cv2.imwrite(\"drone_basic/correct_output_images/img{}.png\".format(count), v)\n\nload_images_from_folder(\"drone_basic/input_images_2_copy\")","repo_name":"DavitMirzoyan/individual-project","sub_path":"drone_basic/scripts/colored_images_for_training.py","file_name":"colored_images_for_training.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"24719410088","text":"from pymongo import MongoClient\n\nconnection = \"mongodb+srv://velllum:0sxfeDlou9i66twP@cluster0.fs8kg.mongodb.net/freelancers?retryWrites=true&w=majority\"\n\nclient = MongoClient(connection)\ndb = client.freelancers[\"stomshop_pro\"]\n\n\ndef save_data(dic):\n \"\"\"- Сохраняет собранные данные в удаленную базу mongodb\"\"\"\n try:\n db.insert(dic)\n except Exception as e:\n print(f\"Произошла ошибка {e}\")\n\n\ndef remove_data():\n \"\"\"- Отчистить базу от всех данных\"\"\"\n db.delete_many({})\n","repo_name":"velllum/stomshop_pro_selenium","sub_path":"data_base.py","file_name":"data_base.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"36592072182","text":"import numpy as np\nimport matplotlib.pylab as plt\n\ndatFinicial = np.genfromtxt(\"Finicial.dat\", delimiter = \",\")\nxf = datFinicial[:,0]\nyf = datFinicial[:,1]\n\nplt.figure()\nplt.plot(xf,yf,label='Condición inicial')\nplt.xlabel(\"x\")\nplt.ylabel(\"h\")\nplt.ylim(-.5*np.max(yf),2*np.max(yf))\nplt.legend()\nplt.savefig(\"Finicial.png\")\nplt.close()\n\n\ndataOnda = np.genfromtxt(\"dataOnda.dat\", delimiter = \",\")\nt = dataOnda[:,0]\nfils, cols = dataOnda.shape[0], dataOnda[1]\nplt.figure()\nfor i in range(10):\n\tplt.plot(t,dataOnda[i,1:])\nplt.savefig(\"GrafOnda\")\n","repo_name":"johhanz099/HerramientasSemanas","sub_path":"S8_EDP1d/AnayaBryan_S8C1EDP.py","file_name":"AnayaBryan_S8C1EDP.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38537224716","text":"\ncount = 0\n\ndef cache(f):\n d = dict()\n def cached_f(*args):\n if args not in d:\n d[args] = f(*args)\n return d[args]\n return cached_f\n\n# @cache\ndef f(a, b):\n global count\n if count == 40:\n exit()\n else:\n count += 1\n print(\"Calling f, a = {} b = {}\".format(a, b))\n if a==0:\n return b + 1\n if b==0:\n return f(a-1, h)\n return f(a-1, f(a, b-1))\n\nh = 1\na = 4\nb = 1\nf(a, b)\n\n# Find h s.t. f(4, 1) == 6\n","repo_name":"sw561/synacor","sub_path":"translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"32374929678","text":"def reverse(x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n x = str(x)\n reversed_num = x[::-1]\n str_to_int = int(reversed_num)\n return str_to_int\n\nprint(reverse(123))\n\n\"\"\"\nturn x into a string so we can reverse it \nreverse the string \nturn the reversed string into an integer \nreturn the reversed integer \n\"\"\"\n","repo_name":"ruklakhani/leetcode_SPD","sub_path":"leetcode-communication/reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"9672308664","text":"'''\n Correlation coefficient of\n Deaths/Mil.populations and TBC population(%:2018).\n\n step4.py\n\n 2020-06-04\n\n'''\nimport numpy as np\nimport csv\nimport math\nfrom scipy.stats import pearsonr\n\nfile = './DeMilTBC2018.csv'\n\ndata = []\n\nwith open(file) as f:\n reader = csv.reader(f)\n for row in reader:\n data.append(row)\n\ndel data[:1]\n\nX = []\nY = []\n\nfor dat in data:\n xx = float(dat[2])\n yy = float(dat[3])\n if (xx>0):\n X.append(math.log(xx))\n Y.append(math.log(yy))\n\nval,p = pearsonr(X, Y)\n\nprint('Corrcoef val %f p val %f'%(val, p))\n\n\n","repo_name":"IchiroYoshida/python_public","sub_path":"covid/calc/TB/step4.py","file_name":"step4.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"9848468783","text":"from typing import List\n\n\ndef load_input() -> List:\n with open('inputs.txt', 'r') as file:\n return file.readlines()\n\n\ndef search_in_line(tree_line: List, coordinates: set, y_coordinate: int, mode: str) -> None:\n left_visible_tree = int(tree_line[0])\n right_visible_tree = int(tree_line[-1])\n tree_line = tree_line[1:-1]\n line_len = len(tree_line)\n\n for index, tree_height in enumerate(tree_line):\n backward_idx = line_len - index - 1\n l_inner_tree = int(tree_height)\n r_inner_tree = int(tree_line[backward_idx])\n if left_visible_tree < l_inner_tree:\n coordinate = (index, y_coordinate) if mode == 'row' else (\n y_coordinate, index)\n coordinates.add((coordinate))\n left_visible_tree = l_inner_tree\n if right_visible_tree < r_inner_tree:\n coordinate = (backward_idx, y_coordinate) if mode == 'row' else (\n y_coordinate, backward_idx)\n coordinates.add((coordinate))\n right_visible_tree = r_inner_tree\n\n\ndef transform_matrix(forest: List) -> List:\n matrix: List[List] = [[] for i in range(len(forest))]\n for line in forest:\n for index, tree in enumerate(line.rstrip()):\n matrix[index].append(tree)\n\n return matrix\n\n\ndef walkthrough_forest(forest: List, mode: str, coordinates: set) -> None:\n y_coordinate: int = 0\n\n for tree_line in forest[1:-1]:\n if mode == 'row':\n tree_line = tree_line.rstrip()\n search_in_line(tree_line, coordinates, y_coordinate, mode)\n y_coordinate += 1\n\n\ndef main():\n coordinates: set = set()\n forest: List[str] = load_input()\n matrix: List[List] = transform_matrix(forest)\n width: int = len(forest[0].rstrip())\n height: int = len(forest)\n # corner trees have to be subtracted\n outer_tree_number: int = 2 * (width + height) - 4\n\n walkthrough_forest(forest, 'row', coordinates)\n walkthrough_forest(matrix, 'column', coordinates)\n\n print(f'{outer_tree_number} : {len(coordinates)} : {outer_tree_number + len(coordinates)}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"needsomesl33p/2022-advent-of-code","sub_path":"8day/1_star.py","file_name":"1_star.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"30678693516","text":"import operator\n\ndef Main():\n\n in_file1 = open(\"players.csv\",\"r\")\n in_file2 = open(\"games.csv\", \"r\")\n\n header1 = next(in_file1)\n header2 = next(in_file2)\n\n #Players\n players = {}\n for line in in_file1: #PLAYER,SELO\n line= line.strip()\n line= line.split(\",\")\n name = line[0].strip()\n SELO = int(line[1].strip())\n players[name]=SELO\n\n\n #Games\n matches = []\n for line in in_file2: #PLAYER A,PLAYER B,RESULT\n line= line.strip()\n line=line.split(\",\")\n match = []\n pA = line[0].strip() #Player A\n pB = line[1].strip() #Player B\n result = line[2].strip().split(\"-\")\n if \"/\" in result[0]:\n result[0] = result[0].split(\"/\")\n A1 = int(result[0][0])\n A2 = int(result[0][1])\n rA = A1/A2\n else:\n rA= int(result[0]) #Result player A\n if \"/\" in result[1]:\n result[1] = result[1].split(\"/\")\n B1 = int(result[1][0])\n B2 = int(result[1][1])\n rB = B1/B2\n else:\n rB= int(result[1]) #Result player B\n match.append(pA)\n match.append(pB)\n match.append(rA)\n match.append(rB)\n matches.append(match)\n\n\n\n for i in range(len(matches)):\n playerA = matches[i][0]\n playerB = matches[i][1]\n if playerA not in players:\n players[playerA] = 1500\n SELOA = 1500\n else:\n SELOA = players[playerA]\n if playerB not in players:\n players[playerB] = 1500\n SELOB = 1500\n else:\n SELOB = players[playerB]\n delta_match = delta(SELOA, SELOB)\n if matches[i][2] > matches[i][3]:\n SELOA = SELOA +200*delta_match\n SELOB = SELOB -200*delta_match\n elif matches[i][2] < matches[i][3]:\n SELOA = SELOA -200*delta_match\n SELOB = SELOB +200*delta_match\n players[playerA] = round(SELOA)\n players[playerB] = round(SELOB)\n\n playersR = sorted(players.items(), key=operator.itemgetter(1), reverse=True)\n for k, v in playersR:\n print(f'{k}: {v}')\n\n in_file1.close()\n in_file2.close()\n\ndef delta(player_1, player_2):\n a = 1 / (1 + 2 ** ((player_1 - player_2) / 100))\n return a\n\nMain()\n\n","repo_name":"HardLoooking/CS-Exams","sub_path":"Chess/Chess.py","file_name":"Chess.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"16849804404","text":"# goolge colab 에서 작성\n\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom konlpy.tag import Okt\nimport re\nimport sys\nimport requests\nimport json\n\n\nurl = \"https://naveropenapi.apigw.ntruss.com/sentiment-analysis/v1/analyze\"\n\nheaders = { # 지원 api key\n \"X-NCP-APIGW-API-KEY-ID\": \"x4cybkvtso\",\n \"X-NCP-APIGW-API-KEY\": \"QX0KLAU5yVSuSayqr1t285G9hvI3YEWq09fIVQpv\",\n \"Content-Type\": \"application/json\"\n}\n\n\nload_wb = load_workbook(\n \"/content/drive/MyDrive/jw/seoul/yongsan_restaurants.xlsx\", data_only=True)\n\nload_ws = load_wb['용산구']\nwb = Workbook()\nws = wb.active\n\nreview_t = []\nreview = []\n\nfor row in load_ws.rows:\n row_value = []\n for cell in row:\n if cell.value != None:\n row_value.append(cell.value)\n review_t.append(row_value)\n\nfor i in review_t:\n t = ''\n for j in i:\n t = t+\" \"+j\n review.append(t)\n\n# print(review)\n\nokt = Okt()\nword = []\n\nfor i in range(len(review)):\n review[i] = re.sub(\"[^가-힣ㄱ-ㅎㅏ-ㅣ\\\\s]\", \"\", review[i]) # 한글이랑 공백만 남기고 다 지움\n review[i] = review[i].replace(\"\\n\", \" \")\n word.append(review[i])\n\n# print(word)\nprint(\"매장 개수\", len(word)) # len(review) == len(word) 매장 개수\n\nclova_cnt = 0\nfor i in range(len(word)):\n if (int(len(word[i])/950) == 0):\n clova_cnt += 1\n else:\n clova_cnt += int(len(word[i])/950)\n\nprint(\"api 호출 횟수\", clova_cnt) # api 호출 횟수\nprint(\"\\n\")\n\np_li = []\nn_li = []\n\nfor i in range(len(word)):\n pp = 0\n nn = 0\n pp_avg = 0\n nn_avg = 0\n n = 0\n\n if(len(word[i]) > 950):\n length = int(len(word[i])/950) # 매장 하나의 전체 리뷰 글자수를 950으로 나눈 값만큼 돌리기\n m = 950\n else:\n length = 1 # 950 미만이면 한번만\n m = len(word[i])\n\n for k in range(length):\n\n content = word[i][n:m]\n\n data = {\n \"content\": content\n }\n\n response = requests.post(url, data=json.dumps(data), headers=headers)\n rescode = response.status_code\n\n text = response.json()\n\n documnet = text['document']\n sentiment = text['document']['sentiment']\n positive = text['document']['confidence']['positive']\n negative = text['document']['confidence']['negative']\n\n pp += positive\n nn += negative\n\n n += 950\n m += 950\n\n pp_avg = pp/length\n nn_avg = nn/length\n\n if(rescode == 200):\n print(review_t[i][0], pp_avg, nn_avg)\n p_li.append([pp_avg, review_t[i][0]])\n n_li.append([nn_avg, review_t[i][0]])\n else:\n print(\"Error : \" + response.text)\n\np_li.sort(reverse=True)\nn_li.sort(reverse=True)\n\np_result = 0\nn_result = 0\n\nprint(\"\\n긍정\")\nfor i in range(len(word)):\n p_result = \"{:,.1f}\".format(p_li[i][0])\n print(p_li[i][1], p_result)\n\nprint(\"\\n부정\")\nfor i in range(len(word)):\n n_result = \"{:,.1f}\".format(n_li[i][0])\n print(n_li[i][1], n_result)\n","repo_name":"okonomiyakki/clova-sentiment","sub_path":"clova.py","file_name":"clova.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"21938049618","text":"#!/usr/bin/env python\n\n\"\"\"\nThis code was written by Kirstie Whitaker in January 2017 to accompany\nthe manuscript \"Neuroscientific insights into the development of analogical reasoning\".\n\nContact: kw401@cam.ac.uk\n\"\"\"\n\n#===============================================================================\n# Import what you need\n#===============================================================================\nimport matplotlib.pylab as plt\nimport matplotlib.image as mpimg\nimport matplotlib.patches as patches\nimport matplotlib.lines as mlines\nimport numpy as np\nimport os\nimport palettable\nimport pandas as pd\nfrom scipy.stats import pearsonr, ttest_rel\nimport seaborn as sns\nsns.set_context(\"poster\", font_scale=1)\nsns.set_style('white')\nfrom statsmodels.formula.api import ols\nimport string\n\n#===============================================================================\n# Write a few useful functions\n#===============================================================================\n\n#-------------------------------------------------------------------------------\n# Read in the analogy stimulus file\n#-------------------------------------------------------------------------------\ndef read_in_analogy(analogy_stimulus_file):\n\n img = mpimg.imread(analogy_stimulus_file)\n\n img = img[80:-5, 135:-165, :]\n\n return img\n\n\n#-------------------------------------------------------------------------------\n# Read in the semantic stimulus file\n#-------------------------------------------------------------------------------\ndef read_in_semantic(semantic_stimulus_file):\n\n img = mpimg.imread(semantic_stimulus_file)\n\n img = img[18:-55, 120:-140, :]\n\n return img\n\n\n#-------------------------------------------------------------------------------\n# Add the coloured boxes and text to the analogy example for figure 1\n#-------------------------------------------------------------------------------\ndef add_boxes_analogy(ax, color_dict):\n\n color_list = [ 'orange', 'purple', 'green', 'blue' ]\n\n text_list = [ 'semantic\\nlure', 'perceptual\\nlure', 'unrelated\\nlure', 'correct\\nchoice']\n\n for i, (color, text) in enumerate(zip(color_list, text_list)):\n\n # Add the coloured boxes\n ax.add_patch(\n patches.Rectangle(\n (40 + i*154, 430),\n 150,\n 150,\n fill=False,\n edgecolor=color_dict[color],\n linewidth=3\n )\n )\n\n # Add the text\n ax.text(\n 116 + i*154,\n 623,\n text,\n fontsize=15,\n color=color_dict[color],\n horizontalalignment='center',\n verticalalignment='center',\n fontname='arial'\n )\n\n return ax\n\n\n#-------------------------------------------------------------------------------\n# Add the coloured boxes and text to the semantic example for figure 1\n#-------------------------------------------------------------------------------\ndef add_boxes_semantic(ax, color_dict):\n\n color_list = [ 'red', 'green', 'green', 'purple' ]\n\n text_list = [ 'correct\\nchoice', 'unrelated\\nlure', 'unrelated\\nlure', 'perceptual\\nlure']\n\n for i, (color, text) in enumerate(zip(color_list, text_list)):\n\n # Add the coloured boxes\n ax.add_patch(\n patches.Rectangle(\n (51 + i*163, 455),\n 160,\n 160,\n fill=False,\n edgecolor=color_dict[color],\n linewidth=3\n )\n )\n\n # Add the text\n ax.text(\n 131 + i*163,\n 660,\n text,\n fontsize=15,\n color=color_dict[color],\n horizontalalignment='center',\n verticalalignment='center',\n fontname='arial'\n )\n\n return 
ax\n\n\n#-------------------------------------------------------------------------------\n# Add the panel labels for figure 1\n#-------------------------------------------------------------------------------\ndef add_panel_labels_fig1(ax_list):\n\n coords = (0.05, 0.95)\n color='k'\n fontsize=14\n\n letters = string.ascii_lowercase\n for i, ax in enumerate(ax_list):\n\n ax.text(coords[0], coords[1],\n '({})'.format(letters[i]),\n fontsize=fontsize,\n transform=ax.transAxes,\n color=color,\n horizontalalignment='center',\n verticalalignment='center',\n fontname='arial',\n fontweight='bold'\n )\n\n return ax_list\n\n\n#-------------------------------------------------------------------------------\n# Figure out the min and max of your data and then add 5% padding\n#-------------------------------------------------------------------------------\ndef get_min_max(data):\n\n data_range = np.max(data) - np.min(data)\n data_min = np.min(data) - (data_range * 0.05)\n data_max = np.max(data) + (data_range * 0.05)\n\n return data_min, data_max\n\n\n#-------------------------------------------------------------------------------\n# Add a line to your legend to make it look lovely\n#-------------------------------------------------------------------------------\ndef add_line_to_legend(ax, color_list=['blue'], label_list=['Blue stars'], loc=0, rev=False):\n\n if rev:\n color_list = color_list[::-1]\n label_list = label_list[::-1]\n\n line_list = []\n for color, label in zip(color_list, label_list):\n line_list += [mlines.Line2D([], [], color=color, marker=None, label=label)]\n ax.legend(handles=line_list, loc=loc)\n\n return ax\n\n#-------------------------------------------------------------------------------\n# Report the behavioural statistical models with age and behaviour\n#-------------------------------------------------------------------------------\ndef report_behav_age_correlations(y_name, df):\n\n formula = '{} ~ Age_scan'.format(y_name)\n mod = ols(formula=formula, data=df)\n res_lin = mod.fit()\n\n formula = '{} ~ Age_scan_sq + Age_scan'.format(y_name)\n mod = ols(formula=formula, data=df)\n res_quad = mod.fit()\n\n if 'R2' in y_name and not 'dis' in y_name and not 'sem' in y_name and not y_name.endswith('per'):\n formula = '{} ~ Age_scan_sq + Age_scan + {}'.format(y_name, y_name.replace('R2', 'R1'))\n mod = ols(formula=formula, data=df)\n res_corr = mod.fit()\n\n if 'R1' in y_name and not 'dis' in y_name and not 'sem' in y_name and not y_name.endswith('per'):\n formula = '{} ~ Age_scan_sq + Age_scan + {}'.format(y_name, y_name.replace('R1', 'R2'))\n mod = ols(formula=formula, data=df)\n res_corr = mod.fit()\n\n print ('=== {} ==='.format(y_name))\n print ('Linear w age')\n print (' Beta(Age) = {:2.4f}, P = {:2.4f}'.format(res_lin.params['Age_scan'], res_lin.pvalues['Age_scan']))\n print (' Rsq = {:2.3f}, Rsq_adj = {:2.3f}'.format(res_lin.rsquared, res_lin.rsquared_adj))\n print (' F({}, {}) = {:2.3f}, P = {:2.4f}'.format(res_lin.df_model, res_lin.df_resid, res_lin.fvalue, res_lin.f_pvalue))\n print ('Quadratic w age')\n print (' Beta(AgeSq) = {:2.4f}, P = {:2.4f}'.format(res_quad.params['Age_scan_sq'], res_quad.pvalues['Age_scan_sq']))\n print (' Beta(Age) = {:2.4f}, P = {:2.4f}'.format(res_quad.params['Age_scan'], res_quad.pvalues['Age_scan']))\n print (' Rsq = {:2.3f}, Rsq_adj = {:2.3f}'.format(res_quad.rsquared, res_quad.rsquared_adj))\n print (' F({}, {}) = {:2.3f}, P = {:2.4f}'.format(res_quad.df_model, res_quad.df_resid, res_quad.fvalue, res_quad.f_pvalue))\n if 'R2' in y_name and 
not 'dis' in y_name and not 'sem' in y_name and not y_name.endswith('per'):\n print ('Quadratic w age correcting for accuracy')\n print (' Beta(R1) = {:2.4f}, P = {:2.4f}'.format(res_corr.params[y_name.replace('R2', 'R1')],\n res_corr.pvalues[y_name.replace('R2', 'R1')]))\n print (' Beta(AgeSq) = {:2.4f}, P = {:2.4f}'.format(res_corr.params['Age_scan_sq'], res_corr.pvalues['Age_scan_sq']))\n print (' Beta(Age) = {:2.4f}, P = {:2.4f}'.format(res_corr.params['Age_scan'], res_corr.pvalues['Age_scan']))\n print (' Rsq = {:2.3f}, Rsq_adj = {:2.3f}'.format(res_corr.rsquared, res_corr.rsquared_adj))\n print (' F({}, {}) = {:2.3f}, P = {:2.4f}'.format(res_corr.df_model, res_corr.df_resid, res_corr.fvalue, res_corr.f_pvalue))\n if 'R1' in y_name and not 'dis' in y_name and not 'sem' in y_name and not y_name.endswith('per'):\n print ('Quadratic w age correcting for accuracy')\n print (' Beta(R2) = {:2.4f}, P = {:2.4f}'.format(res_corr.params[y_name.replace('R1', 'R2')],\n res_corr.pvalues[y_name.replace('R1', 'R2')]))\n print (' Beta(AgeSq) = {:2.4f}, P = {:2.4f}'.format(res_corr.params['Age_scan_sq'], res_corr.pvalues['Age_scan_sq']))\n print (' Beta(Age) = {:2.4f}, P = {:2.4f}'.format(res_corr.params['Age_scan'], res_corr.pvalues['Age_scan']))\n print (' Rsq = {:2.3f}, Rsq_adj = {:2.3f}'.format(res_corr.rsquared, res_corr.rsquared_adj))\n print (' F({}, {}) = {:2.3f}, P = {:2.4f}'.format(res_corr.df_model, res_corr.df_resid, res_corr.fvalue, res_corr.f_pvalue))\n\n\n#-------------------------------------------------------------------------------\n# Add the panel labels to figure 2\n#-------------------------------------------------------------------------------\ndef add_panel_labels_fig2(ax_list):\n\n x_list = [ -0.175, -0.115, -0.145 ]\n y = 1.0\n color='k'\n fontsize=18\n\n letters = string.ascii_lowercase\n for i, ax in enumerate(ax_list):\n\n ax.text(x_list[i], y,\n '({})'.format(letters[i]),\n fontsize=fontsize,\n transform=ax.transAxes,\n color=color,\n horizontalalignment='center',\n verticalalignment='center',\n fontname='arial',\n fontweight='bold'\n )\n\n return ax_list\n\n\n#-------------------------------------------------------------------------------\n# Read in the pial surface brain images\n#-------------------------------------------------------------------------------\ndef read_in_brains(results_surface_file):\n\n img = mpimg.imread(results_surface_file)\n\n img = img[21:-120, 44:-44, :]\n\n return img\n\n\n#-------------------------------------------------------------------------------\n# Read in the venn diagram\n#-------------------------------------------------------------------------------\ndef read_in_venn(venn_file):\n\n img = mpimg.imread(venn_file)\n\n return img\n\n\n#-------------------------------------------------------------------------------\n#\n#-------------------------------------------------------------------------------\ndef report_cluster_stats(f_behav, f_mri_cope1, f_mri_cope2, f_mri_cope4, cluster=1):\n\n name_dict = { 1 : 'SEMANTIC > FIX',\n 2 : 'ANALOGY > FIX',\n 4 : 'ANA > SEM' }\n file_dict = { 1 : f_mri_cope1,\n 2 : f_mri_cope2,\n 4 : f_mri_cope4 }\n\n for cope, name in name_dict.items():\n print('=== {} ==='.format(name))\n df = read_in_data(f_behav, file_dict[cope])\n\n print(' Corr cluster {} w age'.format(cluster))\n x_name = 'Age_scan'\n y_name = 'cluster_{}'.format(cluster)\n report_correlation(df, x_name, y_name)\n\n print(' Corr cluster 1 w R2acc')\n x_name = 'R2_percent_acc'\n y_name = 'cluster_{}'.format(cluster)\n report_correlation(df, 
x_name, y_name)\n\n print(' Corr cluster 1 w R2acc covar age')\n x_name = 'R2_percent_acc'\n y_name = 'cluster_{}'.format(cluster)\n covar_name = 'Age_scan'\n report_correlation(df, x_name, y_name, covar_name=covar_name)\n\n\n#-------------------------------------------------------------------------------\n# Report a correlation (partial if covariates are provided)\n#-------------------------------------------------------------------------------\ndef report_correlation(df, x_name, y_name, covar_name=None):\n\n if not covar_name:\n r, p = pearsonr(df[x_name], df[y_name])\n\n else:\n x_res = residuals(df[covar_name], df[x_name])\n y_res = residuals(df[covar_name], df[y_name])\n\n df['{}_res'.format(x_name)] = x_res\n df['{}_res'.format(y_name)] = y_res\n\n r, p = pearsonr(df['{}_res'.format(x_name)], df['{}_res'.format(y_name)])\n\n # Format nicely\n r, p = format_r_p(r, p, r_dp=3)\n\n print(' r {}, p {}'.format(r, p))\n\n\n#-------------------------------------------------------------------------------\n# Format r and p values to print out nicely\n#-------------------------------------------------------------------------------\ndef format_r_p(r, p, r_dp=2):\n\n r = '{:2.{width}f}'.format(r, width=r_dp)\n r = '= {}'.format(r)\n\n if p < 0.001:\n p = '< .001'\n else:\n p = '{:2.3f}'.format(p)\n p = '= {}'.format(p[1:])\n\n return r, p\n\n\n#-------------------------------------------------------------------------------\n# Read in the behavioural and extracted regional MRI values\n#-------------------------------------------------------------------------------\ndef read_in_data(f_behav, f_mri):\n behav_df = pd.read_csv(f_behav)\n\n mri_df = pd.read_csv(f_mri, sep=r\"\\s*\", engine='python')\n mri_df['subid_long'] = mri_df['sub_id']\n\n df = behav_df.merge(mri_df, on='subid_long')\n\n df.loc[:, 'Age_scan_sq'] = df.loc[:, 'Age_scan']**2\n\n return df\n\n#-------------------------------------------------------------------------------\n# Calculate residuals for a given covariate\n#-------------------------------------------------------------------------------\ndef residuals(x, y):\n '''\n A useful little function that correlates\n x and y together to give their residual\n values. 
These can then be used to calculate\n partial correlation values\n '''\n import numpy as np\n\n if len(x.shape) == 1:\n x = x[np.newaxis, :]\n A = np.vstack([x, np.ones(x.shape[-1])]).T\n B = np.linalg.lstsq(A, y)[0]\n m = B[:-1]\n c = B[-1]\n pre = np.sum(m * x.T, axis=1) + c\n res = y - pre\n return res\n\n\n#-------------------------------------------------------------------------------\n# Read in just the left lateral brain image\n#-------------------------------------------------------------------------------\ndef read_in_leftlatbrain(results_surface_file):\n\n img = mpimg.imread(results_surface_file)\n\n img = img[25:-665, 35:-785, :]\n\n return img\n\n#-------------------------------------------------------------------------------\n# Add a circle to figure 5\n#-------------------------------------------------------------------------------\ndef add_circle(ax):\n\n circle = plt.Circle((150, 350), 90,\n linestyle='dashed',\n fill=False,\n edgecolor='k',\n linewidth=4)\n ax.add_artist(circle)\n\n return ax\n\n#-------------------------------------------------------------------------------\n# Make the scatter plots for figure 5\n#-------------------------------------------------------------------------------\ndef figure5_scatterplots(f_behav, f_mri_cope1, f_mri_cope2, f_mri_cope4, ax_list, cluster=1, show_r_p=False):\n '''\n Create a 2x2 grid of scatter plots.\n\n [0,0] - sem > baseline vs age\n [0,1] - sem > baseline vs R2_accuracy PARTIAL\n [0,1] - ana > baseline vs age\n [1,1] - ana > baseline vs R2_accuracy PARTIAL\n\n '''\n df_cope1 = read_in_data(f_behav, f_mri_cope1)\n df_cope2 = read_in_data(f_behav, f_mri_cope2)\n\n colors_dict = { 'Age_scan' : sns.color_palette()[2],\n 'R2_percent_acc' : sns.color_palette()[1] }\n\n ax_list = ax_list.reshape(-1)\n\n x_name_list = [ 'Age_scan', 'R2_percent_acc' ]\n x_label_dict = { 'Age_scan' : 'Age (years)',\n 'Age_scan_res' : 'Age (years) [Partial]',\n 'R2_percent_acc' : 'Analogy accuracy (%)',\n 'R2_percent_acc_res' : 'Analogy accuracy (%) [Partial]',\n 'R2_percent_sem' : 'Semantic errors (%)',\n 'R2_Correct_dividedby_Semantic' : 'Accuracy / Semantic Err' }\n y_label_dict = { 0 : 'Semantic > Baseline',\n 1 : 'Analogy > Baseline' }\n\n df_dict = { 0 : df_cope1,\n 1 : df_cope2 }\n\n for i, ax in enumerate(ax_list[::2]):\n\n x_name = 'Age_scan'\n y_name = 'cluster_{}'.format(cluster)\n\n df = df_dict[i]\n\n sns.regplot(x_name, y_name, data=df, ax=ax, color=colors_dict[x_name])\n\n ax.locator_params(nbins=4)\n\n ax.set_xlim(get_min_max(df[x_name]))\n ax.set_ylim(get_min_max(df[y_name]))\n ax.set_xlabel(x_label_dict[x_name])\n ax.set_ylabel(y_label_dict[i])\n\n r, p = pearsonr(df[x_name], df[y_name])\n r, p = format_r_p(r, p)\n\n ax.axhline(0, color='k', linestyle='dashed', linewidth=1)\n\n if show_r_p:\n ax.text(0.05, 0.95,\n 'r {}\\np {}'.format(r, p),\n transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='top',\n size='large')\n\n if i == 0:\n ax.set_xlabel('')\n ax.yaxis.set_label_coords(-0.18, 0.5)\n\n for i, ax in enumerate(ax_list[1::2]):\n\n x_name = 'R2_percent_acc'\n y_name = 'cluster_{}'.format(cluster)\n covar_name = 'Age_scan'\n\n df = df_dict[i]\n\n x_res = residuals(df[covar_name], df[x_name])\n y_res = residuals(df[covar_name], df[y_name])\n\n df['{}_res'.format(x_name)] = x_res\n df['{}_res'.format(y_name)] = y_res\n\n sns.regplot('{}_res'.format(x_name),\n '{}_res'.format(y_name),\n data=df,\n ax=ax,\n color=colors_dict[x_name])\n\n ax.locator_params(nbins=4)\n\n 
ax.set_xlim(get_min_max(df['{}_res'.format(x_name)]))\n ax.set_ylim(get_min_max(df['{}_res'.format(y_name)]))\n ax.set_xlabel('{} [Partial]'.format(x_label_dict[x_name]))\n ax.set_ylabel('{} [Partial]'.format(y_label_dict[i]))\n\n r, p = pearsonr(df['{}_res'.format(x_name)], df['{}_res'.format(y_name)])\n r, p = format_r_p(r, p)\n\n ax.axhline(0, color='k', linestyle='dashed', linewidth=1)\n\n if show_r_p:\n ax.text(0.05, 0.95,\n 'r {}\\np {}'.format(r, p),\n transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='top',\n size='large')\n\n if i == 0:\n ax.set_xlabel('')\n\n ax.yaxis.set_label_coords(-0.18, 0.5)\n\n sns.despine()\n\n return ax_list\n\n\n#-------------------------------------------------------------------------------\n#\n#-------------------------------------------------------------------------------\n\n\n#-------------------------------------------------------------------------------\n#\n#-------------------------------------------------------------------------------\n\n\n#-------------------------------------------------------------------------------\n# Add panel labels to figure 5\n#-------------------------------------------------------------------------------\ndef add_panel_labels_fig5(ax_brain, ax_list):\n\n # First the letters\n letter_ax_list = [ax_brain, ax_list[0], ax_list[2]]\n x_list = [ 0.1, -0.24, -0.24 ]\n y_list = [ 0.95, 1.0, 1.0 ]\n color='k'\n fontsize=18\n\n letters = string.ascii_lowercase\n for i, ax in enumerate(letter_ax_list):\n\n ax.text(x_list[i], y_list[i],\n '({})'.format(letters[i]),\n fontsize=fontsize,\n transform=ax.transAxes,\n color=color,\n horizontalalignment='center',\n verticalalignment='center',\n fontname='arial',\n fontweight='bold'\n )\n\n # Then the lowercase roman numerals\n color='k'\n fontsize=18\n\n numerals = [ 'i', 'ii', 'i', 'ii' ]\n\n for i, ax in enumerate(ax_list):\n\n ax.text(0.02, 0.95,\n '{}'.format(numerals[i]),\n fontsize=fontsize,\n transform=ax.transAxes,\n color=color,\n horizontalalignment='left',\n verticalalignment='center',\n fontname='arial',\n fontweight='bold'\n )\n return ax_brain, ax_list\n\n\n\n#===============================================================================\n# Now write your main figure & stats reporting functions\n# Note that some of the stats reporting functions are above, these ones below\n# the line are likely to be called directly from the jupyter notebook.\n#===============================================================================\n\n#-------------------------------------------------------------------------------\n# Figure 1\n#-------------------------------------------------------------------------------\ndef make_figure1(analogy_stimulus_file, semantic_stimulus_file, color_dict):\n\n fig, ax_list = plt.subplots(1, 2, figsize=(9,4.5))\n\n # Put the example analogy stimulus on the left\n ax = ax_list[0]\n img_ana = read_in_analogy(analogy_stimulus_file)\n ax.imshow(img_ana)\n\n # Add the coloured boxes & text\n ax = add_boxes_analogy(ax, color_dict)\n\n # Put the example semantic stimulus on the right\n ax = ax_list[1]\n img_sem = read_in_semantic(semantic_stimulus_file)\n ax.imshow(img_sem)\n\n # Add the coloured boxes & text\n ax = add_boxes_semantic(ax, color_dict)\n\n # Add in panel labels\n ax_list = add_panel_labels_fig1(ax_list)\n\n # Turn off both of the axes\n for ax in ax_list:\n ax.set_axis_off()\n\n # Tighten up the layout\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1,\n wspace=0, hspace=0)\n\n # Save the figure\n 
fig.savefig('../FIGURES/Figure1_lowres.png', dpi=150, bbox_inches=0)\n fig.savefig('../FIGURES/Figure1.png', dpi=600, bbox_inches=0)\n fig.savefig('../FIGURES/Figure1.pdf', dpi=600, bbox_inches=0)\n\n plt.show()\n\n#-------------------------------------------------------------------------------\n# Figure 2\n#-------------------------------------------------------------------------------\ndef make_figure2(f_behav):\n\n df = pd.read_csv(f_behav)\n\n df.loc[:, 'Age_scan_sq'] = df.loc[:,'Age_scan']**2\n\n # Define the color list\n color_list = palettable.colorbrewer.get_map('Set1', 'qualitative', 5).mpl_colors\n\n colors_dict = { 'R1_percent_acc' : color_list[0],\n 'R2_percent_acc' : color_list[1],\n 'R1_meanRTcorr_cor' : color_list[0],\n 'R2_meanRTcorr_cor' : color_list[1],\n 'R2_percent_dis' : color_list[2],\n 'R2_percent_per' : color_list[3],\n 'R2_percent_sem' : color_list[4] }\n\n fig, ax_list = plt.subplots(1,3, figsize=(16,4.5))\n\n ax_list = ax_list.reshape(-1)\n\n x_label_dict = { 'Age_scan' : 'Age (years)' }\n y_label_dict = { 0 : 'Accuracy (% resp)',\n 1 : 'Reaction time (s)',\n 2 : 'Analogy error rate (% resp)' }\n y_measures_dict = { 0 : ['R1_percent_acc', 'R2_percent_acc'],\n 1 : ['R1_meanRTcorr_cor', 'R2_meanRTcorr_cor'],\n 2 : ['R2_percent_dis', 'R2_percent_per', 'R2_percent_sem']}\n y_measures_label_dict = { 'R1_percent_acc' : 'Semantic',\n 'R2_percent_acc' : 'Analogy',\n 'R1_meanRTcorr_cor' : 'Semantic',\n 'R2_meanRTcorr_cor' : 'Analogy',\n 'R2_percent_dis' : 'Unrelated',\n 'R2_percent_per' : 'Perceptual',\n 'R2_percent_sem' : 'Semantic' }\n legend_loc_dict = { 0 : 4,\n 1 : 1,\n 2 : 1 }\n legend_rev_dict = { 0 : False,\n 1 : False,\n 2 : True }\n\n for i, ax in enumerate(ax_list):\n\n x_name = 'Age_scan'\n y_name_list = y_measures_dict[i]\n\n colors_list = []\n labels_list = []\n\n for y_name in y_name_list:\n sns.regplot(x_name, y_name, data=df,\n ax=ax,\n color=colors_dict[y_name],\n order=2)\n colors_list += [colors_dict[y_name]]\n labels_list += [y_measures_label_dict[y_name]]\n\n ax.locator_params(nbins=6, axis='y')\n ax.set_xticks([6, 10, 14, 18])\n\n ax.set_xlim(get_min_max(df[x_name]))\n ax.set_ylim(get_min_max(df[y_name]))\n ax.set_xlabel(x_label_dict[x_name])\n ax.set_ylabel(y_label_dict[i])\n\n add_line_to_legend(ax,\n color_list=colors_list,\n label_list=labels_list,\n loc=legend_loc_dict[i],\n rev=legend_rev_dict[i])\n\n sns.despine()\n\n # Add in panel labels\n ax_list = add_panel_labels_fig2(ax_list)\n\n # Tight layout\n plt.tight_layout()\n\n # Save the figure\n fig.savefig('../FIGURES/Figure2_lowres.png', dpi=150, bbox_inches=0)\n fig.savefig('../FIGURES/Figure2.png', dpi=600, bbox_inches=0)\n fig.savefig('../FIGURES/Figure2.pdf', dpi=600, bbox_inches=0)\n\n plt.show()\n\n#-------------------------------------------------------------------------------\n# Report the descriptive behavoural stats\n#-------------------------------------------------------------------------------\ndef report_behav_stats(f_behav):\n\n df = pd.read_csv(f_behav)\n df.loc[:, 'Age_scan_sq'] = df.loc[:,'Age_scan']**2\n\n print('===== Behavioural Statistics ======')\n for measure in ['R1_percent_acc', 'R2_percent_acc',\n 'R1_meanRTcorr_cor', 'R2_meanRTcorr_cor',\n 'R2_percent_sem', 'R2_percent_per', 'R2_percent_dis']:\n print('{}: N = {:2.0f}, M = {:2.3f}, SD = {:2.3f}'.format(measure, df[measure].notnull().count(), df[measure].mean(), df[measure].std()))\n\n if 'R2' in measure and not 'dis' in measure and not 'sem' in measure and not measure.endswith('per'):\n measure_diff = 
measure.replace('R2', 'R2_sub_R1')\n df[measure_diff] = df[measure] - df[measure.replace('R2', 'R1')]\n print('{}: M = {:2.3f}, SD = {:2.3f}'.format(measure_diff, df[measure_diff].mean(), df[measure_diff].std()))\n print(' N R2 gt R1 = {}, N R2 lt R1 = {}, N same = {}'.format(np.sum(df[measure_diff]>0),\n np.sum(df[measure_diff]<0),\n np.sum(df[measure_diff]==0)))\n\n if 'R1' in measure:\n t, p = ttest_rel(df[measure], df[measure.replace('R1', 'R2')])\n print(' R1 vs R2 (paired): t({:2.0f}) = {:2.3f}, p = {:2.3f}'.format(df[measure].count()-1, t, p))\n\n if 'sem' in measure:\n t, p = ttest_rel(df['R2_percent_sem'], df['R2_percent_per'])\n print(' sem vs per (paired): t({:2.0f}) = {:2.3f}, p = {:2.3f}'.format(df['R2_percent_per'].count()-1, t, p))\n if 'dis' in measure:\n t, p = ttest_rel(df['R2_percent_per'], df['R2_percent_dis'])\n print(' per vs dis (paired): t({:2.0f}) = {:2.3f}, p = {:2.3f}'.format(df['R2_percent_per'].count()-1, t, p))\n\n print('\\n===== Correlations with age =====')\n measures_list = ['R1_percent_acc', 'R2_percent_acc',\n 'R1_meanRTcorr_cor', 'R2_meanRTcorr_cor',\n 'R2_percent_dis', 'R2_percent_per', 'R2_percent_sem']\n\n for measure in measures_list:\n report_behav_age_correlations(measure, df)\n\n\n#-------------------------------------------------------------------------------\n# Figure 3\n#-------------------------------------------------------------------------------\ndef make_figure3(mean_results_surface_file, venn_file):\n\n fig, ax = plt.subplots(figsize=(6,4.5))\n\n # Add in the brains\n img_brains = read_in_brains(mean_results_surface_file)\n\n ax.imshow(img_brains)\n\n # Turn off the axes\n ax.set_axis_off()\n\n # Overlay the venn diagram\n ax_venn = fig.add_axes([0.25, 0.21, 0.5, 0.5]) # inset axes\n img_venn = read_in_venn(venn_file)\n\n ax_venn.imshow(img_venn)\n\n # Turn off the axes\n ax_venn.set_axis_off()\n\n # Tighten up the layout\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1,\n wspace=0, hspace=0)\n\n # Save the figure\n fig.savefig('../FIGURES/Figure3_lowres.png', dpi=150, bbox_inches=0)\n fig.savefig('../FIGURES/Figure3.png', dpi=600, bbox_inches=0)\n fig.savefig('../FIGURES/Figure3.pdf', dpi=600, bbox_inches=0)\n\n plt.show()\n\n#-------------------------------------------------------------------------------\n# Figure 4\n#-------------------------------------------------------------------------------\ndef make_figure4(corrage_results_surface_file, venn_file, f_behav, f_mri_cope1, f_mri_cope2, f_mri_cope4):\n\n # Report the cluster statistics\n report_cluster_stats(f_behav, f_mri_cope1, f_mri_cope2, f_mri_cope4, cluster=1)\n\n # Now make the figure\n fig, ax = plt.subplots(figsize=(6,4.5))\n\n # Add in the brains\n img_brains = read_in_brains(corrage_results_surface_file)\n\n ax.imshow(img_brains)\n\n # Turn off the axes\n ax.set_axis_off()\n\n # Overlay the venn diagram\n ax_venn = fig.add_axes([0.25, 0.21, 0.5, 0.5]) # Add in a new axis\n img_venn = read_in_venn(venn_file)\n\n ax_venn.imshow(img_venn)\n\n # Turn off the axes\n ax_venn.set_axis_off()\n\n # Tighten up the layout\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1,\n wspace=0, hspace=0)\n\n # Save the figure\n fig.savefig('../FIGURES/Figure4_lowres.png', dpi=150, bbox_inches=0)\n fig.savefig('../FIGURES/Figure4.png', dpi=600, bbox_inches=0)\n fig.savefig('../FIGURES/Figure4.pdf', dpi=600, bbox_inches=0)\n\n plt.show()\n\n#-------------------------------------------------------------------------------\n# Figure 
5\n#-------------------------------------------------------------------------------\ndef make_figure5(corracc_results_surface_file, f_behav, f_mri_cope1, f_mri_cope2, f_mri_cope4):\n\n # Report the cluster statistics\n report_cluster_stats(f_behav, f_mri_cope1, f_mri_cope2, f_mri_cope4, cluster=1)\n\n fig, ax_list = plt.subplots(2,2, figsize=(14,7.5))\n\n # Add in left lateral brain\n ax_brain = fig.add_axes([-0.01, 0.2, 0.36, 0.6]) # Add in the brain axis\n\n img_brain = read_in_leftlatbrain(corracc_results_surface_file)\n\n ax_brain.imshow(img_brain)\n\n # Add the dashed circle\n ax_brain = add_circle(ax_brain)\n\n # Turn off the axes\n ax_brain.set_axis_off()\n\n # Shift these subplots over to the right\n # to make space for the brain\n fig.subplots_adjust(left=0.42, right=0.99, bottom=0.1, top=0.97, wspace=0.3)\n\n # Add in the scatter plots\n ax_list = figure5_scatterplots(f_behav, f_mri_cope1, f_mri_cope2, f_mri_cope4,\n ax_list, cluster=1, show_r_p=False)\n\n # Add in the labels\n ax_brain, ax_list = add_panel_labels_fig5(ax_brain, ax_list)\n\n # Save the figure\n fig.savefig('../FIGURES/Figure5_lowres.png', dpi=150, bbox_inches=0)\n fig.savefig('../FIGURES/Figure5.png', dpi=600, bbox_inches=0)\n fig.savefig('../FIGURES/Figure5.pdf', dpi=600, bbox_inches=0)\n\n plt.show()\n","repo_name":"KirstieJane/NORA_WhitakerVendetti_DevSci2017","sub_path":"SCRIPTS/visan_results_figures.py","file_name":"visan_results_figures.py","file_ext":"py","file_size_in_byte":31583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"2056587967","text":"from __future__ import print_function\nimport argparse\n\nimport pandas as pd\nfrom rootpy.root2hdf5 import root2hdf5\n\nparser = argparse.ArgumentParser(description='Convert ntuple from ROOT file to hdf5 store')\nparser.add_argument('root_file', type=argparse.FileType(), help='Path to the ROOT file')\nparser.add_argument('-i', '--input', help='Input tuple (default: %(default)s)', default='/ToolSvc.PatDebugTTTruthTool/DownstreamDebugTuple')\nparser.add_argument('-o', '--output', help='Output table in hdf5 (default: %(default)s)', default='DownstreamDebugTuple')\n\nargs = parser.parse_args()\n\ninput_filename = args.root_file.name\noutput_filename = input_filename[:input_filename.rindex('.root')] + '.h5'\n\nprint('Converting {} to {}'.format(input_filename, output_filename))\n\nrpath = ''\nif '/' in args.input[1:]:\n rpath = args.input[:args.input.rindex('/')]\n\nroot2hdf5(input_filename, output_filename, rpath)\n\nhdf = pd.HDFStore(output_filename)\ntable = hdf.get(args.input)\nhdf.put(args.output, table, 'table')\nhdf.remove(args.input)\nhdf.close()\n","repo_name":"jchmura/masters-analysis","sub_path":"root/convert_root_hdf5.py","file_name":"convert_root_hdf5.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5910351823","text":"from PIL import Image\nfrom skimage.measure import compare_ssim\nimport math\nimport numpy as np\nimport openface\nimport os\nimport argparse\nimport cv2\n\ndataroot = '/dataset/AffineGAN_dataset/'\nconv_pattern = 'img_{0:04d}.png'\nvideoGAN_pattern = 'gen_1{0:04d}.png'\nflow_pattern = '001_{0:02d}_pred.png'\nganim_pattern = '{0:d}_out.jpg'\nconv_start = 1\nconv_end = 10\nvideoGAN_start = 1\nvideoGAN_end = 17\nflow_start = 0\nflow_end = 17\nours_start = 0\nours_end = 16\nganim_start = 0\nganim_end = 16\n\nsize = 128\ngray = 'RGB'\nfileDir = os.path.dirname(os.path.realpath(__file__))\nmodelDir = os.path.join(fileDir, '.', 'models')\ndlibModelDir = os.path.join(modelDir, 'dlib')\nopenfaceModelDir = os.path.join(modelDir, 'openface')\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--dlibFacePredictor', type=str, help=\"Path to dlib's face predictor.\",\n default=os.path.join(dlibModelDir, \"shape_predictor_68_face_landmarks.dat\"))\nparser.add_argument('--networkModel', type=str, help=\"Path to Torch network model.\",\n default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))\nparser.add_argument('--imgDim', type=int,\n help=\"Default image dimension.\", default=96)\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--mode', type=str, default='train')\n\nargs = parser.parse_args()\nnet = openface.TorchNeuralNet(args.networkModel, args.imgDim)\nalign = openface.AlignDlib(args.dlibFacePredictor)\n\n\ndef psnr(img1, img2):\n mse = np.mean((img1 - img2) ** 2)\n if mse == 0:\n return 100\n PIXEL_MAX = 255.0\n return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))\n\n\ndef get_ssim_psnr(ref_dir, pred_dir, pattern, start, end):\n ssim_conv = 0.\n psnr_conv = 0.\n imgs = sorted(os.listdir(ref_dir))\n img_num = len(imgs)\n for i in range(start, end):\n a = Image.open(\n os.path.join(ref_dir, imgs[int((i - 1) * img_num / (end - start))]).convert(gray).resize((size, size)))\n a = np.array(a)\n b = Image.open(pred_dir + pattern.format(i)).convert(gray).resize((size, size))\n b = np.array(b)\n ssim_conv += compare_ssim(a, b, multichannel=gray == 'RGB')\n psnr_conv += psnr(b, a)\n return ssim_conv / (end - start), psnr_conv / (end - start)\n\n\ndef get_rep(img_path):\n bgrImg = cv2.imread(img_path)\n bgrImg = cv2.resize(bgrImg, (size, size))\n rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)\n bb = align.getLargestFaceBoundingBox(rgbImg)\n if bb is None:\n return np.random.rand(128)\n alignedFace = align.align(args.imgDim, rgbImg, bb,\n landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)\n rep1 = net.forward(alignedFace)\n return rep1\n\n\ndef get_ACD(ref_dir, pred_dir, pattern, start, end):\n ACD_m = 0.\n ACD_i = 0.\n ACD_c = 0.\n ACD_ref_i = 0.\n ACD_ref_c = 0.\n imgs = sorted(os.listdir(ref_dir))\n img_num = len(imgs)\n first = get_rep(os.path.join(ref_dir, imgs[0]))\n pred_features = []\n ref_features = []\n for i in range(start, end):\n a = get_rep(os.path.join(ref_dir, imgs[int((i - 1) * img_num / (start - end))]))\n b = get_rep(os.path.join(pred_dir, pattern.format(i)))\n pred_features.append(b)\n ref_features.append(a)\n ACD_m += np.linalg.norm(a - b, 2)\n ACD_i += np.linalg.norm(b - first, 2)\n ACD_ref_i += np.linalg.norm(a - first, 2)\n for i in range(len(pred_features)):\n for j in range(i, len(pred_features)):\n ACD_c += np.linalg.norm(pred_features[i] - pred_features[j], 2)\n ACD_ref_c += np.linalg.norm(ref_features[i] - ref_features[j], 2)\n return ACD_m / (end - start), ACD_i / (end - start), ACD_c / (\n 
len(pred_features) * (len(pred_features) - 1)) * 2, ACD_ref_i / (end - start), ACD_ref_c / (\n len(pred_features) * (len(pred_features) - 1)) * 2\n\nresults_root = '/dataset/Results'\nacd_m = [0.] * 5\nacd_i = [0.] * 5\nacd_c = [0.] * 5\nacd_ref_i = [0.] *5\nacd_ref_c = [0.] * 5\nimg_num = 0\nfor category in os.listdir(dataroot):\n print(category, 'begin')\n ref_root = os.path.join(dataroot, category, 'test', 'img')\n imgs = sorted([f for f in os.listdir(ref_root)])\n img_num += len(imgs)\n for idx in range(len(imgs)):\n id = imgs[idx]\n ours_pattern = id + '_fake_B_list{0:d}.png'\n ref_dir = os.path.join(ref_root, id)\n conv_pred_dir = os.path.join(results_root, 'convlstm', 'results_valid', category, 'vid_{0:04d}'.format(idx + 1))\n videoGAN_pred_dir = os.path.join(results_root, 'videogan', 'results_valid', category, 'vis')\n flow_pred_dir = os.path.join(results_root, 'flowground', 'results_valid', category, id)\n ours_pred_dir = os.path.join(results_root, 'ours', 'results_valid', category, 'test_latest', 'images')\n ganim_pred_dir = os.path.join(results_root, 'ganimation', 'results_valid', category, id)\n acds_conv = get_ACD(ref_dir, conv_pred_dir, conv_pattern, conv_start, conv_end)\n acds_video = get_ACD(ref_dir, videoGAN_pred_dir, videoGAN_pattern, videoGAN_start, videoGAN_end)\n acds_flow = get_ACD(ref_dir, flow_pred_dir, flow_pattern, flow_start, flow_end)\n acds_ganim = get_ACD(ref_dir, ganim_pred_dir, ganim_pattern, ganim_start, ganim_end)\n acds_ours = get_ACD(ref_dir, ours_pred_dir, ours_pattern, ours_start, ours_end)\n acd_m[0] += acds_conv[0]\n acd_m[1] += acds_video[0]\n acd_m[2] += acds_flow[0]\n acd_m[3] += acds_ganim[0]\n acd_m[4] += acds_ours[0]\n acd_i[0] += acds_conv[1]\n acd_i[1] += acds_video[1]\n acd_i[2] += acds_flow[1]\n acd_i[3] += acds_ganim[1]\n acd_i[4] += acds_ours[1]\n acd_c[0] += acds_conv[2]\n acd_c[1] += acds_video[2]\n acd_c[2] += acds_flow[2]\n acd_c[3] += acds_ganim[2]\n acd_c[4] += acds_ours[2]\n acd_ref_i[0] += acds_conv[3]\n acd_ref_i[1] += acds_video[3]\n acd_ref_i[2] += acds_flow[3]\n acd_ref_i[3] += acds_ganim[3]\n acd_ref_i[4] += acds_ours[3]\n acd_ref_c[0] += acds_conv[4]\n acd_ref_c[1] += acds_video[4]\n acd_ref_c[2] += acds_flow[4]\n acd_ref_c[3] += acds_ganim[4]\n acd_ref_c[4] += acds_ours[4]\n print(category, 'done')\n\nprint(np.array(acd_m) / img_num)\nprint(np.array(acd_i) / img_num)\nprint(np.array(acd_c) / img_num)\nprint(np.array(acd_ref_i) / img_num)\nprint(np.array(acd_ref_c) / img_num)\n","repo_name":"sunlightsgy/AffineGAN","sub_path":"util/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"81"}
+{"seq_id":"35776759996","text":"import json\nimport logging\nimport logging.config\n\n\nfrom flask import render_template, request\nfrom flask_login import current_user, login_required\n\nfrom bookcrossing.views.books.base_book_view import BaseBookView\nfrom bookcrossing.models.book import BookModel\nfrom bookcrossing.models.category import CategoryModel\nfrom bookcrossing.forms.book import AddBookForm, UpdateBookForm\nfrom bookcrossing.config import LOGGING\n\nlogging.config.dictConfig(LOGGING)\n\n\nclass BooksView(BaseBookView):\n @login_required\n def get(self):\n \"\"\"\n Get all books from database\n \"\"\"\n form = AddBookForm(request.form)\n shelf = self.make_shelf(current_user.id)\n logging.debug('GET. All books of {} have been shown.'\n .format(current_user.login))\n return render_template('books.html', form=form.data, shelf=shelf)\n\n\n @login_required\n def post(self):\n \"\"\"\n Create book\n \"\"\"\n form = AddBookForm(request.form)\n print(request.form)\n if form.validate():\n logging.debug('POST. Add book form validated.')\n self.create_model(BookModel, CategoryModel, form.data)\n shelf = self.make_shelf(current_user.id)\n logging.debug('Hello from POST. Book created.'\n 'All books of {} have been shown.'\n .format(current_user.login))\n return render_template('books.html', form=form.data, shelf=shelf)\n\n else:\n logging.error('Hello from POST. Creation book error.')\n return json.dumps(form.errors)\n\n @login_required\n def put(self):\n \"\"\"\n Update book\n \"\"\"\n form = UpdateBookForm(request.form)\n if form.validate():\n logging.debug('Hello from PUT. Update book form validated.')\n self.update_model(BookModel, form.data)\n shelf = self.make_shelf(current_user.id)\n logging.debug('Hello from PUT. Book updated.'\n 'All books of {} have been shown.'\n .format(current_user.login))\n return render_template('books.html', form=form.data, shelf=shelf)\n\n else:\n logging.error('Hello from PUT. Update book error.')\n return json.dumps(form.errors)\n\n @login_required\n def delete(self):\n \"\"\"\n Delete book\n \"\"\"\n book = request.get_json()\n self.delete_model(book['id'], BookModel)\n logging.debug('DELETE. Book deletion error.')\n return json.dumps({'delete': 'ok', 'id': book['id']})\n\n\nclass BookProfileView(BaseBookView):\n def get(self, book_id):\n book, owner = self.get_book_profile(book_id)\n logging.debug('Hello from GET. Book {} profile template rendered.'\n .format(book['title']))\n return render_template('book_profile.html', book_id=book_id, book=book, owner=owner)\n","repo_name":"zkite/bookcrossing","sub_path":"bookcrossing/views/books/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"221676921","text":"import pandas as pd \nimport matplotlib.pyplot as plt \nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\ndf = pd.read_csv(\"../consumo_cerveja.csv\",sep=\";\")\n\nx = df[['temp_max']]\ny = df[['consumo']]\n\nX_train, X_test, y_train, y_test = train_test_split(x, y)\n\nlr = LinearRegression().fit(X_train, y_train)\n\ny_pred = lr.predict(X_test)\nx_pred = lr.predict(y_test)\nval2 = zip(x_pred,y_pred)\n\nvals = zip(y_pred, y_test.values)\n\nplt.scatter(X_test, y_test, color=\"black\")\nplt.plot(X_test, y_pred, color=\"blue\", linewidth=3)\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"VarleiDeCesare/IA","sub_path":"aula05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"7447678748","text":"from .base import *\n\nDEBUG = False\n\nALLOWED_HOSTS = ['localhost', '127.0.0.1', 'lifelog.piechika.com']\nCSRF_TRUSTED_ORIGINS = ['https://lifelog.piechika.com']\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n\nSTATIC_ROOT = os.getenv('STATIC_ROOT')\nMEDIA_ROOT = os.getenv('MEDIA_ROOT')","repo_name":"kettleTeacues/lifelog","sub_path":"lifelogProject/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"26659693635","text":"import geopandas as gpd\nimport numpy as np\nfrom shapely.geometry import Point\nfrom tqdm import tqdm\nimport warnings\nimport xarray as xr\n\nwarnings.filterwarnings(\"ignore\")\n\n\n#获取仿射矩阵信息\ndef Getgeotrans():\n dataset = xr.open_rasterio('./1931_modified.tif')\n return dataset.transform\n\ndef pixel2Coord(Xpixel,Ypixel,GeoTransform):\n XGeo = GeoTransform[0]+GeoTransform[1]*Xpixel+Ypixel*GeoTransform[2]\n YGeo = GeoTransform[3]+GeoTransform[4]*Xpixel+Ypixel*GeoTransform[5]\n return XGeo,YGeo\n\nPATH = \"../inventory_sgi1931_r2022/SGI_1931.shp\"\nseg = gpd.read_file(PATH)\n\nlabels = np.load(\"./mod_labels_small.npy\")\ny_len, x_len = labels.shape\nprint(\"x_len: \",x_len )\nprint(\"y_len: \",y_len )\n\nurbanData = xr.open_rasterio('./1931_modified.tif')\nur = xr.DataArray(urbanData, name='myData')\nur = ur.to_dataframe().reset_index() \nur = ur[ur['band'] == 1]\nx_l = ur['x']\ny_l = ur['y']\n\n\nx_list = []\ny_list = []\npoints = []\n\nend_point = 0\n\nfor i in tqdm(range(end_point, x_len)):\n for j in range(y_len):\n x_list.append(i)\n y_list.append(j)\n points.append(Point(x_l[i + j * x_len], y_l[i + j * x_len]))\n if j % 3000 == 0 or j == y_len - 1:\n res = gpd.GeoDataFrame({'geometry': points, 'x':x_list, 'y':y_list}).sjoin(seg, how=\"inner\", predicate='intersects')\n for _, (x,y) in enumerate(zip(res.x, res.y)):\n labels[y,x] = True\n x_list = []\n y_list = []\n points = []\n if i % 500 == 0:\n np.save(\"./mod_labels_small_1931.npy\", labels)\n\nnp.save(\"./mod_labels_small_1931.npy\", labels)","repo_name":"TJZhiyeWang/Digital_Humanity_Project","sub_path":"label/generate_label_modified.py","file_name":"generate_label_modified.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71758334026","text":"import tkinter as tk\nfrom tkinter import* \nfrom PIL import Image, ImageTk\nfrom tkinter import Canvas, PhotoImage\nimport random\nimport webbrowser\n\ndef sum2():\n noodlelist=[\"ก๋วยเตี๋ยวน้ำใส\",\"ก๋วยเตี๋ยวน้ำตก\",\"ก๋วยเตี๋ยวเรือ\",\"ก๋วยเตี๋ยวต้มยำน้ำข้น/ใส\",\"ก๋วยเตี๋ยวต้มยำแห้ง\",\n \"เย็นตาโฟ\",\"ราดหน้า\",\"ก๋วยเตี๋ยวคั่วไก่\",\"เกี๊ยวน้ำ\",\"ข้าวซอย\",\"สุกี้น้ำ\",\"สุกี้แห้ง\",\n \"ก๋วยจั๊บ\"]#ลิสต์ก๋วยเตี๋ยว\n\n caldict = {\"ก๋วยเตี๋ยวน้ำใส\":147,\n \"ก๋วยเตี๋ยวน้ำตก\":462,\n \"ก๋วยเตี๋ยวเรือ\":180,\n \"ก๋วยเตี๋ยวต้มยำน้ำข้น/ใส\":335,\n \"ก๋วยเตี๋ยวต้มยำแห้ง\":310,\n \"เย็นตาโฟ\":420,\n \"ราดหน้า\":690,\n \"ก๋วยเตี๋ยวคั่วไก่\":435,\n \"เกี๊ยวน้ำ\":275,\n \"ข้าวซอย\":395,\n \"สุกี้น้ำ\":345,\n \"สุกี้แห้ง\":350,\n \"ก๋วยจั๊บ\":240}\n\n linkdict = {\"ก๋วยเตี๋ยวน้ำใส\":\"https://www.wongnai.com/recipes/ugc/6c273742a82a4d588d5a610113a62bf9\",\n \"ก๋วยเตี๋ยวน้ำตก\":\"https://cooking.kapook.com/view117325.html\",\n \"ก๋วยเตี๋ยวเรือ\":\"https://www.wongnai.com/recipes/boat-noodle\",\n \"ก๋วยเตี๋ยวต้มยำน้ำข้น/ใส\":\"https://www.wongnai.com/recipes/tom-yum-noodle\",\n \"ก๋วยเตี๋ยวต้มยำแห้ง\":\"https://cookpad.com/th/recipes/9052448-%E0%B8%81%E0%B8%A7%E0%B8%A2%E0%B9%80%E0%B8%95%E0%B8%A2%E0%B8%A7%E0%B9%81%E0%B8%AB%E0%B8%87%E0%B8%95%E0%B8%A1%E0%B8%A2%E0%B8%B3%E0%B9%82%E0%B8%9A%E0%B8%A3%E0%B8%B2%E0%B8%93\",\n \"เย็นตาโฟ\":\"https://www.wongnai.com/recipes/yen-ta-four\",\n \"ราดหน้า\":\"https://www.wongnai.com/recipes/thai-noodle-with-pork-in-gravy\",\n \"ก๋วยเตี๋ยวคั่วไก่\":\"https://www.happyfresh.co.th/blog/recipe/stir-fried-noodles-with-chicken/\",\n \"เกี๊ยวน้ำ\":\"https://www.wongnai.com/recipes/dumpling-soup\",\n \"ข้าวซอย\":\"https://www.wongnai.com/recipes/northern-thai-curried-noodles-soup\",\n \"สุกี้น้ำ\":\"https://www.wongnai.com/recipes/homemade-sukiyaki\",\n \"สุกี้แห้ง\":\"https://www.wongnai.com/recipes/ugc/47161c946e194a3d82defc69589cf035\",\n \"ก๋วยจั๊บ\":\"https://www.wongnai.com/recipes/chinese-roll-noodle-soup\"}\n\n pricedict = {\"ก๋วยเตี๋ยวน้ำใส\":35,\n \"ก๋วยเตี๋ยวน้ำตก\":40,\n \"ก๋วยเตี๋ยวเรือ\":40,\n \"ก๋วยเตี๋ยวต้มยำน้ำข้น/ใส\":40,\n \"ก๋วยเตี๋ยวต้มยำแห้ง\":40,\n \"เย็นตาโฟ\":40,\n \"ราดหน้า\":40,\n \"ก๋วยเตี๋ยวคั่วไก่\":45,\n \"เกี๊ยวน้ำ\":45,\n \"ข้าวซอย\":45,\n \"สุกี้น้ำ\":40,\n \"สุกี้แห้ง\":40,\n \"ก๋วยจั๊บ\":35}\n\n immagedict ={\"ก๋วยเตี๋ยวน้ำใส\":\"น้ำใส.jpg\",\n \"ก๋วยเตี๋ยวน้ำตก\":\"น้ำตก.jpeg\",\n \"ก๋วยเตี๋ยวเรือ\":\"เรือ.jpg\",\n \"ก๋วยเตี๋ยวต้มยำน้ำข้น/ใส\":\"ต้มยำน้ำข้น.jpg\",\n \"ก๋ว���เตี๋ยวต้มยำแห้ง\":\"ต้มยำแห้ง.jpg\",\n \"เย็นตาโฟ\":\"เย็นตาโฟ.jpg\",\n \"ราดหน้า\":\"ราดหน้า.jpg\",\n \"ก๋วยเตี๋ยวคั่วไก่\":\"คั่วไก่.jpg\",\n \"เกี๊ยวน้ำ\":\"เกี๊ยวน้ำ.jpg\",\n \"ข้าวซอย\":\"ช้าวซอย.jpg\",\n \"สุกี้น้ำ\":\"สุกี้น้ำ.jpg\",\n \"สุกี้แห้ง\":\"สุกี้แห้ง.jpg\",\n \"ก๋วยจั๊บ\":\"ก๋วยจั๊บ.jpg\"}\n\n menu = random.choice(noodlelist)#โค้ดที่เป็นตัวสุ่ม เวลาจะสุ่มอีกครั้งสามารถกลับมาที่โค้ดนี้ได้เลย\n cal = caldict[menu]#ดึงค่าแคลลอลี่จากดิจ\n link = linkdict[menu]#ดึงลิงค์จากดิจ\n price = pricedict[menu]#ดึงราคาจากดิจราคา\n immage= immagedict[menu]#ดึงชื่อรูปจากดิจ\n\n def callback(url):\n webbrowser.open_new(url)\n\n\n window = tk.Toplevel()\n window.geometry(\"600x800\")\n window.minsize(600, 800)\n window.maxsize(600, 800)\n window.title(\"วันนี้กินไรดี?\")\n window.configure(bg=\"#54A2A4\")\n\n frame1 = tk.Frame(master = window)\n label1 = tk.Label(master = frame1, text=\"Frame 1\", width=100, height=100)\n\n bg3 = Image.open(\"D:\\\\งาน\\\\comproglab\\\\วันนี้กินไรดี กลุ่ม 14\\\\Project\\\\draft_p3_bg.jpg\")\n bg3 = 
bg3.resize((600,800), Image.ANTIALIAS)\n label_bg3 = ImageTk.PhotoImage(bg3)\n labelbg3 = tk.Label(master = frame1 ,image=label_bg3)\n labelbg3.place(x=-2,y=0)\n\n frame1.pack()\n label1.pack()\n\n #pic of food\n label_2 = tk.Label(master = frame1, text=\"Frame 1\", width=100, height=100)\n bg = Image.open(immage)\n bg = bg.resize((300,230), Image.ANTIALIAS)\n label_bg = ImageTk.PhotoImage(bg)\n label7 = tk.Label(master = frame1,image=label_bg)\n label7.place(x=152, y=205)\n\n label_2.pack()\n\n\n label = tk.Label(master = window,text = menu,font=\"Ayuthaya 30\",width=10,height=1,fg=\"white\",bg=\"#FCAF38\")\n label1 = tk.Label(master = window,text = str(price) + \" บาท\",font=\"Ayuthaya 20\",width=13,height=3,fg=\"white\",bg=\"#674A40\")\n label2 = tk.Label(master = window,text = str(cal) + \" แคลอรี\",font=\"Ayuthaya 20\",width=13,height=3,fg=\"white\",bg=\"#674A40\")\n Button3 = tk.Button(master = window,text = \"วิธีทำ\",font=\"Ayuthaya 20\",width=12,height=1)\n Button4 = tk.Button(master = window,text = \"สุ่มอีกครั้ง\",font=\"Ayuthaya 18\",width=10,height=1,fg=\"white\", bg=\"#F95335\", command = window.destroy)\n\n Button3.bind(\"\",lambda e: callback(link))\n\n label.place(x=200,y=60)\n label1.place(x=60,y=490)\n label2.place(x=340,y=490)\n Button3.place(x=205,y=640)\n Button4.place(x=410,y=725)\n \n \n window.mainloop()\n","repo_name":"ncqxm/305272-Advanced-Computer-Programming","sub_path":"Project/program2.py","file_name":"program2.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12364269847","text":"import argparse\nimport json\nimport logging\nimport pathlib\nimport pickle\nimport os\nimport random\nimport time\nimport yaml\n\nimport numpy as np\nimport torch\nfrom transformers import (\n AutoTokenizer,\n AutoModel,\n PreTrainedTokenizer,\n PreTrainedModel\n)\n\nfrom utils.dataloader import load_corpus, CorpusDataset\nfrom utils.embed import encode, batch_encode\n\nrandom.seed(0)\n\n\ndef embed_documents(\n logger: logging.Logger,\n tokenizer: PreTrainedTokenizer,\n embedder: PreTrainedModel,\n corpus: CorpusDataset,\n max_sequence_length: int = 128,\n batch_size: int = 1,\n n_runs: int = 100,\n output_file: str = None,\n benchmark_mode: bool = False) -> None:\n \"\"\"Embed documents using a pretrained tokenizer and embedder\n\n Args:\n logger (logging.Logger):\n logger\n tokenizer (PreTrainedTokenizer):\n pretrained tokenizer to to use for tokenizing raw text.\n embedder (PreTrainedModel):\n pretrained model to use to embed documents.\n corpus (CorpusDataset):\n CorpusDataset to embed.\n max_seq_length (int, optional):\n max sequence length. Defaults to 128.\n batch_size (int, optional):\n batch size. Defaults to 1.\n n_runs (int, optional):\n number of iterations for benchmarks. Defaults to 100.\n output_file (str, optional):\n file to output embeddings to. Defaults to None.\n benchmark_mode (bool, optional):\n whether to run in benchmark mode. Defaults to False.\n \"\"\"\n\n # Run benchmarks\n if benchmark_mode:\n\n times = []\n # Generate random sample inputs for benchmarking execution time\n sample_inputs = [\n random.sample(\n range(tokenizer.vocab_size), max_sequence_length) for\n _ in range(batch_size)]\n\n sample_inputs = tokenizer.batch_decode(sample_inputs)\n for i in range(10 + n_runs):\n start = time.time()\n encode(\n tokenizer, embedder,\n sample_inputs, max_length=max_sequence_length)\n end = time.time()\n if i > 10:\n times.append(end - start)\n\n logger.info(\"Batch Size = %d, Max Seq Length = %d\",\n batch_size, max_sequence_length)\n logger.info(\"Average Inference Time : %f\", np.mean(times))\n\n else:\n\n start = time.time()\n\n embeddings = batch_encode(\n tokenizer,\n embedder,\n corpus,\n max_length=max_sequence_length,\n batch_size=batch_size\n )\n\n end = time.time()\n\n logger.info(\"Batch Size = %d, Max Seq Length = %d, Documents = %d\",\n batch_size, max_sequence_length, len(corpus))\n logger.info(\"Embedding Time : %f\", end - start)\n\n if output_file is not None:\n path = pathlib.Path(output_file)\n path.parent.mkdir(parents=True, exist_ok=True)\n\n with open(output_file, 'wb') as f:\n pickle.dump({\n '_ids': corpus.get_ids(),\n 'embeddings': embeddings},\n f, protocol=pickle.HIGHEST_PROTOCOL)\n\n else:\n out = []\n for i in range(embeddings.shape[0]):\n out.append({\n \"index\": i,\n \"embedding\": embeddings[i, :].tolist()\n })\n print(json.dumps(out, indent=2))\n\n\ndef main(flags):\n \"\"\"Run embedding of documents using a passed in model.\n\n Args:\n flags : run flags\n \"\"\"\n\n if flags.logfile == \"\":\n logging.basicConfig(level=logging.DEBUG)\n\n else:\n path = pathlib.Path(flags.logfile)\n path.parent.mkdir(parents=True, exist_ok=True)\n logging.basicConfig(filename=flags.logfile, level=logging.DEBUG)\n\n logger = logging.getLogger()\n\n if not os.path.exists(flags.vse_config):\n logger.error(\"VSE configuration %s not found!\", flags.vse_config)\n return\n\n # parse the yaml model config\n with open(flags.vse_config, 'r') as stream:\n conf = yaml.safe_load(stream)\n\n tokenizer = 
AutoTokenizer.from_pretrained(\n conf['model']['pretrained_model'])\n\n if conf[\"model\"][\"format\"] == \"default\":\n\n # load the pretrained embedding model\n embedder = AutoModel.from_pretrained(conf['model']['pretrained_model'])\n\n elif conf[\"model\"][\"format\"] == \"inc\":\n\n # load an INC model by loading pretrained model and updating weights\n from neural_compressor.utils.pytorch import load\n\n embedder = AutoModel.from_pretrained(conf['model']['pretrained_model'])\n embedder = load(conf[\"model\"][\"path\"], embedder)\n\n # re-establish logger because it breaks from above\n logging.getLogger().handlers.clear()\n\n if flags.logfile == \"\":\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(filename=flags.logfile, level=logging.DEBUG)\n logger = logging.getLogger()\n\n elif conf[\"model\"][\"format\"] == \"ipex_int8\":\n\n # load a PT saved model saved using torch.save\n if not os.path.exists(conf['model']['path'] + \"/saved_model.pt\"):\n logger.error(\"Saved model %s not found!\",\n conf['model']['path'] + \"/saved_model.pt\")\n return\n embedder = torch.jit.load(conf['model']['path'] + \"/saved_model.pt\")\n\n else:\n return\n\n embedder.eval()\n max_sequence_length = conf['model']['max_seq_length']\n\n # use IPEX to optimize model\n if flags.intel:\n import intel_extension_for_pytorch as ipex\n embedder = ipex.optimize(embedder, dtype=torch.float32)\n\n sample_inputs = tokenizer.batch_decode([\n random.sample(\n range(tokenizer.vocab_size), max_sequence_length) for\n _ in range(1)])\n dummy_input = tokenizer(\n sample_inputs,\n padding=True,\n truncation=True,\n max_length=max_sequence_length,\n return_tensors='pt'\n )\n\n with torch.no_grad():\n embedder = torch.jit.trace(\n embedder,\n [dummy_input['input_ids'], dummy_input['attention_mask']],\n check_trace=False,\n strict=False)\n embedder = torch.jit.freeze(embedder)\n\n # read in corpus dataset\n corpus = load_corpus(flags.input_corpus)\n\n embed_documents(\n logger=logger,\n tokenizer=tokenizer,\n embedder=embedder,\n corpus=corpus,\n max_sequence_length=max_sequence_length,\n batch_size=flags.batch_size,\n n_runs=flags.n_runs,\n output_file=flags.output_file,\n benchmark_mode=flags.benchmark_mode)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--logfile',\n type=str,\n default=\"\",\n help=\"Log file to output benchmarking results to.\")\n\n parser.add_argument('--vse_config',\n type=str,\n required=True,\n help=\"Vertical Search Engine model config yml\"\n )\n\n parser.add_argument('--input_corpus',\n required=True,\n help=\"path to corpus to embed\",\n type=str\n )\n\n parser.add_argument('--output_file',\n required=False,\n help=\"file to output corpus embeddings to\",\n type=str,\n default=None\n )\n\n parser.add_argument('--batch_size',\n required=False,\n help=\"batch size for embedding. 
defaults to 32.\",\n type=int,\n default=32\n )\n\n parser.add_argument('--benchmark_mode',\n required=False,\n help=\"toggle to benchmark embedding\",\n action=\"store_true\",\n default=False\n )\n\n parser.add_argument('--n_runs',\n required=False,\n help=\"number of iterations to benchmark embedding\",\n type=int,\n default=100\n )\n\n parser.add_argument('--intel',\n required=False,\n help=\"use intel pytorch extension to optimize model\",\n action=\"store_true\",\n default=False\n )\n\n FLAGS = parser.parse_args()\n\n main(FLAGS)\n","repo_name":"oneapi-src/vertical-search-engine","sub_path":"src/run_document_embedder.py","file_name":"run_document_embedder.py","file_ext":"py","file_size_in_byte":8714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"33371109304","text":"#!/usr/bin/python3\r\n\r\nfrom sys import stdin, stderr\r\nimport traceback\r\n\r\n\r\nclass Node:\r\n def __init__(self):\r\n self.barn = {}\r\n self.posi = []\r\n\r\ndef buildTree(node, word):\r\n letter = word[0]\r\n word = word[1:]\r\n if not letter in node.barn:\r\n node.barn[letter] = Node()\r\n if not word:\r\n return node.barn[letter]\r\n return buildTree(node.barn[letter], word)\r\n\r\ndef bygg(ordliste):\r\n toppNode = Node()\r\n for tuppel in ordliste:\r\n endNode = buildTree(toppNode, tuppel[0])\r\n endNode.posi.append(tuppel[1])\r\n return toppNode\r\n\r\ndef posisjoner(ord, indeks, node):\r\n pos = []\r\n for letter in ord:\r\n pos.sort()\r\n if letter in node.barn:\r\n node = node.barn[letter]\r\n indeks += 1\r\n if indeks == len(ord):\r\n for i in node.posi:\r\n pos.append(i)\r\n return pos\r\n elif letter == '?':\r\n indeks += 1\r\n if indeks == len(ord):\r\n for b in node.barn:\r\n barnet = node.barn[b]\r\n for i in barnet.posi:\r\n pos.append(i)\r\n return pos\r\n for i in node.barn:\r\n index = indeks\r\n barnet = node.barn[i]\r\n if ord[index] in barnet.barn:\r\n while index != len(ord):\r\n barnet = barnet.barn[ord[index]]\r\n index += 1\r\n for p in barnet.posi:\r\n pos.append(p)\r\n if pos:\r\n return pos\r\n return pos\r\n\r\ndef main():\r\n try:\r\n ord = stdin.readline().split()\r\n ordliste = []\r\n pos = 0\r\n for o in ord:\r\n ordliste.append((o, pos))\r\n pos += len(o) + 1\r\n toppnode = bygg(ordliste)\r\n for sokeord in stdin:\r\n sokeord = sokeord.strip()\r\n print(\"%s:\" % sokeord, end='')\r\n posi = posisjoner(sokeord, 0, toppnode)\r\n posi.sort()\r\n for p in posi:\r\n print(\" %s\" % p, end='')\r\n print()\r\n except:\r\n traceback.print_exc(file=stderr)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"matssa/Algdat","sub_path":"oving2.py","file_name":"oving2.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"36133900709","text":"#!/usr/bin/python3\n\"\"\"\ntakes your GitHub credentials (username and password)\nand uses the GitHub API to display your id\n\"\"\"\nimport sys\nimport requests\n\n\nif __name__ == \"__main__\":\n url = 'https://api.github.com/user'\n html = requests.get(url, auth=(sys.argv[1], sys.argv[2]))\n try:\n print(html.json().get('id'))\n except ValueError:\n print(\"Not a valid JSON\")\n","repo_name":"phemsie/alx-higher_level_programming","sub_path":"0x11-python-network_1/10-my_github.py","file_name":"10-my_github.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"19720061399","text":"import matplotlib\nimport pandas as pd\n\nmatplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.\nimport numpy as np\nfrom astropy.io import fits\nfrom custom_image_utils import scaled_plot\nfrom matplotlib import pyplot as plt\nimport scipy.spatial.distance\nimport skimage.transform\n\n# recreate the methods of Pawlik et al 2016\n# https://arxiv.org/pdf/1512.02000v2.pdf\n\n\n# binary mask created from 8-connected pixels in background-subtracted image at > 1 sig above bkg\n\n# Rmax is distance between 'center' (brightest pixel in mask) and most distant pixel\n\n\n\ndef findMax(im):\n # print(im.shape)\n center_flat = np.argmax(im)\n x_c, y_c = np.unravel_index(center_flat, im.shape)\n return x_c, y_c\n\ndef findMin(im):\n # print(im.shape)\n center_flat = np.argmin(im)\n x_c, y_c = np.unravel_index(center_flat, im.shape)\n return x_c, y_c\n\ndef normMatrix(im):\n norm_matrix = np.zeros((im_height,im_width))\n x_c, y_c = findMax(im)\n # print(x_c, y_c)\n for x in range(im_height):\n for y in range(im_width):\n x_dist = x - x_c\n y_dist = y - y_c\n norm_matrix[x,y] = np.linalg.norm((x_dist, y_dist))\n # print(norm_matrix[126:132,126:132]) # verify center\n return norm_matrix\n\ndef findRmax(im, mask):\n norm_matrix = normMatrix(im) * mask\n Rmax = norm_matrix.max()\n # print(Rmax) # max distance in pixels between maxima and furthest mask pixel\n return Rmax\n\ndef cmask(index,radius,array):\n a,b = index\n nx,ny = array.shape\n y,x = np.ogrid[-a:nx-a,-b:ny-b]\n mask = x*x + y*y <= radius*radius\n return(mask)\n\ndef estimateCenter(im):\n x_len, y_len = im.shape\n x_c, y_c = int(x_len / 2), int(y_len/2)\n # print(x_len)\n # x_rot_c, y_rot_c = findMax(im) # this causes errors in images with bright interlopers\n # maximum in central 50 pixel box for initial guess\n box_width = 25 # x2\n box_mask = np.zeros_like(im)\n box_mask[x_c - box_width:x_c + box_width, y_c - box_width:y_c + box_width] += 1\n box_masked_im = im * box_mask\n # plt.imshow(np.sqrt(box_masked_im))\n # plt.show()\n x_rot_c, y_rot_c = findMax(box_masked_im)\n return x_rot_c, y_rot_c\n\ndef findResidual(im, mask, rot_center_shift, mode='standard'):\n x_rot_c, y_rot_c = estimateCenter(im)\n # print(x_rot_c, y_rot_c)\n mask_center = (x_rot_c, y_rot_c)\n Rmax = findRmax(im, mask)\n if mode == 'mask': im = mask\n im = im * cmask(mask_center, Rmax, im) # circular mask of radius Rmax\n # plt.imshow(im)\n # plt.show()\n # if rot_center_shift == None: rot_center_shift = (x_len/2-x_c, y_len/2-y_c) # calculate rot center if not provided\n tform_c = skimage.transform.SimilarityTransform(translation=rot_center_shift) # radians\n im = skimage.transform.warp(im, inverse_map=tform_c, preserve_range=True) # re-center\n im_r = skimage.transform.rotate(im, 180, preserve_range=True) # rotated image, degrees\n im_res = np.abs(im - im_r)\n # plt.imshow(im_res)\n # plt.show()\n return im_res\n\ndef findAsymmetry(im, mask):\n x_len, y_len = im.shape\n x_c, y_c = findMax(im)\n # print(x_c, y_c)\n # find minimum A asymmetry, and corresponding rotation center\n\n # rot_center_shift = (x_len/2-x_c, y_len/2-y_c) # default shift was center\n rot_center_shift = (0,0) # no shift, already start from brightest central pixel!\n\n A = np.ones((max_path*2+1,max_path*2+1))\n x_tweak = np.arange(-max_path, max_path+step, step)\n y_tweak = np.arange(-max_path, max_path+step, step)\n\n hunt = True\n curr_x = max_path # start at center of 0 to 4 grid, current x\n curr_y = max_path # similarly\n while hunt:\n # measure 
A at 8-connected pixels\n for alt_x in [curr_x - 1, curr_x, curr_x + 1]:\n for alt_y in [curr_y - 1, curr_y, curr_y + 1]:\n if A[alt_x, alt_y] == 1:\n final_rot_center_shift = (\n rot_center_shift[0] + x_tweak[alt_x],\n rot_center_shift[1] + y_tweak[alt_y]\n )\n im_res = findResidual(im, mask, final_rot_center_shift, mode='standard')\n A[alt_x, alt_y] = np.sum(im_res) / (2 * np.sum(im))\n # print(A)\n # print('\\n')\n min_x, min_y = findMin(A)\n # print(min_x, min_y)\n if min_x == curr_x and min_y == curr_y: hunt=False # stop if no change\n if min_x == 0: hunt = False # stop if at left edge\n if min_x == max_path*2: hunt = False # right edge\n if min_y == 0: hunt = False # top edge\n if min_y == max_path*2: hunt = False # bottom edge\n curr_x, curr_y = min_x, min_y\n\n # measure mask asymmetry about that rotation center\n best_x, best_y = findMin(A)\n best_rot_center_shift = (rot_center_shift[0] + x_tweak[best_x] , rot_center_shift[1] + y_tweak[best_y])\n im_res_mask = findResidual(im, mask, rot_center_shift=best_rot_center_shift, mode='mask')\n A_mask = np.sum(im_res_mask) / (2 * np.sum(mask))\n # plot best residual\n # best_im_res= findResidual(im, mask, rot_center_shift=best_rot_center_shift, mode='standard')\n # plt.imshow(best_im_res)\n # plt.show()\n # print(best_rot_center_shift, A.min())\n return A.min(), A_mask\n\ndef replicatePawlik():\n\n if aws == False:\n directory = r'/media/mike/SandiskLinux/threshold/threshold/'\n meta = pd.read_csv('tables/meta_table_saved.csv')[39:54]\n meta_full = pd.read_csv('tables/meta_table_saved.csv')[39:54]\n if aws == True:\n directory = r'/exports/aws/scratch/s1220970/regenerated/512/'\n meta = pd.read_csv('SemesterOne/meta_table.csv')\n meta_full = pd.read_csv('SemesterOne/meta_table.csv')\n\n meta_full['standard_A'] = np.zeros(len(meta_full), dtype=float)\n meta_full['mask_A'] = np.zeros(len(meta_full), dtype=float)\n\n print(meta.head())\n # _threshold is the filled mask, _threshold_mask is the binary mask only\n\n standard_A_0 = []\n mask_A_0 = []\n\n standard_A_4 = []\n mask_A_4 = []\n\n\n print(len(meta))\n for meta_index in range(len(meta)):\n # print(meta_index)\n im = fits.getdata(\n directory + meta.iloc[meta_index]['threshold_5sig_filename']) # read (the first) original image for base file\n im = np.squeeze(im)\n\n mask = fits.getdata(\n directory + meta.iloc[meta_index][\n 'threshold_mask_5sig_filename']) # read (the first) original image for base file\n mask = np.squeeze(mask)\n\n # scaled_plot(im, plt, clip_q=True, rel_clip=False)\n # plt.show()\n\n min_A, mask_A = findAsymmetry(im, mask)\n print(min_A, mask_A)\n # print(min_A + ' , ' + mask_A)\n\n meta_full = meta_full.set_value(meta_index, 'standard_A', float(min_A))\n meta_full = meta_full.set_value(meta_index, 'mask_A', float(mask_A))\n\n\n # if meta.iloc[meta_index]['FEAT'] != 'N':\n # standard_A_4.append(min_A)\n # mask_A_4.append(mask_A)\n # else:\n # standard_A_0.append(min_A)\n # mask_A_0.append((mask_A))\n\n if aws:meta_full.to_csv('meta_with_A_full_eddie_5sig.csv')\n else: meta_full.to_csv('meta_with_A_full_local.csv')\n\n\n# aws=True\n# im_height = 512\n# im_width = 512\n\naws = True\nim_height = 512\nim_width = 512\n\n# max_path = 20\n# step = 0.5\n\nmax_path = 8\nstep = 0.5\n\nreplicatePawlik()\n\n\n\n\n# plt.scatter(standard_A_4, mask_A_4, color='r')\n# plt.scatter(standard_A_0, mask_A_0, color='k')\n# plt.xlim([0,1])\n# plt.ylim([0,1])\n# plt.legend(['tidal', 'non'],loc=0)\n# plt.savefig('pawlik_scatter_1000_3sig.png')\n# plt.show()\n\n# Implements path-hopping from 
Conselice\n# Runs on background-subtracted images so does not include -bkg term\n# Selects center by maxima: is this smart?\n# it could be that the brightest point within the central mask is NOT the galactic center\n\n# the threshold significance level of the mask is quite important in the size of the mask -> including interlopers\n# at one sigma, almost all images include a bright interloper.","repo_name":"mwalmsley/tidal-features-classifier","sub_path":"tidalclassifier/pawlik/pawlik.py","file_name":"pawlik.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"15418345211","text":"w, h, n = map(int, input().split())\n\nxmin, xmax, ymin, ymax = 0,w,0,h\n\nfor i in range(n):\n xi,yi,ai = map(int, input().split())\n if ai == 1:\n if xmin < xi:\n xmin = xi\n if ai == 2:\n if xmax > xi:\n xmax = xi\n if ai == 3:\n if ymin < yi:\n ymin = yi\n if ai == 4:\n if ymax > yi:\n ymax = yi\n\nif xmax < xmin or ymax < ymin:\n menseki = 0\nelse:\n menseki = (xmax-xmin) * (ymax-ymin)\nprint(menseki)","repo_name":"NagaTaku/atcoder_abc_edition","sub_path":"ABC047/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"6348404788","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 15 19:44:50 2020\r\n\r\n@author: pit\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom mlxtend.frequent_patterns import apriori as ap\r\nfrom mlxtend.frequent_patterns import association_rules as ar\r\n\r\npd.set_option('display.max_columns', None) # display all columns in df, default = 4\r\n\r\n\r\n#1/ Wczytaj dane z pliku \r\n\r\nfile = 'D:\\MachineLearningPandaIT\\Materials\\e-commerce.csv'\r\n\r\ndef data_load(file):\r\n try:\r\n df = pd.read_csv(file, sep=',', encoding = 'ISO-8859-1',\r\n index_col=False, error_bad_lines = False)#, nrows=1000)\r\n except:\r\n print('incorrect file path')\r\n \r\n return df\r\ndf = data_load(file)\r\n\r\n# 2/Wyczyść dane\r\n# 3/ Wykonaj wstępną analizę danych\r\n\r\ndef data_cleanup(df):\r\n df.head()\r\n df.isna().sum()\r\n df_clean = df.drop(columns = ['InvoiceNo', 'StockCode','InvoiceDate'])\r\n df_clean.isna().sum()\r\n df_clean = df_clean.dropna(subset=['Description'])\r\n df_clean['Value'] = df_clean['Quantity'] * df_clean['UnitPrice']\r\n df_clean.dtypes\r\n unique_clients = list(df_clean['CustomerID'].unique())\r\n uc = len(unique_clients)\r\n item_counter = df_clean['Description']\r\n ic = item_counter.value_counts()\r\n country_counter = df_clean[['Country','Value']]\r\n cc = country_counter['Country'].value_counts()\r\n cc_val = country_counter.groupby('Country')['Value'].sum()\r\n cc_val = cc_val.astype(int)\r\n \r\n return df_clean, uc, ic, cc, cc_val\r\n \r\ndf_clean = data_cleanup(df)[0]\r\nuc = data_cleanup(df)[1]\r\nic = data_cleanup(df)[2]\r\ncc = data_cleanup(df)[3]\r\ncc_val = data_cleanup(df)[4]\r\n\r\nic = ic.nlargest(n = 10)\r\ncc = cc.nlargest(n = 10)\r\ncc_val = cc_val.nlargest(n = 10)\r\n\r\nprint('\\n number of unique clients:', uc)\r\nprint('\\n item counter top 10: \\n', ic)\r\nprint('\\n country by transactions top 10: \\n', cc)\r\nprint('\\n country by values top 10: \\n', cc_val)\r\n\r\n### narysuj histogram obrazujący 10 nazw państw z największą liczbą transakcji\r\n\r\ndef charts():\r\n import matplotlib.pyplot as plt\r\n labels1 = cc.index.values\r\n values1 = cc.values\r\n labels2 = cc_val.index.values\r\n values2 = cc_val.values\r\n\r\n#1 pie chart with %\r\n explode = (0.5, 0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0)\r\n fig1, ax1 = plt.subplots()\r\n ax1.pie(values1, explode=explode, labels=labels1, autopct='%1.1f%%',\r\n shadow=True, startangle=90)\r\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\r\n plt.title('% contribution in overall transaction count by Country')\r\n plt.show()\r\n#1 bar chart \r\n fig = plt.figure()\r\n ax = fig.add_axes([0,0,1,1])\r\n ax.bar(labels1,values1)\r\n plt.title('numeric contribution in overall transaction count by Country')\r\n plt.show()\r\n\r\n#2 pie chart with %\r\n explode = (0.5, 0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0)\r\n fig1, ax1 = plt.subplots()\r\n ax1.pie(values2, explode=explode, labels=labels2, autopct='%1.1f%%',\r\n shadow=True, startangle=90)\r\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\r\n plt.title('% contribution in overall value by Country')\r\n plt.show()\r\n#2 bar chart \r\n fig = plt.figure()\r\n ax = fig.add_axes([0,0,1,1])\r\n ax.bar(labels2,values2)\r\n plt.title('% contribution in overall value by Country')\r\n plt.show()\r\n\r\n#4. Wybierz dane tylko dla jednego kraju (np. Polski)\r\n#5. 
Przekonwertuj dane do postaci którą przyjmuję algorytm Apriori (DataFrame gdzie indexami są\r\n#numery transakcji a wartościami kolumn True lub False w zależności czy dany produkt wystąpił w\r\n#transakcji).\r\n#6. Wygeneruj listę reguł dla wybranej przez Ciebie wartości min_support. Przejrzyj reguły a następnie\r\n#wybierz 5 Twoim zdaniem najlepszych.\r\n#7. Spróbuj zwiększyć wartość min_support, co się wtedy dzieję z liczbą reguł?\r\n#8. Wypisz wszystkie reguły, których wartość lift jest większa niż 5 i wartość confidence\r\n#jest większa niż 0.8\r\n\r\n\r\ndf_EIRE = df[df['Country'] == 'EIRE']\r\ndf_EIRE = df_EIRE[['InvoiceNo', 'Description', 'Quantity']]\r\ndf_EIRE = (df_EIRE.groupby(['InvoiceNo', 'Description'])['Quantity'].\r\n sum().unstack().fillna(0))\r\ndf_EIRE[df_EIRE == 0] = False\r\ndf_EIRE[df_EIRE != 0] = True\r\n\r\nfreq_items = ap(df_EIRE,\r\n min_support = 0.05,\r\n use_colnames=True)\r\nfreq_items.head(10)\r\n\r\nrules = ar(freq_items,\r\n metric = 'lift',\r\n min_threshold = 1)\r\nrules.head(10)\r\n\r\nrules['confidence'].sort_values(ascending = False).head(10)\r\nrules['lift'].sort_values(ascending = False).head(10)\r\nrules[ (rules['lift'] >= 1) & (rules['confidence'] >= 0.8)]\r\n","repo_name":"PUlanowski/Python_Machine_Learning","sub_path":"apriori2.py","file_name":"apriori2.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41873596657","text":"#!/usr/bin/env python\n\nfrom math import cos, sin\nimport sys\nimport rospy\nfrom lab_5.srv import oint\n\n\ndef ellipse():\n frequency = 30.0\n ax = 0.3\n ay = 0.2\n az = 0.2\n theta = 0.0\n dth = 0.5 * 3.1415 / frequency\n\n t = 1.0 / frequency\n x0 = 0.5\n y0 = -0.5\n z0 = 0.8\n\n interpol = rospy.ServiceProxy('oint_control_srv', oint)\n interpol(x0 + ax*cos(theta), y0 + ay*sin(theta), z0 + az*sin(theta), 0.0, 0.0, 0.0, 1.0, 2.0)\n\n rospy.wait_for_service('oint_control_srv')\n rate = rospy.Rate(frequency)\n while not rospy.is_shutdown():\n x = x0 + ax * cos(theta)\n y = y0 + ay * sin(theta)\n z = z0 + az * sin(theta)\n move = interpol(x, y, z, 0.0, 0.0, 0.0, 1.0, t)\n\n theta = theta + dth\n rate.sleep()\n\n\nif __name__ == \"__main__\":\n rospy.init_node('ellipse')\n ellipse()\n","repo_name":"pw-eiti-anro-20l/kostrzenski_stojke","sub_path":"lab_5/src/ocmd.py","file_name":"ocmd.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"35704148909","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# import libraries\nimport tkinter as tk \nfrom tkinter import ttk \n\n#Connect database\nimport cx_Oracle\nimport oracledbconnect as dbconnect\nconnect=cx_Oracle.connect(dbconnect.user,dbconnect.password,dbconnect.dns)\ncur=connect.cursor()\n\n\n# In[2]:\n\n\nwindow=tk.Tk()\nwindow.geometry('1000x500')\nwindow.title('PO Application')\ntabControl=ttk.Notebook(window)\n#define tabs\ntab1=ttk.Frame(tabControl)\ntab2=ttk.Frame(tabControl)\ntab3=ttk.Frame(tabControl)\n\n#add tab titles\ntabControl.add(tab1,text=\"PO Enquiry\")\ntabControl.add(tab2,text=\"PO Header\")\ntabControl.add(tab3,text=\"PO Line\")\n\n#grid for tab\ntabControl.grid()\n\n\n# In[3]:\n\n\n#Add widgets to tab1 PO Enquiry Tab\nlabelframe=tk.LabelFrame(tab1,text='Enter details to Enquire PO')\nlabelframe.grid(column=1,row=1)\n\ntk.Label(labelframe,text='PO#').grid(column=0,row=1,sticky='E',pady=5)\nponum=tk.Entry(labelframe,width=10)\nponum.grid(column=1,row=1,pady=5)\n\ntk.Label(labelframe,text=' ').grid(column=2,row=1,padx=50, pady=5)\n\ntk.Label(labelframe,text='PO Date').grid(column=3,row=1,sticky='E',pady=5)\npondate=tk.Entry(labelframe,width=10)\npondate.grid(column=4,row=1,pady=5)\n\ntk.Label(labelframe,text='Supplier#').grid(column=0,row=2,sticky='W',pady=5)\nposuppnum=tk.Entry(labelframe,width=10)\nposuppnum.grid(column=1,row=2,pady=5)\n\ntk.Label(labelframe,text=' ').grid(column=2,row=2,padx=50, pady=5)\n\ntk.Label(labelframe,text='Supplier Name').grid(column=3,row=2,sticky='W',pady=5)\nposupname=tk.Entry(labelframe,width=10)\nposupname.grid(column=4,row=2,pady=5)\n\n\n# In[ ]:\n\n\nwindow.mainloop()\n\n","repo_name":"simrit1/Purchase-Order-Application","sub_path":"POEnquiryTabCreation1.py","file_name":"POEnquiryTabCreation1.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"24596219016","text":"class Solution:\n def lemonadeChange(self, bills: [int]) -> bool:\n\n fives, tens, twenties = 0,0,0\n print(bills)\n for bill in bills:\n \tif bill == 5:\n \t\tfives+=1\n \telif bill == 10:\n \t\ttens +=1\n \t\tif fives>=1:\n \t\t\tfives -=1\n \t\telse:\n \t\t\treturn False\n \t\n \telif bill == 20:\n \t\ttwenties+=1\n \t\tif fives>=1 and tens>=1:\n \t\t\tfives -=1\n \t\t\ttens -=1\n \t\telif fives>=3 and tens ==0:\n \t\t\tfives -=3\n \t\telse:\n \t\t\treturn False \n\n \tprint(fives, tens, twenties) \t\t\n \n return True\nbills1 = [5,5,20,5,5,10,5,10,5,20]\nsol = Solution()\nprint(sol.lemonadeChange(bills1))","repo_name":"aituar-kenges/leetcode","sub_path":"python/860_lemonadeChange.py","file_name":"860_lemonadeChange.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"34584242648","text":"import sys\r\nimport time\r\n\r\ndef isPrime(n):\r\n if n < 2: return False\r\n for x in range(2, int(n**0.5) + 1):\r\n if n % x == 0:\r\n return False\r\n return True\r\n\r\nsek = [1,3]\r\nprimes = [3]\r\n\r\ndef sekvens(k):\r\n\tstart_time = time.time()\r\n\tsys.setrecursionlimit(5000)\r\n\ttemp = []\r\n\r\n\tif len(primes) == 100:\r\n\t\tprint(\"calculating\")\r\n\t\treturn sum(primes)\r\n\r\n\r\n\tfor i in range(0, len(sek)-1):\r\n\t\tfor j in range(i+1, len(sek)):\r\n\t\t\tif sek[i] == sek[j]:\r\n\t\t\t\tcontinue\r\n\t\t\tif sek[i] + sek[j] > sek[-1] + k or len(temp) > 1:\r\n\t\t\t\tbreak\r\n\t\t\tif sek[i] + sek[j] == sek[-1] + k:\r\n\r\n\t\t\t\ttemp.append(sek[i] + sek[j])\r\n\tif len(temp) == 1:\r\n\t\tif isPrime(temp[0]):\r\n\t\t\tprimes.append(temp[0])\r\n\t\tsek.append(temp[0])\r\n\t\tk = 1\r\n\t\treturn sekvens(k)\r\n\treturn sekvens(k+1)\r\n\r\nprint(sekvens(1))\r\nprint(\"Tok\", time.time() - start_time)","repo_name":"Mortefal/Julekalender","sub_path":"Luke 13/julamsekvens.py","file_name":"julamsekvens.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"3178212969","text":"import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom class_algorithm import GeneticAlgorithm\n\n\ndef main_algorith(sols, weight, count_items, graphing=False):\n # Algorithm\n alg_gen = GeneticAlgorithm(sols, weight, count_items)\n new_seed = alg_gen.gen_seed()\n best_fitness_overall = 0\n\n if os.path.exists(\"log_file.csv\"):\n os.remove('log_file.csv')\n\n for i in range(1000):\n # 1. Restart with updated steed\n seed = new_seed\n\n # 2. Put through fitness function to determine fitness\n fitness, solution_array = alg_gen.test_solutions(seed)\n\n # 3. Select the best solutions\n best_fitness, best_sols = alg_gen.order_fitness(fitness, solution_array)\n\n # 4. Crossover solutions\n crossed_sols = alg_gen.crossover_function(best_sols)\n\n # 5. Mutate solutions\n new_seed = alg_gen.mutation(crossed_sols)\n if best_fitness_overall < best_fitness[0]:\n best_fitness_overall = best_fitness[0]\n\n print(f\"Iteration: {i} - Best Fitness: {best_fitness[0]} - Record Fitness: {best_fitness_overall}\")\n GeneticAlgorithm.log_info(i, best_fitness[0], best_fitness_overall)\n\n if graphing:\n data = pd.read_csv(\"log_file.csv\", header=None, names=[\"Iteration\", \"Fitness\", \"Best Fitness\"])\n plt.plot(data[\"Fitness\"])\n plt.plot(data[\"Best Fitness\"])\n plt.legend([\"Value\", \"Best Value\"])\n plt.show()","repo_name":"mianigro/Knapsack-Genetic-Algoirthm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"21529729310","text":"#!/usr/bin/python3\nimport sys\nimport os\nimport time\n# import json\n# import getopt\n# import socket\n# import struct\n# import codecs\n# import binascii\nfrom pymodbus.client.sync import ModbusTcpClient\nfrom pymodbus.constants import Endian\nfrom pymodbus.payload import BinaryPayloadDecoder\n\nnamed_tuple = time.localtime() # getstruct_time\ntime_string = time.strftime(\"%m/%d/%Y, %H:%M:%S keba setcurr.py\", named_tuple)\n\nfile_string = '/var/www/html/openWB/ramdisk/keba_setcurr.log'\n\n\ndef getstat(ip):\n # 1 enabled, 0 disable, 9 undef\n file_ip = '/var/www/html/openWB/ramdisk/keba_' + ip\n status = 9\n if os.path.isfile(file_ip):\n f = open(file_ip, 'r')\n status = int(f.read())\n f.close()\n return status\n\n\ndef setstat(ip, status):\n file_ip = '/var/www/html/openWB/ramdisk/keba_' + ip\n f1 = open(file_ip, 'w')\n f1.write(str(status))\n f1.close()\n\n\nif os.path.isfile(file_string):\n f = open(file_string, 'a')\nelse:\n f = open(file_string, 'w')\n\nipaddress = str(sys.argv[1])\nnewcurr = int(sys.argv[2])\n\nclient = ModbusTcpClient(ipaddress, port=502)\n\n# maxcurrent state 1100\nresp = client.read_holding_registers(1100, 2, unit=255)\ndecoder = BinaryPayloadDecoder.fromRegisters(resp.registers, byteorder=Endian.Big, wordorder=Endian.Big)\nfinal = float(decoder.decode_32bit_uint()) / 1000\noldcurr = int(\"%.f\" % final)\n\n# cable state 1004\nresp = client.read_holding_registers(1004, 2, unit=255)\ndecoder = BinaryPayloadDecoder.fromRegisters(resp.registers, byteorder=Endian.Big, wordorder=Endian.Big)\nfinal2 = float(decoder.decode_32bit_uint())\nplugs = \"%.f\" % final2\n\n# max supported current 1110\nresp = client.read_holding_registers(1110, 2, unit=255)\ndecoder = BinaryPayloadDecoder.fromRegisters(resp.registers, byteorder=Endian.Big, wordorder=Endian.Big)\nfinal2 = float(decoder.decode_32bit_uint()) / 1000\nsupcur = int(\"%.f\" % final2)\nif plugs == \"7\":\n if (oldcurr != newcurr):\n if (newcurr == 0):\n # disable station\n print('%s oldcurr %d, newcurr %d, ipadr %s disable station' % (\n time_string, oldcurr, newcurr, ipaddress), file=f)\n rq = client.write_register(5014, 0, unit=255)\n setstat(ipaddress, 0)\n else:\n if (oldcurr == 0):\n # enable station\n print('%s oldcurr %d, newcurr %d, ipadr %s enable station ' % (\n time_string, oldcurr, newcurr, ipaddress), file=f)\n rq = client.write_register(5014, 1, unit=255)\n setstat(ipaddress, 1)\n # setting new current\n print('%s oldcurr %d, newcurr %d, hwlimit %d ipadr %s setting new curr' % (\n time_string, oldcurr, newcurr, supcur, ipaddress), file=f)\n newcurrt = newcurr * 1000\n rq = client.write_register(5004, newcurrt, unit=255)\nelse:\n status = getstat(ipaddress)\n if ((newcurr == 0) and (status != 0)):\n # disable station\n print('%s No car detected, ipadr %s disable station' % (time_string, ipaddress), file=f)\n rq = client.write_register(5014, 0, unit=255)\n setstat(ipaddress, 0)\n else:\n if ((newcurr > 0) and (status != 1)):\n # enable station\n print('%s No car detected, ipadr %s enable station ' % (time_string, ipaddress), file=f)\n rq = client.write_register(5014, 1, unit=255)\n setstat(ipaddress, 1)\nf.close()\n","repo_name":"snaptec/openWB","sub_path":"modules/keballlp1/setcurrkeba.py","file_name":"setcurrkeba.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":322,"dataset":"github-code","pt":"81"}
+{"seq_id":"16999091253","text":"# -*- coding: utf-8 -*-\n# @Author: Catofes\n# @Date: 2015-08-15\n\n\n'''\nClass to cache songs into local storage.\n'''\n\nfrom singleton import Singleton\nimport threading\nimport subprocess\nfrom const import Constant\nfrom config import Config\nimport os\nimport logger\nimport signal\n\nlog = logger.getLogger(__name__)\n\n\nclass Cache(Singleton):\n def __init__(self):\n if hasattr(self, '_init'):\n return\n self._init = True\n self.const = Constant()\n self.config = Config()\n self.download_lock = threading.Lock()\n self.check_lock = threading.Lock()\n self.downloading = []\n self.aria2c = None\n self.stop = False\n self.enable = self.config.get_item(\"cache\")\n self.aria2c_parameters = self.config.get_item(\"aria2c_parameters\")\n\n\n def start_download(self):\n check = self.download_lock.acquire(False)\n if not check:\n return False\n while True:\n if self.stop:\n break\n if not self.enable:\n break\n self.check_lock.acquire()\n if len(self.downloading) <= 0:\n self.check_lock.release()\n break\n data = self.downloading.pop()\n self.check_lock.release()\n song_id = data[0]\n song_name = data[1]\n artist = data[2]\n url = data[3]\n onExit = data[4]\n output_path = Constant.download_dir\n output_file = str(artist) + \" - \" + str(song_name) + \".mp3\"\n try:\n para = ['aria2c', '--auto-file-renaming=false', '--allow-overwrite=true', '-d', output_path, '-o',\n output_file, url]\n para[1:1] = self.aria2c_parameters\n self.aria2c = subprocess.Popen(para,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n self.aria2c.wait()\n except Exception:\n log.debug(str(song_id) + \" Cache Error\")\n if self.aria2c.returncode == 0:\n log.debug(str(song_id) + \" Cache OK\")\n onExit(song_id, output_path + \"/\" + output_file)\n self.download_lock.release()\n\n\n def add(self, song_id, song_name, artist, url, onExit):\n self.check_lock.acquire()\n self.downloading.append([song_id, song_name, artist, url, onExit])\n self.check_lock.release()\n\n def quit(self):\n self.stop = True\n try:\n os.kill(self.aria2c.pid, signal.SIGKILL)\n except:\n pass\n\n","repo_name":"smileboywtu/MusicRecommend","sub_path":"netease/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"18423978955","text":"import requests\r\n# importing requests to call API\r\nimport sys\r\n#importing sys module to control python runtime environment\r\nfrom kivy.app import App\r\n#importing App class to run our application\r\nfrom kivy.properties import StringProperty,NumericProperty\r\n#importing kivy properties\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.dropdown import DropDown\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.uix.floatlayout import FloatLayout\r\n# importing different widgets\r\n\r\nfrom getcityname import GetCityName\r\n# importing GetCityName class in order to get current location\r\n# this file is named as getcityname in same project\r\n\r\n\r\nclass WeatherProjApp(App):\r\n def build(self):\r\n weatherclass = WeatherClass()\r\n return weatherclass\r\n\r\n\r\nclass WeatherClass(FloatLayout):\r\n # This class is the root class and inherits FloatLayout\r\n # This app calls geocoder API for getting the location attributes of the city searched first\r\n # and then after getting correct logitude and latitude its call weather API for getting the data\r\n # However, it is possible to send the name(City Searched) directly to weather API\r\n # But as per Current Weather Data API documentation this version is less accurate and will be deprecated soon\r\n # and therefore has been advised to use Geocoder API\r\n # also , if using directly weatherAPI , it is not possible to get all the cities in world with same name\r\n # So for Example, there could be more than one city named as London in World such as one in England and one in Canada\r\n #This is what their documentation reveals:\r\n #You can use the geocoder built into this API by default,\r\n #but please note that this version is less accurate than the Geocoder API and will be deprecated soon.\r\n APIid = \"\"\r\n # Here you will need to use your access key for API\r\n # Access key is same for CurrentWeatherData Api as well as Geocoder API\r\n # As both API are from openweathermap\r\n units = \"metric\"\r\n # Weather can be taken in any measurement system, I chose Units as metrics system\r\n # Howevenr Data is aveailable in Metric , Fahrenheit as well as imperical system\r\n condition_descriptors = StringProperty(None)\r\n condition_icon = StringProperty(None)\r\n temperature_description = StringProperty(None)\r\n maximum_temperature= NumericProperty(None)\r\n minimum_temperature=NumericProperty(None)\r\n humidity=NumericProperty(None)\r\n pressure=NumericProperty(None)\r\n wind=NumericProperty(None)\r\n visibility=NumericProperty(None)\r\n feels_like=NumericProperty(None)\r\n # Above variable have been made kivy properties in order to use them in .kv file\r\n getcurrentloc=GetCityName()\r\n getcurrentinfo=getcurrentloc.getcurrentlocattributes()\r\n #getcurrentinfo has been made variable to hold the current location data\r\n #getcurrentinfo will be invalid in case of no internet connectivity\r\n #as specified in getcityname file\r\n if(getcurrentinfo==\"invalid\"):\r\n sys.exit(\"Internet is not available\")\r\n # Therefore, in case of no internet connectivity system will exit and will not run application\r\n current_city=getcurrentinfo['city']\r\n current_longitude=getcurrentinfo['longitude']\r\n current_latitude=getcurrentinfo['latitude']\r\n \"\"\"The ipstack API as called in getcityname files provides geo coordinates as well ,\r\n therefore for the current location of user we don't need to call geocoder API\r\n and current weather data api can be called directly\"\"\"\r\n parameters = 
{\r\n \"lat\": current_latitude,\r\n \"lon\": current_longitude,\r\n \"appid\": APIid,\r\n \"units\": units\r\n }\r\n dropdown = DropDown(size_hint=(1, .1),\r\n pos_hint={'right': 1, 'top': 0.9})\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n # self.orientation=\"horizontal\"\r\n textinput = TextInput(text=self.current_city, size_hint=(.7, .1),\r\n pos_hint={'right': 0.7, 'top': 1})\r\n button = Button(text=\"Search\",\r\n size_hint=(.3, .1),\r\n pos_hint={'right': 1, 'top': 1})\r\n button.bind(on_press=lambda button: self.buttonFunction(textinput.text))\r\n #buttonfunction method has been binded with this button on press event\r\n # this function basically takes the input of user which is city name to be searched for\r\n # and create the dropdown for different places in world with same name\r\n mainbutton = Button(text=str(textinput.text), size_hint=(1, .1),\r\n pos_hint={'right': 1, 'top': 0.9})\r\n # We are not adding any dropdown for current location as current location can't be more than one\r\n # therefore instead of creating dropdown here a button with same name as current location has been created\r\n # this has been exclusively define in constructor so that our first screen can show weather of current location\r\n self.add_widget(textinput)\r\n self.add_widget(button)\r\n self.add_widget(mainbutton)\r\n self.dropfunction(self.parameters,self.dropdown,mainbutton)\r\n \"\"\" drop function takes the specific locations founded by buttonfunction and then resets the current \r\n variables correspondingly to be shown in app and these variables then can be accessed in .ky file\"\"\"\r\n\r\n def gettcitiesLongandLot(self, text):\r\n \"\"\" This is the function that is used to get longitude and latitude\r\n of the city searched for , Note here that this function fetches the long\r\n and lat of all the cities with same name . 
Basically this is the function\r\n which helps us to know the different geographical cities with same name with their geo coordinates\"\"\"\r\n baseURL=\"http://api.openweathermap.org\"\r\n endpoints=\"/geo/1.0/direct\"\r\n URL=baseURL+endpoints\r\n param={\r\n 'q': text,\r\n 'limit':'5',\r\n 'appid':'b62cb3f2a3cc1514e250f0c76b4007dc'\r\n }\r\n try:\r\n response = requests.get(URL,params=param)\r\n names = response.json()\r\n # print(names)\r\n return names\r\n except requests.exceptions.ConnectionError as e:\r\n print(\"Connection cant be established\")\r\n sys.exit(\"no internet connectivity\")\r\n # although connection connectivity was checked before as well in getcityname file\r\n # and application won't start in case of no network connectivity\r\n # but it is possible that some user was connected to internet at any time when application was started\r\n # But application was not closed and now when city is being searched , user is not connected to internet anymore\r\n except requests.exceptions.JSONDecodeError as e:\r\n print(\"Make sure API call is correct, There seems to be problem with API addess\")\r\n # if anyhow endpoint goes wrong for example while refracting the code or anyhow\r\n # json code error exception will be thrown , as otherwise if checking with status code==200\r\n # this can go wrong as in case of error only in endpoint but in baseURL\r\n # still response with status code 200 will be received with success=failre in content\r\n\r\n def buttonFunction(self, text):\r\n names = self.gettcitiesLongandLot(text)\r\n dropdown = DropDown(size_hint=(1, .1),\r\n pos_hint={'right': 1, 'top': 0.9})\r\n for city in names:\r\n if \"state\" not in city:\r\n city['state'] = city['name']\r\n btn = Button(text='% s %s %s' % (city['name'], city['state'], city['country']), size_hint_y=None,\r\n height=40)\r\n parameters = {\r\n \"lat\": city['lat'],\r\n \"lon\": city['lon'],\r\n \"appid\": self.APIid,\r\n \"units\": self.units\r\n }\r\n btn.id = parameters\r\n btn.bind(on_release=lambda btn: self.dropfunction(btn.id, dropdown, btn))\r\n dropdown.add_widget(btn)\r\n dropdown.select(btn.text)\r\n \"\"\"The above for loop will create buttons for different cities found with same name and will add them ro \r\n dropdown menu .However question arises that in case if user enters the name of the city which is \r\n inaccurate or in other terms no such city exist in World , Then this loop will not be executed as the \r\n response array will contain no value or in other term it will be empty \"\"\"\r\n mainbutton = Button(text=self.dropdowntext(names) + str(text), size_hint=(1, .1),\r\n pos_hint={'right': 1, 'top': 0.9})\r\n mainbutton.bind(on_release=dropdown.open)\r\n dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))\r\n self.add_widget(mainbutton)\r\n\r\n def dropdowntext(self,names):\r\n \"\"\"This function find what is to be displayed on Drop Down menu for example if no result is found for searched term\r\n then this will display We couldn't find any result for searched term \"\"\"\r\n if len(names)==0:\r\n text=\"Sorry, We couldn't find any result for\";\r\n else:\r\n text=\"following results have been found for\"\r\n return text\r\n\r\n\r\n def dropfunction(self, parameters, dropdown, btn):\r\n \"\"\"this function will run when a city from dropdown will be selected as stated before drop function takes the\r\n specific locations founded by buttonfunction and then resets the current variables correspondingly to be\r\n shown in app and these variables then can be 
accessed in .ky file. This function is responsible for setting all\r\n weather related variables as this function calls currentweatherAPI and receives response \"\"\"\r\n dropdown.select(btn.text)\r\n # print(parameters)\r\n # for i in self.children:\r\n # if type(i) == kivy.uix.label.Label:\r\n # self.remove_widget(i)\r\n try:\r\n response = requests.get(f\"https://api.openweathermap.org/data/2.5/weather\", params=parameters)\r\n if(response.status_code==200):\r\n response = response.json()\r\n except requests.exceptions.ConnectionError as e:\r\n print(\"internet is not available\")\r\n sys.exit(\"Something went wrong\")\r\n # Internet connectivity is checked here once again because it is again possible that at some\r\n #user has internet connectivity when searching for city but has lost it now\r\n self.condition_descriptors = str(response['weather'][0][\"main\"]) + \",\" + str(\r\n response['weather'][0][\"description\"])\r\n self.condition_icon = \"./images/\" + str(response['weather'][0][\"icon\"]) + \".png\"\r\n self.temperature_description = \"Temperature is %d\" % (response['main'][\"temp\"])\r\n self.maximum_temperature=response['main']['temp_max']\r\n self.minimum_temperature=response['main']['temp_min']\r\n self.humidity=response['main']['humidity']\r\n self.visibility=response['visibility']\r\n self.feels_like=response['main']['feels_like']\r\n self.wind=response['wind']['speed']\r\n self.pressure=response['main']['pressure']\r\n # print(response['main']['temp'])\r\n # print(response)\r\n # print(\"I am called\")\r\n # return response\r\n # label1 = Label(\r\n # text=\"Temprature is %d and feels like %d\" % (response['main']['temp'], response['main']['feels_like']))\r\n # label = Label(text='%s , %s' % (response['weather'][0]['main'], response['weather'][0]['description']))\r\n # self.add_widget(label)\r\n # self.add_widget(label1)\r\n\r\n\r\nif __name__ == '__main__':\r\n WeatherProjApp().run()\r\n","repo_name":"prinklegulati/weatherApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38678430800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 15 12:24:01 2022\n\n@author: Necro\n\"\"\"\nfrom PIL import Image,ImageOps\nfrom numpy import asarray\nfrom mtcnn.mtcnn import MTCNN\nimport numpy as np\nfrom scipy.spatial.distance import cosine\nfrom keras_vggface.vggface import VGGFace\nfrom keras_vggface.utils import preprocess_input\nimport os\nfrom math import ceil\n\n\nimport sqlite3\n\n\ndef extract_face(filepath,required_size = (224, 224)):\n #load image\n #print(filepath)\n image = filepath\n print(\"######################################\")\n print(image.size)\n print(\"######################################\")\n \n # convert to RGB, if needed\n print(filepath)\n image = image.convert('RGB')\n #if filepath != \"C:/Users/Moina/Desktop/Proj_FaceRecognition/face_images/train/Simin/20211225_142558.jpg\":\n #image = image.resize((3000, 3000))\n #image = image.transpose(Image.ROTATE_270)\n image =ImageOps.exif_transpose(image)\n #image.show()\n #convert to np array\n pixels = asarray(image)\n \n # create the detector, using default weights\n detector = MTCNN()\n \n #detect faces in the image\n results = detector.detect_faces(pixels)\n \n print(\"######################################\")\n print(results)\n print(\"######################################\")\n if len(results)==0:\n print(results)\n return 1\n \n # extract the bounding box from the first face\n x1, y1, width, height = results[0]['box']\n \n # bug fix\n x1, y1 = abs(x1), abs(y1)\n x2, y2 = x1 + width, y1 + height\n\n # extract the face\n face = pixels[y1:y2, x1:x2]\n \n # resize pixels to the model size\n image = Image.fromarray(face)\n image = image.resize(required_size)\n numpydata = asarray(image)\n \n #expand the dimensions according to the required tflite model input \n npd = np.expand_dims(numpydata, axis=0)\n \n #img = cv2.resize(image,(224,224)) # resize image to match model's expected sizing\n #img = image.reshape(1,224,224,3) # return the image with shaping that TF wants.\n print(npd.size)\n face_array = npd\n \n return face_array\n\n\n\n# extract faces and calculate face embeddings for a list of photo files\ndef get_embeddings(filenames):\n\t# extract faces\n\t#faces = [extract_face(f) for f in filenames]\n faces = extract_face(filenames)\n\t# convert into an array of samples\n samples = asarray(faces, 'float32')\n\t# prepare the face for the model, e.g. 
center pixels\n samples = preprocess_input(samples, version=2)\n \n\t# create a vggface model\n model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')\n\t# perform prediction\n\t\n yhat = model.predict(samples)\n\t\n return yhat\n\n\n\ndef is_match(known_embedding, candidate_embedding,i,thresh=0.50):\n # calculate distance between embeddings\n score = cosine(known_embedding,candidate_embedding)\n if score <= thresh:\n print('>face is a Match (%.3f <= %.3f)' % (score, thresh))\n \n #count =+ 1\n else:\n print('>face is NOT a Match (%.3f > %.3f)' % (score, thresh))\n #show_face(faces[i])\n\ndef is_match2(known_embedding, candidate_embedding,thresh=0.50):\n # calculate distance between embeddings\n score = cosine(known_embedding,candidate_embedding)\n if score <= thresh:\n print('>face is a Match (%.3f <= %.3f)' % (score, thresh))\n #theta = (score//0.50)*100*(33.33/100)\n theta = (1-score)*(100/3)\n return [True,theta]\n else:\n print('>face is NOT a Match (%.3f > %.3f)' % (score, thresh))\n #theta = -(((score//1)*100)-50)\n theta = (1-score)*(100/3)\n return [False,theta]\n\n#*#\ndef get_embedding_id():\n \n conn = sqlite3.connect('faces.db')\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM faces')\n id_list = []\n rows = cursor.fetchall()\n for row in rows:\n id_list.append(str(row[1]))\n conn.close()\n return id_list\n \n#*# \ndef Insert_embedding(X,embeddings_per_id,name):\n '''\n insert emb1,emb2,emb3 for the new face\n '''\n \n conn = sqlite3.connect('faces.db')\n cursor = conn.cursor()\n emb_list = []\n for i in range(embeddings_per_id):\n img = Image.open(X[i].stream)\n emb=get_embeddings(img)\n emb_list.append(emb)\n n = str(name)\n cursor.execute(''' INSERT INTO faces (name,embeddings1,embeddings2,embeddings3) VALUES(?,?,?,?)''',(n,sqlite3.Binary(emb_list[0].tostring()),sqlite3.Binary(emb_list[1].tostring()),sqlite3.Binary(emb_list[2].tostring()),))\n conn.commit()\n conn.close() \n\n#*#\ndef Check_id(ID):\n \n conn = sqlite3.connect('faces.db')\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM faces')\n rows = cursor.fetchall()\n conn.close()\n for row in rows:\n if row[1]== ID:\n return True\n return False\n \n#*#\ndef Check_embedding(X,embeddings_per_id):\n #embeddings = np.load('embeddings.npy')\n #embeddings_id = np.load('embedding_id.npy')\n \n conn = sqlite3.connect('faces.db')\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM faces')\n rows = cursor.fetchall()\n \n score_dict = dict()\n for row in rows:\n score_dict[row[1]]=0\n \n for i in range(embeddings_per_id):\n img = Image.open(X[i].stream)\n new_embedding=get_embeddings(img)\n \n for idx,name,embedding1,embedding2,embedding3 in rows:\n #convert blob to np array\n pred_1 = is_match2(np.fromstring(embedding1, dtype=np.float32),new_embedding)\n pred_2 = is_match2(np.fromstring(embedding2, dtype=np.float32),new_embedding)\n pred_3 = is_match2(np.fromstring(embedding3, dtype=np.float32),new_embedding)\n \n if (pred_1[0]):\n score_dict[name]+=pred_1[1]\n else:\n score_dict[name]-=pred_1[1]\n if (pred_2[0]):\n score_dict[name]+=pred_2[1]\n else:\n score_dict[name]-=pred_2[1]\n if (pred_3[0]):\n score_dict[name]+=pred_3[1]\n else:\n score_dict[name]-=pred_3[1]\n conn.close()\n \n print(score_dict)\n max_value = max(score_dict.values())\n if max_value >= 90:\n print(\"possible match, value :\",max_value )\n return True\n else:\n print(\"No Match\")\n #pred_class = max(score_dict, key=score_dict.get)\n #print(type(pred_class))\n return False\n#*# \ndef 
Check_face(X,embeddings_per_id):\n face_table=[True]*3\n \n for i in range(embeddings_per_id):\n image = Image.open(X[i].stream)\n # convert to RGB, if needed\n image = image.convert('RGB')\n #face align\n image =ImageOps.exif_transpose(image)\n \n #convert to np array\n pixels = asarray(image)\n \n # create the detector, using default weights\n detector = MTCNN()\n \n #detect faces in the image\n results = detector.detect_faces(pixels)\n \n if len(results)==0:\n face_table[i]=False\n truth_count=0\n false_idx=list()\n for j in range(len(face_table)):\n if face_table[j]:\n truth_count+=1\n else:\n false_idx.append(j+1)\n if truth_count == embeddings_per_id:\n return [True,0]\n else:\n return [False,false_idx]\n \n \n \n#*#\n# For Scalability add sub category such as Eye color or other facial features to reduce time complexity, better performance when there are alot of rows.\ndef pred(X,embeddings_per_id):\n img = Image.open(X.stream)\n new_embedding=get_embeddings(img)\n \n ##\n conn = sqlite3.connect('faces.db')\n\n cursor = conn.cursor()\n \n cursor.execute('SELECT * FROM faces')\n \n # Fetch all the rows\n rows = cursor.fetchall()\n \n ##\n #embeddings_id = np.load('embedding_id.npy')\n #embeddings = np.load('embeddings.npy')\n \n \n score_dict = dict()\n for row in rows:\n score_dict[row[1]]=0\n \n for idx,name,embedding1,embedding2,embedding3 in rows:\n #convert blob to np array*\n pred_1 = is_match2(np.fromstring(embedding1, dtype=np.float32),new_embedding)\n pred_2 = is_match2(np.fromstring(embedding2, dtype=np.float32),new_embedding)\n pred_3 = is_match2(np.fromstring(embedding3, dtype=np.float32),new_embedding)\n \n if (pred_1[0]):\n score_dict[name]+=pred_1[1]\n else:\n score_dict[name]-=pred_1[1]\n if (pred_2[0]):\n score_dict[name]+=pred_2[1]\n else:\n score_dict[name]-=pred_2[1]\n if (pred_3[0]):\n score_dict[name]+=pred_3[1]\n else:\n score_dict[name]-=pred_3[1]\n conn.close()\n \n \n \n print(score_dict)\n max_value = max(score_dict.values())\n if max_value <= -15:\n return \"No match\"\n else:\n print(max_value)\n pred_class = max(score_dict, key=score_dict.get)\n print(type(pred_class))\n return pred_class\n \n\ndef delete_face(name):\n conn = sqlite3.connect('faces.db')\n cursor = conn.cursor()\n cursor.execute('DELETE FROM faces WHERE name = ?',(name,))\n conn.commit()\n conn.close()\n \n \n ","repo_name":"Moinahmed7777/Facial-Recognition-App","sub_path":"Api/Face_pred.py","file_name":"Face_pred.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
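The acceptance thresholds in Check_embedding (>= 90) and pred (<= -15) are easier to judge with concrete numbers. A standalone sketch of the score aggregation, using made-up cosine distances and no model or database:

# Each of an identity's 3 stored embeddings contributes theta = (1 - score) * (100 / 3),
# added on a match (score <= 0.50) and subtracted otherwise, so a perfect triple -> 100.
def theta(score):
    return (1 - score) * (100 / 3)

distances = [0.10, 0.15, 0.12]            # three fairly close matches (hypothetical)
total = sum(theta(s) for s in distances)  # 30.0 + 28.3 + 29.3 = ~87.7
print(total, total >= 90)                 # 87.7 False: the >= 90 cutoff demands an
                                          # average distance of about 0.10 or less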
+{"seq_id":"655006339","text":"\"\"\"\nBinary search trees are a data structure that enforce an ordering over \nthe data they store. That ordering in turn makes it a lot more efficient \nat searching for a particular piece of data in the tree. \n\nThis part of the project comprises two days:\n1. Implement the methods `insert`, `contains`, `get_max`, and `for_each`\n on the BSTNode class.\n2. Implement the `in_order_print`, `bft_print`, and `dft_print` methods\n on the BSTNode class.\n\"\"\"\n\nfrom queue import Queue\nfrom stack import Stack\n\n\nclass BinarySearchTree:\n ''' implement a bst '''\n\n def __init__(self, value):\n self.value = value # root node created when object instantiated\n self.left = None\n self.right = None\n\n def insert(self, value):\n ''' Insert the given value into the tree '''\n # if value to insert is greater than current node, go right\n # in a prior cohort's video the instructor said put dupe values\n # on the right so using a <= here\n if self.value <= value:\n # is there is no right node, make one\n if self.right is None:\n self.right = BinarySearchTree(value)\n # otherwise treat the node as the start of a new tree\n else:\n self.right.insert(value)\n # reverse of the \"go right\" logic above\n if self.value > value:\n if self.left is None:\n self.left = BinarySearchTree(value)\n else:\n self.left.insert(value)\n\n # Return True if the tree contains the value\n # False if it does not\n def contains(self, target):\n ''' Traverses the tree to find the input target variable '''\n if self.value == target:\n return True\n # target is larger the current node, go right\n if self.value < target:\n if self.right is None: # but there is no right so the target isn't here\n return False\n else: # go right and make it the current node\n return self.right.contains(target)\n else: # reverse of the \"go right\" logic above\n if self.left is None:\n return False\n else:\n return self.left.contains(target)\n\n def get_max(self):\n ''' Return the maximum value found in the tree '''\n # does root have a right node? 
n=root is max, y=right is current\n # current is new root so recurse\n if self.right:\n return self.right.get_max()\n else:\n return self.value\n\n def for_each(self, fn):\n ''' Call the function `fn` on the value of each node '''\n # run the function on the current node\n fn(self.value)\n # if there's a right node, run the function on it\n # this will take care of all rights\n if self.right:\n self.right.for_each(fn)\n # similarly for any left nodes\n if self.left:\n self.left.for_each(fn)\n\n # Part 2 -----------------------\n\n def in_order_print(self, node):\n ''' Print all the values in order from low to high\n Hint: Use a recursive, depth first traversal '''\n #\n # if node is none, we've reached a bottom\n if node is None:\n return None\n\n # print node's value\n # print(node.value)\n\n # go print the lefts\n self.in_order_print(node.left)\n\n # print node's value\n # print goes here so lefts get printed first\n print(node.value)\n\n # now print the rights\n self.in_order_print(node.right)\n # if self.left:\n # self.left.in_order_print(node.left)\n # if self.right:\n # self.right.in_order_print(node.right)\n\n def bft_print(self, node):\n ''' Print the value of every node, starting with the given node,\n in an iterative breadth first traversal '''\n # create a queue object and initialize it\n queue = Queue()\n queue.enqueue(node)\n # loop until the queue is empty\n while queue.size:\n # get the first in value and print\n x = queue.dequeue()\n print(x.value)\n # add the left and right children to the queue\n if x.left:\n queue.enqueue(x.left)\n if x.right:\n queue.enqueue(x.right)\n\n def dft_print(self, node):\n ''' Print the value of every node, starting with the given node,\n in an iterative depth first traversal '''\n # create a stack object and initialize it\n stack = Stack()\n stack.push(node)\n # loop until the stack is empty\n while stack.size:\n # get the top value from the stack and print\n # pop from stack must be before pushes below to prevent inf loop\n x = stack.pop()\n print(x.value)\n # add the left and right children to the stack\n if x.left:\n stack.push(x.left)\n if x.right:\n stack.push(x.right)\n\n # Stretch Goals -------------------------\n # Note: Research may be required\n\n # Print Pre-order recursive DFT\n def pre_order_dft(self, node):\n pass\n\n # Print Post-order recursive DFT\n def post_order_dft(self, node):\n pass\n\n\n# bst = BinarySearchTree(1)\n# bst.insert(8)\n# bst.insert(5)\n# bst.insert(7)\n# bst.insert(6)\n# bst.insert(3)\n# bst.insert(4)\n# bst.insert(2)\n# bst.in_order_print(bst)\n# bst.bft_print(bst)\n# bst.dft_print(bst)\n","repo_name":"1aaronscott/Data-Structures","sub_path":"binary_search_tree/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
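The two stretch-goal methods at the bottom are left as `pass`. Not part of the original solution, but one conventional way to fill them, mirroring the recursive style of in_order_print: pre-order prints a node before its subtrees, post-order after them.

    # Print Pre-order recursive DFT (sketch)
    def pre_order_dft(self, node):
        if node is None:
            return
        print(node.value)               # visit the node first
        self.pre_order_dft(node.left)
        self.pre_order_dft(node.right)

    # Print Post-order recursive DFT (sketch)
    def post_order_dft(self, node):
        if node is None:
            return
        self.post_order_dft(node.left)
        self.post_order_dft(node.right)
        print(node.value)               # visit the node last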
+{"seq_id":"41005052235","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom . import views\nfrom . import feeds\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', views.JobList.as_view(), name='job_list'),\n url(r'^new/$', views.JobCreate.as_view(), name='job_create'),\n url(r'^mine/$', views.MyListings.as_view(), name='job_list_mine'),\n url(r'^(?P\\d+)/$', views.JobDetail.as_view(), name='job_detail'),\n url(r'^(?P\\d+)/edit/$', views.JobEdit.as_view(), name='job_edit'),\n url(r'^(?P\\d+)/publish/$', views.PublishJob.as_view(), name='job_publish'),\n url(r'^(?P\\d+)/archive/$', views.ArchiveJob.as_view(), name='job_archive'),\n url(r'^(?P\\d+)/flag/$', views.FlagJob.as_view(), name='job_flag'),\n url(r'^feed/$', feeds.JobFeed(), name='job_feed'),\n url(r'^login/$', views.Login.as_view(), name='login'),\n url(r'^flags/$', views.ReviewFlags.as_view(), name='review_flags'),\n url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'', include('social_auth.urls')),\n)\n","repo_name":"jacobian/djobs","sub_path":"jobs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"81"}
+{"seq_id":"71139376586","text":"from multiprocessing import Pool\nimport bs4 as bs\nimport random\nimport requests\nimport string\nimport time\nimport zlib\nfrom random import randint\nimport shutil\nimport os\n\n\ndef main(num_process):\n movies = {\"Aladdin\": \"testFiles/Aladdin\",\n \"BeautyandtheBeast\": \"testFiles/BeautyandtheBeast\",\n \"LionKing\": \"testFiles/LionKing\",\n \"Tarzan\": \"testFiles/Tarzan\",\n \"TheLittleMermaid\": \"testFiles/TheLittleMermaid\"}\n\n clean()\n os.mkdir(\"compressedFiles\")\n os.mkdir(\"uncompressedFiles\")\n\n start = time.time()\n p = Pool(processes=num_process)\n p.starmap(compress, list(movies.items()))\n p.close()\n p.join()\n\n end = time.time()\n duration = end - start\n\n return duration\n\n\ndef compress(name, path_to_file):\n\n text = open(path_to_file+\".txt\", \"rb\").read()\n with open(\"compressedFiles/\"+name+\".zlib\", \"wb\") as myFile:\n myFile.write(zlib.compress(text))\n\n compressedText = open(\"compressedFiles/\"+name+\".zlib\", \"rb\").read()\n\n with open(\"uncompressedFiles/\"+name+\".txt\", \"wb\") as myFile:\n myFile.write(zlib.decompress(compressedText))\n\n\ndef clean():\n if(os.path.isdir(os.getcwd()+\"/compressedFiles\")):\n shutil.rmtree(os.getcwd()+\"/compressedFiles\")\n if(os.path.isdir(os.getcwd()+\"/uncompressedFiles\")):\n shutil.rmtree(os.getcwd()+\"/uncompressedFiles\")\n","repo_name":"jonathankingfc/multiprocessing_py","sub_path":"compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"10600748716","text":"#\tGoogle Drive + excessive \"driving\" == G[oogle] RoadHog\r\n\r\n# This script recovers the downloaded GRoadHog blocks back to the original content.\r\n# It looks for the folder containing all the neccessary blocks.\r\n# It proceeds them all till the resulting file is recovered and ready to use.\r\n\r\nimport sys\r\n\r\n#Name of the resulting file (from the command prompt)\r\nDESTINATION_FILE=sys.argv[1]\r\n\r\nfrom os import walk\r\nfrom os.path import join as fullpath\r\n\r\n# This part of code depends on \"xlrd\" library which can read data from Excel files -\r\n# the file format for exported Google Docs\r\nimport xlrd\r\n#This function reads our former CSV file block (now encoded as Excel Workbook) cell-by-cell,\r\n#deciphers the block content and writes it back to the original file\r\ndef ReadWorkBook(f, path):\r\n\t#Open workbook and the first sheet\r\n\twb=xlrd.open_workbook(path)\r\n\tsheet=wb.sheet_by_index(0)\r\n\t#Read every cell one-by-one\r\n\tfor x in range(sheet.nrows):\r\n\t\tfor y in range(sheet.ncols):\r\n\t\t\tcell_value=sheet.cell_value(rowx=x, colx=y)\r\n\t\t\t#Write decoded byte value back into original (f)ile\r\n\t\t\t[f.write(i) for i in decode(cell_value)]\r\n#\"xlrd\"-dependent part is over\r\n\r\n#Decode the cell\r\ndef decode(cell):\r\n\t#One byte encoded as a pair of hexademical digits\r\n\tpairs=[cell[i : i + 2] for i in range(0, len(cell), 2)]\r\n\tdecoded=[]\r\n\tfor p in pairs:\r\n\t\t#Those digits are encoded with the Caesar cipher \r\n\t\tdecoded += [bytes([int('%x%x' % \r\n\t\t(ord(p[0]) - ord('a'), ord(p[1]) - ord('a')), 16)])]\r\n\t#The result is a set of decoded bytes from one cell\r\n\treturn decoded\r\n\r\n#Find a folder that contains no subfolders but files with '.xlsx' extension:\r\nfor root, folders, files in walk(\".\"):\r\n\tif any([f.endswith('.xlsx') for f in files]) and not folders:\r\n\t\t#Sort the files by their number (0.xlsx, 1.xlsx, 2.xlsx, ..., 10.xlsx, ...)\r\n\t\tsorted_files=sorted(files, key=lambda item : int(item.split('.')[0]))\r\n\t\tbreak\r\n\t\t\r\n#Start writing the destination file: read books, decode, fill the resulting file\r\nwith open(DESTINATION_FILE, \"wb\") as destination:\r\n\tfor f in sorted_files:\r\n\t\tReadWorkBook(destination, fullpath(root, f))","repo_name":"RedSerge/fun","sub_path":"Experiments/GRoadHog/GRoadHog_recover.py","file_name":"GRoadHog_recover.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"6444034282","text":"import tensorflow as tf\nfrom tensorflow import keras\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#导入 Fashion MNIST 数据集\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n#查看数据\ntrain_images.shape\nlen(train_labels)\ntrain_labels\ntest_images.shape\nlen(test_labels)\n\nplt.figure()\nplt.imshow(train_images[0])\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\n#预处理数据\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nplt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\nplt.show()\n\n#构建模型\n#设置层\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(10)\n])\n'''\n该网络的第一层 tf.keras.layers.Flatten 将图像格式从二维数组(28 x 28 像素)转换成一维数组(28 x 28 = 784 像素)。\n将该层视为图像中未堆叠的像素行并将其排列起来。该层没有要学习的参数,它只会重新格式化数据。\n\n展平像素后,网络会包括两个 tf.keras.layers.Dense 层的序列。它们是密集连接或全连接神经层。\n第一个 Dense 层有 128 个节点(或神经元)。第二个(也是最后一个)层会返回一个长度为 10 的 logits 数组。每个节点都包含一个得分,用来表示当前图像属于 10 个类中的哪一类。\n'''\n#编译模型\n'''\n损失函数 - 用于测量模型在训练期间的准确率。您会希望最小化此函数,以便将模型“引导”到正确的方向上。\n优化器 - 决定模型如何根据其看到的数据和自身的损失函数进行更新。\n指标 - 用于监控训练和测试步骤。以下示例使用了准确率,即被正确分类的图像的比率。\n'''\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n'''\n训练模型\n训练神经网络模型需要执行以下步骤:\n\n将训练数据馈送给模型。在本例中,训练数据位于 train_images 和 train_labels 数组中。\n模型学��将图像和标签关联起来。\n要求模型对测试集(在本例中为 test_images 数组)进行预测。\n验证预测是否与 test_labels 数组中的标签相匹配。\n'''\n#向模型馈送数据\nmodel.fit(train_images, train_labels, epochs=10)\n\n#评估准确率\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint('\\nTest accuracy:', test_acc)\n\n#进行预测\n\n'''\n在模型经过训练后,您可以使用它对一些图像进行预测。\n模型具有线性输出,即 logits。您可以附加一个 softmax 层,将 logits 转换成更容易理解的概率。\n'''\nprobability_model = tf.keras.Sequential([model, \n tf.keras.layers.Softmax()])\npredictions = probability_model.predict(test_images)\n\npredictions[0]\n\n#最大置信度值\nprint(class_names[np.argmax(predictions[0])])","repo_name":"DannySunsan/tf-practice","sub_path":"test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"34633087745","text":"import deeplabcut\n\niterations = 500000\n\nProjectFolderName = '//wsl$/Ubuntu-20.04/home/nghess/Hand Test-NG Hess-2022-06-30'\nVideoType = '.mp4'\n\n# Set paths\nvideofile_path = [ProjectFolderName + '/videos']\npath_config_file = ProjectFolderName+'/config.yaml'\n\n# Set the shuffle you want to create, train, evaluate, use for analysis, etc (1 is default):\nSHUF = 1\n\n# Create training data\n# There are a lot of other arguments for this function. Might be worth trying some variations\n# DLC-Live appears to run faster using MobileNetV2-0.35 networks\ndeeplabcut.create_training_dataset(path_config_file, net_type='resnet_50', augmenter_type='imgaug')\n\n# Train\ndeeplabcut.train_network(path_config_file, shuffle=SHUF, displayiters=10, saveiters=500, maxiters=iterations)\n\n# Evaluate\ndeeplabcut.evaluate_network(path_config_file, Shuffles=[SHUF], plotting=False)\n","repo_name":"nghess/dlc","sub_path":"dlc-train.py","file_name":"dlc-train.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"42964434789","text":"import numpy as np\r\nfrom keras.datasets import cifar10\r\n\r\nnp.random.seed(10)\r\n\r\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\r\n\r\nx_train = x_train.astype('float32') / 255.0\r\nx_test = x_test.astype('float32') / 255.0\r\n\r\nfrom keras.utils import np_utils\r\n\r\ny_train_1hot = np_utils.to_categorical(y_train)\r\ny_test_1hot = np_utils.to_categorical(y_test)\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dropout,Dense,Flatten,Conv2D,MaxPooling2D\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(filters=32,kernel_size=(3,3),input_shape=(32,32,3),activation='relu',padding='same'))\r\nmodel.add(Dropout(rate=0.25))\r\nmodel.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\nmodel.add(Conv2D(filters=64,kernel_size=(3,3),activation='relu',padding='same'))\r\nmodel.add(Dropout(rate=0.25))\r\nmodel.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Dense(1024, activation='relu'))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\nprint(model.summary())\r\n\r\nmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\r\ntrain_history = model.fit(x_train,y_train_1hot,batch_size=128,epochs=10,verbose=1,validation_split=0.2)\r\n\r\nfrom show import show_train_history\r\n\r\nshow_train_history('acc','val_acc')\r\nshow_train_history('loss','val_loss')\r\n\r\nscores = model.evaluate(x_test,y_test_1hot,verbose=0)\r\nprint (scores)\r\n\r\npred = model.predict_classes(x_test)\r\nprint(pred[:10])\r\n\r\nfrom show import plot_images_labels_prediction\r\nplot_images_labels_prediction(x_test,y_test,pred,0,10)","repo_name":"KyrieSu/Deep-Learning","sub_path":"CIFAR10_CNN.py","file_name":"CIFAR10_CNN.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"617919305","text":"import os\nimport glob\n\n\ndef main():\n folder4k = '/DATA7_DB7/data/4khdr/data/Dataset/train_4k'\n folder540p = '/DATA7_DB7/data/4khdr/data/Dataset/train_540p'\n\n folders = sorted(os.listdir(folder4k))\n folders_540p = sorted(os.listdir(folder540p))\n len_folders = len(folders)\n\n for i,folder in enumerate(folders):\n\n print(folder)\n path4k = os.path.join(folder4k, folder)\n path540p = os.path.join(folder540p, folder)\n\n images = sorted(os.listdir(path540p))\n for img in images:\n img_path_4k = os.path.join(path4k, img)\n img_path_540p = os.path.join(path540p, img)\n index = int(img[:-4]) - 1\n new_img = '{:05d}.png'.format(index)\n\n new_img_path_4k = os.path.join(path4k, new_img)\n new_img_path_540p = os.path.join(path540p, new_img)\n\n os.rename(img_path_4k, new_img_path_4k)\n os.rename(img_path_540p, new_img_path_540p)\n\n\n print('Finished.')\n\n\ndef DIV2K(path):\n img_path_l = glob.glob(os.path.join(path, '*'))\n for img_path in img_path_l:\n new_path = img_path.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '')\n os.rename(img_path, new_path)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"laomao0/EDVR_4K_HDR","sub_path":"data_scripts/rename_folder_images.py","file_name":"rename_folder_images.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
+{"seq_id":"8084253717","text":"from bs4 import BeautifulSoup\nimport requests\n\nURL = \"https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=Data+Science&txtLocation=\"\n\nhtml_text = requests.get(URL).text\n\nsoup = BeautifulSoup(html_text, 'lxml')\n\njobs = soup.find_all('li', class_=\"clearfix job-bx wht-shd-bx\")\nfor job in jobs:\n published_date = job.find('span', class_=\"sim-posted\").text.strip()\n \n if published_date == \"Posted few days ago\":\n job_name = job.find('a').text.strip()\n company = job.find('h3', class_=\"joblist-comp-name\").text.strip()\n skills = job.find('span', class_=\"srp-skills\").text.replace(\" \", \"\").strip()\n\n print(f'''\nJob Title: {job_name}\nCompany: {company}\nRequired Skills: {skills}\n-------------------------------\n\n''')\n\n","repo_name":"mubarakmayyeri/beautifulsoup-exercises","sub_path":"scraper_{real_websites}/job_details.py","file_name":"job_details.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"33343181558","text":"import pickle\nimport deeplake\nimport numpy as np\nimport pytest\nfrom functools import partial\nfrom deeplake.util.exceptions import EmptyTensorError, TensorDoesNotExistError\n\nfrom deeplake.util.remove_cache import get_base_storage\nfrom deeplake.core.index.index import IndexEntry\nfrom deeplake.tests.common import (\n requires_torch,\n requires_libdeeplake,\n convert_data_according_to_torch_version,\n)\nfrom deeplake.core.dataset import Dataset\nfrom deeplake.constants import KB\n\nfrom PIL import Image # type: ignore\n\ntry:\n from torch.utils.data._utils.collate import default_collate\nexcept ImportError:\n pass\n\nfrom unittest.mock import patch\n\n\n# ensure tests have multiple chunks without a ton of data\nPYTORCH_TESTS_MAX_CHUNK_SIZE = 5 * KB\n\n\ndef double(sample):\n return sample * 2\n\n\ndef identity(batch):\n return batch\n\n\ndef identity_collate(batch):\n return batch\n\n\ndef to_tuple(sample, t1, t2):\n return sample[t1], sample[t2]\n\n\ndef reorder_collate(batch):\n x = [((x[\"a\"], x[\"b\"]), x[\"c\"]) for x in batch]\n return default_collate(x)\n\n\ndef dict_to_list(sample):\n return [sample[\"a\"], sample[\"b\"], sample[\"c\"]]\n\n\ndef my_transform_collate(batch):\n x = [(c, a, b) for a, b, c in batch]\n return default_collate(x)\n\n\ndef index_transform(sample):\n return sample[\"index\"], sample[\"xyz\"]\n\n\ndef dummy_init_fn(arg):\n return f\"function called with arg {arg}\"\n\n\n@requires_libdeeplake\ndef test_setting_woker_init_function(local_auth_ds):\n dl = local_auth_ds.dataloader().pytorch()\n\n assert dl.worker_init_fn == None\n dl.worker_init_fn = partial(dummy_init_fn, 1024)\n assert dl.worker_init_fn() == \"function called with arg 1024\"\n\n\n@requires_torch\n@requires_libdeeplake\ndef test_offset_ds_iteration(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"abc\", htype=\"generic\", dtype=\"uint16\")\n ds.abc.extend([i for i in range(10)])\n\n dl = (\n local_auth_ds.dataloader()\n .offset(4)\n .transform(identity)\n .pytorch(collate_fn=identity)\n )\n\n idx_table = [4, 5, 6, 7, 8, 9, 0, 1, 2, 3]\n for i, item in enumerate(dl):\n assert idx_table[i] == item[0][\"index\"].astype(int)\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\n \"ds\",\n [\n pytest.param(\n \"hub_cloud_ds\",\n marks=[pytest.mark.slow, pytest.mark.skip(\"Causing lockups\")],\n ),\n \"local_auth_ds\",\n ],\n indirect=True,\n)\n@pytest.mark.flaky\n@pytest.mark.skip(\"causing lockups\")\ndef test_pytorch_small(ds):\n with ds:\n ds.create_tensor(\"image\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.image.extend(([i * np.ones((i + 1, i + 1)) for i in range(16)]))\n ds.commit()\n ds.create_tensor(\"image2\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.image2.extend(np.array([i * np.ones((12, 12)) for i in range(16)]))\n dl = ds.dataloader().batch(1).pytorch(num_workers=2)\n\n assert len(dl.dataset) == 16\n\n for _ in range(2):\n for i, batch in enumerate(dl):\n np.testing.assert_array_equal(\n batch[\"image\"].numpy(), i * np.ones((1, i + 1, i + 1))\n )\n np.testing.assert_array_equal(\n batch[\"image2\"].numpy(), i * np.ones((1, 12, 12))\n )\n\n sub_ds = ds[5:]\n sub_dl = sub_ds.dataloader().pytorch(num_workers=0)\n\n for i, batch in enumerate(sub_dl):\n np.testing.assert_array_equal(\n batch[\"image\"].numpy(), (5 + i) * np.ones((1, 6 + i, 6 + i))\n )\n np.testing.assert_array_equal(\n batch[\"image2\"].numpy(), (5 + i) * np.ones((1, 12, 12))\n )\n\n sub_ds2 = ds[8:12]\n sub_dl2 = 
sub_ds2.dataloader().pytorch(num_workers=0)\n\n for _ in range(2):\n for i, batch in enumerate(sub_dl2):\n np.testing.assert_array_equal(\n batch[\"image\"].numpy(), (8 + i) * np.ones((1, 9 + i, 9 + i))\n )\n np.testing.assert_array_equal(\n batch[\"image2\"].numpy(), (8 + i) * np.ones((1, 12, 12))\n )\n\n sub_ds3 = ds[:5]\n sub_dl3 = sub_ds3.dataloader().pytorch(num_workers=0)\n\n for _ in range(2):\n for i, batch in enumerate(sub_dl3):\n np.testing.assert_array_equal(\n batch[\"image\"].numpy(), (i) * np.ones((1, i + 1, i + 1))\n )\n np.testing.assert_array_equal(\n batch[\"image2\"].numpy(), (i) * np.ones((1, 12, 12))\n )\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky(retry_count=3)\n@pytest.mark.slow\n@pytest.mark.timeout(10)\n@pytest.mark.skip(\"causing lockups\")\ndef test_pytorch_transform(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"image\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.image.extend(([i * np.ones((i + 1, i + 1)) for i in range(16)]))\n ds.checkout(\"alt\", create=True)\n ds.create_tensor(\"image2\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.image2.extend(np.array([i * np.ones((12, 12)) for i in range(16)]))\n\n dl = (\n ds.dataloader()\n .batch(1)\n .transform(to_tuple, t1=\"image\", t2=\"image2\")\n .pytorch(num_workers=2, collate_fn=identity_collate)\n )\n\n for _ in range(2):\n for i, batch in enumerate(dl):\n actual_image, actual_image2 = batch[0]\n expected_image = i * np.ones((i + 1, i + 1))\n expected_image2 = i * np.ones((12, 12))\n np.testing.assert_array_equal(actual_image, expected_image)\n np.testing.assert_array_equal(actual_image2, expected_image2)\n\n\n@requires_libdeeplake\n@pytest.mark.flaky\ndef test_inequal_tensors_dataloader_length(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"images\")\n ds.create_tensor(\"label\")\n ds.images.extend(([i * np.ones((i + 1, i + 1)) for i in range(16)]))\n\n ld = local_auth_ds.dataloader().batch(1).pytorch()\n assert len(ld) == 0\n ld1 = local_auth_ds.dataloader().batch(2).pytorch(tensors=[\"images\"])\n assert len(ld1) == 8\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky\n@pytest.mark.slow\ndef test_pytorch_transform_dict(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"image\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.image.extend(([i * np.ones((i + 1, i + 1)) for i in range(16)]))\n ds.create_tensor(\"image2\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.image2.extend(np.array([i * np.ones((12, 12)) for i in range(16)]))\n ds.create_tensor(\"image3\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.image3.extend(np.array([i * np.ones((12, 12)) for i in range(16)]))\n\n dl = ds.dataloader().transform({\"image\": double, \"image2\": None}).pytorch()\n\n assert len(dl.dataset) == 16\n\n for _ in range(2):\n for i, batch in enumerate(dl):\n assert set(batch.keys()) == {\"image\", \"image2\"}\n np.testing.assert_array_equal(\n batch[\"image\"].numpy(), 2 * i * np.ones((1, i + 1, i + 1))\n )\n np.testing.assert_array_equal(\n batch[\"image2\"].numpy(), i * np.ones((1, 12, 12))\n )\n\n for _ in range(2):\n for i, (image, image2) in enumerate(dl):\n np.testing.assert_array_equal(\n image.numpy(), 2 * i * np.ones((1, i + 1, i + 1))\n )\n np.testing.assert_array_equal(image2.numpy(), i * np.ones((1, 12, 12)))\n\n\n@pytest.mark.slow\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky\ndef test_pytorch_with_compression(local_auth_ds: Dataset):\n # TODO: chunk-wise compression for labels (right now 
they are uncompressed)\n with local_auth_ds as ds:\n images = ds.create_tensor(\n \"images\",\n htype=\"image\",\n sample_compression=\"png\",\n max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE,\n )\n labels = ds.create_tensor(\n \"labels\", htype=\"class_label\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE\n )\n\n assert images.meta.sample_compression == \"png\"\n\n images.extend(np.ones((16, 12, 12, 3), dtype=\"uint8\"))\n labels.extend(np.ones((16, 1), dtype=\"uint32\"))\n\n dl = ds.dataloader().pytorch(num_workers=0)\n\n for _ in range(2):\n for batch in dl:\n X = batch[\"images\"].numpy()\n T = batch[\"labels\"].numpy()\n assert X.shape == (1, 12, 12, 3)\n assert T.shape == (1, 1)\n\n\n@pytest.mark.slow\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky\ndef test_custom_tensor_order(local_auth_ds):\n with local_auth_ds as ds:\n tensors = [\"a\", \"b\", \"c\", \"d\"]\n for t in tensors:\n ds.create_tensor(t, max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds[t].extend(np.random.random((3, 4, 5)))\n\n with pytest.raises(TensorDoesNotExistError):\n ds.dataloader().pytorch(tensors=[\"c\", \"d\", \"e\"])\n\n dl = ds.dataloader().pytorch(tensors=[\"c\", \"d\", \"a\"], return_index=False)\n\n for i, batch in enumerate(dl):\n c1, d1, a1 = batch\n a2 = batch[\"a\"]\n c2 = batch[\"c\"]\n d2 = batch[\"d\"]\n assert \"b\" not in batch\n np.testing.assert_array_equal(a1, a2)\n np.testing.assert_array_equal(c1, c2)\n np.testing.assert_array_equal(d1, d2)\n np.testing.assert_array_equal(a1[0], ds.a.numpy()[i])\n np.testing.assert_array_equal(c1[0], ds.c.numpy()[i])\n np.testing.assert_array_equal(d1[0], ds.d.numpy()[i])\n batch = pickle.loads(pickle.dumps(batch))\n c1, d1, a1 = batch\n a2 = batch[\"a\"]\n c2 = batch[\"c\"]\n d2 = batch[\"d\"]\n np.testing.assert_array_equal(a1, a2)\n np.testing.assert_array_equal(c1, c2)\n np.testing.assert_array_equal(d1, d2)\n np.testing.assert_array_equal(a1[0], ds.a.numpy()[i])\n np.testing.assert_array_equal(c1[0], ds.c.numpy()[i])\n np.testing.assert_array_equal(d1[0], ds.d.numpy()[i])\n\n\n@pytest.mark.slow\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky\n@pytest.mark.skip(\"causing lockups\")\n@pytest.mark.timeout(10)\ndef test_readonly_with_two_workers(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"images\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.create_tensor(\"labels\", max_chunk_size=PYTORCH_TESTS_MAX_CHUNK_SIZE)\n ds.images.extend(np.ones((10, 12, 12)))\n ds.labels.extend(np.ones(10))\n\n base_storage = get_base_storage(ds.storage)\n base_storage.flush()\n base_storage.enable_readonly()\n ds = Dataset(\n storage=ds.storage,\n token=ds.token,\n read_only=True,\n verbose=False,\n )\n\n ptds = ds.dataloader().pytorch(num_workers=2)\n # no need to check input, only care that readonly works\n for _ in ptds:\n continue\n\n\n@pytest.mark.xfail(raises=NotImplementedError, strict=True)\ndef test_corrupt_dataset():\n raise NotImplementedError\n\n\n@pytest.mark.xfail(raises=NotImplementedError, strict=True)\ndef test_pytorch_local_cache():\n raise NotImplementedError\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_groups(local_auth_ds, compressed_image_paths):\n img1 = deeplake.read(compressed_image_paths[\"jpeg\"][0])\n img2 = deeplake.read(compressed_image_paths[\"png\"][0])\n with local_auth_ds as ds:\n ds.create_tensor(\"images/jpegs/cats\", htype=\"image\", sample_compression=\"jpeg\")\n ds.create_tensor(\"images/pngs/flowers\", htype=\"image\", 
sample_compression=\"png\")\n for _ in range(10):\n ds.images.jpegs.cats.append(img1)\n ds.images.pngs.flowers.append(img2)\n\n another_ds = deeplake.dataset(\n ds.path,\n token=ds.token,\n )\n dl = another_ds.dataloader().pytorch(return_index=False)\n for i, (cat, flower) in enumerate(dl):\n assert cat[0].shape == another_ds.images.jpegs.cats[i].numpy().shape\n assert flower[0].shape == another_ds.images.pngs.flowers[i].numpy().shape\n\n dl = another_ds.images.dataloader().pytorch(return_index=False)\n for sample in dl:\n cat = sample[\"images/jpegs/cats\"]\n flower = sample[\"images/pngs/flowers\"]\n np.testing.assert_array_equal(cat[0], img1.array)\n np.testing.assert_array_equal(flower[0], img2.array)\n\n\n@pytest.mark.slow\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky\ndef test_string_tensors(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"strings\", htype=\"text\")\n ds.strings.extend([f\"string{idx}\" for idx in range(5)])\n\n ptds = ds.dataloader().pytorch()\n for idx, batch in enumerate(ptds):\n np.testing.assert_array_equal(batch[\"strings\"], f\"string{idx}\")\n\n\n@pytest.mark.xfail(raises=NotImplementedError, strict=True)\ndef test_pytorch_large():\n raise NotImplementedError\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\n \"index\",\n [\n slice(2, 7),\n slice(3, 10, 2),\n slice(None, 10),\n slice(None, None, -1),\n slice(None, None, -2),\n [2, 3, 4],\n [2, 4, 6, 8],\n [2, 2, 4, 4, 6, 6, 7, 7, 8, 8, 9, 9, 9],\n [4, 3, 2, 1],\n ],\n)\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_pytorch_view(local_auth_ds, index):\n arr_list_1 = [np.random.randn(15, 15, i) for i in range(10)]\n arr_list_2 = [np.random.randn(40, 15, 4, i) for i in range(10)]\n label_list = list(range(10))\n\n with local_auth_ds as ds:\n ds.create_tensor(\"img1\")\n ds.create_tensor(\"img2\")\n ds.create_tensor(\"label\")\n ds.img1.extend(arr_list_1)\n ds.img2.extend(arr_list_2)\n ds.label.extend(label_list)\n\n ptds = ds[index].dataloader().pytorch()\n idxs = list(IndexEntry(index).indices(len(ds)))\n for idx, batch in enumerate(ptds):\n idx = idxs[idx]\n np.testing.assert_array_equal(batch[\"img1\"][0], arr_list_1[idx])\n np.testing.assert_array_equal(batch[\"img2\"][0], arr_list_2[idx])\n np.testing.assert_array_equal(batch[\"label\"][0], idx)\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\"shuffle\", [True, False])\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_pytorch_collate(local_auth_ds, shuffle):\n with local_auth_ds as ds:\n ds.create_tensor(\"a\")\n ds.create_tensor(\"b\")\n ds.create_tensor(\"c\")\n for _ in range(100):\n ds.a.append(0)\n ds.b.append(1)\n ds.c.append(2)\n\n ptds = ds.dataloader().batch(4).pytorch(collate_fn=reorder_collate)\n if shuffle:\n ptds = ptds.shuffle()\n for batch in ptds:\n assert len(batch) == 2\n assert len(batch[0]) == 2\n np.testing.assert_array_equal(batch[0][0], np.array([0, 0, 0, 0]).reshape(4, 1))\n np.testing.assert_array_equal(batch[0][1], np.array([1, 1, 1, 1]).reshape(4, 1))\n np.testing.assert_array_equal(batch[1], np.array([2, 2, 2, 2]).reshape(4, 1))\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\"shuffle\", [True, False])\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_pytorch_transform_collate(local_auth_ds, shuffle):\n with local_auth_ds as ds:\n ds.create_tensor(\"a\")\n ds.create_tensor(\"b\")\n ds.create_tensor(\"c\")\n for _ in range(100):\n ds.a.append(0 * np.ones((300, 300)))\n ds.b.append(1 * np.ones((300, 300)))\n ds.c.append(2 * 
np.ones((300, 300)))\n\n ptds = (\n ds.dataloader()\n .batch(4)\n .pytorch(\n collate_fn=my_transform_collate,\n )\n .transform(dict_to_list)\n )\n if shuffle:\n ptds = ptds.shuffle()\n for batch in ptds:\n assert len(batch) == 3\n for i in range(2):\n assert len(batch[i]) == 4\n np.testing.assert_array_equal(batch[0], 2 * np.ones((4, 300, 300)))\n np.testing.assert_array_equal(batch[1], 0 * np.ones((4, 300, 300)))\n np.testing.assert_array_equal(batch[2], 1 * np.ones((4, 300, 300)))\n\n\n@pytest.mark.xfail(raises=NotImplementedError, strict=True)\ndef test_pytorch_ddp():\n raise NotImplementedError\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\"compression\", [None, \"jpeg\"])\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_pytorch_decode(local_auth_ds, compressed_image_paths, compression):\n with local_auth_ds as ds:\n ds.create_tensor(\"image\", sample_compression=compression)\n ds.image.extend(\n np.array([i * np.ones((10, 10, 3), dtype=np.uint8) for i in range(5)])\n )\n ds.image.extend([deeplake.read(compressed_image_paths[\"jpeg\"][0])] * 5)\n\n ptds = ds.dataloader().pytorch(decode_method={\"image\": \"tobytes\"})\n\n for i, batch in enumerate(ptds):\n image = convert_data_according_to_torch_version(batch[\"image\"])\n assert isinstance(image, bytes)\n if i < 5 and not compression:\n np.testing.assert_array_equal(\n np.frombuffer(image, dtype=np.uint8).reshape(10, 10, 3),\n i * np.ones((10, 10, 3), dtype=np.uint8),\n )\n elif i >= 5 and compression:\n with open(compressed_image_paths[\"jpeg\"][0], \"rb\") as f:\n assert f.read() == image\n\n if compression:\n ptds = ds.dataloader().numpy(decode_method={\"image\": \"pil\"})\n for i, batch in enumerate(ptds):\n image = batch[0][\"image\"]\n assert isinstance(image, Image.Image)\n if i < 5:\n np.testing.assert_array_equal(\n np.array(image), i * np.ones((10, 10, 3), dtype=np.uint8)\n )\n elif i >= 5:\n with Image.open(compressed_image_paths[\"jpeg\"][0]) as f:\n np.testing.assert_array_equal(np.array(f), np.array(image))\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky\n@pytest.mark.slow\ndef test_rename(local_auth_ds):\n group_name = \"red/green\"\n with local_auth_ds as ds:\n ds.create_tensor(\"abc\")\n ds.create_tensor(\"blue/green\")\n ds.abc.append([1, 2, 3])\n ds.rename_tensor(\"abc\", \"xyz\")\n ds.rename_group(\"blue\", \"red\")\n ds[group_name].append([1, 2, 3, 4])\n loader = ds.dataloader().pytorch(return_index=False)\n for sample in loader:\n assert set(sample.keys()) == {\"xyz\", group_name}\n np.testing.assert_array_equal(np.array(sample[\"xyz\"]), np.array([[1, 2, 3]]))\n np.testing.assert_array_equal(\n np.array(sample[group_name]), np.array([[1, 2, 3, 4]])\n )\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\n \"num_workers\",\n [\n 0,\n pytest.param(2, marks=pytest.mark.skip(reason=\"causing lockups\")),\n ],\n)\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_indexes(local_auth_ds, num_workers):\n with local_auth_ds as ds:\n ds.create_tensor(\"xyz\")\n for i in range(8):\n ds.xyz.append(i * np.ones((2, 2)))\n\n ptds = ds.dataloader().batch(4).pytorch(num_workers=num_workers, return_index=True)\n ptds = ptds.shuffle()\n\n for batch in ptds:\n assert batch.keys() == {\"xyz\", \"index\"}\n for i in range(len(batch)):\n np.testing.assert_array_equal(batch[\"index\"][i], batch[\"xyz\"][i][0, 0])\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.slow\n@pytest.mark.parametrize(\n \"num_workers\",\n [\n 0,\n pytest.param(2, 
marks=pytest.mark.skip(\"causing lockups\")),\n ],\n)\n@pytest.mark.flaky\ndef test_indexes_transform(local_auth_ds, num_workers):\n with local_auth_ds as ds:\n ds.create_tensor(\"xyz\")\n for i in range(8):\n ds.xyz.append(i * np.ones((2, 2)))\n\n ptds = (\n ds.dataloader()\n .batch(4)\n .transform(index_transform)\n .pytorch(\n num_workers=num_workers, return_index=True, collate_fn=identity_collate\n )\n )\n\n for batch in ptds:\n assert len(batch) == 4\n assert len(batch[0]) == 2\n assert len(batch[1]) == 2\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\n \"num_workers\", [0, pytest.param(2, marks=pytest.mark.skip(\"causing lockups\"))]\n)\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_indexes_transform_dict(local_auth_ds, num_workers):\n with local_auth_ds as ds:\n ds.create_tensor(\"xyz\")\n for i in range(8):\n ds.xyz.append(i * np.ones((2, 2)))\n\n ptds = (\n ds.dataloader()\n .batch(4)\n .transform({\"xyz\": double, \"index\": None})\n .pytorch(num_workers=num_workers, return_index=True)\n )\n\n for batch in ptds:\n assert batch.keys() == {\"xyz\", \"index\"}\n for i in range(len(batch)):\n np.testing.assert_array_equal(2 * batch[\"index\"][i], batch[\"xyz\"][i][0, 0])\n\n ptds = (\n ds.dataloader()\n .batch(4)\n .transform({\"xyz\": double})\n .pytorch(num_workers=num_workers, return_index=True)\n )\n\n for batch in ptds:\n assert batch.keys() == {\"xyz\"}\n\n\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.parametrize(\n \"num_workers\", [0, pytest.param(2, marks=pytest.mark.skip(\"causing lockups\"))]\n)\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_indexes_tensors(local_auth_ds, num_workers):\n with local_auth_ds as ds:\n ds.create_tensor(\"xyz\")\n for i in range(8):\n ds.xyz.append(i * np.ones((2, 2)))\n\n with pytest.raises(ValueError):\n (\n ds.dataloader()\n .batch(4)\n .pytorch(\n num_workers=num_workers, return_index=True, tensors=[\"xyz\", \"index\"]\n )\n )\n\n ptds = (\n ds.dataloader()\n .batch(4)\n .pytorch(num_workers=num_workers, return_index=True, tensors=[\"xyz\"])\n )\n\n for batch in ptds:\n assert batch.keys() == {\"xyz\", \"index\"}\n\n\n@requires_libdeeplake\n@requires_torch\n@pytest.mark.flaky\n@pytest.mark.slow\ndef test_uneven_iteration(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"x\")\n ds.create_tensor(\"y\")\n ds.x.extend(list(range(5)))\n ds.y.extend(list(range(10)))\n ptds = ds.dataloader().pytorch()\n for i, batch in enumerate(ptds):\n x, y = np.array(batch[\"x\"][0]), np.array(batch[\"y\"][0])\n np.testing.assert_equal(x, i)\n np.testing.assert_equal(y, i)\n\n\n@requires_libdeeplake\n@requires_torch\n@pytest.mark.slow\ndef test_pytorch_error_handling(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\"x\")\n ds.create_tensor(\"y\")\n ds.x.extend(list(range(5)))\n\n ptds = ds.dataloader().pytorch()\n with pytest.raises(EmptyTensorError):\n for _ in ptds:\n continue\n\n ptds = ds.dataloader().pytorch(tensors=[\"x\", \"y\"])\n with pytest.raises(EmptyTensorError):\n for _ in ptds:\n continue\n\n ptds = ds.dataloader().pytorch(tensors=[\"x\"])\n for _ in ptds:\n continue\n\n\n@requires_libdeeplake\n@requires_torch\ndef test_batch_sampler_attribute(local_auth_ds):\n ld = local_auth_ds.dataloader().pytorch()\n\n from torch.utils.data import BatchSampler\n\n assert isinstance(ld.batch_sampler, BatchSampler)\n assert ld.batch_sampler.sampler is not None\n\n\n@requires_libdeeplake\n@requires_torch\n@pytest.mark.slow\n@pytest.mark.flaky\ndef test_pil_decode_method(local_auth_ds):\n from 
indra.pytorch.exceptions import CollateExceptionWrapper # type: ignore\n\n with local_auth_ds as ds:\n ds.create_tensor(\"x\", htype=\"image\", sample_compression=\"jpeg\")\n ds.x.extend(np.random.randint(0, 255, (10, 10, 10, 3), np.uint8))\n\n ptds = ds.dataloader().pytorch(return_index=False)\n for batch in ptds:\n assert len(batch.keys()) == 1\n assert \"x\" in batch.keys()\n assert batch[\"x\"].shape == (1, 10, 10, 3)\n\n ptds = ds.dataloader().pytorch(decode_method={\"x\": \"pil\"})\n with pytest.raises(CollateExceptionWrapper):\n for _ in ptds:\n continue\n\n def custom_transform(batch):\n batch[\"x\"] = np.array(batch[\"x\"])\n return batch\n\n ptds = (\n ds.dataloader()\n .pytorch(decode_method={\"x\": \"pil\"}, return_index=False)\n .transform(custom_transform)\n )\n for batch in ptds:\n assert len(batch.keys()) == 1\n assert \"x\" in batch.keys()\n assert batch[\"x\"].shape == (1, 10, 10, 3)\n\n\n@patch(\"deeplake.constants.RETURN_DUMMY_DATA_FOR_DATALOADER\", True)\n@requires_torch\n@requires_libdeeplake\n@pytest.mark.flaky\ndef test_pytorch_dummy_data(local_auth_ds):\n x_data = [\n np.random.randint(0, 255, (100, 100, 3), dtype=\"uint8\"),\n np.random.randint(0, 255, (120, 120, 3), dtype=\"uint8\"),\n ]\n y_data = [np.random.rand(100, 100, 3), np.random.rand(120, 120, 3)]\n z_data = [\"hello\", \"world\"]\n with local_auth_ds as ds:\n ds.create_tensor(\"x\")\n ds.create_tensor(\"y\")\n ds.create_tensor(\"z\")\n ds.x.extend(x_data)\n ds.y.extend(y_data)\n ds.z.extend(z_data)\n\n ptds = ds.dataloader()\n for i, batch in enumerate(ptds):\n x = x_data[i]\n dummy_x = batch[0][\"x\"]\n assert dummy_x.shape == x.shape\n assert dummy_x.dtype == x.dtype\n\n y = y_data[i]\n dummy_y = batch[0][\"y\"]\n assert dummy_y.shape == y.shape\n assert dummy_y.dtype == y.dtype\n\n dummy_z = batch[0][\"z\"]\n assert dummy_z[0] == \"a\"\n\n\n@requires_libdeeplake\n@requires_torch\n@pytest.mark.flaky\n@pytest.mark.slow\ndef test_json_data_loader(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\n \"json\",\n htype=\"json\",\n sample_compression=None,\n )\n d = {\"x\": 1, \"y\": 2, \"z\": 3}\n for _ in range(10):\n ds.json.append(d)\n\n dl = ds.dataloader().batch(2)\n\n for batch in dl:\n sample1 = batch[0][\"json\"]\n sample2 = batch[1][\"json\"]\n\n assert sample1 == d\n assert sample2 == d\n\n\n@requires_libdeeplake\n@requires_torch\n@pytest.mark.flaky\n@pytest.mark.slow\ndef test_list_data_loader(local_auth_ds):\n with local_auth_ds as ds:\n ds.create_tensor(\n \"list\",\n htype=\"list\",\n sample_compression=None,\n )\n l = [1, 2, 3]\n for _ in range(10):\n ds.list.append(l)\n\n dl = ds.dataloader().batch(2)\n\n for batch in dl:\n sample1 = batch[0][\"list\"]\n sample2 = batch[1][\"list\"]\n assert sample1.tolist() == l\n assert sample2.tolist() == l\n\n\n@requires_libdeeplake\n@requires_torch\n@pytest.mark.flaky\n@pytest.mark.slow\ndef test_pytorch_data_decode(local_auth_ds, cat_path):\n with local_auth_ds as ds:\n ds.create_tensor(\"generic\")\n for i in range(10):\n ds.generic.append(i)\n ds.create_tensor(\"text\", htype=\"text\")\n for i in range(10):\n ds.text.append(f\"hello {i}\")\n ds.create_tensor(\"json\", htype=\"json\")\n for i in range(10):\n ds.json.append({\"x\": i})\n ds.create_tensor(\"list\", htype=\"list\")\n for i in range(10):\n ds.list.append([i, i + 1])\n ds.create_tensor(\"class_label\", htype=\"class_label\")\n animals = [\n \"cat\",\n \"dog\",\n \"bird\",\n \"fish\",\n \"horse\",\n \"cow\",\n \"pig\",\n \"sheep\",\n \"goat\",\n \"chicken\",\n ]\n 
ds.class_label.extend(animals)\n ds.create_tensor(\"image\", htype=\"image\", sample_compression=\"jpeg\")\n for i in range(10):\n ds.image.append(deeplake.read(cat_path))\n\n decode_method = {tensor: \"data\" for tensor in list(ds.tensors.keys())}\n ptds = (\n ds.dataloader()\n .transform(identity)\n .pytorch(decode_method=decode_method, collate_fn=identity_collate)\n )\n for i, batch in enumerate(ptds):\n sample = batch[0]\n assert sample[\"text\"][\"value\"] == f\"hello {i}\"\n assert sample[\"json\"][\"value\"] == {\"x\": i}\n assert sample[\"list\"][\"value\"].tolist() == [i, i + 1]\n assert sample[\"class_label\"][\"value\"] == [i]\n assert sample[\"class_label\"][\"text\"] == [animals[i]]\n assert sample[\"image\"][\"value\"].shape == (900, 900, 3)\n assert sample[\"generic\"][\"value\"] == i\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/enterprise/test_pytorch.py","file_name":"test_pytorch.py","file_ext":"py","file_size_in_byte":28046,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"}
+{"seq_id":"20434678731","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 18 20:18:34 2021\n\n@author: erwan\n\n\nAdd a RulerTool to measure spectra\n\n Based on work from TerranJP\n https://github.com/terranjp/matplotlib-tools\n\n\nExamples\n--------\n::\n\n from radis.tools.plot_tools import add_ruler\n fig = plt.figure()\n add_ruler(fig)\n\n.. image:: https://user-images.githubusercontent.com/16088743/122615292-95a9e080-d088-11eb-9927-bf1187d5a94a.png\n\n\n\n\"\"\"\nimport warnings\n\n\nclass ParamRange:\n def __init__(self, valmin=0, valmax=1, valinit=None):\n r\"\"\"Used in :py:func:`radis.lbl.factory.SpectrumFactory.eq_spectrum_gpu_interactive`\"\"\"\n self.valmin = valmin\n self.valmax = valmax\n\n if valinit is None:\n valinit = valmin\n self.valinit = valinit\n self.val = self.valinit\n self.name = None\n self.widget = None\n\n def set_widget(self, w, spec, func=lambda x: 0):\n self.widget = w\n self.spec = spec\n self.update_callback = func\n w.on_changed(self.widget_callback)\n\n def widget_callback(self, val):\n self.val = val\n self.spec.conditions[self.name] = val\n self.update_callback(val)\n\n def __repr__(self):\n return \"ParamRange({:s} .. {:s} [{:s}] @ {:s})\".format(\n self.valmin.__repr__(),\n self.valmax.__repr__(),\n self.valinit.__repr__(),\n self.val.__repr__(),\n )\n\n\ndef add_ruler(fig, wunit=\"\", Iunit=\"\", ax=None):\n \"\"\"\n\n Add a RulerTool to measure spectra\n\n Based on work from TerranJP\n https://github.com/terranjp/matplotlib-tools\n\n\n Examples\n --------\n ::\n\n from radis.tools.plot_tools import add_ruler\n fig = plt.figure()\n add_ruler(fig)\n\n .. image:: https://user-images.githubusercontent.com/16088743/122615292-95a9e080-d088-11eb-9927-bf1187d5a94a.png\n\n \"\"\"\n\n import matplotlib.pyplot as plt\n import numpy as np\n from matplotlib.backend_tools import ToolToggleBase\n from matplotlib.widgets import AxesWidget\n\n # TODO: build it only on demand/ if add_tool called?\n class Ruler(AxesWidget):\n r\"\"\"\n A ruler to measure distances and angles on an axes instance.\n\n For the ruler to remain responsive you must keep a reference to it.\n\n Parameters\n ----------\n ax : the :class:`matplotlib.axes.Axes` instance\n active : bool, default is True\n Whether the ruler is active or not.\n wunit, Iunit: str\n unit of spectra\n print_text : bool, default is False\n Whether the length measure string is printed to the console\n useblit : bool, default is False\n If True, use the backend-dependent blitting features for faster\n canvas updates.\n lineprops : dict, default is None\n Dictionary of :class:`matplotlib.lines.Line2D` properties\n markerprops : dict, default is None\n Dictionary of :class:`matplotlib.markers.MarkerStyle` properties\n textprops: dict, default is None\n Dictionary of :class:`matplotlib.text.Text` properties. To reposition the\n textbox you can override the defaults which position the box in the top left\n corner of the axes.\n\n Notes\n -----\n\n Usage:\n\n 1. Hold left click drag and release to draw the ruler in the axes.\n - Hold shift while dragging to lock the ruler to the horizontal axis.\n - Hold control while drawing to lock the ruler to the vertical axis.\n\n 2. 
Right click one of the markers to move the ruler.\n\n The keyboard can be used to activate and deactivate the ruler and toggle\n visibility of the line and text:\n\n 'm' : Toggles the ruler on and off.\n\n 'ctl+m' : Toggles the visibility of the ruler and text.\n\n Example\n -------\n\n >>> xCoord = np.arange(0, 5, 1)\n >>> yCoord = [0, 1, -3, 5, -3]\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n\n >>> markerprops = dict(marker='o', markersize=5, markeredgecolor='red')\n >>> lineprops = dict(color='red', linewidth=2)\n\n >>> ax.grid(True)\n >>> ax.plot(xCoord, yCoord)\n\n >>> ruler = Ruler(ax=ax,\n useblit=True,\n markerprops=markerprops,\n lineprops=lineprops)\n\n >>> plt.show()\n\n Typical output :\n\n .. image:: https://user-images.githubusercontent.com/16088743/122615292-95a9e080-d088-11eb-9927-bf1187d5a94a.png\n\n \"\"\"\n\n def __init__(\n self,\n ax,\n active=True,\n print_text=False,\n useblit=False,\n lineprops=None,\n textprops=None,\n markerprops=None,\n wunit=\"\",\n Iunit=\"\",\n ):\n \"\"\"\n Add a ruler to *ax*. If ``ruler_active=True``, the ruler will be\n activated when the plot is first created. If ``ruler_unit`` is set the\n string will be appended to the length text annotations.\n\n \"\"\"\n AxesWidget.__init__(self, ax)\n\n self.connect_events()\n\n self.ax = ax\n self.fig = ax.figure\n\n self._print_text = print_text\n self._visible = True\n self.active = active\n self.wunit = wunit\n self.Iunit = Iunit\n\n self.useblit = useblit and self.canvas.supports_blit\n\n self._mouse1_pressed = False\n self._mouse3_pressed = False\n self._shift_pressed = False\n self._control_pressed = False\n self._y0 = None\n self._x1 = None\n self._y1 = None\n self._line_start_coords = None\n self._line_end_coords = None\n self._ruler_marker = None\n self._background = None\n self._ruler_moving = False\n self._end_a_lock = False\n self._end_b_lock = False\n self._end_c_lock = False\n self._old_marker_a_coords = None\n self._old_marker_c_coords = None\n self._old_mid_coords = None\n\n if lineprops is None:\n lineprops = {}\n\n bbox = dict(\n facecolor=\"white\", alpha=0.5, boxstyle=\"round\", edgecolor=\"0.75\"\n )\n\n used_textprops = dict(\n xy=(0, 1),\n xytext=(10, -10),\n xycoords=\"axes fraction\",\n textcoords=\"offset points\",\n ha=\"left\",\n va=\"center\",\n size=12,\n bbox=bbox,\n )\n\n x0 = np.nan\n y0 = np.nan\n\n (self._ruler,) = self.ax.plot([x0, x0], [y0, y0], **lineprops)\n\n used_markerprops = dict(\n marker=\"s\",\n markersize=3,\n markerfacecolor=\"white\",\n markeredgecolor=\"black\",\n markeredgewidth=0.5,\n picker=True,\n pickradius=5,\n visible=False,\n )\n\n # If marker or text props are given as an argument combine with the\n # default marker props. 
Don't really want to override the entire props\n # if a user only gives one value.\n\n if markerprops is not None:\n used_markerprops.update(markerprops)\n\n if textprops is not None:\n used_textprops.update(used_textprops)\n\n self._axes_text = self.ax.annotate(text=\"\", **used_textprops)\n self.ax.add_artist(self._axes_text)\n\n (self._marker_a,) = self.ax.plot((x0, y0), **used_markerprops)\n (self._marker_b,) = self.ax.plot((x0, y0), **used_markerprops)\n (self._marker_c,) = self.ax.plot((x0, y0), **used_markerprops)\n\n self._artists = [\n self._axes_text,\n self._ruler,\n self._marker_a,\n self._marker_b,\n self._marker_c,\n ]\n\n def connect_events(self):\n \"\"\"\n Connect all events to the various callbacks\n \"\"\"\n self.connect_event(\"button_press_event\", self._on_press)\n self.connect_event(\"button_release_event\", self._on_release)\n self.connect_event(\"motion_notify_event\", self._on_move)\n self.connect_event(\"key_press_event\", self._on_key_press)\n self.connect_event(\"key_release_event\", self._on_key_release)\n\n def ignore(self, event):\n \"\"\"\n Ignore events if the cursor is out of the axes or the widget is locked\n \"\"\"\n if not self.canvas.widgetlock.available(self):\n return True\n if event.inaxes != self.ax.axes:\n return True\n if not self.active:\n return True\n\n def _on_key_press(self, event):\n \"\"\"\n Handle key press events.\n\n If shift is pressed the ruler will be constrained to horizontal axis\n If control is pressed the ruler will be constrained to vertical axis\n If m is pressed the ruler will be toggled on and off\n If ctrl+m is pressed the visibility of the ruler will be toggled\n \"\"\"\n\n if event.key == \"shift\":\n self._shift_pressed = True\n\n if event.key == \"control\":\n self._control_pressed = True\n\n if event.key == \"m\":\n self.toggle_ruler()\n\n if event.key == \"ctrl+m\":\n self.toggle_ruler_visibility()\n\n def _on_key_release(self, event):\n \"\"\"\n Handle key release event, flip the flags to false.\n \"\"\"\n if event.key == \"shift\":\n self._shift_pressed = False\n\n if event.key == \"control\":\n self._control_pressed = False\n\n def toggle_ruler(self):\n \"\"\"\n Called when the 'm' key is pressed. If ruler is on turn it off, and\n vise versa\n\n If off, hide it.\n \"\"\"\n\n self.active = not self.active\n\n self.canvas.draw_idle()\n\n def toggle_ruler_visibility(self):\n \"\"\"\n Called when the 'ctl+m' key is pressed. 
 def toggle_ruler(self):\n \"\"\"\n Called when the 'm' key is pressed. If the ruler is on, turn it off, and\n vice versa.\n\n If off, hide it.\n \"\"\"\n\n self.active = not self.active\n\n self.canvas.draw_idle()\n\n def toggle_ruler_visibility(self):\n \"\"\"\n Called when the 'ctrl+m' key is pressed. If the ruler is visible, turn it\n off, and vice versa.\n \"\"\"\n if self._visible is True:\n for artist in self._artists:\n artist.set_visible(False)\n self.active = False\n self._visible = False\n\n elif self._visible is False:\n for artist in self._artists:\n artist.set_visible(True)\n self._visible = True\n\n self.canvas.draw_idle()\n\n def _on_press(self, event):\n \"\"\"\n On mouse button press check which button has been pressed and handle it\n \"\"\"\n if self.ignore(event):\n return\n if event.button == 1 and self._mouse3_pressed is False:\n self._handle_button1_press(event)\n elif event.button == 3:\n self._handle_button3_press(event)\n\n def _handle_button1_press(self, event):\n \"\"\"\n On button 1 press start drawing the ruler line from the initial\n press position\n \"\"\"\n\n self._mouse1_pressed = True\n self._x0 = event.xdata\n self._y0 = event.ydata\n self._marker_a.set_data((event.xdata, event.ydata))\n self._marker_a.set_visible(True)\n\n if self.useblit:\n self._marker_a.set_data(self._x0, self._y0)\n for artist in self._artists:\n artist.set_animated(True)\n self.canvas.draw()\n self._background = self.canvas.copy_from_bbox(self.fig.bbox)\n\n def _handle_button3_press(self, event):\n \"\"\"\n If button 3 is pressed (right click) check if the cursor is at one of the\n ruler markers and then move the ruler accordingly.\n \"\"\"\n contains_a, _ = self._marker_a.contains(event)\n contains_b, _ = self._marker_b.contains(event)\n contains_c, _ = self._marker_c.contains(event)\n\n if not (contains_a or contains_b or contains_c):\n return\n\n self._end_a_lock = contains_a\n self._end_b_lock = contains_b\n self._end_c_lock = contains_c\n\n line_coords = self._ruler.get_path().vertices\n self._x0 = line_coords[0][0]\n self._y0 = line_coords[0][1]\n self._x1 = line_coords[1][0]\n self._y1 = line_coords[1][1]\n\n self._old_marker_a_coords = self._marker_a.get_path().vertices\n self._old_marker_c_coords = self._marker_c.get_path().vertices\n # NOTE: midline_coords is a property that is currently commented out below.\n self._old_mid_coords = self.midline_coords\n\n
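 # Artist.contains(event) returns a (hit, details) tuple, which is why each\n # call above unpacks two values. Comment-only sketch (hypothetical artist):\n #\n # hit, details = some_line.contains(mouse_event)\n # if hit:\n # ... # the cursor is within the artist's pick radius\n\n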
 def _on_move(self, event):\n \"\"\"\n On motion, draw the ruler if button 1 is pressed. If one of the markers\n is locked, move the ruler according to the locked marker.\n \"\"\"\n\n if event.inaxes != self.ax.axes:\n return\n\n # if self._end_a_lock or self._end_b_lock or self._end_c_lock is True:\n # self._move_ruler(event)\n\n if self._mouse1_pressed is True:\n self._draw_ruler(event)\n\n # def _move_ruler(self, event):\n # \"\"\"\n # If one of the markers is locked move the ruler according to the selected\n # marker.\n # \"\"\"\n\n # # This flag is used to prevent the ruler from clipping when a marker is\n # # first selected\n # if self._ruler_moving is False:\n # if self.useblit:\n # for artist in self._artists:\n # artist.set_animated(True)\n # self.canvas.draw()\n # self._background = self.canvas.copy_from_bbox(self.fig.bbox)\n # self._ruler_moving = True\n\n # if self._end_a_lock is True:\n # # If marker a is locked only move end a.\n # pos_a = event.xdata, self._x1\n # pos_b = event.ydata, self._y1\n # self._marker_a.set_data(event.xdata, event.ydata)\n # self._ruler.set_data(pos_a, pos_b)\n # self._set_midline_marker()\n\n # if self._end_c_lock is True:\n # # If marker c is locked only move end c.\n # pos_a = self._x0, event.xdata\n # pos_b = self._y0, event.ydata\n # self._marker_c.set_data(event.xdata, event.ydata)\n # self._ruler.set_data(pos_a, pos_b)\n # self._set_midline_marker()\n\n # if self._end_b_lock is True:\n # # If marker b is locked shift the whole ruler.\n # b_dx = event.xdata - self._old_mid_coords[0]\n # b_dy = event.ydata - self._old_mid_coords[1]\n # pos_a = self._x0 + b_dx, self._x1 + b_dx\n # pos_b = self._y0 + b_dy, self._y1 + b_dy\n\n # marker_a_coords = (\n # self._old_marker_a_coords[0][0] + b_dx,\n # self._old_marker_a_coords[0][1] + b_dy,\n # )\n # marker_c_coords = (\n # self._old_marker_c_coords[0][0] + b_dx,\n # self._old_marker_c_coords[0][1] + b_dy,\n # )\n\n # self._ruler.set_data(pos_a, pos_b)\n # self._marker_a.set_data(marker_a_coords)\n # self._marker_b.set_data(event.xdata, event.ydata)\n # self._marker_c.set_data(marker_c_coords)\n\n # self._update_text()\n # self._update_artists()\n\n # def _set_midline_marker(self):\n # self._marker_b.set_visible(True)\n # self._marker_b.set_data(self.midline_coords)\n\n # @property\n # def midline_coords(self):\n # pos0, pos1 = self._ruler.get_path().vertices\n # mid_line_coords = (pos0[0] + pos1[0]) / 2, (pos0[1] + pos1[1]) / 2\n # return mid_line_coords\n\n def _draw_ruler(self, event):\n \"\"\"\n If the left mouse button is pressed and held draw the ruler as the\n mouse is dragged\n \"\"\"\n\n self._x1 = event.xdata\n self._y1 = event.ydata\n\n # If shift is pressed the ruler is constrained to the horizontal axis\n if self._shift_pressed is True:\n pos_a = self._x0, self._x1\n pos_b = self._y0, self._y0\n # If control is pressed the ruler is constrained to the vertical axis\n elif self._control_pressed is True:\n pos_a = self._x0, self._x0\n pos_b = self._y0, self._y1\n # Else the ruler follows the mouse cursor\n else:\n pos_a = self._x0, self._x1\n pos_b = self._y0, self._y1\n\n self._ruler.set_data([pos_a], [pos_b])\n x1 = self._ruler.get_path().vertices[1][0]\n y1 = self._ruler.get_path().vertices[1][1]\n self._marker_c.set_visible(True)\n self._marker_c.set_data(x1, y1)\n # self._set_midline_marker()\n self._update_text()\n self._update_artists()\n\n
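 # The blitting cycle implemented in _update_artists below, as a comment-only\n # outline (names are illustrative):\n #\n # background = canvas.copy_from_bbox(fig.bbox) # cache the clean scene once\n # canvas.restore_region(background) # wipe the previously drawn ruler\n # ax.draw_artist(artist) # redraw only the animated ruler artists\n # canvas.blit(fig.bbox) # push the updated region to the screen\n\n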
 def _update_artists(self):\n if self.useblit:\n if self._background is not None:\n self.canvas.restore_region(self._background)\n else:\n self._background = self.canvas.copy_from_bbox(self.fig.bbox)\n\n for artist in self._artists:\n self.ax.draw_artist(artist)\n\n self.canvas.blit(self.fig.bbox)\n else:\n self.canvas.draw_idle()\n\n def _update_text(self):\n detail_string = \"{:0.4f} {}; {:0.3f} {}\".format(\n self.ruler_dx, self.wunit, self.ruler_dy, self.Iunit\n )\n\n self._axes_text.set_text(detail_string)\n if self._print_text is True:\n print(detail_string)\n\n def _on_release(self, event):\n self._mouse1_pressed = False\n self._mouse3_pressed = False\n self._ruler_moving = False\n self._end_a_lock = False\n self._end_b_lock = False\n self._end_c_lock = False\n\n @property\n def ruler_dx(self):\n pos0, pos1 = self._ruler.get_path().vertices\n return pos1[0] - pos0[0]\n\n @property\n def ruler_dy(self):\n pos0, pos1 = self._ruler.get_path().vertices\n return pos1[1] - pos0[1]\n\n # %% Add in toolbars\n\n class RulerTool(ToolToggleBase):\n r\"\"\"Add a RulerTool to measure spectra\n\n Based on work from TerranJP\n https://github.com/terranjp/matplotlib-tools\n \"\"\"\n\n # keyboard shortcut\n default_keymap = \"m\"\n description = \"Ruler Tool\"\n default_toggled = False\n image = \"ruler\"\n\n def __init__(self, *args, **kwargs):\n ax = kwargs.get(\"ax\", plt.gca())\n self.ruler = None\n self.ax = ax\n self.wunit = None # x-axis unit, set by add_ruler below\n self.Iunit = None # y-axis unit, set by add_ruler below\n super().__init__(*args, **kwargs)\n\n def enable(self, event):\n \"\"\"No-op: the Ruler widget manages its own event connections.\"\"\"\n pass\n\n def disable(self, event):\n \"\"\"No-op: the Ruler widget manages its own event connections.\"\"\"\n pass\n\n def trigger(self, *args, **kwargs):\n \"\"\"Called when the toolbar button is clicked\"\"\"\n super().trigger(*args, **kwargs)\n if self.ruler is None:\n self.ruler = Ruler(\n ax=self.ax, # TODO UPDATE\n useblit=True,\n wunit=self.wunit,\n Iunit=self.Iunit,\n ) # , markerprops=markerprops, lineprops=lineprops)\n else:\n self.ruler.toggle_ruler()\n\n # %% Build Class\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n plt.rcParams[\"toolbar\"] = \"toolmanager\"\n # Filters https://github.com/matplotlib/matplotlib/issues/15284\n\n if fig.canvas.manager.toolmanager is None:\n from warnings import warn\n\n warn(\n \"Couldn't add Ruler tool (still an experimental feature in RADIS): please report the error!\"\n )\n return\n\n if \"📐\" in fig.canvas.manager.toolmanager.tools:\n return\n\n # Add the custom tools that we created\n fig.canvas.manager.toolmanager.add_tool(\"📐\", RulerTool)\n\n # Define units\n rulertool = fig.canvas.manager.toolmanager.get_tool(\"📐\")\n rulertool.wunit = wunit\n rulertool.Iunit = Iunit\n if ax is not None: # add the Ruler only on a specific ax\n rulertool.ax = ax\n\n # Add it to the new group `radis_tools`\n fig.canvas.manager.toolbar.add_tool(\"📐\", \"radis_tools\")\n\n\nif __name__ == \"__main__\":\n\n # fig = plt.figure()\n # plt.plot([1, 2, 3])\n\n # add_ruler(fig)\n #%%\n from radis import calc_spectrum\n\n s = calc_spectrum(\n 1900,\n 2300, # cm-1\n molecule=\"CO\",\n isotope=\"1,2,3\",\n pressure=1.01325, # bar\n Tgas=700, # K\n mole_fraction=0.1,\n path_length=1, # cm\n databank=\"hitran\", # or use 'hitemp'\n )\n s.apply_slit(0.5, \"nm\") # simulate an experimental slit\n # line = s.plot('radiance', nfig='same')\n\n from radis import plot_diff\n\n plot_diff(s, s.take(\"radiance\") * 1.1, show_ruler=True)\n","repo_name":"radis/radis","sub_path":"radis/tools/plot_tools.py","file_name":"plot_tools.py","file_ext":"py","file_size_in_byte":21762,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"81"}
+{"seq_id":"43759308182","text":"from typing import Dict, Optional, Tuple\nfrom sympy import Ci\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom torchvision import transforms\nfrom torchvision.utils import save_image, make_grid\n\nfrom mindiffusion.unet import NaiveUnet\nfrom mindiffusion.ddpm import DDPM\n\nfrom dataset_creator import ImagesDataset\nimport os\n\n\n# add to get computing device\ndef get_computing_device():\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = torch.device('cpu')\n return device\n\n\ndefault_device = get_computing_device()\n\n\n# -----\n\ndef train_maps(n_epoch=100, device=default_device, load_state=False):\n ddpm = DDPM(eps_model=NaiveUnet(3, 3, n_feat=128), betas=(1e-4, 0.02), n_T=100)\n\n if load_state:\n ddpm.load_state_dict(torch.load(\"ddpm_maps.pth\"))\n ddpm.to(device)\n\n # Added transforamtions and custom dataset creation\n tr = transforms.Compose(\n [\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n transforms.RandomHorizontalFlip(0.5),\n transforms.RandomVerticalFlip(0.5)\n ]\n )\n\n dataset = ImagesDataset(\n os.path.join(os.getcwd(), '..', 'EuroSAT/2750'),\n tr)\n # -----\n dataloader = DataLoader(dataset, batch_size=128, shuffle=True, num_workers=8)\n optim = torch.optim.Adam(ddpm.parameters(), lr=1e-5)\n\n for i in range(n_epoch):\n print(f\"Epoch {i} : \")\n ddpm.train()\n\n pbar = tqdm(dataloader)\n loss_ema = None\n for x in pbar:\n optim.zero_grad()\n x = x.to(device)\n loss = ddpm(x)\n loss.backward()\n if loss_ema is None:\n loss_ema = loss.item()\n else:\n loss_ema = 0.9 * loss_ema + 0.1 * loss.item()\n pbar.set_description(f\"loss: {loss_ema:.4f}\")\n optim.step()\n\n ddpm.eval()\n with torch.no_grad():\n xh = ddpm.sample(8, (3, 64, 64), device)\n xset = torch.cat([xh, x[:8]], dim=0)\n grid = make_grid(xset, normalize=True, value_range=(-1, 1), nrow=4)\n save_image(grid, f\"./contents/ddpm_sample_maps{i}.png\")\n\n # save model\n torch.save(ddpm.state_dict(), f\"./ddpm_maps.pth\")\n\n\nif __name__ == \"__main__\":\n train_maps()\n","repo_name":"IgnatMelnikov/sk_task_repo","sub_path":"minDiffusion/train_euroSAT.py","file_name":"train_euroSAT.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5110135912","text":"from copy import deepcopy\nimport pandas\nimport json\n\ndef json_normalizer(in_data):\n new_debt_dic = {}\n def flatten_json(data, prev_heading=''):\n if isinstance(data, dict):\n for key, value in data.items():\n flatten_json(value, prev_heading + '.' + key)\n elif isinstance(data, list):\n if len(data)<=1:\n for i in range(len(data)):\n flatten_json(data[i], prev_heading)\n else:\n for i in range(len(data)):\n flatten_json(data[i], prev_heading+\"-\"+str(i+1))\n else:\n new_debt_dic[prev_heading[1:]] = data\n \n\n return new_debt_dic\n return flatten_json(in_data)\n\n \n\n\nif __name__ == '__main__':\n json_file='''{\"coffee\":{\"region\":[{\"id\":1,\"name\":\"John Doe\"},{\"id\":2,\"name\":\"Don Joeh\"}],\"country\":{\"id\":2,\"company\":\"ACME\"}},\"brewing\":{\"region\":[{\"id\":1,\"name\":\"John Doe\"},{\"id\":2,\"name\":\"Don Joeh\"}],\"country\":{\"id\":2,\"company\":\"ACME\"}}}'''\n json_data = json.loads(json_file)\n df = flatten_json(json_data)\n df=pandas.DataFrame(df)\n df.to_csv(\"C:/Users/sprakashreddychin/Pictures/js_to_da.csv\")\n print(df)\n","repo_name":"suryaprakashreddy034/usefulcodes","sub_path":"json_normalize with out cross.py","file_name":"json_normalize with out cross.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"19848412060","text":"\nimport os\nimport shutil\nfrom functools import partial\nfrom kivy.animation import Animation\nfrom kivy.clock import Clock\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.lang import Builder\nfrom kivy.properties import StringProperty\nfrom kivy.uix.dropdown import DropDown\nfrom kivymd.uix.card import MDSeparator\nfrom widgets.dropitem import DropItem\nfrom plyer import filechooser\nfrom utils.refresh_notes import refresh\nBuilder.load_string(\"\"\"\n:\n size_hint_y:None\n height:\"50dp\"\n padding:15,0,15,0\n canvas:\n Color:\n rgba:(1,1,1,.5) if app.theme_cls.theme_style==\"Dark\" else (0,0,0,.5)\n RoundedRectangle:\n pos:self.pos\n size:self.size\n MDLabel:\n text:root.text\n MDIconButton:\n icon:\"delete\"\n on_release:root.remove_instrument(root.parent)\n\n:\n size_hint_y:None\n height:\"220dp\"\n orientation:\"vertical\"\n padding:\"10dp\"\n \n MDTextField:\n size_hint_y:None\n height:\"40dp\"\n id:instrumentname\n hint_text: \"Instrument Name\"\n \n BoxLayout:\n size_hint_y:None\n height:\"40dp\"\n MDLabel:\n text:\"Select Folder\"\n color:theme.get_color(app.theme_cls.theme_style,\"PrimaryTextColor\")\n MDIconButton:\n icon:\"folder-plus-outline\"\n on_release:root.select_folder()\n MDLabel:\n size_hint_y:None\n height:\"10dp\" if self.text==\"\" else \"40dp\" \n id:path\n text:\"\"\n color:theme.get_color(app.theme_cls.theme_style,\"PrimaryTextColor\")\n font_size:\"12sp\"\n MDLabel:\n size_hint_y:None\n height:\"1dp\" if self.text==\"\" else \"30dp\"\n id:warning\n text:\"\"\n color:(1,0,0,1)\n font_size:\"12sp\"\n FloatLayout:\n MDRaisedButton:\n text:\"Cancel\"\n pos_hint: {\"x\":0.35, \"center_y\":0.4}\n on_release:root.remove_self(root.parent)\n MDRaisedButton:\n text:\"Add\"\n pos_hint:{\"x\":.65,\"center_y\":0.4}\n on_release:root.add_instrument(root.parent)\n\n:\n #Backgroud\n BoxLayout:\n orientation:\"vertical\"\n canvas:\n Color:\n rgba:theme.get_color(app.theme_cls.theme_style,\"BackgroundColor\")\n Rectangle:\n pos:self.pos\n size:self.size\n \n ScrollView:\n do_scroll_x:True\n BoxLayout:\n size_hint_x:None\n width: self.minimum_width+80\n spacing:\"40dp\"\n padding: 30 ,20,0,root.height-430\n # Genral Setting\n BoxLayout:\n orientation:\"vertical\"\n size_hint:None,None\n height:\"400dp\"\n width:\"400dp\"\n padding:\"30dp\"\n canvas:\n Color:\n rgba:[0.15,0.15,0.15,1] if app.theme_cls.theme_style==\"Dark\" else [1,1,1,.8]\n RoundedRectangle:\n pos:self.pos\n size:self.size\n MDLabel:\n size_hint_y:None\n height:\"50dp\"\n text:\"General\"\n font_size:\"20sp\"\n color:theme.get_color(app.theme_cls.theme_style,\"PrimaryTextColor\")\n MDSeparator:\n #Dark Mode\n BoxLayout:\n size_hint_y:None\n height:\"40dp\"\n padding:10,0,10,0\n MDLabel:\n text:\"Dark Mode\"\n color:theme.get_color(app.theme_cls.theme_style,\"PrimaryTextColor\")\n MDSwitch:\n size_hint:(None,None)\n height:\"40dp\"\n width:\"40dp\"\n active: True if app.theme_cls.theme_style==\"Dark\" else False\n on_active:\n app.theme_cls.theme_style=\"Dark\" if self.active\\\n else \"Light\"\n MDSeparator:\n BoxLayout:\n size_hint_y:None\n height:\"40dp\"\n padding:10,0,0,0\n MDLabel:\n text:\"Camera\"\n color:theme.get_color(app.theme_cls.theme_style,\"PrimaryTextColor\")\n DropItem:\n size_hint_x:None\n width:\"180dp\" if len(self.text)>8 else \"150dp\" if len(self.text)>4 else \"130dp\"\n id:dropdown_item\n transparent:True\n text:\"Default\"\n icon:\"chevron-down\"\n font_size:\"12sp\" if len(self.text)>10 else \"14sp\"\n 
on_release:root.open_camera_list(self)\n \n Widget:\n # Add Additional instrument\n BoxLayout:\n orientation:\"vertical\"\n size_hint:None,None\n height:\"400dp\"\n width:\"400dp\"\n padding:\"30dp\"\n canvas:\n Color:\n rgba:[0.15,0.15,0.15,1] if app.theme_cls.theme_style==\"Dark\" else [1,1,1,.8]\n RoundedRectangle:\n pos:self.pos\n size:self.size\n BoxLayout:\n orientation:\"vertical\"\n BoxLayout:\n size_hint_y:None\n height:\"40dp\"\n MDLabel:\n size_hint_y:None\n height:\"50dp\"\n text:\"Add Instrument\"\n font_size:\"20sp\"\n color:theme.get_color(app.theme_cls.theme_style,\"PrimaryTextColor\")\n MDIconButton:\n icon:\"plus\"\n on_release:root.add_box(root.ids.instaddbox)\n MDSeparator:\n Widget:\n size_hint_y:None\n height:\"2dp\"\n ScrollView:\n do_scroll_y:True\n BoxLayout:\n id:wholebox\n spacing:\"5dp\"\n size_hint_y:None\n height:self.minimum_height\n orientation:\"vertical\"\n BoxLayout:\n id:instaddbox\n size_hint_y:None\n height:\"0dp\"\n canvas:\n Color:\n rgba:1,0,1,.15\n RoundedRectangle:\n pos:self.pos\n size:self.size \n \n Widget:\n size_hint_y:None\n height:\"2dp\"\n \n \n\"\"\")\n\n\nclass InstrumentList(BoxLayout):\n text = StringProperty()\n\n def __init__(self, text, **kwargs):\n self.text = text\n super(InstrumentList, self).__init__(**kwargs)\n\n def remove_instrument(self, parent_wid):\n # remove dir folder\n shutil.rmtree(\"assets/tones/\"+self.text.lower(), ignore_errors=True)\n # os.rmdir(os.path.abspath(\"assets/tones/\"+self.text))\n\n # remove from manual_instr list\n import utils.selected_instrument as ut\n ut.manual_instr.remove(self.text.lower())\n st = \"selected_instr = '{}'\\ncamera_indx = {}\\nmanual_instr = {}\".format(\n ut.selected_instr, ut.camera_indx, ut.manual_instr)\n\n with open('utils/selected_instrument.py', 'w') as file:\n file.write(st)\n\n # remove from notes.json\n refresh()\n\n # remove from map_data.json\n import json\n with open('utils/map_data.json', 'r') as file:\n data = json.load(file)\n\n data.pop(self.text.lower())\n\n with open('utils/map_data.json', 'w') as json_file:\n json.dump(data, json_file)\n\n parent_wid.remove_widget(self)\n\n\nclass AddInstrumentBox(BoxLayout):\n\n def add_instrument(self, parent_wid):\n self.ids.warning.text = \"\"\n name = self.ids.instrumentname.text.rstrip().lower()\n path = self.ids.path.text\n from utils.selected_instrument import manual_instr\n from utils.constant import instruments_items\n if(name != ''):\n if(name not in manual_instr+instruments_items):\n if(path != ''):\n files = [each for each in os.listdir(path) if each.split(\n \".\")[-1].lower() in ['wav', 'ogg']]\n print(files)\n if(files != []):\n if(not os.path.exists(\"assets/tones/\"+name)):\n os.mkdir(\"assets/tones/\"+name)\n\n # copying all the assets to program folder\n for file in files:\n if os.path.isfile(os.path.join(path, file)):\n shutil.copyfile(os.path.join(path, file), os.path.join(\n os.path.abspath(\"assets/tones/\"+name), file))\n\n # Set instrument which is manually added,to file\n import utils.selected_instrument as ut\n ut.manual_instr.append(name)\n st = \"selected_instr = '{}'\\ncamera_indx = {}\\nmanual_instr = {}\".format(\n ut.selected_instr, ut.camera_indx, ut.manual_instr)\n\n with open('utils/selected_instrument.py', 'w') as file:\n file.write(st)\n\n # refresh notes.json\n refresh()\n\n #add in map_data.json\n import json\n with open('utils/map_data.json', 'r') as file:\n data = json.load(file)\n\n data[name] = [{\"1\": \"\", \"2\": \"\", \"3\": \"\", \"4\": \"\", \"5\": \"\"}, {\n \"1\": \"\", 
\"2\": \"\", \"3\": \"\", \"4\": \"\", \"5\": \"\"}]\n\n with open('utils/map_data.json', 'w') as json_file:\n json.dump(data, json_file)\n\n self.remove_self(parent_wid)\n Clock.schedule_once(partial(\n self.add_instr_to_list, parent_wid, InstrumentList(name.capitalize())), .6)\n\n else:\n self.ids.warning.text = \"No file found with ext .wav,.ogg\"\n else:\n self.ids.warning.text = \"! Folder Not Selected\"\n else:\n self.ids.warning.text = \"! Name already taken\"\n else:\n self.ids.warning.text = \"! Instrument Name left blank\"\n\n def add_instr_to_list(self, parent_wid, list_widget, *args):\n \"\"\"\n this function creted only for delay\n \"\"\"\n parent_wid.parent.add_widget(list_widget)\n\n def select_folder(self):\n path = filechooser.choose_dir(title=\"Select Instrument Folder\")\n if(path != []):\n self.ids.path.text = path[0]\n\n def remove_self(self, parent_wid):\n parent_wid.remove_widget(self)\n anim = Animation(height=0,\n duration=.5\n )\n anim.start(parent_wid)\n\n\nclass SettingScreen(Screen):\n def __init__(self, **kw):\n Clock.schedule_once(self.set_camera)\n Clock.schedule_once(self.build_instr_list)\n super(SettingScreen, self).__init__(**kw)\n\n def build_instr_list(self, *args):\n from utils.selected_instrument import manual_instr\n for each in manual_instr:\n self.ids.wholebox.add_widget(InstrumentList(each.capitalize()))\n\n def set_camera(self, *args):\n from utils.selected_instrument import camera_indx\n try:\n from pygrabber.dshow_graph import FilterGraph\n self.ids.dropdown_item.text = FilterGraph().get_input_devices()[\n camera_indx]\n except:\n pass\n\n def open_camera_list(self, inst):\n menu = DropDown(auto_width=False, width=200)\n try:\n from pygrabber.dshow_graph import FilterGraph\n device_list = FilterGraph().get_input_devices()\n except:\n device_list = []\n for i, each in enumerate(device_list):\n btn = DropItem(text=each, icon=\"webcam\", font_size=15)\n btn.camera_indx = i\n btn.bind(on_release=lambda btn: self.set_item(menu, btn))\n menu.add_widget(btn)\n menu.spacing = 0\n menu.add_widget(MDSeparator())\n menu.open(inst)\n\n def set_item(self, dropdown, selected_btn):\n self.ids.dropdown_item.text = selected_btn.text\n import utils.selected_instrument as ut\n ut.camera_indx = selected_btn.camera_indx\n\n st = \"selected_instr = '{}'\\ncamera_indx = {}\\nmanual_instr = {}\".format(\n ut.selected_instr, selected_btn.camera_indx, ut.manual_instr)\n\n with open('utils/selected_instrument.py', 'w') as file:\n file.write(st)\n dropdown.dismiss()\n\n def add_box(self, inst):\n if(len(inst.children) == 0):\n anim = Animation(height=220,\n duration=.5\n )\n anim.start(inst)\n Clock.schedule_once(partial(self.add, inst), .5)\n\n def add(self, inst, *args):\n if(len(inst.children) == 0):\n inst.add_widget(AddInstrumentBox())\n","repo_name":"anandnet/Virtual-Music","sub_path":"screens/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":14321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"11638322068","text":"import asyncio\nimport json\nfrom rki_covid_parser.model.state import State\n\nasync def test_state():\n state = State(\"Teststate\")\n state.id = 1337\n state.cases = 657\n state.deaths = 13\n state.recovered = 18\n state.newCases = 3\n assert \"State(id=1337, name='Teststate', cases=657, deaths=13, recovered=18, newCases=3\\n)\" == state.__str__()\n","repo_name":"thebino/rki-covid-parser","sub_path":"tests/model/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"70172841864","text":"from numpy import array\r\nfrom PIL import Image\r\nfrom Crypto.Util.number import inverse\r\n\r\nimg = Image.open('enc.png')\r\nimage = array(img)\r\nx, y, z = image.shape\r\ns = [''] * x\r\nfor a in range (0, x):\r\n for b in range (0, 1):\r\n p = image[a, b]\r\n tensa = p[1] - p[0] if p[1] > p[0] else 251 - p[0] + p[1]\r\n s[a] = chr(tensa * inverse(10, 251) % 251)\r\nprint(''.join(s))\r\n","repo_name":"CTF-STeam/ctf-writeups","sub_path":"2020/RaziCTF/Mod Is Coming/script_sol.py","file_name":"script_sol.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
+{"seq_id":"31237280172","text":"import asyncio\nimport base64\nimport os\nimport urllib\n\nimport aiohttp\nimport aiofiles\n\n# -----------------------\n# -----------------------\nbucket = 'bucket_name' # 这里就是空间名称\nusername = 'username' # 操作员账号\npassword = 'password' # 操作员密码\n# 空间外联地址 因为又拍云的http下载没有频率限制,所以使用http下载 不适用restful的api接口下载\nhostname = \"http://xxxxx\"\n\n# 这里是本地保存的根路径 这样下载后路径地址就跟空间内的地址是相对的了\nbase_save_path = 'f:'\n# -----------------------\n\nheaders = {}\nauth = base64.b64encode(f'{username}:{password}'.encode(encoding='utf-8'))\nheaders['Authorization'] = 'Basic ' + str(auth) # 又拍云认证header头\nheaders['User-Agent'] = \"UPYUN_DOWNLOAD_SCRIPT\"\nheaders['x-list-limit'] = '300'\n\nthread_sleep = 1\n\n\ndef is_dic(url):\n \"\"\"判断key是否是目录 根据是否有后缀名判断\"\"\"\n # print(f'判断url:{url}')\n url = url.replace('http://v0.api.upyun.com/', '')\n if len(url.split('.')) == 1:\n return True\n else:\n return False\n\n\nclass Crawler:\n def __init__(self, init_key, hostname, max_tasks=10, pic_tsak=50):\n '''初始化爬虫'''\n self.loop = asyncio.get_event_loop()\n self.max_tries = 4 # 每个图片重试册数\n self.max_tasks = max_tasks # 接口请求进程数\n self.key_queue = asyncio.Queue(loop=self.loop) # 接口队列\n self.pic_queue = asyncio.Queue(loop=self.loop) # 图片队列\n self.session = aiohttp.ClientSession(loop=self.loop) # 接口异步http请求\n self.key_queue.put_nowait(\n {'key': init_key, 'x-list-iter': None, 'hostname': hostname}) # 初始化接口队列 push需要下载的目录\n self.pic_tsak = pic_tsak # 图片下载队列\n\n def close(self):\n \"\"\"回收http session\"\"\"\n self.session.close()\n\n async def work(self):\n \"\"\"接口请求队列消费者\"\"\"\n try:\n while True:\n url = await self.key_queue.get()\n # print('key队列数量:' + await self.key_queue.qsize())\n await self.handle(url)\n self.key_queue.task_done()\n await asyncio.sleep(thread_sleep)\n except asyncio.CancelledError:\n pass\n\n async def work_pic(self):\n \"\"\"图片请求队列消费者\"\"\"\n try:\n while True:\n url = await self.pic_queue.get()\n await self.handle_pic(url)\n self.pic_queue.task_done()\n await asyncio.sleep(thread_sleep)\n except asyncio.CancelledError:\n pass\n\n async def handle_pic(self, key):\n \"\"\"处理图片请求\"\"\"\n url = (lambda x: x[0] == '/' and x or '/' + x)(key['key'])\n url = url.encode('utf-8')\n url = urllib.parse.quote(url)\n\n pic_url = key['hostname'] + url + '!s400'\n\n tries = 0\n while tries < self.max_tries:\n try:\n print(f'请求图片:{pic_url}')\n async with self.session.get(pic_url, timeout=60) as response:\n async with aiofiles.open(key['save_path'], 'wb') as f:\n # print('保存文件:{}'.format(key['save_path']))\n await f.write(await response.read())\n break\n except aiohttp.ClientError:\n pass\n tries += 1\n\n async def handle(self, key):\n\n \"\"\"处理接口请求\"\"\"\n url = '/' + bucket + \\\n (lambda x: x[0] == '/' and x or '/' + x)(key['key'])\n url = url.encode('utf-8')\n url = urllib.parse.quote(url)\n\n if key['x-list-iter'] is not None:\n if key['x-list-iter'] is not None or not 'g2gCZAAEbmV4dGQAA2VvZg':\n headers['X-List-Iter'] = key['x-list-iter']\n\n tries = 0\n while tries < self.max_tries:\n try:\n reque_url = \"http://v0.api.upyun.com\" + url\n print(f'请求接口:{reque_url}')\n async with self.session.get(reque_url, headers=headers, timeout=60) as response:\n content = await response.text()\n try:\n iter_header = response.headers.get('x-upyun-list-iter')\n except:\n iter_header = 'g2gCZAAEbmV4dGQAA2VvZg'\n list_json_param = content + \"`\" + \\\n str(response.status) + \"`\" + str(iter_header)\n await self.do_file(self.get_list(list_json_param), key['key'], key['hostname'])\n break\n except 
 def get_list(self, content):\n # print(content)\n if content:\n content = content.split(\"`\")\n items = content[0].split('\\n')\n content = [dict(zip(['name', 'type', 'size', 'time'], x.split('\\t'))) for x in items] + content[1].split() + \\\n content[2].split()\n return content\n else:\n return None\n\n async def do_file(self, list_json, key, hostname):\n \"\"\"Process listing data\"\"\"\n for i in list_json[:-2]:\n if not i['name']:\n continue\n new_key = key + i['name'] if key == '/' else key + '/' + i['name']\n try:\n if i['type'] == 'F':\n self.key_queue.put_nowait(\n {'key': new_key, 'x-list-iter': None, 'hostname': hostname})\n else:\n try:\n if not os.path.exists(bucket + key):\n os.makedirs(bucket + key)\n except OSError as e:\n print('error creating folder: ' + str(e))\n save_path = base_save_path + '/' + bucket + new_key\n if not os.path.isfile(save_path):\n self.pic_queue.put_nowait(\n {'key': new_key, 'save_path': save_path, 'x-list-iter': None, 'hostname': hostname})\n else:\n print(f'file already exists: {save_path}')\n except Exception as e:\n print('error downloading file!: ' + str(e))\n async with aiofiles.open('download_err.txt', 'a') as f:\n await f.write(new_key + '\\n')\n if list_json[-1] != 'g2gCZAAEbmV4dGQAA2VvZg':\n self.key_queue.put_nowait(\n {'key': key, 'x-list-iter': list_json[-1], 'hostname': hostname})\n\n async def run(self):\n \"\"\"Start the worker tasks\"\"\"\n workers = [asyncio.Task(self.work(), loop=self.loop)\n for _ in range(self.max_tasks)]\n\n workers_pic = [asyncio.Task(self.work_pic(), loop=self.loop)\n for _ in range(self.pic_task)]\n\n await self.key_queue.join()\n await self.pic_queue.join()\n\n # extend (not append) so each task object gets cancelled individually\n workers.extend(workers_pic)\n for w in workers:\n w.cancel()\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n crawler = Crawler('/', hostname, max_tasks=5, pic_task=150)\n loop.run_until_complete(crawler.run())\n\n crawler.close()\n\n loop.close()\n","repo_name":"Hupo510/WebCrawler","sub_path":"demo/python异步多线程超高性能爬虫爬取又拍云图片例程.py","file_name":"python异步多线程超高性能爬虫爬取又拍云图片例程.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"37597392519","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport csv\r\nfrom decimal import Decimal\r\n\r\n\r\nCONST_EPOCH = 11644473600000000000 #nseconds between win epoch and unix epoch\r\n\r\ndef parse():\r\n '''\r\n parses formatted text obtained from hololens archive, aligns timelines as well.\r\n '''\r\n ## String Pattern for Accel and Gyro\r\n #acclstr = \"header ax ay az norm delta wintime universaltime\"\r\n #gyrostr = \"header gx gy gz norm delta wintime universaltime\"\r\n \r\n ##extract and create new hl imu files\r\n with open ('accel.txt', 'r') as accf:\r\n txt_readeracc = accf.readlines()\r\n \r\n with open ('gyro.txt', 'r') as gyrf:\r\n txt_readergyr = gyrf.readlines()\r\n \r\n \r\n for i in range(len(txt_readeracc)):\r\n txt_readeracc[i] = txt_readeracc[i].split() \r\n \r\n for i in range(len(txt_readergyr)):\r\n txt_readergyr[i] = txt_readergyr[i].split()\r\n \r\n \r\n with open('accelnew.txt','w+',newline='') as csv_writer:\r\n for it in range(len(txt_readeracc)): \r\n time = int(txt_readeracc[it][6])\r\n adjusted_time = int(( time*100 )-( CONST_EPOCH ))\r\n extr = [txt_readeracc[it][1],txt_readeracc[it][2],txt_readeracc[it][3],txt_readeracc[it][4],txt_readeracc[it][5], str(adjusted_time)]\r\n write = csv.writer(csv_writer)\r\n write.writerow(extr)\r\n with open('gyronew.txt','w+',newline='') as csv_writer:\r\n for it in range(len(txt_readergyr)): \r\n time = int(txt_readergyr[it][6])\r\n adjusted_time = int(( time*100 )-( CONST_EPOCH ))\r\n extr = [txt_readergyr[it][1],txt_readergyr[it][2],txt_readergyr[it][3],txt_readergyr[it][4],txt_readergyr[it][5],str(adjusted_time)]\r\n write = csv.writer(csv_writer)\r\n write.writerow(extr)\r\n\r\n\r\ndef extract():\r\n accelData = np.loadtxt('accelnew.txt', dtype = np.float64, delimiter=',',usecols=(0,1,2,3,4))\r\n gyroData = np.loadtxt('gyronew.txt', dtype = np.float64, delimiter=',',usecols=(0,1,2,3,4))\r\n h_a_time = np.loadtxt('accelnew.txt', dtype=Decimal, delimiter=',', usecols = (5))\r\n h_g_time = np.loadtxt('gyronew.txt', dtype=Decimal, delimiter=',', usecols = (5))\r\n \r\n h_a_time_epoch = np.arange(len(h_a_time),dtype = Decimal)\r\n h_a_time_experiment = np.arange(len(h_a_time),dtype = Decimal)\r\n h_a_time_secs = np.arange(len(h_a_time),dtype = np.float64)\r\n h_a_time_nsecs = np.arange(len(h_a_time),dtype = np.float64) \r\n \r\n h_g_time_epoch = np.arange(len(h_g_time),dtype = Decimal)\r\n h_g_time_experiment = np.arange(len(h_g_time),dtype = Decimal)\r\n h_g_time_secs = np.arange(len(h_g_time),dtype = np.float64)\r\n h_g_time_nsecs = np.arange(len(h_g_time),dtype = np.float64) \r\n \r\n decs_a= np.arange(len(h_a_time),dtype = Decimal)\r\n decns_a= np.arange(len(h_a_time),dtype = Decimal)\r\n\r\n decs_g= np.arange(len(h_g_time),dtype = Decimal)\r\n decns_g= np.arange(len(h_g_time),dtype = Decimal)\r\n \r\n for i in range(len(h_a_time)):\r\n tempStr = str(h_a_time[i])\r\n h_a_time_secs[i] = float(tempStr[0:10])\r\n h_a_time_nsecs[i] = float(tempStr[10:19])\r\n \r\n decs_a[i] =Decimal(h_a_time_secs[i])\r\n decns_a[i] = Decimal(h_a_time_nsecs[i])\r\n \r\n h_a_time_epoch[i] = decs_a[i] + decns_a[i]*Decimal(1e-9)\r\n tempStr = str(h_a_time_epoch[i])\r\n tempStr = tempStr[0:20]\r\n h_a_time_epoch[i] = Decimal(tempStr)\r\n h_a_time_experiment[i] = h_a_time_epoch[i] - h_a_time_epoch[0]\r\n\r\n\r\n \r\n for i in range(len(h_g_time)): \r\n tempStr = str(h_g_time[i])\r\n h_g_time_secs[i] = float(tempStr[0:10])\r\n h_g_time_nsecs[i] = float(tempStr[10:19])\r\n \r\n decs_g[i] 
=Decimal(h_g_time_secs[i])\r\n decns_g[i] = Decimal(h_g_time_nsecs[i])\r\n \r\n h_g_time_epoch[i] = decs_g[i] + decns_g[i]*Decimal(1e-9)\r\n tempStr = str(h_g_time_epoch[i])\r\n tempStr = tempStr[0:20]\r\n h_g_time_epoch[i] = Decimal(tempStr)\r\n h_g_time_experiment[i] = h_g_time_epoch[i] - h_g_time_epoch[0] \r\n \r\n \r\n #grav axis is signed (-) which doesnt fit with the ENU frame\r\n # threfore manually corrected here\r\n hlAccX = -accelData[:,0]\r\n hlAccY = accelData[:,1]\r\n hlAccZ = accelData[:,2]\r\n hlAccNorm = accelData[:,3]\r\n hlAccSoCtime = accelData[:,4]\r\n \r\n hlGyrX = gyroData[:,0]\r\n hlGyrY = gyroData[:,1]\r\n hlGyrZ = gyroData[:,2]\r\n hlGyrNorm = gyroData[:,3]\r\n hlGyrSoCtime = gyroData[:,4]\r\n \r\n return (hlAccX, hlAccY, hlAccZ,\r\n hlAccNorm, hlGyrX, hlGyrY,\r\n hlGyrZ, hlGyrNorm, h_a_time_epoch,\r\n h_a_time_experiment, h_g_time_epoch, h_g_time_experiment)\r\n\r\n","repo_name":"ozgunkaratas/mastersthesis","sub_path":"src/readHL.py","file_name":"readHL.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"19633575207","text":"import numpy as np\nfrom mayavi import mlab\n\n\nZP = np.load(\"orderbook_shorter.npy\")\nX, Y = np.mgrid[0 : ZP.shape[0] : 1, 0 : ZP.shape[1] : 1]\nprint(X.shape, Y.shape, ZP.shape)\n\ns = mlab.barchart(X, Y, ZP)\nmlab.show()\n","repo_name":"LeaveMyYard/CryptoMarketRLGym","sub_path":"orderbook_viewer.py","file_name":"orderbook_viewer.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
+{"seq_id":"36930264274","text":"#Ejercicio 1\r\n#Cree un programa que reciba como parámetro el valor de las medidas en centímetros de la altura y la base de un cuadrado\r\n# y que calcule el valor del área de ese cuadrado. La función deberá retornar un mensaje del estilo:\r\n#El área del cuadrado con base de .... cm y altura de .... cm es de: .... cm cuadrados\r\nprint('Inicio de calculo de area de cuadrado')\r\ndef areadecuadrado(altura, base):\r\n return base * altura\r\n \r\nbase = float(input('Ingrese el valor de base: '))\r\naltura = float(input('Ingrese el valor de altura: '))\r\nprint(f'El área del cuadrado con base de {base} cm y altura de {altura} cm es de: {areadecuadrado(base,altura)} cm cuadrados')\r\n\r\n#Ejercicio 2\r\n#Cree el mismo programa que en el ejercicio anterior pero ahora el usuario es quien\r\n#indica las medidas del cuadrado.\r\nprint('Inicio de calculo de area de cuadrado')\r\ndef areadecuadrado(altura, base):\r\n return base * altura\r\n \r\nbase = float(input('Ingrese el valor de base: '))\r\naltura = float(input('Ingrese el valor de altura: '))\r\nprint(f'El área del cuadrado con base de {base} cm y altura de {altura} cm es de: {areadecuadrado(base,altura)} cm cuadrados')\r\n\r\n#Ejercicio 3\r\n#Desarrollar un programa que realice el llamado de una función que calcule la\r\n#potencia entre dos números ingresados por teclado.\r\nprint('Inicio de calculo de potencia')\r\ndef calculopotencia(basepot, exponente):\r\n potenaux = 1\r\n for pot in range(exponente):\r\n potenaux *= basepot\r\n return potenaux\r\nbasepot = int(input('Ingrese la base: '))\r\nexponente = int(input('Ingrese el exponente: '))\r\nprint(f'La potencia de {basepot} con exponente {exponente} es {calculopotencia(basepot, exponente)} ')\r\n#Ejercicio 4\r\n#Desarrollar un programa que llame a una función que calcule el perímetro y a otra\r\n#que calcule la superficie de un círculo. Los datos deben ser ingresados por el\r\n#usuario.\r\n#Perímetro del círculo → (𝝅 * 2r) o (𝝅 * d)\r\n#Superficie de un círculo → (𝝅 * r²)\r\nimport perimetros\r\nprint(\"Jugando con el circulo.\\n\")\r\nprint(\"1.Circulo Perimetro.\\n2.Circulo Superficie.\\n\")\r\n\r\nx=int(input(\"Escoja la opción: \"))\r\nr=0\r\nd=0\r\n\r\nif x==1:\r\n opcion = input('Ingrese si tiene diametro(d), si tiene radio(r): ')\r\n if opcion == 'r':\r\n r=int(input('Ingrese el radio del circulo: '))\r\n perimetros.per_circulo_conradio(r)\r\n if opcion =='d':\r\n d=int(input('Ingrese el diametro del circulo: '))\r\n perimetros.per_circulo_condiametro(d) \r\n\r\n\r\nif x==2:\r\n r=int(input('Ingrese el radio del circulo: '))\r\n perimetros.per_circulo_conradio(r)\r\n\r\n#Ejercicio 5\r\n#Cree un programa que defina una calculadora básica con las 4 principales\r\n#operaciones aritméticas. 
#Exercise 5\r\n#Create a program that defines a basic calculator with the 4 main\r\n#arithmetic operations. Use for each of them a function that receives\r\n#as parameters the values involved in the operation (two arguments) and\r\n#displays the operation and its result on screen.\r\nimport calculadora\r\nprint(\"Calculadora 1.0 \\n\")\r\nprint(\"1.Suma.\\n2.Resta.\\n3.Multiplicación.\\n4.División\")\r\n\r\nx=int(input(\"Escoja la operación: \"))\r\na=0\r\nb=0\r\n\r\nif x==1:\r\n a=float(input('Ingrese el valor a: '))\r\n b=float(input('Ingrese el valor b: '))\r\n calculadora.suma(a,b)\r\n\r\nif x==2:\r\n a=float(input('Ingrese el valor a: '))\r\n b=float(input('Ingrese el valor b: '))\r\n calculadora.resta(a,b)\r\n\r\nif x==3:\r\n a=float(input('Ingrese el valor a: '))\r\n b=float(input('Ingrese el valor b: '))\r\n calculadora.multiplica(a,b)\r\n\r\nif x==4:\r\n a=float(input('Ingrese el valor a: '))\r\n b=float(input('Ingrese el valor b: '))\r\n calculadora.division(a,b)\r\n","repo_name":"cimojeda/Python_Basic","sub_path":"CursoPython/Funciones.py","file_name":"Funciones.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74314777865","text":"\n# get apiKey from .env file\n\nfrom utils.helpers import *\nimport requests\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\napiKey = os.getenv(\"apiKey\")\n\n\ndef get_response(url):\n response = requests.get(url)\n return response\n\n\ndef get_basic_info(ticker):\n resp = get_response(\n f\"https://api.polygon.io/v3/reference/tickers/{ticker}?apiKey={apiKey}\")\n try:\n result = resp.json()[\"results\"]\n except:\n return [None]*7\n # absolute fields - name, ticker\n name = result[\"name\"]\n ticker = result[\"ticker\"]\n try:\n primary_exchange = result[\"primary_exchange\"]\n except:\n primary_exchange = None\n try:\n type_ = result[\"type\"]\n except:\n type_ = None\n try:\n list_date = result[\"list_date\"]\n except:\n list_date = None\n try: \n market_cap = result[\"market_cap\"]\n except:\n market_cap = None\n try:\n share_class_shares_outstanding = result[\"share_class_shares_outstanding\"]\n except:\n share_class_shares_outstanding = None\n return name, ticker, primary_exchange, type_, list_date, market_cap, share_class_shares_outstanding\n\n\ndef get_news(ticker, curr_day):\n resp = get_response(\n f\"https://api.polygon.io/v2/reference/news?published_utc={curr_day}&ticker={ticker}&apiKey={apiKey}\")\n try:\n results = resp.json()[\"results\"]\n except:\n return [None]*4\n result = None\n publisher_name = None\n article_url = None\n description = None\n keywords = None\n for res in results:\n if len(res[\"tickers\"]) == 1:\n result = res\n break\n # absolute fields - publisher_name, article_url\n if result:\n publisher_name = result[\"publisher\"][\"name\"]\n print(f\"publisher_name: {publisher_name}\")\n article_url = result[\"article_url\"]\n print(f\"article_url: {article_url}\")\n try:\n description = result[\"description\"]\n except:\n description = None\n print(f\"description: {description}\")\n try:\n keywords = result[\"keywords\"]\n except:\n keywords = None\n print(f\"keywords: {keywords}\")\n return publisher_name, article_url, description, keywords\n\n\ndef get_daily_data(ticker):\n curr_day = get_curr_day()\n resp = get_response(\n f\"https://api.polygon.io/v2/aggs/ticker/{ticker}/range/1/day/{curr_day}/{curr_day}?adjusted=true&sort=asc&apiKey={apiKey}\")\n try:\n results = resp.json()[\"results\"]\n except:\n return [None]*7\n result = results[0]\n c = result[\"c\"]\n h = result[\"h\"]\n l = result[\"l\"]\n o = result[\"o\"]\n v = result[\"v\"]\n vw = result[\"vw\"]\n n = result[\"n\"]\n return c, h, l, o, v, vw, n\n\n\ndef get_2_minute_data(ticker, from_time, to_time):\n resp = get_response(\n f\"https://api.polygon.io/v2/aggs/ticker/{ticker}/range/2/minute/{from_time}/{to_time}?adjusted=true&sort=asc&apiKey={apiKey}\")\n try:\n results = resp.json()[\"results\"]\n except:\n return None\n return results\n\n\ndef get_prev_day_data(ticker):\n prev_day = get_prev_day()\n resp = get_response(\n f\"https://api.polygon.io/v2/aggs/ticker/{ticker}/range/1/day/{prev_day}/{prev_day}?adjusted=true&sort=asc&apiKey={apiKey}\")\n try:\n results = resp.json()[\"results\"]\n except:\n return None\n result = results[0]\n return result\n","repo_name":"desitrader/stock","sub_path":"utils/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23818964862","text":"#Client RPC\r\nfrom xmlrpc.client import ServerProxy \r\nimport threading\r\n\r\ndef inicializa():\r\n cliente = ServerProxy('http://localhost:20064', allow_none=True) \r\n altura = input(\"Digite a altura \\n\")\r\n sexo = input(\"(1) Masculino (2) Feminino \\n\")\r\n print(cliente.classificacao(altura, sexo))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n thread = threading.Thread(target=inicializa, args=[])\r\n thread.start()","repo_name":"SD-20221-2/time_5_listas","sub_path":"LISTA 3/Exercicio 4/RPC - Exercicio 4/cliente04.py","file_name":"cliente04.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"2375447119","text":"'''\nCreated on Mar 16, 2009\n\n@author: sethchase\n'''\n\nfrom DBdata import runDBcount\nfrom time import time\n\ndef DBdataApp(self, action, variable):\n if action == \"count\":\n database = variable\n now = int(time())\n end = now + (25)\n complete = False\n attempts = 5\n while end >= now and attempts != 0:\n status, dbCount = runDBcount(database)\n if status == \"complete\":\n complete = True\n break\n now = int(time())\n attempts = attempts - 1\n if complete == True:\n return dbCount\n else:\n return \"run again\" \n else:\n # allow option to make query, show results and count\n return \"error: no action sent\"\n # pass\n","repo_name":"sethc23/BD_Scripts","sub_path":"Projects/TestApp/src/controllers/DBhome.py","file_name":"DBhome.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"4775898206","text":"import string\n\n#! Tokens and Tokenizers\n\nDIGITS = '0123456789'\nLETTERS = string.ascii_letters\nLETTERS_DIGITS = LETTERS + DIGITS\n\n#! TOKEN\nTOK_INT = 'INT'\nTOK_FLOAT = 'FLOAT'\nTOK_STRING = 'STRING'\n\nTOK_MUL = 'MUL'\nTOK_DIV = 'DIV'\nTOK_ADD = 'ADD'\nTOK_SUB = 'SUB'\nTOK_POW = 'POW'\nTOK_EOF = 'EOF'\nTOK_REMAINDER = 'REMAINDER'\n\nTOK_LBRACKET = 'LBRACKET'\nTOK_RBRACKET = 'RBRACKET'\nTOK_LSQUARE = 'LSQUARE'\nTOK_RSQUARE = 'RSQUARE'\n\nTOK_IDENTIFIER = 'IDENTIFIER'\nTOK_KEYWORD = 'KEYWORD'\nTOK_EQUAL = 'EQUALS'\n\nTOK_DEQUAL = 'DOUBLEQUALS'\nTOK_NTEQUAL = 'NOTEQUALS'\nTOK_GT = 'GREATERTHAN'\nTOK_GTE = 'GREATERTHANEQTO'\nTOK_LT = 'LESSTHAN'\nTOK_LTE = 'LESSTHANEQTO'\n\nTOK_ARROW = 'ARROW'\nTOK_COMMA = 'COMMA' \n\nTOK_NEWLINE = 'NEWLINE'\n\nTOK_BREAK = 'BREAK'\nTOK_RETURN = 'RETURN'\nTOK_CONTINUE = 'CONTINUE'\n\nTOK_END = 'END'\nTOK_SON = 'SON'\n\n#! Dict for the Operator\nOP_TOK_TAG = {\n '+' : 'ADD',\n '/' : 'DIV',\n '*' : 'MUL',\n '{' : 'LBRACKET',\n '}' : 'RBRACKET',\n '(' : 'LBRACKET',\n ')' : 'RBRACKET',\n '[' : 'LSQUARE',\n ']' : 'RSQUARE',\n '^' : 'POW',\n '%' : 'REMAINDER'\n}\n\n#! A list of the keywords for yakamoz\nKEYWORDS = [\n 'oyleki',\n 'yoket',\n 've',\n 'veya',\n\n 'eger',\n 'yoksaeger',\n 'yoksa',\n 'ise',\n\n 'for',\n 'den',\n 'kadar',\n 'adim',\n\n 'while',\n 'fonk', \n 'dondur',\n 'devam',\n 'break',\n \n 'son',\n 'sonra'\n]\n\nclass Token:\n def __init__(self, type_ , value=None , pos_start=None , pos_end=None):\n self.type = type_\n self.value = value\n\n if pos_start:\n self.pos_start = pos_start.copy()\n self.pos_end = pos_start.copy()\n self.pos_end.advance()\n\n if pos_end:\n self.pos_end = pos_end \n\n def matches(self, type_, value):\n return self.type == type_ and self.value == value\n\n def __repr__(self):\n if self.value: return f'{self.value} : {self.type}'\n return f'{self.type}'","repo_name":"waasiq/yakamoz","sub_path":"src/Lib/Tokens.py","file_name":"Tokens.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"}
+{"seq_id":"36483143321","text":"#!/usr/bin/env python\n#coding=utf-8\n\n\nimport socket\nimport json\nimport time\nimport threading\nfrom math import pi\n\nimport rospy\nfrom riki_msgs.msg import Battery\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import JointState\n\n\ndef send_msg():\n while app_is_run:\n robot_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n robot_socket.connect((\"127.0.0.1\", 11322))\n\n xrJson = {\n \"version\": \"b0.0\",\n \"keep_alive\": True,\n \"url\": \"/servo/arm\",\n \"method\": \"post\",\n \"data\": {\"servo\": ['30', '150', '0', '-30']},\n }\n\n tem_data = json.dumps(xrJson).encode(\"utf-8\")\n print(tem_data)\n robot_socket.send(tem_data)\n time.sleep(0.5)\n\n xrJson = {\n \"version\": \"b0.0\",\n \"keep_alive\": True,\n \"url\": \"/servo/tilt\",\n \"method\": \"post\",\n \"data\": {\"servo\": ['90', '90']},\n }\n\n tem_data = json.dumps(xrJson).encode(\"utf-8\")\n print(tem_data)\n robot_socket.send(tem_data)\n time.sleep(2)\n break\n except Exception:\n time.sleep(2)\n continue\n\n while app_is_run:\n print(\"init socket\")\n robot_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n robot_socket.settimeout(5)\n\n try:\n try:\n time.sleep(1)\n robot_socket.connect((\"127.0.0.1\", 11322))\n \n except Exception:\n continue\n\n while app_is_run:\n\n try:\n if send_bytes:\n robot_socket.send(send_bytes.pop())\n recv = robot_socket.recv(200)\n time.sleep(0.01)\n\n # read move command\n xrJson = {\n \"version\": \"b0.0\",\n \"keep_alive\": True,\n \"url\": \"/movement\",\n \"method\": \"get\",\n \"data\": {},\n }\n\n tem_data = json.dumps(xrJson).encode(\"utf-8\")\n robot_socket.send(tem_data)\n\n recv = robot_socket.recv(200)\n # print(recv)\n try:\n recv = json.loads(recv)\n # print(recv)\n if \"direction\" in recv[\"data\"]:\n # print(\"data got\")\n pub_movement(recv[\"data\"][\"direction\"].encode(\"utf-8\"))\n except ValueError:\n continue\n \n time.sleep(0.01)\n\n # read arm servo command\n xrJson = {\n \"version\": \"b0.0\",\n \"keep_alive\": True,\n \"url\": \"/servo/arm\",\n \"method\": \"get\",\n \"data\": {},\n }\n\n tem_data = json.dumps(xrJson).encode(\"utf-8\")\n robot_socket.send(tem_data)\n arm_recv = robot_socket.recv(200)\n # print(recv)\n try:\n arm_recv = json.loads(arm_recv)\n # print(recv)\n if \"servo\" in arm_recv[\"data\"]:\n for i in range(4):\n global servos_is_update\n\n temp_servo = float(arm_recv[\"data\"][\"servo\"][i])/180.0 * pi\n if not temp_servo == servos[i]:\n servos_is_update = True\n servos[i] = temp_servo\n \n except ValueError:\n continue\n\n time.sleep(0.01)\n\n # read tilt servo command\n xrJson = {\n \"version\": \"b0.0\",\n \"keep_alive\": True,\n \"url\": \"/servo/tilt\",\n \"method\": \"get\",\n \"data\": {},\n }\n\n tem_data = json.dumps(xrJson).encode(\"utf-8\")\n robot_socket.send(tem_data)\n tilt_recv = robot_socket.recv(200)\n # print(recv)\n try:\n tilt_recv = json.loads(tilt_recv)\n if \"servo\" in recv[\"data\"]:\n for i in range(2):\n global servos_is_update\n\n temp_servo = (float(recv[\"data\"][\"servo\"][i]) -90) / 180.0 * pi\n if not temp_servo == servos[i+4]:\n servos[i+4] = temp_servo\n servos_is_update = True\n\n except ValueError:\n continue\n\n if servos_is_update:\n global servos_is_update\n servo.position = servos\n servo_pub.publish(servo)\n rospy.loginfo(\"servo pub\")\n servos_is_update = False\n\n time.sleep(0.1)\n\n except socket.error:\n robot_socket.close()\n break\n\n except KeyboardInterrupt:\n robot_socket.close()\n break\n finally:\n 
robot_socket.close()\n\n\ndef pub_movement(movement):\n global robot_is_stop\n # print(type(movement), type(\"\"))\n if not isinstance(movement, type(\"\")):\n # print(\"break\")\n return False\n\n move_msg = Twist()\n if movement == \"stop\":\n if robot_is_stop:\n return None\n rospy.loginfo(\"stop\")\n # move_msg.angular.z = 0.0\n # move_msg.linear.x = 0.0\n robot_is_stop = True\n\n elif movement == \"left\":\n rospy.loginfo(\"turn left\")\n move_msg.angular.z = 2.5*robot_speed\n move_msg.linear.x = 0.0\n robot_is_stop = False\n\n elif movement == \"right\":\n rospy.loginfo(\"turn right\")\n move_msg.angular.z = -2.5*robot_speed\n move_msg.linear.x = 0.0\n robot_is_stop = False\n\n elif movement == \"forward\":\n rospy.loginfo(\"forward\")\n move_msg.angular.z = 0.0\n move_msg.linear.x = 1.2*robot_speed\n robot_is_stop = False\n\n elif movement == \"backward\":\n rospy.loginfo(\"backward\")\n move_msg.angular.z = 0.0\n move_msg.linear.x = -0.5*robot_speed\n robot_is_stop = False\n\n movement_pub.publish(move_msg)\n\n\ndef battery_callback(msg):\n global send_bytes\n remaining_electricity = msg.battery\n rospy.loginfo(msg.battery)\n\n xrJson = {\n \"version\": \"b0.0\",\n \"keep_alive\": True,\n \"url\": \"/xrrobot/battery\",\n \"method\": \"post\",\n \"data\": {\"remaining_electricity\": float(remaining_electricity)},\n }\n\n send_bytes.append(json.dumps(xrJson).encode(\"utf-8\"))\n\n\nsend_bytes = []\n\nrobot_speed = 1.5\n\nservos = [0.0 for i in range(6)]\n\nrobot_thread = threading.Thread(target=send_msg)\n\nbattery_msg = Battery()\nPOWER_SUB = rospy.Subscriber('battery', Battery, battery_callback)\n\nservo = JointState()\nservo.name = ['a', 'b', 'c', 'd', 'x', 'y']\nservo_pub = rospy.Publisher('joint_states', JointState, queue_size=10)\nmovement_pub = rospy.Publisher('cmd_vel', Twist, queue_size=0)\n\nrospy.init_node('robot_status_interaction')\n\nrospy.loginfo('robot_status_interaction node is running!')\n\napp_is_run = True\nrobot_is_stop = True\nservos_is_update = False\nrobot_thread.start()\n\nrospy.spin()\n\napp_is_run = False\nrobot_thread.join()\n","repo_name":"n00bhax/MrRoboto","sub_path":"xrrobot/script/robot_status_interaction.py","file_name":"robot_status_interaction.py","file_ext":"py","file_size_in_byte":7865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"27656796104","text":"import pygame\nimport random\nimport importlib\nfrom settings import *\nfrom functions import *\n\nplayer1 = importlib.import_module(\"players.\" + PLAYER_1_NAME)\nplayer2 = importlib.import_module(\"players.\" + PLAYER_2_NAME)\nplayers = [PLAYER_1_NAME, PLAYER_2_NAME]\nturn = 0\n\n# initialize pygame and create window\npygame.init()\npygame.font.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Tic Tac Toe\")\nclock = pygame.time.Clock()\nfont = pygame.font.SysFont(TEXT_FONT, TEXT_SIZE)\n\n# define player tables\nplayer1_table = [[\" \" for j in range(0, TABLE_WIDTH)] for i in range(0, TABLE_HEIGHT)] \nplayer2_table = [[\" \" for j in range(0, TABLE_WIDTH)] for i in range(0, TABLE_HEIGHT)]\ntable = [[\" \" for j in range(0, TABLE_WIDTH)] for i in range(0, TABLE_HEIGHT)]\n\n# Game loop\nrunning = True\nclose = False\nwhile running:\n # keep loop running at the right speed\n clock.tick(FPS)\n # Process input (events)\n for event in pygame.event.get():\n # check for closing window\n if event.type == pygame.QUIT:\n running = False\n close = True\n\n # Update\n\n # check if we have a winner\n winner = get_winner(table)\n if winner is not None:\n write(screen, font, (10, HEIGHT - 40), BLACK, players[(turn + 1) % 2] + \" won as \" + winner)\n pygame.display.flip()\n break\n\n # check if we table is full\n if table_is_full(table):\n write(screen, font, (10, HEIGHT - 40), BLACK, \"Tie!\")\n pygame.display.flip()\n break\n if turn:\n # player 2's turn\n try:\n for i in range(0, TABLE_HEIGHT):\n for j in range(0, TABLE_WIDTH):\n if table[i][j] == \"O\":\n player2_table[i][j] = \"X\"\n elif table[i][j] == \"X\":\n player2_table[i][j] = \"O\"\n else:\n player2_table[i][j] = \" \"\n row, column = player2.get_move(player2_table)\n if check_move(table, row, column):\n table[row][column] = \"O\"\n else:\n print(PLAYER_2_NAME, \"false move\\n\")\n except Exception as e:\n print(PLAYER_2_NAME, \"error:\\n\", e, \"\\n\")\n\n else:\n # player 1's turn\n try:\n for i in range(0, TABLE_HEIGHT):\n for j in range(0, TABLE_WIDTH):\n player1_table[i][j] = table[i][j]\n row, column = player1.get_move(player1_table)\n if check_move(table, row, column):\n table[row][column] = \"X\"\n else:\n print(PLAYER_1_NAME, \"false move\\n\")\n except Exception as e:\n print(PLAYER_1_NAME, \"error:\\n\", e, \"\\n\")\n turn = (turn + 1) % 2\n\n # Draw / render\n screen.fill(WHITE)\n draw_table(screen, table)\n write(screen, font, (10, 10), BLACK, \"X is \" + PLAYER_1_NAME)\n write(screen, font, (10, 40), BLACK, \"O is \" + PLAYER_2_NAME)\n # write(screen, font, (10, 70), BLACK, players[turn] + \"'s turn\")\n\n # *after* drawing everything, flip the display\n pygame.display.flip()\n pygame.event.pump()\n pygame.time.delay(1000)\n\nif close:\n pygame.quit()\n exit()\nelse:\n while running:\n for event in pygame.event.get():\n # check for closing window\n if event.type == pygame.QUIT:\n running = False\n pygame.quit()\n exit()\n","repo_name":"mohsenbeygi/IB_computer_science","sub_path":"tic tac toe/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41336145414","text":"# Fönstrets mått\nHEIGHT, WIDTH = 800, 800\nBORDER = 600\naudio_pos_x, audio_pos_y = 5, 5\naudio_icon_width, audio_icon_height = 55, 45\n\n# Färger\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nBLUE = (4, 51, 255)\nGREEN = (0, 255, 0)\n\n# Spel state\nrun = True\nboss_state = False\naudio_state = True\nstatus = \"on\"\n\n# Listor\nenemy_list = []\nbullets = []\ninvader_img = []\nboss_img = []\nboss_bullets = []\n\n# Variabler\nenemy_count = 5\nboss_start_health = 100\nenemy_vel = 0.5\npoints_to_enemy = 100\npoints_until_new_enemy = points_to_enemy\nbullet_count = 5\nplayer_vel = 0\nstart_player_vel = 5\nFPS = 60\nscore = 0\ntimer = 0\nwave = 1\n","repo_name":"Rasmus-04/Game-library","sub_path":"Space_invader/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"33554451636","text":"import idlelib.tooltip\nfrom threading import Thread\nimport tkinter as tk\nfrom datetime import datetime, timedelta\nfrom tkinter.messagebox import showerror\n\nimport webuntis\nfrom src import UntisBreaks, TKUtils, Constants\n\n\n\nclass DisplayFrame(TKUtils.FillerFrame):\n \n\n def __init__(self, parent, session: webuntis.Session):\n super().__init__(parent)\n self.session = session\n self.is_today = True\n self.data = None\n self.currentBreak = None\n # For pretending to have a different day, otherwise should be 0.\n self.natural_offset = 0\n self.break_offset_hours = 0\n self._build()\n\n\n def finishMainloop(self):\n self.winfo_toplevel().destroy()\n\n\n '''\n Errorhandling: Interaction with webuntis\n '''\n def _api_fail_save(self, callback):\n try:\n callback()\n except webuntis.errors.NotLoggedInError:\n showerror(\"Verbindung abgelaufen\", message=\"Sitzung ist abgelaufen. Melden Sie sich erneut an.\")\n self.winfo_toplevel().selectLoginFrame()\n except webuntis.errors.Error:\n showerror(\"Server Fehler\", message=\"Ein Fehler während der Kommunikation mit Webuntis ist aufgetreten.\")\n self.winfo_toplevel().selectLoginFrame()\n except OSError:\n showerror(\"Verbindung verloren\", message=\"Die Verbindung zum Server wurde verloren.\")\n\n \n\n\n def _threaded_fail_save(self, callback):\n try:\n callback()\n except Exception as e:\n TKUtils.TKErrorHandler.report_callback_exception(self, e.__class__.__name__, str(e), e.__traceback__)\n\n\n\n # TODO try-catch connection -> send back to login frame + Error-Popup\n '''\n For the \"current\" day's table (or whatever the natural_offset suggests).\n '''\n def fetch_break_info(self):\n def do():\n self.data = UntisBreaks.get_offset_supervisions(self.session, self.natural_offset)\n current_time = datetime.now()+timedelta(days=self.natural_offset, hours=self.break_offset_hours)\n self.currentBreak = UntisBreaks.next_break_time(self.data.keys(), current=current_time)\n self._api_fail_save(do)\n\n \n def fetch_nextday_info(self):\n def do():\n # for Mo-Do => Move 1, else move to Monday\n next_day = self._get_next_day() + self.natural_offset\n self.data = UntisBreaks.get_offset_supervisions(self.session, next_day)\n self.currentBreak = UntisBreaks.next_break_time(self.data.keys())\n self._api_fail_save(do)\n\n\n def _get_next_day(self):\n weekday = (datetime.now()+timedelta(days=self.natural_offset)).weekday()\n return 1 if weekday < 4 else 7-weekday\n\n\n # ======== building =======>\n\n\n def _build(self):\n self.settings_bar = self._create_settings_bar()\n self.table_frame = self._create_table_frame(forceEmpty=(True, \"läd Inhalte..\"))\n exit_bar = self._create_exit_bar()\n\n self._pack_contents(self.settings_bar, self.table_frame, exit_bar)\n self.after(10, self.after_init)\n\n\n def _pack_contents(self, settings_bar, table_frame, exit_bar):\n settings_bar.pack(anchor=tk.N, fill=tk.X, expand=False, side=tk.TOP )\n exit_bar.pack(anchor=tk.N, fill=tk.X, expand=False, side=tk.BOTTOM)\n table_frame.pack(fill=tk.BOTH, expand=True, side=tk.TOP)\n\n\n def after_init(self, data_source=None):\n data_source = self._after_init_prep(data_source)\n def do():\n try:\n self._after_init_can_fail(data_source)\n except webuntis.errors.NotLoggedInError:\n showerror(\"Verbindung abgelaufen\", message=\"Sitzung ist abgelaufen. 
Melden Sie sich erneut an.\")\n self.winfo_toplevel().selectLoginFrame()\n except RuntimeError as e:\n try: # if 'winfo_exists' throws an error, then tk closed => do nothing\n if self.winfo_exists():\n TKUtils.TKErrorHandler.report_callback_exception(self, e.__class__.__name__, str(e), e.__traceback__)\n except: pass\n Thread(target=lambda: self._threaded_fail_save(do)).start()\n\n\n def _after_init_prep(self, data_source):\n self.toggle_load_buttons(False)\n if hasattr(self, 'day_label') and self.day_label: self.day_label.destroy()\n return data_source if data_source else self.fetch_break_info\n\n\n def _after_init_can_fail(self, data_source):\n try:\n data_source()\n self._change_table(self.currentBreak)\n self.update_day_label()\n self.toggle_load_buttons(True)\n except Exception as e: \n try:\n # throws error if frame is destroyed => sucks up error\n if self.winfo_exists():\n raise e\n except: pass\n\n\n\n # ====== settings bar ======>\n\n\n def _create_settings_bar(self):\n settings_bar = tk.Frame(self, bg=Constants.BACKGROUND, height=60, relief='groove', highlightthickness=2)\n self.retry = tk.Button(settings_bar, text=\"\\u27F3\", padx=-1, pady=-1, font=(\"SherifSans\", 20), borderwidth=0)\n self.retry.configure(activeforeground=\"blue\", activebackground=Constants.BACKGROUND, bg=Constants.BACKGROUND)\n self.retry.configure(command=self.reload_tables)\n self.retry.place(x=20, y=3)\n\n self.toggle_day = tk.Button(settings_bar, text=\"Nächstes >>\", borderwidth=0)\n self.toggle_day.config(font=(\"Arial\", 13), bg=Constants.BACKGROUND, activeforeground=\"blue\", activebackground=Constants.BACKGROUND)\n self.toggle_day.place(relx=0.5, rely=0.55, anchor=tk.CENTER)\n self.toggle_day.config(command=self.toggleDay)\n return settings_bar\n\n \n def toggleDay(self):\n self.toggle_load_buttons(False)\n self._change_table(None, _fEmpty=True, _fMessage=\"aktualisiert Inhalte...\")\n self.is_today = not self.is_today\n if self.is_today:\n self.toggle_day.config(text=\"Nächstes >>\")\n return self.after_init(data_source=self.fetch_break_info)\n self.toggle_day.config(text=\"<< Aktuelles\")\n self.after_init(data_source=self.fetch_nextday_info)\n\n\n def toggle_load_buttons(self, activate):\n if activate:\n self.retry[\"state\"] = 'normal'\n self.toggle_day[\"state\"] = 'normal'\n return\n self.retry[\"state\"] = 'disabled'\n self.toggle_day[\"state\"] = 'disabled'\n\n \n '''\n Spawns label with day-info on the top right in the settings bar.\n Recalling will automatically replace the old label.\n '''\n def update_day_label(self):\n if hasattr(self, 'day_label') and self.day_label: self.day_label.destroy()\n ref_time = self._get_reference_day()\n self.day_label = TKUtils.DayLabel(self.settings_bar, ref_time, borderwidth=0)\n self.day_label.config(font=(\"Arial\", 13), bg=Constants.BACKGROUND)\n self.day_label.place(relx=0.95, rely=0.55, anchor=tk.E)\n\n\n '''\n A datetime object from the day of the shown table\n '''\n def _get_reference_day(self) -> datetime:\n if self.currentBreak: return self.currentBreak\n if self.is_today:\n return datetime.now() + timedelta(days=self.natural_offset)\n return datetime.now() + timedelta(days=self.natural_offset+self._get_next_day())\n\n\n # ======= exit bar =======>\n\n\n def _create_exit_bar(self):\n exit_bar = tk.Frame(self, bg=Constants.BACKGROUND, height=80, relief='groove', highlightthickness=2)\n b1 = tk.Button(exit_bar, padx=5, text=\"Log Out\", command=self.winfo_toplevel().selectLoginFrame)\n b2 = tk.Button(exit_bar, padx=5, text=\"Beenden\", 
command=self.finishMainloop)\n        b1.place(relx=0.45, rely=0.5, anchor=tk.E)\n        b2.place(relx=0.55, rely=0.5, anchor=tk.W)\n        return exit_bar\n\n\n\n    # =============== middle frame =================>\n\n\n    # whether a table can be created\n    def is_displayable(self):\n        return self.currentBreak\n\n\n    def reload_tables(self):\n        self.toggle_load_buttons(False)\n        self._change_table(None, _fEmpty=True, _fMessage=\"aktualisiert Inhalte...\")\n        self.update()\n        get = self.fetch_break_info if self.is_today else self.fetch_nextday_info\n        self.after_init(data_source=get)\n\n\n    '''\n    @param forceEmpty: 2-tuple with [0: create-empty-table] and [1: Text to be displayed on empty frame]\n    '''\n    def _create_table_frame(self, forceEmpty=(False, None)):\n        frame = TKUtils.FillerFrame(self)\n        self._create_arrow(frame, '\\u2B9C', -1).pack(anchor=tk.W, fill=tk.Y, side=tk.LEFT)\n        if not forceEmpty[0] and self.is_displayable():\n            self._create_table(frame).pack(anchor=tk.W, fill=tk.BOTH, expand=True, side=tk.LEFT)\n        else:\n            self._create_empty_table(frame, forceEmpty[1]).pack(anchor=tk.W, fill=tk.BOTH, expand=True, side=tk.LEFT)\n        self._create_arrow(frame, \"\\u2B9E\", 1).pack(anchor=tk.W, fill=tk.Y, side=tk.RIGHT)\n        return frame\n\n\n    def _create_arrow(self, parent, text, offset):\n        arrow_frame = TKUtils.FillerFrame(parent, width=120)\n\n        arrow = tk.Button(arrow_frame, text=text)\n        arrow.configure(borderwidth=0, font=(\"Arial\", 30))\n        self._toggle_button(arrow, offset)\n        arrow.place(relx=0.5, rely=0.5, anchor=tk.CENTER)\n        return arrow_frame\n\n\n    '''\n    Existence of a previous/next break decides whether the arrow should be activated or not.\n    @param arrow: arrow-button to toggle\n    @param offset: how many breaks to look back or forward. Since the arrows only change break position by one, this value should be 1 or -1.\n    '''\n    def _toggle_button(self, arrow, offset):\n        bg=Constants.BACKGROUND\n        def do():\n            if self.currentBreak:\n                rel_break = UntisBreaks.get_relative_break(self.currentBreak, self.data.keys(), offset)\n            # deactivate if necessary\n            if not (self.currentBreak and rel_break):\n                arrow[\"state\"] = \"disabled\"\n                return arrow.configure(activeforeground=bg, bg=bg, activebackground=bg, foreground=\"gray\")\n            # activate\n            arrow[\"state\"] = \"normal\"\n            arrow.configure(activeforeground=\"blue\", bg=bg, activebackground=bg, foreground=\"black\")\n            arrow.config(command=lambda:self._change_table(rel_break))\n        self._api_fail_save(do)\n\n    \n    '''\n    Difference to reload_tables: Not refreshing data, only displaying different breaks.\n    '''\n    def _change_table(self, selected_break_time, _fEmpty=False, _fMessage=None):\n        self.currentBreak = selected_break_time\n        self.table_frame.destroy()\n        self.table_frame = self._create_table_frame(forceEmpty=(_fEmpty, _fMessage))\n        self.table_frame.pack(fill=tk.BOTH, expand=True, side=tk.TOP)\n\n\n\n    # ==== empty table ====>\n\n\n    def _create_empty_table(self, parent, message):\n        msg = message if message else \"Hier ist nichts zu sehen :/\"\n        empty = tk.Frame(parent, bg=Constants.BACKGROUND)\n        empty.pack_propagate(False)\n        label = tk.Label(empty, text=msg, bg=Constants.BACKGROUND)\n        label.configure(font=\"Helvetica 10 italic\")\n        label.place(relx=0.5, rely=0.5, anchor=tk.CENTER)\n        return empty\n\n\n    # ==== inner Table (scrollbar-layer) ====>\n\n\n    def _create_table(self, parent):\n        table = TKUtils.FillerFrame(parent, bg=\"blue\")\n        self._addTime(table)\n        container = TKUtils.WidthControlledScrollContainer(table, Constants.ITEM_WIDTH)\n        container.pack(fill=tk.BOTH, expand=True)\n        
self._add_periods_to_grid(container.get_frame())\n        return table\n\n\n    def _add_periods_to_grid(self, frame: tk.Frame):\n        frame.rowconfigure(0, weight=1)\n        for i, period in enumerate(self.data[self.currentBreak]):\n            frame.columnconfigure(i, weight=1)\n\n            pFrame = tk.Frame(frame, bg=self.getPeriodBG(period), width=Constants.ITEM_WIDTH, padx=40)\n            pFrame.config(highlightbackground=\"gray\", highlightthickness=1)\n            self.config_tooltip(period, pFrame)\n            pFrame.pack_propagate(False)\n            pFrame.columnconfigure(0, weight=1)\n            pFrame.rowconfigure(0, weight=1)\n            pFrame.rowconfigure(1, weight=1)\n\n            self._add_teachers(pFrame, period)\n            self._add_rooms(pFrame, period)\n            pFrame.grid(row=0, column=i, sticky=tk.NSEW)\n\n\n    def config_tooltip(self, period, widget):\n        if period.code == 'cancelled':\n            idlelib.tooltip.Hovertip(widget,'Abgesagte Stunde')\n        if period.code == 'irregular':\n            idlelib.tooltip.Hovertip(widget,'Irreguläre Stunde')\n\n\n    def _add_teachers(self, parent, period):\n        # gray border frame to fill space\n        borderFrame = tk.Frame(parent, bg=\"gray\", width=Constants.ITEM_WIDTH)\n        borderFrame.pack_propagate(False)\n        borderFrame.grid(row=0, column=0, sticky=tk.NSEW)\n\n        # then frame with pad-bottom=1 => showing gray border\n        tFrame = tk.Frame(borderFrame, bg=self.getPeriodBG(period))\n        tFrame.pack(fill=tk.BOTH, expand=True, side=tk.TOP, anchor=tk.NW, pady=(0,1), padx=(0,0))\n        teachers_text = self._str_teachers(period)\n        tLabel = tk.Label(tFrame, text=teachers_text[:-2], font=(\"Arial\", 10))\n        tLabel.config(fg=self.getPeriodFG(period), bg=self.getPeriodBG(period))\n        tLabel.bind('<Configure>', lambda e: tLabel.config(wraplength=tLabel.winfo_width()))\n        tLabel.pack( side=tk.BOTTOM, pady=(10,20))\n\n\n    '''\n    For unknown reasons, an index error is thrown if the list is empty.\n    Printing the period shows a teacher with id=0, but no such teacher exists,\n    therefore I can only reckon this is the webuntis way of saying there is no teacher.\n    '''\n    def _str_teachers(self, period):\n        teachers_text = \"\"\n        try:\n            for t in period.teachers:\n                teachers_text += t.full_name + \"\\n\" + \"\\n\"\n        except IndexError:\n            pass\n        for t in period.original_teachers:\n            teachers_text += f\"({t.full_name})\\n\\n\"\n        return teachers_text if teachers_text else \"----\"\n\n\n    def _add_rooms(self, parent, period):\n        rFrame = tk.Frame(parent, bg=self.getPeriodBG(period))\n        rFrame.pack_propagate(False)\n        rFrame.grid(row=1, column=0, sticky=tk.NSEW)\n        room_text = \"\"\n        for r in period.rooms:\n            room_text += r.long_name + \"\\n\" +\"\\n\"\n        for r in period.original_rooms:\n            room_text += f\"({r.long_name})\\n\\n\"\n        room_text = room_text if room_text else \"----\"\n        rLabel = tk.Label(rFrame, text=room_text[:-2], font=(\"Arial\", 10))\n        rLabel.config(bg=self.getPeriodBG(period), fg=self.getPeriodFG(period))\n        rLabel.pack(side=tk.TOP, pady=(20,10))\n\n\n    def _addTime(self, parent):\n        upperside = TKUtils.FillerFrame(parent, height=70)\n        tk.Label(upperside, text=self._getTime(), padx=5, pady=5, bg=Constants.BACKGROUND, font=(\"Courier\", 18)).pack(anchor=tk.W)\n        upperside.pack(anchor=tk.N, fill=tk.X, expand=False, side=tk.TOP)\n\n\n    def _getTime(self):\n        date=self.currentBreak\n        minute = date.minute if date.minute > 9 else f\"0{date.minute}\"\n        return f\"Start: {date.hour}:{minute}\"\n\n\n    # ==== color configs =====>\n    \n\n    def getPeriodBG(self, period):\n        if period.code=='cancelled':\n            return '#ff4d4d'\n        if period.code=='irregular':\n            return '#fc00fc'\n        return Constants.C_PERIOD\n\n\n    def getPeriodFG(self, period):\n        if period.code=='cancelled':\n            
return \"white\"\n return \"black\"","repo_name":"BenAufGitHub/webuntis-break-supervision-display","sub_path":"src/DisplayFrame.py","file_name":"DisplayFrame.py","file_ext":"py","file_size_in_byte":15669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71022362506","text":"#!/usr/bin/env python\n# cron_cluster_wrapper.py\n# By Erik Redding \n\"\"\"\nThis is a wrapper that is cluster-aware for cronjobs. Two-node clusters.\n\nit runs clustat and based on the output, finds cluster members and what \nservices are running on that node. the argument --service specifies the\nservice that is important to this cronjob. If this member is running the\nservice, then the wrapper will return 0, otherwise 1. \n\nOptions:\n--service= CASE SENSITIVE!\n\nExample:\n\n##### in crontab:\n0 2 0 0 0 /usr/local/sbin/cron_cluster_wrapper.py --service mysql && \n\n\n\"\"\"\n# \n\nimport os\nimport sys\nimport subprocess\nimport re\nimport optparse\n\ncmd_clustat='/usr/sbin/clustat'\n#cmd_clustat='./clustat_nonmaster'\n\ndef grab_clustat_output( ):\n \"\"\" \n this runs clustat and returns the output in a list. from there, deal with it.\n \"\"\"\n p = subprocess.Popen( cmd_clustat ,stdout=subprocess.PIPE ).stdout\n return p.readlines( )\n\ndef find_cluster_members( clustat_output ):\n \"\"\"\n finds the list of cluster members\n\n this function moves through the list clustat_output that is the output\n of running the clustat binary in RHEL cluster, and snags cluster\n members based on their Online status. It finds the node the \n clustat command is running on by the Local keyword in the output.\n\n the return value is the dictionary containing the node list. \n\n supporting more than 2 node clusters wouldn't be hard, but I'm being lazy.\n\n \"\"\"\n # nodelist: dict providing local and sibling node entries\n nodelist = {}\n for line in clustat_output:\n match = re.search( '^ ([A-Za-z0-9_-]+).*Online, (Local)?.*', line )\n if match is not None:\n if match.group(2) is not None:\n nodelist['local'] = match.group(1) \n else:\n nodelist['node']= match.group(1) \n return nodelist\n\ndef is_service_running(service, thishost, clustat_output ):\n \"\"\"\n finds if service is running based on clustat_output list\n \"\"\"\n for line in clustat_output:\n match = re.search( '^ service:(%s)[ ]+(%s)[ ]+(started).*' % ( service, thishost ) , line )\n if match is not None:\n # MATCH! Lets return True.\n return True\n # We didn't find it. Return False!\n return False\n\ndef main():\n usage = \"usage: %prog [options]\"\n parser = optparse.OptionParser(usage)\n parser.add_option(\"-s\", \"--service\", action=\"store\",\n help=\"service name\")\n opts, ____ = parser.parse_args()\n\n # run clustat, get output\n output=grab_clustat_output()\n # give output to find our cluster members\n cluster_members=find_cluster_members(output)\n\n # if the service is running, on the local node, pass the output and script will:\n if is_service_running( opts.service, cluster_members['local'], output ):\n # exit 0 if the node is running the service\n sys.exit(0)\n else:\n # exit 1 if the node is NOT running the service\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()","repo_name":"jeredding/cron_cluster_wrapper","sub_path":"cron_cluster_wrapper.py","file_name":"cron_cluster_wrapper.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"32354988707","text":"import os\nimport numpy as np\nimport cv2\nfrom multiprocessing import Process\n\ndef pick(path):\n output = \"D:\\\\00_work\\\\data\\\\kaggle datasets\\\\DR\\\\green\\\\\"\n file_list = os.listdir(path)\n num = len(file_list)\n file_list1 = file_list[0:num//2]\n file_list2 = file_list[num//2:num]\n \n p1 = Process(target=fork, args = ('P1', file_list1, output, path))\n p2 = Process(target=fork, args = ('P2', file_list2, output, path))\n \n p1.start()\n p2.start()\n \n p1.join()\n p2.join()\n\n \ndef fork(name, file_list, output, path):\n print(\"{0} started\".format(name))\n count = 0\n num = len(file_list)\n for f in file_list:\n src = path + f \n dst = output + f \n green(src, dst)\n count+=1\n if count%200 == 0:\n print(\"{0} Done {1}/{2}\".format(name, count, num))\n print(\"{0} end\".format(name))\n \ndef green(src, dst):\n image=cv2.imread(src)\n image[:,:,[0,2]] = 0\n cv2.imwrite(dst,image)\n \nif __name__ == '__main__':\n path = \"D:\\\\00_work\\\\data\\\\kaggle datasets\\\\DR\\\\working\\\\\"\n pick(path)","repo_name":"huyvd7/diabetic-retinopathy-deep-learning","sub_path":"take_green.py","file_name":"take_green.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"1250564434","text":"import json\nfrom pdx.logger import logger\nfrom pdx.models.model import Model\nfrom pdx.prompt.prompt_session import PromptSession\nfrom pdx.models.cohere.client import CohereClient\nfrom pdx.models.metadata import ModelResponse, ResponseMetadata, ModelTokenUsage\n\n\nclass GenerationModel(Model):\n def __init__(self,\n api_key: str,\n model: str,\n max_tokens: int = 1200,\n stop: list = [],\n temperature: float = 0,\n **kwargs,\n ):\n\n self._api_url = \"v1/generate\"\n\n self._provider = \"cohere\"\n self._client = CohereClient(api_key)\n self._model = model\n self._max_tokens = max_tokens\n self._temperature = temperature\n if kwargs.get('stop_sequences', None):\n self._stop_sequences = kwargs.get('stop_sequences', None)\n else:\n self._stop_sequences = stop\n self._end_sequences = kwargs.get('end_sequences', [])\n self._num_generations = kwargs.get('num_generations', 1)\n self._preset = kwargs.get('preset', None)\n self._k = kwargs.get('k', 0)\n self._p = kwargs.get('p', 0.75)\n self._frequency_penalty = kwargs.get('frequency_penalty', 0.0)\n self._presence_penalty = kwargs.get('presence_penalty', 0.0)\n self._return_likelihoods = kwargs.get('return_likelihoods', 'NONE')\n self._logit_bias = kwargs.get('logit_bias', {})\n self._truncate = kwargs.get('truncate', 'END')\n self._retries = kwargs.get('retries', 2)\n\n def _preprocess(self, prompt: PromptSession):\n\n _prompt = prompt.text_prompt({})\n\n request_params = {\n 'prompt': _prompt,\n 'model': self._model,\n 'num_generations': self._num_generations,\n 'max_tokens': self._max_tokens,\n 'preset': self._preset,\n 'temperature': self._temperature,\n 'k': self._k,\n 'p': self._p,\n 'frequency_penalty': self._frequency_penalty,\n 'presence_penalty': self._presence_penalty,\n 'return_likelihoods': self._return_likelihoods,\n 'truncate': self._truncate,\n }\n\n if self._logit_bias != {}:\n request_params['logit_bias'] = self._logit_bias\n\n if self._end_sequences != []:\n request_params['end_sequences'] = self._end_sequences\n\n if self._stop_sequences != []:\n request_params['stop_sequences'] = self._stop_sequences\n\n return request_params\n\n def _postprocess(self, response: dict, request_params: dict, request_time) -> ModelResponse:\n _prompt = request_params.pop('prompt', None)\n _r = json.loads(response)\n\n token_usage = ModelTokenUsage(\n response=None,\n prompt=None,\n total=None)\n response_metadata = ResponseMetadata(\n model=request_params['model'],\n api_log_id=_r['id'],\n warnings=_r['meta'].get('warnings', None),\n token_usage=token_usage,\n latency=request_time)\n model_response = ModelResponse(\n metadata=response_metadata,\n request_params=request_params,\n data=_r['generations'][0]['text'])\n\n return model_response\n","repo_name":"pdx-labs/pdx","sub_path":"src/pdx/models/cohere/generation.py","file_name":"generation.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"}
+{"seq_id":"31149419304","text":"#!/usr/bin/python3\ndef divisible_by_2(my_list=[]):\n \"\"\"Finds all multiples of two\"\"\"\n\n duplicate = []\n for number in my_list:\n if number % 2 == 0:\n duplicate.append(True)\n else:\n duplicate.append(False)\n\n return duplicate\n","repo_name":"Chiemelie10/alx-higher_level_programming","sub_path":"0x03-python-data_structures/10-divisible_by_2.py","file_name":"10-divisible_by_2.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28143538154","text":"import codecs\nimport os\nimport sys\nimport logging\nimport numpy as np\n\ndef main(src, des):\n\n for usg in os.listdir(src):\n dr1 = src+os.sep+usg\n if not os.path.isdir(dr1):\n continue\n for spk in os.listdir(dr1):\n dr2 = dr1+os.sep+spk\n pitches = []\n for f in os.listdir(dr2):\n fp = dr2+os.sep+f\n fp =codecs.open(fp, \"r\", encoding = \"utf8\")\n content = fp.readlines()\n fp.close()\n\n for line in content[1:]:\n line = line.strip().split(\",\")\n p = 0\n try:\n p = float(line[1])\n except:\n continue\n pitches.append(p)\n pitches = np.array(pitches)\n# miu = pitches.mean()\n# sigma = pitches.var()\n#\n dr = \"/\".join([des, usg])\n if not os.path.isdir(dr):\n os.makedirs(dr)\n fout = dr+\"/\"+spk+\".info\"\n fp = codecs.open(fout, \"w\", encoding = \"utf8\")\n# fp.write(miu+\"\\n\")\n# fp.write(sigma+\"\\n\")\n pitches = [\"{: .3f}\".format(x) for x in pitches]\n fp.write(\" \".join(pitches))\n fp.close()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"This script grouping the pitch data in terms of individual speaker.\")\n print(\"Usage: {} src des\".format(sys.argv[0]))\n exit(1)\n\n main(sys.argv[1], sys.argv[2])\n","repo_name":"wenjie-p/pitch_range","sub_path":"src/data-prep/group_pitch_by_spk.py","file_name":"group_pitch_by_spk.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"34598400199","text":"# data_methods.py\nfrom create_database import Database\nimport pandas as pd\n\n\nclass DataMethods(Database):\n def insert_data(self):\n # Fetch maximum user_id\n self.c.execute('''\n SELECT MAX(user_id) FROM users\n ''')\n max_id = self.c.fetchone()[0]\n if max_id is None:\n max_id = 0 # In case the table is empty\n\n # User input for new entry\n first_name = input(\"Enter the First Name: \")\n last_name = input(\"Enter the Last Name: \")\n favorite_food = input(\"Enter the Favorite Food: \")\n\n self.c.execute('''\n INSERT INTO users (user_id, first_name, last_name, favorite_food)\n VALUES (?, ?, ?, ?)\n ''', (max_id + 1, first_name, last_name, favorite_food))\n self.conn.commit()\n\n def delete_row(self):\n request = input('Are you sure you like to delete your account? Yes or No\\n')\n if request.lower() == 'yes':\n input_id = input('Enter your user ID number\\n')\n\n self.c.execute('''\n DELETE from users where user_id = ?\n ''', (int(input_id),))\n self.conn.commit()\n else:\n print('did not enter a valid id')\n\n def update_row(self):\n # enter your user ID number\n print('Enter your user ID number')\n # if the user ID number is registered in the database, ask what needs to be updated\n user_id_num = input()\n if self.check_db(int(user_id_num)) is True:\n print('What would you like to update?')\n entryToUpdate = input(' 1. First Name\\n 2. Last Name\\n 3. Favorite Food\\n')\n if entryToUpdate.lower() == 'first name' or entryToUpdate == '1':\n #update the first name column in the user_id row\n new_value = input(\"Enter the new First Name: \")\n self.update_entry(int(user_id_num), 'first_name', new_value)\n\n elif entryToUpdate.lower() == 'last name' or entryToUpdate == '2':\n #update the last name column in the user_id row\n new_value = input(\"Enter the new Last Name: \")\n self.update_entry(int(user_id_num), 'last_name', new_value)\n\n elif entryToUpdate.lower() == 'favorite food' or entryToUpdate == '3':\n #update their favorite food\n new_value = input(\"Enter the new Favorite Food: \")\n self.update_entry(int(user_id_num), 'favorite_food', new_value)\n\n else:\n print(\"That isn't a valid column to choose\")\n # if the user id number is not within the database, state that its invalid\n else:\n print('Could not find this user ID within our records')\n\n # update the correct column based off row (user_id)\n # ask if there are any more updates needed\n # if not, go back to menu?\n\n def check_db(self, user_id):\n self.c.execute('''\n SELECT * FROM users WHERE user_id = ?\n ''', (user_id,))\n\n result = self.c.fetchone()\n if result:\n return True\n else:\n return False\n\n def update_entry(self, user_id, field, new_value):\n query = f\"UPDATE users SET {field} = ? WHERE user_id = ?\"\n self.c.execute(query, (new_value, user_id))\n self.conn.commit()\n\n\n def display(self):\n self.c.execute('''\n SELECT * FROM users\n ''')\n df = pd.DataFrame(self.c.fetchall(), columns=['user_id', 'first_name', 'last_name', 'favorite_food'])\n df.set_index('user_id', inplace=True)\n print(df)\n","repo_name":"nckmnfrd/pythonCRUD","sub_path":"data_methods.py","file_name":"data_methods.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23897241176","text":"###################################\n# Library Imports\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport nibabel as nib #Used to open nifTi or .nii files \nimport matplotlib.pyplot as plt \n###################################\n'''\nDataset loader obtained from the link below\nhttps://medicalsegmentation.com/covid19/\n\n# 100 total train images and 10 test images of\n# resolution = 512 x 512 \n'''\ntrain_imgs_filename = './data/train/tr_im.nii.gz'\ntrain_masks_filename = './data/train/tr_mask.nii.gz'\n\ntrain_imgs_nib = nib.load(train_imgs_filename) \ntrain_masks_nib = nib.load(train_masks_filename)\n\n#convert nifti objects in to a numpy array\ntrain_imgs = train_imgs_nib.get_fdata() \n\n# train_imgs = (train_imgs - np.min(train_imgs))/(np.max(train_imgs)-np.min(train_imgs))\n\n# train_imgs = (train_imgs * 255).astype(int)\n\ntrain_masks = train_masks_nib.get_fdata() \n\nassert(train_imgs.shape == train_masks.shape)\n\nprint(train_masks.shape)\nprint(np.unique(train_masks))\n\nprint(train_imgs.shape)\nprint(np.unique(train_imgs))\n\n###################################\n'''\nload saved unet model\n'''\nmodel = tf.keras.models.load_model(\"./model/unet\")\nmodel.summary()\n###################################\n'''\nplot 10 train images their ground truth and the prediction from our unet model\n'''\ngroundTruth = np.zeros((512,512,3))\nprediction = np.zeros((512,512,3))\n\nfor i in range(0,6,3):\n img_idx = 0\n # plot grayscale ct scan\n plt.subplot(2,3,i+1)\n plt.imshow(train_imgs[:,:,img_idx],cmap='gray')\n # plot color labeled gt of ct scan\n plt.subplot(2,3,i+2)\n # get labels and color code them\n red_x,red_y = np.where(train_masks[:,:,img_idx] == 0)\n red_channels = np.zeros(len(red_x)).astype(int)\n\n green_x,green_y = np.where(train_masks[:,:,img_idx] == 1)\n green_channels = np.zeros(len(green_x)).astype(int)\n \n blue_x,blue_y = np.where(train_masks[:,:,img_idx] == 2)\n blue_channels = (np.ones(len(blue_x))*2).astype(int)\n\n yellow_x,yellow_y = np.where(train_masks[:,:,img_idx] == 2)\n yellow_channel_1 = (np.zeros(len(yellow_x))).astype(int)\n yellow_channel_2 = (np.ones(len(yellow_x))).astype(int)\n\n #set values\n groundTruth[red_x,red_y,red_channels] = 255\n groundTruth[green_x,green_y,green_channels] = 255\n groundTruth[blue_x,blue_y,blue_channels] = 255\n groundTruth[yellow_x,yellow_y,yellow_channel_1] = 255\n groundTruth[yellow_x,yellow_y,yellow_channel_2] = 255\n plt.imshow(groundTruth)\n #plot mdoel prediction\n plt.subplot(2,3,i+3)\n pred = model(train_imgs[np.newaxis,:,:,img_idx,np.newaxis])\n pred = np.argmax(pred,axis=3)\n pred = pred[0,:,:] #reduces unnnescary batch dimension ehre\n print(pred.shape)\n # get labels and color code them\n indices_red = np.where(pred == 0)\n indices_green = np.where(pred == 1)\n indices_blue = np.where(pred == 2)\n indices_yellow = np.where(pred == 3)\n #set values\n prediction[indices_red,0] = 255\n prediction[indices_green,1] = 255\n prediction[indices_blue,2] = 255\n prediction[indices_yellow,0] = 255\n prediction[indices_yellow,1] = 255\n plt.imshow(prediction)\n\nplt.savefig(\"./results/training_visualization.png\")\n\n\n\n","repo_name":"aparedes8/Covid19ImageSegmentation","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"71897878664","text":"from time import sleep\nfrom selenium import webdriver\n\nfrom settings import USER_DATA_DIR, PROFILE_DIRECTORY\n\ndef init_driver():\n options = webdriver.ChromeOptions()\n options.add_argument(f\"user-data-dir={USER_DATA_DIR}\")\n options.add_argument(f\"--profile-directory={PROFILE_DIRECTORY}\")\n return webdriver.Chrome(\"./chromedriver\", options=options)\n\ndef go_to_whatsapp(driver):\n driver.get(\"https://web.whatsapp.com/\")\n\ndef archive_chats(driver):\n chats = driver.find_elements_by_class_name(\"_2WP9Q\")\n for chat in chats:\n chat.click()\n sleep(1)\n driver.find_element_by_xpath(\"//span[@data-icon='down']\").click()\n sleep(1)\n driver.find_element_by_xpath(\"//div[@title='Archive chat']\").click()\n sleep(1)\n\ndriver = init_driver()\n\ntry:\n go_to_whatsapp(driver)\n sleep(10)\n archive_chats(driver)\nexcept:\n pass\n\nsleep(5)\ndriver.close()\n","repo_name":"danielSbastos/arquive-wapp-chats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38985733799","text":"import ast\nimport codecs\nimport string\nfrom collections import Counter\n\nimport spacy\nimport nltk\nimport re\n\nfrom cbc.pipeline import \\\n ItemModifier, IteratorModifier, Iterator, IteratorConsumer, LineSourceIterator, STANDARD_SEPARATOR\nfrom nltk.tokenize import TreebankWordTokenizer\nimport logging\n\nlogger = logging.getLogger('cbc.nlp.base')\n\nnltk.download('stopwords')\n\nSTANDARD_STOPWORD = nltk.corpus.stopwords.words('german')\nSTANDARD_FILTER_SYMBOLS = (\n '|', '*', '``', \"''\", '“', '„', '–', '-', '\"', ')', '(', \"'\", \".\", \",\", '`', \":\", \"?\", \";\",\n \"‘\", \"{\", \"}\", \"#\", \"&\", \"!\", \"]\", \"[\", \"%\", \"−\", \"...\"\n)\n\nRE_NUMBER = re.compile(r\"^\\d+[.,eE]?\\d*?$\")\nRE_SINGLE_LETTER = re.compile(r\"^\\w$\")\nRE_REPLACE_SPACE_CHARS = re.compile(r'[|\\\"/()—]')\nRE_REMOVE_CHARS = re.compile(r\"[\\\\'\\-]\")\n\nRESTR_STD_PARAGRAPH_DEL = r\"\\s*\\n\\s*\\n\\s*\"\n\"\"\"\nStandard paragraph delimiter: At least two newlines which may include and be surrounded by\nfurther arbitrary space\n\"\"\"\n\nRE_WHITESPACE = re.compile(r\"\\s+\")\n\nDEFAULT_LEMMATIZER = \"de_core_news_sm\"\n\nLEMMATIZE_MAX_SIZE = 10000\n\nLEMMATIZE_MAX_CHUNK_SIZE = 100000\n\nVOWELS = \"aeiouäöüyAEIOUÄÖÜY\"\nCONSONANTS = \"bcdfghjklmnpqrstvwxzBCDFGHJKLMNPQRSTVWXY\"\n\n\ndef split_into_chunks(text, max_chunk_length=LEMMATIZE_MAX_CHUNK_SIZE):\n \"\"\"\n Split a text in chunks of given maximum length splitting only at white space\n (leaving words intact)\n \"\"\"\n l_ = len(text)\n i = 0\n j = l_\n chunks = []\n while i < l_:\n if j - i > max_chunk_length:\n j = text[0:i + max_chunk_length].rfind(\" \")\n if j > i:\n chunks.append(text[i:j])\n i = j\n j = l_\n else:\n break\n return chunks\n\n\nclass Lower(ItemModifier):\n \"\"\"\n Expects a list of strings as item.\n Transforms each member of the list to lower case.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n (No arguments allowed.)\n \"\"\"\n super(Lower, self).__init__(\n f=lambda tokens: [t.lower() for t in tokens]\n )\n\n\nclass ReSub(ItemModifier):\n def __init__(self, reg_ex_list, replace):\n my_re_list = tuple([re.compile(reg_ex) for reg_ex in reg_ex_list])\n\n def f(w):\n x = w\n for my_re in my_re_list:\n x = my_re.sub(replace, x)\n return x\n\n super(ReSub, self).__init__(f=f)\n\n\nclass ReSplit(ItemModifier):\n def __init__(self, reg_ex):\n my_re = re.compile(reg_ex)\n super(ReSplit, self).__init__(\n f=lambda t: my_re.split(t)\n )\n\n\nclass Append(ItemModifier):\n def __init__(self, append=\"_DE\"):\n self.append = append\n super(Append, self).__init__(\n f=lambda tokens: [t + self.append for t in tokens]\n )\n\n\nclass LowerAppend(ItemModifier):\n def __init__(self, append=\"_DE\"):\n self.append = append\n\n def f(tokens):\n return [t.lower() + self.append for t in tokens]\n super(LowerAppend, self).__init__(f=f)\n\n\nclass IsNLText(ItemModifier):\n \"\"\"\n Check whether a string consist of Natural Language (NL) Text using\n heuristics on distribution of characters.\n\n Kwargs:\n :lb_vows_by_letters (float, default=0.25): lower bound on \"vowels by letters\"\n :ub_vows_by_letters (float, default=0.53): upper bound on \"vowels by letters\"\n :lb_letters_by_chars (float, default=0.67): lower bound on \"letters by all chars\"\n :ub_spaces_by_chars (float, default=0.2): lower bound on \"spaces by all chars\"\n :ub_digits_by_chars (float, default=0.2): lower bound on \"digits by all chars\"\n \"\"\"\n\n def __init__(self,\n lb_vows_by_letters=0.25,\n ub_vows_by_letters=0.53,\n 
lb_letters_by_chars=0.67,\n ub_spaces_by_chars=0.2,\n ub_digits_by_chars=0.2\n ):\n self.lb_vows_by_letters = lb_vows_by_letters\n self.ub_vows_by_letters = ub_vows_by_letters\n self.lb_letters_by_chars = lb_letters_by_chars\n self.ub_spaces_by_chars = ub_spaces_by_chars\n self.ub_digits_by_chars = ub_digits_by_chars\n\n def f(w):\n dist = Counter(w)\n chars = len(w)\n vows = sum([dist.get(c) for c in VOWELS if c in dist])\n letters = sum([dist.get(c) for c in string.ascii_letters if c in dist])\n spaces = dist.get(\" \", 0)\n digits = sum([dist.get(c) for c in string.digits if c in dist])\n result = None\n if \\\n chars > 0 and \\\n letters > 0 and \\\n self.ub_vows_by_letters > vows / letters > self.lb_vows_by_letters and \\\n letters / chars > self.lb_letters_by_chars and \\\n self.ub_spaces_by_chars > spaces / chars and \\\n self.ub_digits_by_chars > digits / chars:\n result = w\n return result\n\n super(IsNLText, self).__init__(f=f)\n\n\nclass LemmatizeModifier(ItemModifier):\n def __init__(self,\n lemmatizer=None,\n chunksize=LEMMATIZE_MAX_SIZE\n ):\n if lemmatizer is None:\n try:\n lemmatizer=spacy.load(DEFAULT_LEMMATIZER)\n except OSError:\n print(\n f\"Cannot find default lemmatizer {DEFAULT_LEMMATIZER}.\\n\"\n f\"Please download via: $ python -m spacy download {DEFAULT_LEMMATIZER}\\n\"\n \"For furhter information, please refer to https://spacy.io/usage/models#download.\"\n )\n self.lemmatizer = lemmatizer\n self.chunksize = chunksize\n\n def f(tokens):\n result = [\n t.lemma_ for i in range(0, len(tokens), self.chunksize)\n for t in self.lemmatizer(\n \" \".join(tokens[i: i + self.chunksize])\n )\n ]\n return result\n super(LemmatizeModifier, self).__init__(f=f)\n\n\nclass Remove(ItemModifier):\n def __init__(self,\n stopwords=STANDARD_STOPWORD,\n filter_function=lambda w: RE_NUMBER.match(w) is None and RE_SINGLE_LETTER.match(w) is None,\n filter_symbols=STANDARD_FILTER_SYMBOLS\n ):\n self.stopwords = stopwords\n self.filter_function = filter_function\n self.filter_symbols = list(filter_symbols)\n self.ff = None\n self.create_ff()\n super(Remove, self).__init__(\n f=lambda tokens: list(filter(self.ff, tokens))\n )\n\n def create_ff(self):\n self.ff = \\\n lambda w: self.filter_function(w) and w.lower() not in self.stopwords + self.filter_symbols\n\n def add_stopwords(self, a_list):\n self.stopwords = self.stopwords + a_list\n return self\n\n def add_filter_symbols(self, a_list):\n self.filter_symbols = self.filter_symbols + a_list\n return self\n\n\ndef tokenize_text(text, re_replace_space_chars=RE_REPLACE_SPACE_CHARS):\n t = text\n if re_replace_space_chars is not None:\n t = re_replace_space_chars.sub(\" \", text)\n return TreebankWordTokenizer().tokenize(t)\n\n\nclass TokenizeText(ItemModifier):\n def __init__(self,\n re_replace_space_chars=RE_REPLACE_SPACE_CHARS\n ):\n self.re_replace_space_chars = re_replace_space_chars\n\n def f(text):\n t = text\n if self.re_replace_space_chars is not None:\n t = self.re_replace_space_chars.sub(\" \", text)\n return TreebankWordTokenizer().tokenize(t)\n\n super(TokenizeText, self).__init__(f=f)\n\n\nclass LemmaTokenizeText(ItemModifier):\n def __init__(self,\n lemmatizer=None,\n max_chunk_length=LEMMATIZE_MAX_CHUNK_SIZE,\n re_replace_space_chars=RE_REPLACE_SPACE_CHARS,\n re_remove_chars=RE_REMOVE_CHARS\n ):\n if lemmatizer is None:\n try:\n lemmatizer=spacy.load(DEFAULT_LEMMATIZER)\n except OSError:\n print(\n f\"Cannot find default lemmatizer {DEFAULT_LEMMATIZER}.\\n\"\n f\"Please download via: $ python -m spacy download 
{DEFAULT_LEMMATIZER}\\n\"\n \"For furhter information, please refer to https://spacy.io/usage/models#download.\"\n )\n\n self.lemmatizer = lemmatizer\n self.maxChunkLength = max_chunk_length\n self.re_replace_space_chars = re_replace_space_chars\n self.re_remove_chars = re_remove_chars\n\n def f(text):\n txt = text\n if self.re_replace_space_chars is not None:\n txt = self.re_replace_space_chars.sub(\" \", txt)\n if self.re_remove_chars is not None:\n txt = self.re_remove_chars.sub(\"\", txt)\n txt = RE_WHITESPACE.sub(\" \", txt)\n chunks = split_into_chunks(txt, self.maxChunkLength)\n return [t.lemma_.strip() for chunk in chunks for t in self.lemmatizer(chunk)]\n\n super(LemmaTokenizeText, self).__init__(f=f)\n\n\nclass SplitText(IteratorModifier):\n def __init__(self,\n re_text_separator=r\"\\s*\\n\\s*\\n\\s*\",\n min_text_length=0,\n do_trim_text=True\n ):\n self.re_text_separator = re.compile(re_text_separator)\n self.minTextLength = min_text_length\n self.do_trim_text = do_trim_text\n\n def __call__(self, other):\n if other.is_tagged:\n def generator():\n n = 0\n for text in other:\n p_c = 0\n paragraphs = self.re_text_separator.split(text[0].strip())\n for p in paragraphs:\n if self.do_trim_text:\n p = p.strip()\n if len(p) >= self.minTextLength:\n yield p, text[1] + [tuple(text[1] + [p_c])]\n n += 1\n p_c += 1\n else:\n def generator():\n n = 0\n for text in other:\n paragraphs = self.re_text_separator.split(text.strip())\n for p in paragraphs:\n if self.do_trim_text:\n p = p.strip()\n if len(p) >= self.minTextLength:\n yield p\n n += 1\n return Iterator(generator, is_tagged=other.is_tagged)\n\n\nclass LineSourceTokenizer(LineSourceIterator):\n def __init__(self,\n input_file,\n tokenizer=TokenizeText(),\n **kwargs\n ):\n self.tokenizer = tokenizer\n super(LineSourceTokenizer, self).__init__(input_file, **kwargs)\n self.handle_first_line()\n if self.is_tagged:\n def get_line(line):\n ll = line.split(self.tag_separator)\n return tokenizer(ll[0]), ast.literal_eval(ll[1])\n else:\n def get_line(line):\n return tokenizer(line)\n self.get_line = get_line\n\n\nclass Re(ItemModifier):\n def __init__(self, reg_ex):\n my_re = re.compile(reg_ex)\n super(Re, self).__init__(\n f=lambda t: list(filter(lambda w: my_re.match(w) is None, t))\n )\n\n\nclass MinMaxTokens(IteratorModifier):\n def __init__(self, min_tokens=1, max_tokens=-1):\n self.minTokens = min_tokens\n self.maxTokens = max_tokens\n\n def __call__(self, iterator):\n if self.maxTokens >= 0:\n if iterator.is_tagged:\n def generator():\n for t in iterator:\n if self.minTokens <= len(t[0]) <= self.maxTokens:\n yield t\n else:\n def generator():\n for t in iterator:\n if self.minTokens <= len(t) <= self.maxTokens:\n yield t\n else:\n if iterator.is_tagged:\n def generator():\n for t in iterator:\n if len(t[0]) >= self.minTokens:\n yield t\n else:\n def generator():\n for t in iterator:\n if len(t) >= self.minTokens:\n yield t\n return Iterator(generator, is_tagged=iterator.is_tagged)\n\n\nclass TokensToFile(IteratorConsumer):\n def __init__(self,\n filename,\n output_tag=True,\n tag_separator=STANDARD_SEPARATOR,\n output_encoding='utf-8',\n input_type=list\n ):\n self.filename = filename\n self.output_tag = output_tag\n self.tag_separator = tag_separator\n self.output_encoding = output_encoding\n self.input_type = input_type\n\n def __call__(self, iterator):\n if iterator.is_tagged:\n if self.output_tag:\n if self.input_type == list:\n def to_str(t_):\n return \" \".join(t_[0]) + self.tag_separator + str(t_[1])\n elif self.input_type == 
str:\n                    def to_str(t_):\n                        return t_[0] + self.tag_separator + str(t_[1])\n                else:\n                    raise TypeError(\"Unsupported input type %s\" % str(self.input_type))\n            else:\n                if self.input_type == list:\n                    def to_str(t_):\n                        return \" \".join(t_[0])\n                elif self.input_type == str:\n                    def to_str(t_):\n                        return t_[0]\n                else:\n                    raise TypeError(\"Unsupported input type %s\" % str(self.input_type))\n        else:\n            if self.input_type == list:\n                def to_str(t_):\n                    return \" \".join(t_)\n            elif self.input_type == str:\n                def to_str(t_):\n                    return t_\n            else:\n                raise TypeError(\"Unsupported input type %s\" % str(self.input_type))\n        n = 0\n        try:\n            if self.output_encoding is None:\n                file = codecs.open(self.filename, 'w')\n            else:\n                file = codecs.open(self.filename, 'w', self.output_encoding)\n            for t in iterator:\n                n += 1\n                file.write(to_str(t))\n                file.write(\"\\n\")\n            file.close()\n        except IOError:\n            s = \"could not write to file '%s'\" % self.filename\n            logger.error(s, exc_info=True)\n        self.number = n\n        return self\n\n\nclass CountTokens(IteratorConsumer):\n    def __init__(self, word_counter=None, tagged_counter=None):\n        if word_counter is None:\n            self.word_counter = Counter()\n        else:\n            self.word_counter = word_counter\n        if tagged_counter is None:\n            self.tagged_counter = Counter()\n        else:\n            self.tagged_counter = tagged_counter\n\n    def __call__(self, iterator):\n        if iterator.is_tagged:\n            def count(tokens_):\n                self.word_counter.update(tokens_[0])\n                self.tagged_counter.update([(w, \";\".join([str(t) for t in tokens_[1]])) for w in tokens_[0]])\n        else:\n            def count(tokens_):\n                self.word_counter.update(tokens_)\n        for tokens in iterator:\n            count(tokens)\n        return self\n","repo_name":"sebastian-sohr/cbc-nlp","sub_path":"src/cbc/nlp/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":15510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"22792813429","text":"from django.conf.urls import patterns, url, include\nfrom django.conf import settings\nfrom web import views\n\nurlpatterns = patterns('',\n url(r'^$', 'web.views.index', name='index'),\n\n url(r'category/(?P\\d+)/$',\n 'web.views.category_photos',\n name=\"category_photos\"),\n\n url(r'^start$',\n views.start,\n name=\"start\"),\n)\n","repo_name":"joshuacox/phial-api","sub_path":"demo/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35703390325","text":"import os\nimport json\nimport constant\nfrom dotenv import load_dotenv\nfrom datetime import datetime, timedelta\nfrom google.cloud import bigquery\nfrom mock import Mock\nimport pytest\nfrom main \\\n import convert_date_string_to_millisecond_int, \\\n generate_drop_off_users_query_with_params, \\\n generate_recovery_users_query_with_params, \\\n calculate_date_of_yesterday_at_utc, \\\n extract_required_keys_for_generate_recovery_query, \\\n fetch_dropoff_and_recovery_user_count_given_steps, \\\n fetch_dropoff_and_recovery_users_count_given_list_of_steps, \\\n fetch_from_all_user_events_table, \\\n calculate_date_n_days_ago_at_utc, \\\n extract_events_and_dates_from_request, \\\n extract_required_keys_for_generate_dropoff_query\n\nimport main\n\nload_dotenv()\n\n# these credentials are used to access google cloud services. See https://cloud.google.com/docs/authentication/getting-started\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"service-account-credentials.json\"\n\nproject_id = main.project_id\nBIG_QUERY_DATASET_LOCATION = main.BIG_QUERY_DATASET_LOCATION\ndataset_id = main.dataset_id\ntable_id = main.table_id\nFULL_TABLE_URL=main.FULL_TABLE_URL\n\nHOUR_MARKING_START_OF_DAY=constant.HOUR_MARKING_START_OF_DAY\nHOUR_MARKING_END_OF_DAY=constant.HOUR_MARKING_END_OF_DAY\n\nsampleEvents = {\n \"stepBeforeDropOff\": \"ENTERED_ONBOARD_SCREEN\",\n \"nextStepList\": [\"ENTERED_REFERRAL_CODE\", \"USER_LOGGED_IN\"],\n}\n\n# For example, if you want one day => startDate: \"2019-04-03\", endDate: \"2019-04-03\"\n# For example, if you want three days => startDate: \"2019-04-03\", endDate: \"2019-04-05\"\nsampleDateIntervals = {\n \"startDate\": \"2019-04-03\", # converted to beginning of the day\n \"endDate\": \"2019-04-06\", # converted to end of the day\n # \"timezone\": \"Africa/Kigali\" # if not passed would assume UTC\n}\n\nstepBeforeDropOff = sampleEvents[\"stepBeforeDropOff\"]\nnextStepList = sampleEvents[\"nextStepList\"]\nrecoveryStep = sampleEvents[\"nextStepList\"]\n\nsampleEventsAndDatesList = [\n {\n \"events\": {\n \"stepBeforeDropOff\": \"ENTERED_ONBOARD_SCREEN\",\n \"nextStepList\": [\"ENTERED_REFERRAL_CODE\", \"USER_LOGGED_IN\"],\n },\n \"dateIntervals\": sampleDateIntervals\n },\n {\n \"events\": {\n \"stepBeforeDropOff\": \"ENTERED_REFERRAL_CODE\",\n \"nextStepList\": [\"USER_LOGGED_IN\"],\n },\n \"dateIntervals\": sampleDateIntervals\n }\n]\n\n@pytest.fixture\ndef mock_big_query():\n return Mock(spec=bigquery.Client())\n\n@pytest.fixture\ndef mock_json_request():\n return Mock()\n\ndef test_calculate_date_of_yesterday_at_utc():\n assert calculate_date_of_yesterday_at_utc() == (datetime.utcnow().date() - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n\ndef test_calculate_date_n_days_ago_at_utc():\n FOUR_DAYS_AGO = 4\n assert calculate_date_n_days_ago_at_utc(FOUR_DAYS_AGO) == (datetime.utcnow().date() - timedelta(days=FOUR_DAYS_AGO)).strftime(\"%Y-%m-%d\")\n\ndef test_convert_date_string_to_millisecond_int():\n assert convert_date_string_to_millisecond_int(\"2019-04-03\", \"00:00:00\") == int(1554249600000.0)\n assert convert_date_string_to_millisecond_int(\"2019-04-06\", \"23:59:59\") == int(1554595199000.0)\n\n\ndef test_extract_required_keys_for_generate_dropoff_query():\n expectedKeysForQuery = {\n \"stepBeforeDropOff\": stepBeforeDropOff,\n \"nextStepList\": nextStepList,\n \"startDateInMilliseconds\": convert_date_string_to_millisecond_int(sampleDateIntervals[\"startDate\"], HOUR_MARKING_START_OF_DAY),\n \"endDateInMilliseconds\": 
convert_date_string_to_millisecond_int(sampleDateIntervals[\"endDate\"], HOUR_MARKING_END_OF_DAY)\n }\n\n assert extract_required_keys_for_generate_dropoff_query(sampleEvents, sampleDateIntervals) == expectedKeysForQuery\n\ndef test_extract_required_keys_for_generate_recovery_query():\n expectedKeysForQuery = {\n \"stepBeforeDropOff\": stepBeforeDropOff,\n \"nextStepList\": nextStepList,\n \"recoveryStep\": recoveryStep\n }\n assert extract_required_keys_for_generate_recovery_query(sampleEvents) == expectedKeysForQuery\n\n\ndef test_generate_drop_off_users_query_with_params():\n startDateInMilliseconds = convert_date_string_to_millisecond_int(sampleDateIntervals[\"startDate\"], HOUR_MARKING_START_OF_DAY)\n endDateInMilliseconds = convert_date_string_to_millisecond_int(sampleDateIntervals[\"endDate\"], HOUR_MARKING_END_OF_DAY)\n\n expectedDropOffQuery = (\n \"\"\"\n select distinct(`user_id`)\n from `{full_table_url}`\n where `user_id` not in\n (\n select `user_id`\n from `{full_table_url}`\n where `event_type` in UNNEST(@nextStepList)\n and `time_transaction_occurred` <= @endDateInMilliseconds\n )\n and `event_type` = @stepBeforeDropOff\n and `time_transaction_occurred` between @startDateInMilliseconds and @endDateInMilliseconds\n \"\"\"\n .format(full_table_url=FULL_TABLE_URL)\n )\n\n expectedDropOffParams = [\n bigquery.ScalarQueryParameter(\"stepBeforeDropOff\", \"STRING\", stepBeforeDropOff),\n bigquery.ArrayQueryParameter(\"nextStepList\", \"STRING\", nextStepList),\n bigquery.ScalarQueryParameter(\"startDateInMilliseconds\", \"INT64\", startDateInMilliseconds),\n bigquery.ScalarQueryParameter(\"endDateInMilliseconds\", \"INT64\", endDateInMilliseconds),\n ]\n\n expectedDropOffQueryAndParams = {\n \"dropOffQuery\": expectedDropOffQuery,\n \"dropOffParams\": expectedDropOffParams\n }\n\n assert generate_drop_off_users_query_with_params(sampleEvents, sampleDateIntervals) == expectedDropOffQueryAndParams\n\n\ndef test_generate_recovery_users_query_with_params():\n dateOfYesterday = calculate_date_of_yesterday_at_utc()\n beginningOfYesterdayInMilliseconds = convert_date_string_to_millisecond_int(dateOfYesterday, HOUR_MARKING_START_OF_DAY)\n endOfYesterdayInMilliseconds = convert_date_string_to_millisecond_int(dateOfYesterday, HOUR_MARKING_END_OF_DAY)\n\n expectedRecoveryQuery = (\n \"\"\"\n select distinct(`user_id`)\n from `{full_table_url}`\n where `event_type` in UNNEST(@recoveryStep)\n and `time_transaction_occurred` between @beginningOfYesterdayInMilliseconds and @endOfYesterdayInMilliseconds\n and `user_id` in\n (\n select `user_id`\n from `{full_table_url}`\n where `user_id` not in\n (\n select `user_id`\n from `{full_table_url}`\n where `event_type` in UNNEST(@nextStepList)\n and `time_transaction_occurred` <= @endOfYesterdayInMilliseconds\n )\n and `event_type` = @stepBeforeDropOff\n and `time_transaction_occurred` between @beginningOfYesterdayInMilliseconds and @endOfYesterdayInMilliseconds\n )\n \"\"\"\n .format(full_table_url=FULL_TABLE_URL)\n )\n\n expectedRecoveryParams = [\n bigquery.ArrayQueryParameter(\"recoveryStep\", \"STRING\", recoveryStep),\n bigquery.ScalarQueryParameter(\"stepBeforeDropOff\", \"STRING\", stepBeforeDropOff),\n bigquery.ArrayQueryParameter(\"nextStepList\", \"STRING\", nextStepList),\n bigquery.ScalarQueryParameter(\"beginningOfYesterdayInMilliseconds\", \"INT64\", beginningOfYesterdayInMilliseconds),\n bigquery.ScalarQueryParameter(\"endOfYesterdayInMilliseconds\", \"INT64\", endOfYesterdayInMilliseconds),\n ]\n\n expectedRecoveryQueryAndParams = 
{\n \"recoveryQuery\": expectedRecoveryQuery,\n \"recoveryParams\": expectedRecoveryParams\n }\n\n assert generate_recovery_users_query_with_params(sampleEvents, sampleDateIntervals) == expectedRecoveryQueryAndParams\n\n\ndef test_fetch_from_all_user_events_table(mock_big_query):\n sampleEventType = \"ENTERED_ONBOARD_SCREEN\"\n sampleQuery = (\n \"\"\"\n select `user_id` from `{full_table_url}` where `event_type` = @eventType\n \"\"\"\n .format(full_table_url=FULL_TABLE_URL)\n )\n sampleParams = [\n bigquery.ScalarQueryParameter(\"eventType\", \"STRING\", sampleEventType),\n ]\n expectedUserIdList = [\n { \"user_id\": \"1a\" },\n { \"user_id\": \"3k\" },\n ]\n\n main.client = mock_big_query\n mock_big_query.query.return_value = expectedUserIdList\n\n result = fetch_from_all_user_events_table(sampleQuery, sampleParams)\n assert result == expectedUserIdList\n mock_big_query.query.assert_called_once()\n\n queryArgs = mock_big_query.query.call_args.args\n queryKeywordArgs = mock_big_query.query.call_args.kwargs\n\n assert queryArgs[0] == sampleQuery\n assert queryKeywordArgs['location'] == BIG_QUERY_DATASET_LOCATION\n\n\n\n# given a dropoff step and expected next step:\n# will assemble the dropoff and recovery query\n# fetch from big query\n# format and return response\ndef test_fetch_dropoff_and_recovery_user_count_given_steps(mock_big_query):\n expectedUserIdList = [\n { \"user_id\": \"1a\" },\n { \"user_id\": \"3k\" },\n ]\n\n expectedUserCount = {\n \"dropOffCount\": len(expectedUserIdList),\n \"recoveryCount\": len(expectedUserIdList),\n }\n\n main.client = mock_big_query\n mock_big_query.query.return_value = expectedUserIdList\n\n result = fetch_dropoff_and_recovery_user_count_given_steps(sampleEvents, sampleDateIntervals)\n main.client.query.assert_called()\n assert main.client.query.call_count == 2\n assert result == expectedUserCount\n\ndef test_extract_events_and_dates_from_request(mock_json_request):\n mock_json_request.get_json.return_value = {\n \"eventsAndDatesList\": sampleEventsAndDatesList\n }\n\n assert extract_events_and_dates_from_request(mock_json_request) == sampleEventsAndDatesList\n\n\n\n# given a list of dropoff steps and next steps find dropoffs/recovery users for all list items\ndef test_fetch_dropoff_and_recovery_users_count_given_list_of_steps(mock_big_query, mock_json_request):\n expectedUserIdList = [\n { \"user_id\": \"1a\" },\n { \"user_id\": \"3k\" },\n ]\n\n expectedUserCountList = [\n {\n \"dropOffCount\": len(expectedUserIdList),\n \"recoveryCount\": len(expectedUserIdList),\n \"dropOffStep\": \"ENTERED_ONBOARD_SCREEN\",\n }, {\n \"dropOffCount\": len(expectedUserIdList),\n \"recoveryCount\": len(expectedUserIdList),\n \"dropOffStep\": \"ENTERED_REFERRAL_CODE\",\n }]\n\n main.EVENTS_AND_DATES_LIST = sampleEventsAndDatesList\n main.client = mock_big_query\n mock_big_query.query.return_value = expectedUserIdList\n mock_json_request.get_json.return_value = None\n\n result = fetch_dropoff_and_recovery_users_count_given_list_of_steps(mock_json_request)\n main.client.query.assert_called()\n assert main.client.query.call_count == 4\n assert result == json.dumps(expectedUserCountList)\n","repo_name":"luke-jordan/jupiter-data","sub_path":"functions/python/funnel-analysis/funnel-analysis_test.py","file_name":"funnel-analysis_test.py","file_ext":"py","file_size_in_byte":10835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"16303529970","text":"\"\"\"\nModule for search algorithms\n\"\"\"\nimport heapq\nfrom constants import SIZE\nfrom input_parser import StateExtractor\nfrom search_tree import move_down, move_up, move_horizontal, remove_vehicle\n\n\nclass GenericSearch:\n \"\"\"\n Class for universal cost search\n \"\"\"\n\n def __init__(self, function_g, function_h, name=\"\", heuristic_name=\"NA\") -> None:\n self.name = name\n self.heuristic_name = heuristic_name\n self.function_g = function_g\n self.function_h = function_h\n self.search_path = []\n self.closed_list = (\n {}\n ) # a normal list, can be a map for fast check, only need the key = \"state_string\"\n self.open_list = (\n []\n ) # the max-heap / sorted queue, storing the tuples (cost, state_string, fuel_update)\n heapq.heapify(self.open_list)\n self.is_final_state_reached = False\n self.final_state = StateExtractor(\"\")\n self.ori_input = \"\"\n\n def search(self, input_str: str):\n \"\"\"\n Run search algo\n \"\"\"\n if input_str.strip() == \"\": # invalid input\n return\n\n fuel_update = \" \"\n parent_state = \"\"\n curr_cost = 0\n self.ori_input = input_str\n init_h = self.function_h(input_str[:36])\n # add root\n self.open_list.append(\n (\n curr_cost + init_h,\n input_str,\n fuel_update,\n parent_state,\n \"\",\n (curr_cost, init_h),\n )\n )\n\n # loop\n while len(self.open_list) > 0:\n # Stop conditions:\n if self.is_final_state_reached:\n break\n\n next_state = heapq.heappop(self.open_list)\n curr_cost, input_str, parent_state = (\n next_state[5][0],\n next_state[1],\n next_state[3],\n )\n\n fuel_update = (\n next_state[2] if len(next_state[1]) < 38 else next_state[1][37:]\n ) # only the first node\n\n if self.closed_list.get(input_str) is not None: # a visited state\n continue\n\n extractor = StateExtractor(input_str, fuel_update)\n\n # Step 0: Add curr_state to the closed_list\n self.closed_list[input_str] = (\n parent_state,\n next_state[4],\n fuel_update[len(fuel_update) - 3 : len(fuel_update)][1:],\n ) # store \n heapq.heappush(\n self.open_list,\n (\n cost_g + cost_h,\n new_move[0],\n fuel_update + \" \" + new_move[1],\n input_str,\n new_move[2],\n (cost_g, cost_h),\n ),\n )\n\n distance += 1\n\n\nclass UCS(GenericSearch):\n \"\"\"'\n UCS algo\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__(\n function_g=self.calculate_g, function_h=self.calculate_heuristic, name=\"UCS\"\n )\n\n def calculate_g(self, curr_cost: int):\n \"\"\"\n find the cost based on number of parents\n \"\"\"\n return curr_cost + 1\n\n def calculate_heuristic(self, __state_str__: str):\n \"\"\"'\n Calculate heruristic based on a string\n \"\"\"\n return 0\n\n\nclass GBFS(GenericSearch):\n \"\"\"'\n Greedy best first search algo\n \"\"\"\n\n def __init__(self, heuristic_function, heuristic_name: str) -> None:\n super().__init__(\n function_g=self.calculate_g,\n function_h=heuristic_function,\n name=\"GBFS\",\n heuristic_name=\"h\" + heuristic_name,\n )\n\n def calculate_g(self, __curr_cost__: int):\n \"\"\"\n find the cost based on number of parents\n \"\"\"\n return 0\n\n\nclass AlgoA(GenericSearch):\n \"\"\"'\n Algorithm A\n \"\"\"\n\n def __init__(self, heuristic_function, heuristic_name: str) -> None:\n super().__init__(\n function_g=self.calculate_g,\n function_h=heuristic_function,\n name=\"A/A*\",\n heuristic_name=\"h\" + heuristic_name,\n )\n\n def calculate_g(self, curr_cost: int):\n \"\"\"\n find the cost based on number of parents\n \"\"\"\n return curr_cost + 1\n\n\ndef calculate_heuristic_1(state_str: str):\n \"\"\"'\n 
Calculate heuristic based on a string\n\n    Heuristic 1: Number of blocking vehicles\n    \"\"\"\n    # first, locate A on row 3\n    row_loc = 2\n    slow = SIZE * row_loc\n    fast = slow\n    end = slow + (SIZE - 1)\n    is_ambulance_found = False\n    heuristic = 0\n    while fast <= end:\n        if state_str[fast] == \"A\":\n            # goal: slow is the first cell of A\n            if not is_ambulance_found:\n                is_ambulance_found = True\n                slow = fast\n        elif is_ambulance_found:\n            if state_str[fast] != \".\" and state_str[fast] != state_str[slow]:\n                slow = fast\n                heuristic += 1\n\n        fast += 1\n    return heuristic\n\n\ndef calculate_heuristic_2(state_str: str):\n    \"\"\"'\n    Calculate heuristic based on a string\n\n    Heuristic 2: Number of blocked positions\n    (position which is not . towards the exit)\n    \"\"\"\n    # first, locate A on row 3\n    row_loc = 2\n    slow = SIZE * row_loc\n    fast = slow\n    end = slow + (SIZE - 1)\n    is_ambulance_found = False\n    heuristic = 0\n    while fast <= end:\n        if state_str[fast] == \"A\":\n            # goal: slow is the first cell of A\n            if not is_ambulance_found:\n                is_ambulance_found = True\n                slow = fast\n        elif is_ambulance_found and (state_str[fast] != \".\"):\n            heuristic += 1\n\n        fast += 1\n    return heuristic\n\n\ndef calculate_heuristic_3(state_str: str):\n    \"\"\"'\n    Calculate heuristic based on a string\n\n    Heuristic 3: Multiply heuristic 1 with a constant = 5\n    \"\"\"\n    return calculate_heuristic_1(state_str) * 5\n\n\ndef calculate_heuristic_4(state_str: str):\n    \"\"\"'\n    Calculate heuristic based on a string\n\n    Heuristic 4: Measure difference between the distance\n    from the ambulance (last cell) to the exit and\n    the number of available cells\n    \"\"\"\n    row_loc = 2\n    slow = SIZE * row_loc\n    fast = slow\n    end = slow + (SIZE - 1)\n    is_ambulance_found = False\n    distance = 0\n    num_of_empty_spot = 0\n    while fast <= end:\n        if state_str[fast] == \"A\":  # first, locate A on row 3\n            # goal: slow is the last cell of A\n            if not is_ambulance_found:\n                is_ambulance_found = True\n\n            slow = fast\n        elif is_ambulance_found:  # calculate the distance\n            distance = end - slow\n            break\n\n        fast += 1\n\n    while fast <= end:\n        if state_str[fast] == \".\":\n            num_of_empty_spot += 1\n\n        fast += 1\n\n    return distance - num_of_empty_spot\n","repo_name":"kennguyen0303/COMP472-MP2","sub_path":"src/search_algos.py","file_name":"search_algos.py","file_ext":"py","file_size_in_byte":9667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27978447480","text":"import requests, pytz, sys\nfrom datetime import datetime, date, timedelta, time\nfrom influxdb import InfluxDBClient\nfrom influxdb.exceptions import InfluxDBClientError\n\nRA_API_KEY = ''\nRA_USERNAME = ''\nINFLUXDB_HOST = 'localhost'\nINFLUXDB_PORT = 8086\nINFLUXDB_USERNAME = 'root'\nINFLUXDB_PASSWORD = 'root'\nINFLUXDB_DATABASE = 'gaming'\npoints = []\n\ntry:\n client = InfluxDBClient(host=INFLUXDB_HOST, port=INFLUXDB_PORT, username=INFLUXDB_USERNAME, password=INFLUXDB_PASSWORD)\n client.create_database(INFLUXDB_DATABASE)\n client.switch_database(INFLUXDB_DATABASE)\nexcept InfluxDBClientError as err:\n print(\"InfluxDB connection failed: %s\" % (err))\n sys.exit()\n\nend = datetime.utcnow().timestamp()\nstart = end - 604800\n\ntry:\n response = requests.get('https://retroachievements.org/API/API_GetAchievementsEarnedBetween.php?z=' + RA_USERNAME + '&y=' + RA_API_KEY + '&u=' + RA_USERNAME +'&f=' + str(start) + '&t=' + str(end))\n response.raise_for_status()\nexcept requests.exceptions.HTTPError as err:\n print(\"HTTP request failed: %s\" % (err))\n sys.exit()\n\ndata = response.json()\nprint(\"Got %s achievements from RetroAchievements\" % len(data))\n\nfor achievement in data:\n date = datetime.strptime(achievement['Date'], \"%Y-%m-%d %H:%M:%S\")\n\n points.append({\n \"measurement\": \"achievement\",\n \"time\": date.isoformat(),\n \"tags\": {\n \"player_id\": RA_USERNAME,\n \"platform\": achievement['ConsoleName'],\n \"player_name\": RA_USERNAME,\n \"title\": achievement['GameTitle'],\n \"application_id\": str(achievement['GameID']),\n \"apiname\": str(achievement['AchievementID']),\n },\n \"fields\": {\n \"name\": achievement['Title'],\n \"description\": achievement['Description'],\n \"icon\": 'https://retroachievements.org' + achievement['BadgeURL']\n }\n })\n\ntry:\n client.write_points(points)\nexcept InfluxDBClientError as err:\n print(\"Unable to write points to InfluxDB: %s\" % (err))\n sys.exit()\n\nprint(\"Successfully wrote %s data points to InfluxDB\" % (len(points)))\n","repo_name":"Epaphus/personal-influxdb","sub_path":"retroachievements.py","file_name":"retroachievements.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"11618122937","text":"from classes.data_cleaning import DataCleaner\nfrom classes.data_aggregating import DataAggregator\nfrom classes.lda_modeling import LdaModel\nfrom classes.dtm_modeling import DtmModel\nfrom classes.dtm_evaluation import Evaluator\nfrom classes.data_exploration import Explorer\nimport pandas as pd\nfrom classes.visualization import Viz\n\nfrom gensim import corpora\n\nimport datetime\n\nif __name__ == '__main__':\n wc_file_path = \"output/topics/all_topics.csv\"\n word_no = 50\n topic_labels = [\n 'Agricultural Science',\n 'Energy Science',\n 'Information Systems',\n 'Chemistry',\n 'Aerodynamics',\n 'Cell Research',\n 'Research Programmes',\n 'Material Science',\n 'Astrophysics',\n 'European Development',\n 'Social Science',\n 'Health Science',\n 'Genetic Research',\n 'Electronics and Photonics',\n 'Neuroscience',\n 'Quantum Physics',\n 'Molecular Biology',\n 'Energy Innovations',\n 'Software Engineering',\n 'Climate Science',\n ]\n\n viz = Viz(wc_file_path, word_no, topic_labels)\n\n viz.generate_word_clouds()\n viz.generate_word_cloud_viz(\"images/4_word_clouds.png\")","repo_name":"mknguyen1406/master-thesis-pipeline","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74400157385","text":"\ndef crearMatriz(filas, columnas):\n matriz = [ [0] * columnas for i in range(filas) ]\n return matriz\n\n\ndef imprimirMatriz(matriz):\n for fila in matriz:\n print(fila)\n \n \n\nfilas = 4\ncolumnas = 4\n\nvalor = 0\n\ninicio = 0\nhasta = 3\n\nmatriz = crearMatriz( filas, columnas)\nimprimirMatriz(matriz)\nciclos = 2\n\nfor j in range(ciclos):\n\n #completamos la primer fila (avanzo por columna de izquierda a derecha)\n for i in range(inicio, hasta+1, 1):\n valor += 1\n matriz[inicio][i] = valor\n\n\n #completar la ultima columna (avanzo por fila de arriba hacia abajo)\n for i in range(inicio+1, hasta+1, 1):\n valor +=1\n matriz[i][hasta] = valor\n\n\n #completar la ultima fila (avanzo por columna de derecha a izquierda)\n for i in range( hasta-1, inicio-1, -1):\n valor +=1\n matriz[hasta][i] = valor\n\n #completar la ultima columna (avanzo por fila de abajo hacia arriba)\n for i in range( hasta-1, inicio, -1 ):\n valor +=1\n matriz[i][inicio] = valor\n\n inicio += 1\n hasta -= 1\n \n\nimprimirMatriz(matriz)\n","repo_name":"Zynno-Dev/Progra1-Ex1","sub_path":"Compilado Examen 1/Clases/Clase 4/Ejercicios Resueltos Trabajo Práctico/TP3Ej2PuntoG.py","file_name":"TP3Ej2PuntoG.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"21327430462","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ntf.set_random_seed(777) # for reproducibility\n\n# Mnist_Data load\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# hyper parameters\nlearning_rate = 0.001\ntrain_epoch = 15\ntrain_batchsize = 100\n\n# Model Class Definition\nclass Model:\n def __init__(self, sess, name):\n self.sess = sess\n self.name = name\n\n def _build_net(self):\n with tf.variable_scope(self.name):\n self.keep_porb = tf.placeholder(tf.float32)\n\n","repo_name":"sangheonEN/Multi-Cam-Capture_Code","sub_path":"example/tensorflow_example/CNN_Class_Model_Classification.py","file_name":"CNN_Class_Model_Classification.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"12321248585","text":"import pytest, pdb\nfrom ml_tools import Similars, CleanText\nfrom ml_tools.fixtures import articles\nimport ml_tools.cleantext as ct\n\ndef test_md_split_1():\n doc = articles()[0]\n paras = CleanText(doc) \\\n .markdown_split_paragraphs() \\\n .value()\n assert len(paras) > 1\n print(paras)\n\ndef test_md_split_all():\n docs = articles()\n paras = CleanText(docs)\\\n .markdown_split_paragraphs()\\\n .value()\n assert len(paras) > 0\n assert len(docs) < len(paras)\n print(paras)\n\n\n@pytest.mark.parametrize(\"content\", [\n (4, \"\"\"\n# Day 1\nLorem Ipsum is simply dummy text of the printing and typesetting industry. \nLorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. \n* LI One\n * LI One Sub\n* LI Two\n* LI Three\n\nIt has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.\n\n# Day 2\nLorem Ipsum is simply dummy text of the printing and typesetting industry.\n\n 1. OL One\n 1. OL Two\n 1. OL Three\n\nLorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book.\n\n\"\"\"),\n\n (1, \"hello\"),\n\n (1, \"*hello*\"),\n\n (1, \"_hello_\"),\n\n (1, \"# test\"),\n\n (3, [\"sentence one\", \"sentence two\\n\\nmultiple lines\"])\n])\ndef test_md_split_specific(content):\n ct, md = content\n res = CleanText(md).markdown_split_paragraphs().value()\n print(res)\n assert len(res) == ct\n\n\n# @pytest.mark.parametrize(\"group_by\", [None, \"article\", \"paragraph\"])\n@pytest.mark.parametrize(\"fmt\", [\"md\", \"txt\"])\n@pytest.mark.parametrize(\"coverage\", [\"basic\", \"full\"])\n@pytest.mark.parametrize(\"mode\", [\"fast\"]) # TODO put \"accurate\" back in, slows down tests\ndef test_normalize(fmt, coverage, mode):\n chain = CleanText(articles(fmt=fmt))\n if coverage == \"basic\":\n chain = chain.keywords(mode=mode)\n else:\n # Revisit this list as cleantext.py grows\n chain = chain\\\n .unmark()\\\n .strip_html()\\\n .normalize_numbers()\\\n .fix_punct()\\\n .only_english()\\\n .only_ascii()\\\n .remove_apos()\\\n .multiple_whitespace()\\\n .keywords(mode=mode)\n clean = chain.join().value()\n assert len(chain.data.lemmas) > 10\n print(chain.data.lemmas[:5])\n assert len(clean) > 10\n print(clean[0])\n\n@pytest.mark.parametrize(\"content\", [\n\"hello\",\n\n\"*hello*\",\n\n\"_hello_\",\n\n\"# test\",\n\n\"\"\"# Markdown Title\nHere is a list of items\n* list item 1\n* list item 2\n\n## Next section\nThis is a paragraph. Blah bla blah.\n\"\"\",\n])\ndef test_unmark(content):\n res = CleanText(content).unmark().value()\n print(res)\n assert type(res) == str\n assert \"#\" not in res\n assert \"*\" not in res\n\n\n@pytest.mark.parametrize(\"content\", [\n\"hello\",\n\n\"test\",\n\n\"\"\"\n
<div>Test string</div>
\n\"\"\"\n])\ndef test_strip_html(content):\n res = CleanText([content]).strip_html().value()\n print(res)\n assert type(res) == str\n assert \"<\" not in res\n\n\n@pytest.mark.parametrize(\"content\", [\n (\"I moved to Portland in 2014\", \"move portland DATE\"),\n (\"I moved to Portland 2014-01-01\", \"move portland DATE\"),\n (\"$5m dollars\", \"MONEY\"),\n (\"$5 million\", \"MONEY\"),\n (\"$ 5.5 million\", \"MONEY\"),\n (\"The third item\", \"ORDINAL item\"),\n (\"The 2nd item\", \"ORDINAL item\"),\n (\"1 megabyte file\", \"NUMBER file\"),\n (\"1gb file\", \"CARDINAL file\"),\n (\"1k people\", \"CARDINAL people\"),\n # (\"Artificial intelligence is a study in computer science that is gaining huge traction\", \"\")\n])\ndef test_keywords_fast(content):\n assert CleanText(content[0]).keywords(mode='fast').join().value() == content[1]\n\n\n@pytest.mark.parametrize(\"content\", [\n (0, \"token1 token2 token3 token4\"),\n (1, [\"token1\", \"token2\", \"token3\", \"token4\"]),\n (2, [[\"token1\", \"token2\"], [\"token3\", \"token4\"]])\n])\ndef test_join(content):\n ndim, content = content\n v = CleanText(content).join().value()\n if ndim == 0:\n assert content == 'token1 token2 token3 token4'\n if ndim == 1:\n assert v == 'token1 token2 token3 token4'\n if ndim == 2:\n assert len(v) == 2\n assert v[0] == 'token1 token2'\n assert v[1] == 'token3 token4'","repo_name":"lefnire/ml-tools","sub_path":"tests/test_cleantext.py","file_name":"test_cleantext.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
+{"seq_id":"73388282186","text":"# Excel Sheet Column Number\nclass Solution:\n def titleToNumber(self, s: str) -> int:\n ans = 0\n if s == \"\": return 0\n base = 1\n for i in range(len(s) - 1, -1, -1):\n ans += (ord(s[i]) - 64) * base\n base *= 26\n return ans\n\nif __name__ == '__main__':\n s = Solution()\n print(s.titleToNumber(\"ZY\"))","repo_name":"GavinPHR/code","sub_path":"phase1/171.py","file_name":"171.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"6242166006","text":"# This script \"assembles\" the NFPA 704 symbol components into a series\n# of complete and valid NFPA 704 symbols. This script was needed due to the\n# high cardinality of valid NFPA 704 symbols given the sheer number of possible\n# combinations of components.\n#\n# Every NFPA-standardized digit is included in the component set. All\n# permutations of the availabe components are created. This script was not\n# designed to be robust or maintainable and is currently suitable only for\n# development and/or testing purposes.\n#\n# @param componentsDir The filesystem path of the directory containing\n# the NFPA 704 component images.\n# @param outputDir The filesystem path of the directory to which all\n# generated NFPA 704 symbols should be saved.\n\nimport glob\nimport os\nfrom PIL import Image\nimport sys\n\n# Collect arguments. Not yet any validation or sanitization here so be careful.\ncomponentsDirPath = os.path.realpath(sys.argv[1])\noutputDirPath = os.path.realpath(sys.argv[2])\n\n# Enumerate blank diamond \"templates.\"\nimageComponentsDiamondsGlob = os.path.join(\n componentsDirPath,\n 'diamond_*.png'\n)\nimageComponentsDiamondPaths = glob.glob(imageComponentsDiamondsGlob)\n\n# Enumerate numeric digits for red, yellow, and blue quadrants.\nimageComponentsDigitsGlob = os.path.join(\n componentsDirPath,\n 'digit_*.png'\n)\nimageComponentsDigitPaths = glob.glob(imageComponentsDigitsGlob)\nimageComponentsDigitPaths.sort()\n\n# Enumerate special (white) quadrant content.\nimageComponentsSpecialGlob = os.path.join(\n componentsDirPath,\n 'special_*.png'\n)\nimageComponentsSpecialPaths = glob.glob(imageComponentsSpecialGlob)\n\n# Given that all diamond templates are 300x300 pixels and the components are\n# 71x71 pixels, the following tuples are coordinates for proper placement of the\n# respective pieces.\nquadrantRedCoords = (114, 41)\nquadrantBlueCoords = (41, 114)\nquadrantYellowCoords = (188, 114)\nquadrantWhiteCoords = (114, 188)\n\n# This is going to get ugly. REVISE THIS.\n# Don't close reopen files constantly. Abstract this.\n# Break nested loops into functions? 
Use itertools.combinations?\n# Iterate base diamonds.\nimageCount = 0\nfor baseDiamond in imageComponentsDiamondPaths:\n baseDiamond = Image.open(baseDiamond)\n\n # Iterate blue digits.\n for digitBluePath in imageComponentsDigitPaths:\n baseDiamondCopyBlue = baseDiamond.copy()\n digitBlueImage = Image.open(digitBluePath)\n baseDiamondCopyBlue.paste(digitBlueImage, quadrantBlueCoords, digitBlueImage)\n\n # Iterate red digits.\n for digitRedPath in imageComponentsDigitPaths:\n baseDiamondCopyRed = baseDiamondCopyBlue.copy()\n digitRedImage = Image.open(digitRedPath)\n baseDiamondCopyRed.paste(digitRedImage, quadrantRedCoords, digitRedImage)\n\n # Iterate yellow digits.\n for digitYellowPath in imageComponentsDigitPaths:\n baseDiamondCopyYellow = baseDiamondCopyRed.copy()\n digitYellowImage = Image.open(digitYellowPath)\n baseDiamondCopyYellow.paste(\n digitYellowImage,\n quadrantYellowCoords,\n digitYellowImage\n )\n\n # Iterate white symbols and save images.\n for digitWhitePath in imageComponentsSpecialPaths:\n baseDiamondCopyWhite = baseDiamondCopyYellow.copy()\n digitWhiteImage = Image.open(digitWhitePath)\n baseDiamondCopyWhite.paste(\n digitWhiteImage,\n quadrantWhiteCoords,\n digitWhiteImage\n )\n baseDiamondCopyWhite.save(\n os.path.join(outputDirPath, str(imageCount) + '.png')\n )\n imageCount += 1\n\n\n","repo_name":"monotonee/opencv_nfpa704","sub_path":"src/assemble_positives.py","file_name":"assemble_positives.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
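The "Use itertools.combinations?" note in the script points in the right direction, except the four quadrants vary independently, so itertools.product is the fitting tool. A sketch of how the nested loops could collapse into one, reusing the variables the script already defines (it trades away the incremental .copy() reuse, so component images are reopened each iteration):

import itertools

combos = itertools.product(
    imageComponentsDiamondPaths,
    imageComponentsDigitPaths,    # blue quadrant
    imageComponentsDigitPaths,    # red quadrant
    imageComponentsDigitPaths,    # yellow quadrant
    imageComponentsSpecialPaths,  # white quadrant
)
coords = (quadrantBlueCoords, quadrantRedCoords,
          quadrantYellowCoords, quadrantWhiteCoords)

for count, (diamondPath, *componentPaths) in enumerate(combos):
    symbol = Image.open(diamondPath).copy()
    for componentPath, quadrant in zip(componentPaths, coords):
        component = Image.open(componentPath)
        symbol.paste(component, quadrant, component)
    symbol.save(os.path.join(outputDirPath, str(count) + '.png'))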
+{"seq_id":"73662425544","text":"import zipfile\r\nfrom zipfile import ZipFile, is_zipfile\r\nfrom os import walk, path\r\nfrom src.util_functions import mkdirp, get_file_size, get_directory_size, human_sizeC, clearColor\r\nfrom src.exceptions import PAKFileFormatError, PAKMismatchedCRC, PAKFileZippingError, PAKFileUnzippingError\r\nfrom src.colors import colors as c\r\n\r\n\r\ndef get_folder_size(folder_path: str):\r\n return str(human_sizeC(get_directory_size(folder_path)))\r\n\r\n\r\ndef get_size_of_file(file_path: str):\r\n return str(human_sizeC(get_file_size(file_path)))\r\n\r\n\r\ndef check_pak_validity(input_pakfile: str):\r\n is_zip = is_zipfile(input_pakfile)\r\n if not is_zip:\r\n # raise PAKFileFormatError(f\"Invalid Format - This File is NOT a PAKFile! ({input_pakfile})\")\r\n ...\r\n try:\r\n with ZipFile(input_pakfile, 'r') as zip:\r\n results_of_test = zip.testzip()\r\n except zipfile.BadZipfile as e:\r\n # raise PAKFileFormatError(f\"Invalid Format - This File is NOT a PAKFile! ({input_pakfile + ' - ' + e})\")\r\n results_of_test = \"Invalid File Format!\"\r\n if results_of_test is not None:\r\n # raise PAKMismatchedCRC(\"Corruption - There is a CRC or File Header Mismatch in the PAKFile!\")\r\n ...\r\n return is_zip, results_of_test\r\n\r\n\r\ndef find_all_files_recursive(directory: str):\r\n file_paths = []\r\n for root, directories, files in walk(directory):\r\n for filename in files:\r\n filepath = path.join(root, filename)\r\n file_paths.append(filepath)\r\n return file_paths\r\n\r\n\r\ndef write_to_pakfile(input_directory: str = \"./\", output_name: str = \"data2.pak\"):\r\n try:\r\n file_paths = []\r\n for root, directories, files in walk(input_directory):\r\n for filename in files:\r\n filepath = path.join(root, filename)\r\n file_paths.append(filepath)\r\n\r\n print(\"\\nThe Following Files will be PAK'ed:\")\r\n for file_name in file_paths:\r\n print(c.PURPLE + file_name)\r\n clearColor()\r\n\r\n with ZipFile(output_name, 'w', zipfile.ZIP_DEFLATED) as zipobj:\r\n length = len(input_directory)\r\n for root, directories, files in walk(input_directory):\r\n folder = root[length:]\r\n for filename in files:\r\n zipobj.write(path.join(root, filename), path.join(folder, filename))\r\n except:\r\n raise PAKFileZippingError(\"An error occurred whilst generating the PAK File!\")\r\n\r\n\r\ndef read_from_zipfile(input_pakfile: str):\r\n with ZipFile(input_pakfile, 'r') as zipobj:\r\n zipobj.printdir()\r\n\r\n\r\ndef extract_zipfile(input_pakfile: str, output_path: str):\r\n try:\r\n mkdirp(output_path)\r\n with ZipFile(input_pakfile, 'r') as zipobj:\r\n zipobj.extractall(path=output_path)\r\n except:\r\n raise PAKFileUnzippingError(\"An error occurred whilst extracting the PAK File!\")\r\n","repo_name":"RHQOnline/DL2-PAKFile-Utility","sub_path":"src/pak_functions.py","file_name":"pak_functions.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"}
+{"seq_id":"23068508840","text":"import cv2 \nimport numpy as np; \n \npath = r\"../CRBD/0-CRBD-bag/bag (1).jpg\"\n\nimg = cv2.imread(path, cv2.IMREAD_GRAYSCALE) \ncv2.imwrite(\"gs_1.jpg\", img)\n\nsize = (320, 240) \nblob = cv2.dnn.blobFromImage(img,\n scalefactor=1/255,\n size=size,\n swapRB=True)\n \n# let's see our transformed image- blob\n# print(blob)\ncv2.imwrite(\"blob_1.jpg\", blob)\nprint(f'Blob Shape : {np.array(blob).shape}')\n\n\"\"\" # Set up the detector with default parameters. \ndetector = cv2.SimpleBlobDetector() \n\n# Detecting blobs. \nkeypoints = detector.detect(img) \"\"\"\nprint(\"BUrada\")\n# Draw detected blobs as red circles. \n# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob \n\"\"\" im_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array([]), (0, 0, 255), \n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) \n# Show keypoints \ncv2.imwrite(\"Keypoints.jpg\", im_with_keypoints) \"\"\"\ncv2.waitKey(0) ","repo_name":"Shubuo/ZiDrone_Vision","sub_path":"crop_row_detection/src/blob_detect.py","file_name":"blob_detect.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23951495871","text":"from PyQt5.QtGui import QColor, QPainter, QPen\nfrom RotateRect import RotateRect\nimport math\n\n\ndef dist(p, q):\n return math.sqrt(sum((px - qx) ** 2.0 for px, qx in zip(p, q)))\n\n\nclass Sensor(object):\n def __init__(self):\n self._d = 0\n self.virtualA = 0\n return\n\n def update(self):\n pass\n\n def Dist(self):\n return self._d\n\n\nclass GraphicSensor(Sensor):\n def __init__(self, w, x, y, a):\n super().__init__()\n\n self.w = w\n self.h = self.w\n\n self.x = x\n self.y = y\n\n self.graphX = x\n self.graphY = y\n\n self.cx = x\n self.cy = y\n\n self.a = 0\n self.virtualA = a\n\n self.halfDiag = self._getHalfSquareDiag(self.w)\n self.coeff = 0\n self.b = 0\n self.vert = False\n\n self.debug = False\n\n self.dmax = 200\n\n def angle(self, a, cx, cy):\n self.a = a\n self.cx = cx\n self.cy = cy\n\n def draw(self, painter, color=QColor('blue')):\n rr = RotateRect.create(x=self.x, y=self.y,\n w=self.w, h=self.h, a=self.a, painter=painter, color=color, rx=self.cx, ry=self.cy)\n\n self.graphX = (rr.crx + rr.clx) / 2\n self.graphY = (rr.cry + rr.cly) / 2\n\n def create(w, x, y, cx, cy, a, virtualA, painter, canvas, color=QColor('blue'), debug=False):\n gs = GraphicSensor(w, x, y, virtualA)\n gs.debug = debug\n gs.angle(a, cx, cy)\n gs.draw(painter, color)\n gs.update(painter, canvas)\n\n return gs\n create = staticmethod(create)\n\n def _getPointOnCircle(self, cx, cy, r, a):\n a = math.radians(a)\n x = cx + r * math.cos(a)\n y = cy + r * math.sin(a)\n\n return x, y\n\n def _getHalfSquareDiag(self, l):\n return (l * math.sqrt(2)) / 2\n\n def update(self, painter, canvas):\n x, y = self._getPointOnCircle(\n self.graphX, self.graphY, self.halfDiag, self.virtualA + self.a)\n\n self.tx = x\n self.ty = y\n\n if x - self.graphX == 0:\n self.vert = True\n else:\n self.vert = False\n\n self.coeff = (y - self.graphY) / (x - self.graphX)\n self.b = self.graphY - self.coeff * self.graphX\n\n self.goThrough(self.dmax, painter, canvas)\n\n return\n\n def goThrough(self, dmax, painter, canvas):\n lim = 90-self.virtualA\n\n i = self.graphX\n step = 0.1\n\n found = False\n\n img = canvas.pixmap().toImage()\n\n y0 = 0\n xi = 0\n yi = 0\n\n if not self.vert:\n y0 = self.coeff * self.graphX + self.b\n y = y0\n\n additive = False\n\n if self.virtualA <= 0:\n if self.a <= lim or self.a > lim+180:\n additive = True\n else:\n if self.a > lim+180 and self.a <= lim+180+180:\n additive = True\n\n while dist((self.graphX, y0), (i, y)) < dmax and not found:\n y = self.coeff * i + self.b\n\n pixel = img.pixel(i, y)\n if QColor(pixel) == QColor('black'):\n found = True\n break\n\n if additive:\n i += step\n else:\n i -= step\n\n xi = i\n yi = y\n else:\n xi = i\n y0 = self.graphY\n i = y0\n\n while dist((self.graphX, y0), (xi, i)) < dmax and not found:\n pixel = img.pixel(xi, i)\n if QColor(pixel) == QColor('black'):\n found = True\n break\n\n if self.a % 360 < lim or self.a % 360 >= lim+180:\n i -= step\n else:\n i += step\n\n yi = i\n\n if not found:\n self._d = dmax\n else:\n self._d = dist((self.graphX, y0), (xi, yi))\n\n if self.debug:\n lastPen = painter.pen()\n\n pen = QPen()\n pen.setWidth(1)\n pen.setColor(QColor(\"green\"))\n painter.setPen(pen)\n painter.drawLine(self.graphX, y0, xi, yi)\n\n painter.setPen(lastPen)\n\n return\n","repo_name":"Virkin/DeepLearning","sub_path":"Sensor.py","file_name":"Sensor.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"40792251547","text":"#######################################################################\r\n## THROWSHED ##\r\n#######################################################################\r\nimport os\r\nimport numpy as np\r\nfrom osgeo import gdal, ogr, osr\r\nfrom timeit import default_timer as timer\r\n\r\n#######################################################################\r\n## FUNCTIONS\r\n\r\ndef main(dem_path, point_layer_path, line_layer_path, throwshed_output_folder, throwshed_file, throwshed_mode, use_viewshed, use_lines, EPSG,\r\n cumulative_throwshed, initial_height, initial_velocity, drag_coefficient, cross_sectional_area, mass,\r\n eyes_height, target_height, wall_height, constant, area_addition, wobble_distance, band_number=1,\r\n interpolation=1, alpha_min=-90.0, alpha_max=90.0, gravitational_acceleration=-9.81, air_density=1.225, dalpha=5,\r\n trajectory_segment_width=None):\r\n \"\"\"Just main function with controls, global variables settings and triggers to other functions\"\"\"\r\n # making sure the vertical angle has correct range\r\n if alpha_max < alpha_min:\r\n print(\"Minimal vertical shooting angle higher than maximal.\")\r\n exit()\r\n # Global variables\r\n global SRS, DP, PLP, TOF, TF, TM, UV, UL, CT, BN, INT, BF, TSW, AL, DDS, DB, DA, DGT, DMINH, DMAXH, IH, IV, \\\r\n DC, CSA, M, CONST, AA, WD, GA, AD, EH, TH, NDV, TA, VDS, VB, VA, VGT\r\n # CRS and other variable definition\r\n SRS = osr.SpatialReference()\r\n SRS.ImportFromEPSG(EPSG)\r\n TOF, TF, TM, UV, UL, CT, BN, INT, IH, IV, DC, CSA, M, CONST, AA, WD, GA, AD, EH, TH = \\\r\n throwshed_output_folder, throwshed_file, throwshed_mode, use_viewshed, use_lines, cumulative_throwshed, \\\r\n band_number, interpolation, initial_height, initial_velocity, drag_coefficient, cross_sectional_area, mass, \\\r\n constant, area_addition, wobble_distance, gravitational_acceleration, air_density, eyes_height, target_height\r\n # get DEM data and assign them as global (and referencing datasource)\r\n DDS, DB, DA, DGT, NDV = get_raster_from_file(dem_path)\r\n # assign trajectory segment width\r\n TSW = np.min(np.abs([DGT[1],DGT[5]])) if trajectory_segment_width == None else trajectory_segment_width\r\n # obtain list of point geometries (and all referencing data it's dependent on)\r\n point_layer_ds, point_layer, point_feature_list, point_geom_list = get_geom_list_from_file(point_layer_path)\r\n # burn lines as obstacles into DEM, creating new DEM (Digital terrain model -> Digital surface model)\r\n if UL:\r\n burn_obstacles(line_layer_path, wall_height)\r\n # get minimum and maximum DEM height\r\n DMINH, DMAXH = get_min_max_height()\r\n # obtain list of vertical angles\r\n AL = np.arange(np.radians(alpha_min), np.radians(alpha_max + dalpha), np.radians(dalpha))\r\n AL[-1] = np.radians(alpha_max) # in case of last angle being larger than 90°\r\n # throwshed array containing zeroes at first, will be edited later, dimensions same as dimensions of DEM raster\r\n TA = [np.zeros((DA.shape[0], DA.shape[1]), np.int16), np.zeros((DA.shape[0], DA.shape[1]), np.int16)]\r\n # cycle calculating throwshed for each point\r\n for i, point_geom in enumerate(point_geom_list):\r\n # compute throwshed for 1 point\r\n throwshed(point_geom, i)\r\n #break #throwshed only for the first point is computed\r\n # temporary viewshed files have to be removed\r\n if UV:\r\n VDS = VB = VA = VGT = None\r\n os.remove(TOF + \"\\\\viewshed.tif\")\r\n # finally, array is written into band of output throwshed raster\r\n create_raster_file(TF, TA, 
1, gdal.GDT_Int16, NDV)\r\n DDS = DB = DA = DGT = NDV = None\r\n # Digital surface model file will be removed as well\r\n if UL:\r\n remove_temp_files(DSM)\r\n\r\ndef get_raster_from_file(file_path):\r\n \"\"\"Get DEM datasource, band, array and geotransformation data and nodata value\"\"\"\r\n # import DEM datasource\r\n dem_ds = gdal.Open(file_path)\r\n # select band\r\n dem_band = dem_ds.GetRasterBand(BN)\r\n # DEM cell values into array\r\n dem_array = dem_band.ReadAsArray()\r\n # transformation data describing DEM\r\n dem_gt = dem_ds.GetGeoTransform()\r\n # nodata value\r\n no_data_value = dem_band.GetNoDataValue()\r\n return dem_ds, dem_band, dem_array, dem_gt, no_data_value\r\n\r\ndef get_min_max_height():\r\n \"\"\"Get minimum and maximum DEM height\"\"\"\r\n # sometimes the function returns None, therefore statistics need to be calculated first\r\n if DB.GetMinimum() == None or DB.GetMaximum() == None:\r\n DB.ComputeStatistics(0)\r\n return DB.GetMinimum(), DB.GetMaximum()\r\n\r\ndef get_geom_list_from_file(file_path):\r\n \"\"\"Get features' geometries from vector layer file\"\"\"\r\n # import vector layer datasource\r\n layer_ds = ogr.Open(file_path, 0) # 1 = editing, 0 = read only. Datasource\r\n # vector layer\r\n layer = layer_ds.GetLayer()\r\n # list of features, then geometries\r\n feature_list = [layer.GetFeature(number) for number in range(0, layer.GetFeatureCount())]\r\n geom_list = [feature.GetGeometryRef() for feature in feature_list]\r\n return layer_ds, layer, feature_list, geom_list\r\n\r\ndef burn_obstacles(line_layer_path, wall_height):\r\n \"\"\"Lines that resemble walls/obstacles are burnt into DEM\"\"\"\r\n # get list of line geometries\r\n line_ds, line_layer, line_feature_list, line_geom_list = get_geom_list_from_file(line_layer_path)\r\n # calculate minimal buffer distance, at which the obstacle will always be respected\r\n buffer_dist = TSW / 2 + (DGT[1] ** 2 + DGT[5] ** 2) ** (1 / 2) / 2 - (TSW / 2) / (\r\n DGT[1] ** 2 + DGT[5] ** 2) ** (1 / 2) % 1\r\n # 1st buffer is created and in case of multiple lines the cycle is utilized to create and unite buffers\r\n buffer_geom = line_geom_list[0].Buffer(buffer_dist, 1)\r\n if len(line_geom_list) > 1:\r\n for line_geom in line_geom_list[1:]:\r\n buffer_geom = buffer_geom.Union(line_geom.Buffer(buffer_dist, 1))\r\n # buffer file temporary name\r\n BFT = \"buffer_temp\"\r\n # create layer for buffer\r\n buffer_vector_ds, buffer_outlayer = create_and_use_outlayer(BFT, buffer_geom)\r\n # create buffer datasource\r\n buffer_raster_ds, buffer_band = create_raster_file(BFT, [0], 0, gdal.GDT_Float32, 0)\r\n # Buffer polygon is rasterized\r\n gdal.RasterizeLayer(buffer_raster_ds, [1], buffer_outlayer, burn_values=[wall_height])\r\n # Sum of initial dem and buffer rasters\r\n buffer_array = buffer_band.ReadAsArray()\r\n global DSM, DDS, DB, DA\r\n DA = np.add(DA, buffer_array)\r\n # create new raster datasource for DEM (DSM)\r\n DSM = \"dsm_temp\"\r\n DDS, DB = create_raster_file(DSM, [DA], 1, gdal.GDT_Float32, NDV)\r\n # delete all temporary files\r\n buffer_raster_ds = buffer_vector_ds = buffer_outlayer = buffer_band = None\r\n remove_temp_files(BFT)\r\n\r\ndef create_and_use_outlayer(layer_name, geom):\r\n \"\"\"Creates file for output layer, which will contain new features. 
Returns created layer.\"\"\"\r\n # create driver and output data source\r\n outds = ogr.GetDriverByName(\"ESRI Shapefile\").CreateDataSource(TOF + \"\\\\\" + layer_name + \".shp\")\r\n # create output layer\r\n outlayer = outds.CreateLayer(layer_name, SRS)\r\n # feature definition and setting\r\n feature = ogr.Feature(outlayer.GetLayerDefn())\r\n feature.SetGeometry(geom)\r\n # assign feature into output layer\r\n outlayer.CreateFeature(feature)\r\n return outds, outlayer\r\n\r\ndef create_raster_file(raster_name, dem_array_list, method, GDT, no_data):\r\n \"\"\"Creates raster file. Method 0 returns empty datasource and band. Method 1 returns datasource and band with written array\"\"\"\r\n # create driver and output data source\r\n outds = gdal.GetDriverByName('GTiff').Create(TOF + \"\\\\\" + raster_name + \".tif\", xsize=DA.shape[1],\r\n ysize=DA.shape[0], bands=len(dem_array_list), eType=GDT)\r\n # assign geotransformation, projection, band and nodata settings\r\n outds.SetGeoTransform(DGT)\r\n outds.SetProjection(SRS.ExportToWkt())\r\n for i, dem_array in enumerate(dem_array_list):\r\n raster_band = outds.GetRasterBand(i+1)\r\n raster_band.SetNoDataValue(no_data)\r\n if method:\r\n raster_band.WriteArray(dem_array)\r\n return outds, raster_band\r\n\r\ndef remove_temp_files(temp_file):\r\n \"\"\"Deletes all temporary files with assigned name\"\"\"\r\n for format in [file.split('.')[1] for file in os.listdir(TOF) if file.split('.')[0] == temp_file]:\r\n os.remove(TOF + '\\\\' + temp_file + \".\" + format)\r\n\r\ndef throwshed(point_geom, k):\r\n \"\"\"Calculates throwshed for 1 point\"\"\"\r\n global SP, WH\r\n # create shooting point with Z coordinate that is interpolated from DEM\r\n SP = ogr.Geometry(ogr.wkbPoint)\r\n SP.AddPoint(point_geom.GetX(), point_geom.GetY(), float(int_function(point_geom.GetX(), point_geom.GetY()))+IH)\r\n # generate set of trajectories for vertical angle range with basic step\r\n trajectory_simple_set()\r\n # insert trajectories between those from simple set, to ensure throwshed's edge accuracy\r\n trajectory_initial_set()\r\n # define Ascending and Descending Trajectory Fields\r\n create_trajectory_fields()\r\n # if viewshed is ON, it has to be created and deleted later\r\n if UV:\r\n create_viewshed()\r\n # Assign values into arrays of 2 bands (ATF and DTF)\r\n assign_values_to_throwshed(k)\r\n\r\ndef int_function(X, Y):\r\n \"\"\"Interpolates height of point from DEM cells\"\"\"\r\n # nearest neighbour\r\n if INT == 0:\r\n column = round(np.abs((X - (DGT[0] + DGT[1] / 2)) / DGT[1]))\r\n row = round(np.abs((Y - (DGT[3] + DGT[5] / 2)) / DGT[5]))\r\n return DA[row][column]\r\n # bilinear\r\n else:\r\n left_column = int(np.abs((X - (DGT[0] + DGT[1] / 2)) / DGT[1])) # index of left column in set of four cells\r\n upper_row = int(np.abs((Y - (DGT[3] + DGT[5] / 2)) / DGT[5])) # index of the upper row in set of four cells\r\n X_left_cell = DGT[0] + DGT[1] / 2 + left_column * DGT[1] # X coordinate of left cells\r\n Y_lower_cell = DGT[3] + DGT[5] / 2 + (upper_row + 1) * DGT[5] # Y coordinate of lower cells\r\n H_1 = DA[upper_row][left_column] # height of upper left cell\r\n H_2 = DA[upper_row][left_column + 1] # height of upper right cell\r\n H_3 = DA[upper_row + 1][left_column] # height of lower left cell\r\n H_4 = DA[upper_row + 1][left_column + 1] # height of lower right cell\r\n H_int_1 = ((X - X_left_cell) * (H_4 - H_3)) / (np.abs(DGT[1])) + H_3 # interpolated height among lower cells\r\n H_int_2 = ((X - X_left_cell) * (H_2 - H_1)) / (np.abs(DGT[1])) + H_1 # 
interpolated height among upper cells\r\n return ((Y - Y_lower_cell) * (H_int_2 - H_int_1)) / (np.abs(DGT[5])) + H_int_1\r\n\r\ndef trajectory_simple_set():\r\n \"\"\"Generate set of trajectories for vertical angle range with basic step\"\"\"\r\n # trajectory dictionary, that will contain all trajectories, their initial shooting angle etc.\r\n global TS\r\n # element in list begins with alpha value and continues with list of x and y coords lists in one trajectory\r\n TS = [[alpha, generate_trajectory(alpha)] for alpha in AL]\r\n\r\ndef generate_trajectory(alpha):\r\n \"\"\"Generates trajectory from input parameters\"\"\"\r\n # initial drag\r\n d = -AD * IV ** 2 * DC * CONST * (CSA + AA) / (2 * M)\r\n # initial projectile velocity\r\n V = IV\r\n # list of all trajectory points' coordinates\r\n points = [[0.0], [SP.GetZ()]]\r\n # velocity x and y elements\r\n V_x = V * np.cos(alpha)\r\n V_y = V * np.sin(alpha)\r\n # drag x and y elements\r\n d_x = d * np.cos(alpha)\r\n d_y = d * np.sin(alpha)\r\n # time step is set to value approximate to half of cell size\r\n dt = TSW / V / 2\r\n # x and y steps\r\n dX = V_x * dt + d_x / 2 * dt ** 2\r\n dY = V_y * dt + (d_y + GA) / 2 * dt ** 2\r\n # cycle calculating every new trajectory point one by one\r\n while True:\r\n # coords\r\n points[0].append(points[0][-1] + dX)\r\n points[1].append(points[1][-1] + dY)\r\n # when last height is less than minimal DEM height, cycle breaks and last values are reinterpolated into minimal DEM height (to prevent errors in extreme situations of further functions)\r\n if points[1][-1] <= DMINH:\r\n # if the shooting point is on the cell with minimal height of DEM, there will be only 2 points for trajectories starting with angle <= 0 and these points can't be the same, so this is the only exception where last points of trajectories are not recalculated (interpolated) to minimal DEM height\r\n if len(points[1]) == 2:\r\n pass\r\n # but normally the last segment passing the minimal DEM height is clipped by this height and the last point is interpolated to this height\r\n else:\r\n points[1][-1] = DMINH\r\n points[0][-1] = points[0][-2] + (points[1][-1] - points[1][-2])/np.tan(alpha)\r\n break\r\n # new vertical angle\r\n alpha = np.arctan(dY / dX)\r\n # new velocity\r\n V = ((dX / dt) ** 2 + (dY / dt) ** 2) ** (1 / 2)\r\n # new drag\r\n if (points[0][-1] ** 2 + (points[1][-1] - SP.GetZ()) ** 2) ** (1 / 2) < WD:\r\n d = -AD * V ** 2 * DC * CONST * (CSA + AA) / (2 * M)\r\n else:\r\n d = -AD * V ** 2 * DC * CSA / (2 * M)\r\n # new drag x and y elements\r\n d_x = d * np.cos(alpha)\r\n d_y = d * np.sin(alpha)\r\n # new velocity x and y elements\r\n V_x += d_x * dt\r\n V_y += (d_y + GA) * dt\r\n # time step is recalculated to value approximate to half of cell size\r\n dt = TSW / V / 2\r\n # new x and y steps\r\n dX = V_x * dt + d_x / 2 * dt ** 2\r\n dY = V_y * dt + (d_y + GA) / 2 * dt ** 2\r\n return points\r\n\r\ndef trajectory_initial_set():\r\n \"\"\"Calculates and inserts trajectories between those in simple set, to make it denser and to ensure throwshed's\r\n edge accuracy. 
Calculates and returns trajectory envelope points list (its useful section).\"\"\"\r\n global TS, envelope\r\n # new and previous trajectory end x, first ones are random, just to make sure the cycle does not stop immediately\r\n ntex = [(max(TS, key=lambda x: x[1][0][-1])[1][0][-1]+TSW)*2,(max(TS, key=lambda x: x[1][0][-1])[1][0][-1]+TSW)*3]\r\n # cycle that finds the furthest possible trajectory for minimal DEM height respecting the edge accuracy\r\n while round(np.abs(ntex[0] - ntex[1])/TSW):\r\n #most distant trajectory index\r\n mdti = TS.index((max(TS, key=lambda x: x[1][0][-1])))\r\n # adds new trajectories before and after current furthest trajectory\r\n if mdti != 0 and mdti != len(TS)-1:\r\n for new_alpha in [(TS[mdti+1][0] - TS[mdti][0]) / 2 + TS[mdti][0], (TS[mdti][0] - TS[mdti-1][0]) / 2 + TS[mdti-1][0]]:\r\n TS.append([new_alpha, generate_trajectory(new_alpha)])\r\n # for furthest trajectory that is also the first or last one, only one trajectory is added accordingly\r\n elif mdti == len(TS)-1:\r\n new_alpha = (TS[mdti][0] - TS[mdti-1][0]) / 2 + TS[mdti-1][0]\r\n TS.append([new_alpha, generate_trajectory(new_alpha)])\r\n mdti += 1 #this is just so that mdti gets higher like length of TS does (to cope with length of TS getting bigger, these 2 are compared in ntex)\r\n elif mdti == 0:\r\n new_alpha = (TS[mdti+1][0] - TS[mdti][0]) / 2 + TS[mdti][0]\r\n TS.append([new_alpha, generate_trajectory(new_alpha)])\r\n ntex = [max(TS[-1][1][0][-1],TS[-2][1][0][-1]) if mdti != 0 and mdti != len(TS)-1 else TS[-1][1][0][-1], ntex[0]]\r\n TS.sort(key=lambda x: x[0])\r\n\r\n # initial trajectory index (starting will be the trajectory with furthest reach)\r\n iti = TS.index((max(TS, key=lambda x: x[1][0][-1])))\r\n # function ends after finding out the last trajectory is the one with furthest reach (rest of the code is not applicable) and empty envelope is returned, otherwise trajectory set will get denser with following code\r\n if iti == len(TS)-1:\r\n envelope = [[], []]\r\n return\r\n # envelope needs starting point before adding more points to it\r\n envelope = [[TS[iti][1][0][-1]], [TS[iti][1][1][-1]]]\r\n # X and Y Inner Interection from Previous Cycle will be set to furthest point during first cycle\r\n XIIPR, YIIPR = envelope[0][0], envelope[1][0]\r\n # cycle that inserts trajectories between furthest trajectory at minimal DEM height and maximal DEM height\r\n while True:\r\n # generate new trajectory in between actual and following\r\n new_alpha = abs(TS[iti][0] - TS[iti + 1][0]) / 2 + TS[iti][0]\r\n TS.insert(iti+1, [new_alpha, generate_trajectory(new_alpha)])\r\n # in case of added trajectory having further reach than previously furthest trajectory (actual)\r\n if TS.index((max(TS, key=lambda x: x[1][0][-1]))) == iti + 1:\r\n iti += 1\r\n # even starting point of envelope needs to be updated\r\n envelope = [[TS[iti][1][0][-1]], [TS[iti][1][1][-1]]]\r\n XIIPR, YIIPR = envelope[0][0], envelope[1][0]\r\n continue\r\n\r\n # get intersection of actual and following (newly created) trajectory (Right Outer Intersection)\r\n XROI, YROI = intersection_of_trajectories(iti, iti+1)\r\n # get intersection of following and 2. following trajectory (Left Outer Intersection)\r\n XLOI, YLOI = intersection_of_trajectories(iti+1, iti+2)\r\n # get intersection of actual and 2. 
following trajectory (Inner Intersection), first one is calculated, the rest will be reused from outer intersections\r\n XII, YII = intersection_of_trajectories(iti, iti+2)\r\n\r\n # if the last trajectory incorporated in cycle is the last one from the net with shooting angle value of 90 degrees.\r\n # This is because with 90 degrees trajectory left outer and inner intersections will be the same, which would lead to undesired behaviour\r\n if TS[iti+2][0] == np.radians(90):\r\n # if X coordinate of last point in newly created trajectory is less than TSW, last possible area of the net was made dense enough (used to be X of highest point, but last was chosen so that when searching for cell intersecting trajectories no trajectories are added if the cell falls between 2 last trajectories with the last having 90° angle, which would cause problems in rare situations)\r\n if not np.floor(TS[iti+1][1][0][-1]/TSW):\r\n # Initial Trajectory Reversed list (X and Y coords)\r\n ITR = [TS[iti][1][0][-1::-1], TS[iti][1][1][-1::-1]]\r\n # update envelope with points from initial trajectory of last cycle, also update trajectory with shared part starting and ending point indexes (on trajectory as on envelope)\r\n TS[iti].append(update_envelope(1, ITR, XIIPR, XROI, YROI, 0))\r\n # Last Inserted Trajectory Reversed list (X and Y coords)\r\n LITR = [TS[iti+1][1][0][-1::-1], TS[iti+1][1][1][-1::-1]]\r\n # update last but one trajectory with shared part starting and ending point indexes (on trajectory as on envelope)\r\n TS[iti+1].append(update_envelope(0, LITR, 0, 0, 0, YROI))\r\n envelope[0].append(0)\r\n envelope[1].append(max(TS[-1][1][1]))\r\n # update last trajectory (90 degrees one) with shared part starting and ending point indexes (on trajectory as on envelope)\r\n TS[iti + 2].append([[TS[iti+2][1][1].index((max(TS[iti+2][1][1]))), TS[iti+2][1][1].index((max(TS[iti+2][1][1])))], [len(envelope[0])-1, len(envelope[0])-1]])\r\n break\r\n # if not dense enough, density will be accomplished with new iteration\r\n else:\r\n continue\r\n\r\n # coordinates of midpoint on line between outer intersections (Outer Midpoint)\r\n XOM, YOM = (XROI + XLOI) / 2, (YROI + YLOI) / 2\r\n # following trajectory reversed list (X and Y coords)\r\n FTR = [TS[iti + 1][1][0][-1::-1], TS[iti + 1][1][1][-1::-1]]\r\n # finds intersection of arc distance and particular segment on the arc\r\n for i in range(len(FTR[0])-1):\r\n XA, YA = calculate_intersection(XOM, YOM, XII, YII, FTR[0][i], FTR[1][i], FTR[0][i+1], FTR[1][i+1])\r\n # checking if the intersection is really on the segment, if so, the Distance of Arc from Inner Intersection is calculated\r\n if FTR[0][i] >= XA >= FTR[0][i+1]:\r\n DAII = ((XA-XII)**2 + (YA-YII)**2)**(1/2)\r\n break\r\n # controls - compare horizontal distance of intersections and distance of arc from inner intersection\r\n if round(np.abs(XROI - XLOI)/TSW) and round(DAII/TSW):\r\n continue\r\n else:\r\n # with each shooting point the amount of these inserted auxiliary trajectories would almost double which could create pointless amount of trajectories\r\n del TS[iti+1]\r\n # initial trajectory reversed list (X and Y coords)\r\n ITR = [TS[iti][1][0][-1::-1], TS[iti][1][1][-1::-1]]\r\n # update envelope and update trajectory list with starting and ending point index of shared part between trajectory and envelope, indexes will be used when looking for cell neighbouring trajectories\r\n TS[iti].append(update_envelope(1, ITR, XIIPR, XII, YII, 0))\r\n # previous intersection for next cycle is assigned\r\n XIIPR, 
YIIPR = XII, YII\r\n # at least one of the conditions was met and the cycle can jump to next initial trajectory\r\n iti += 1\r\n # if the cycle comes to last trajectory, it breaks as there is no following trajectory\r\n if TS[iti][0] == AL[-1]:\r\n # even last trajectory needs to be updated with indexes\r\n ITR = [TS[iti][1][0][-1::-1], TS[iti][1][1][-1::-1]]\r\n TS[iti].append(update_envelope(0, ITR, 0, 0, 0, YII))\r\n break\r\n\r\ndef intersection_of_trajectories(t1i,t2i):\r\n \"\"\"Looks for intersection between two trajectories and returns its X and Y coordinates.\r\n t1i and t2i are indexes of first and second trajectory between which the intersection is sought.\"\"\"\r\n # reversed lists of trajectories' coordinates as the algorithm starts from end points, TX1 = X coordinates of 1. trajectory\r\n T1X, T1Y = TS[t1i][1][0][-1::-1], TS[t1i][1][1][-1::-1]\r\n T2X, T2Y = TS[t2i][1][0][-1::-1], TS[t2i][1][1][-1::-1]\r\n # X and Y coords of intersection (to be compared e.g. with XPI), i2s stands for radius around i2 (or index)\r\n XI = YI = i2s = False\r\n # following trajectory segment radius where the intersection will be sought\r\n ftsr = [0, 1, -1, 2, -2]\r\n # cycle that starts comparing coords of actual trajectory points from the end\r\n for i1 in range(1, len(T1X)):\r\n # cycle that starts comparing coords of following trajectory points from the end\r\n for i2 in range(1, len(T2X)):\r\n if T2Y[i2] > T1Y[i1] and T1X[i1] < T2X[i2 - 1]:\r\n # when potentially intersecting segment of following trajectory is found, because of rare situations its 2 following and preceding segments have to be assessed\r\n for i2s in ftsr:\r\n XI, YI = calculate_intersection(T1X[i1 - 1], T1Y[i1 - 1], T1X[i1], T1Y[i1],\r\n T2X[i2 - 1 + i2s], T2Y[i2 - 1 + i2s], T2X[i2 + i2s],\r\n T2Y[i2 + i2s])\r\n # making sure the intersection is between existing segments, not on their extension\r\n if T1X[i1 - 1] >= XI >= T1X[i1] and T2X[i2 - 1 + i2s] >= XI >= T2X[i2 + i2s]:\r\n break\r\n XI = YI = False\r\n if XI or i2s == ftsr[-1]:\r\n # i2s makes sure that if the intersection is not found in the 2 segment radius of following trajectory, index for segment of actual trajectory has to increase\r\n i2s = False\r\n break\r\n if XI:\r\n break\r\n return XI, YI\r\n\r\ndef calculate_intersection(x1, y1, x2, y2, x3, y3, x4, y4):\r\n \"\"\"Calculates intersection of 2 line segments and returns its X and Y coordinates\"\"\"\r\n x1, y1, x2, y2, x3, y3, x4, y4 = x1, y1, x2, y2, x3, y3, x4, y4\r\n XI = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / ((x1 - x2) * (y3 - y4) - (y1 - y2) *\r\n (x3 - x4))\r\n YI = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / ((x1 - x2) * (y3 - y4) - (y1 - y2) *\r\n (x3 - x4))\r\n return XI, YI\r\n\r\ndef update_envelope(method, ITR, XIIPR, XII, YII, YROI):\r\n \"\"\"Updates envelope with parts of trajectories and returns starting and ending indexes of points on part of\r\n trajectory that is shared with the envelope and also indexes of points on envelope that are boundaries of the\r\n shared trajectory part (previous and actual inner intersections). 
Trajectory points have to be between the envelope\r\n points.\"\"\"\r\n global envelope\r\n # for regular parts\r\n if method:\r\n # on reversed trajectory, index of first point that will be incorporated within the envelope is found\r\n for i in range(len(ITR[0]) - 1):\r\n if ITR[0][i] >= XIIPR >= ITR[0][i + 1]:\r\n # Envelope Part Starting Point Index\r\n EPSPI = i + 1\r\n break\r\n # on reversed trajectory, index of last point that will be incorporated within the envelope is found\r\n for i in range(EPSPI - 1, len(ITR[0]) - 1):\r\n if ITR[0][i] >= XII >= ITR[0][i + 1]:\r\n # Envelope Part Ending Point Index\r\n EPEPI = i\r\n break\r\n # starting envelope index as the index of envelope's last point before update\r\n SEI = len(envelope[0]) - 1\r\n # condition for rare situation where both inner intersections could fall within one segment of trajectory\r\n if EPEPI >= EPSPI:\r\n # envelope is updated with all points between starting and ending point\r\n for i in range(EPSPI, EPEPI + 1):\r\n envelope[0].append(ITR[0][i])\r\n envelope[1].append(ITR[1][i])\r\n # ending envelope index as the index of envelope's last point after update\r\n EEI = len(envelope[0])\r\n # lastly, envelope is updated with the inner intersection point\r\n envelope[0].append(XII)\r\n envelope[1].append(YII)\r\n # return indexes of the first and last point of shared part, for trajectory direction of incrementing is from the left (shooting point), for envelope it's vice-versa\r\n return [[len(ITR[0]) - 1 - EPSPI, len(ITR[0]) - 1 - EPEPI], [SEI, EEI]]\r\n # for part of last but one trajectory when alpha of the last one is equal to 90° or part of last trajectory whose alpha is not equal to 90°\r\n else:\r\n # starting envelope index as the index of envelope's last point before update, for last but one trajectory\r\n SEI = len(envelope[0]) - 1\r\n # starting and ending last but one trajectory point indexes\r\n EPSPI = 0\r\n # last points from last inserted trajectory and highest point of last trajectory are appended to envelope\r\n for i in range(len(ITR[0])):\r\n if ITR[1][i] > YROI:\r\n if not EPSPI:\r\n EPSPI = i\r\n envelope[0].append(ITR[0][i])\r\n envelope[1].append(ITR[1][i])\r\n if ITR[0][i] == ITR[0][ITR[1].index((max(ITR[1])))]:\r\n break\r\n EPEPI = i - 1\r\n # ending envelope index as the index of envelope's last point after update, for last but one trajectory\r\n EEI = len(envelope[0]) - 1\r\n return [[len(ITR[0]) - 1 - EPSPI, len(ITR[0]) - 1 - EPEPI], [SEI, EEI]]\r\n\r\ndef create_trajectory_fields():\r\n \"\"\"Creates ATF - Ascending Trajectory Field and DTF - Descending Trajectory Field. 
Lists are made into polygons.\"\"\"\r\n global ATF_polygon, DTF_polygon\r\n ATF_polygon, DTF_polygon = None, None\r\n ATF, DTF = [[], []], [[], []]\r\n # in this case DTF does not exist\r\n if len(envelope[0]) == 0:\r\n ATF[0] = TS[0][1][0] + TS[-1][1][0][-1::-1]\r\n ATF[1] = TS[0][1][1] + TS[-1][1][1][-1::-1]\r\n # create polygon out of the list\r\n ATF_polygon = create_polygon_from_coords_list(ATF)\r\n # in this case both ATF and DTF are created\r\n else:\r\n # find out index of coords from last trajectory, where envelope connects with it\r\n for i in range(len(TS[-1][1][0])-1):\r\n # for last trajectory with shooting angle 90 degrees this stops immediately, to ATF is added first point of last trajectory, which still creates correct polygon\r\n if TS[-1][1][0][i] <= envelope[0][-1] <= TS[-1][1][0][i+1]:\r\n break\r\n ATF[0] = TS[0][1][0] + envelope[0] + TS[-1][1][0][i::-1]\r\n ATF[1] = TS[0][1][1] + envelope[1] + TS[-1][1][1][i::-1]\r\n # but for DTF with last trajectory shooting angle being 90 degrees, i is edited so that only last point down at min DEM height is added to polygon\r\n i = -2 if TS[-1][0] == np.radians(90) else i\r\n DTF[0] = envelope[0] + TS[-1][1][0][i+1:] + envelope[0][:1]\r\n DTF[1] = envelope[1] + TS[-1][1][1][i+1:] + envelope[1][:1]\r\n # create polygons out of the lists\r\n ATF_polygon = create_polygon_from_coords_list(ATF)\r\n DTF_polygon = create_polygon_from_coords_list(DTF)\r\n\r\ndef create_polygon_from_coords_list(x_y_list):\r\n \"\"\"Creates ring from list of X and Y coordinates, then uses ring to create polygon which is returned.\"\"\"\r\n # create ring\r\n ring = ogr.Geometry(ogr.wkbLinearRing)\r\n for x, y in zip(x_y_list[0], x_y_list[1]):\r\n ring.AddPoint(x, y)\r\n # create polygon\r\n polygon = ogr.Geometry(ogr.wkbPolygon)\r\n polygon.AddGeometry(ring)\r\n return polygon\r\n\r\ndef assign_values_to_throwshed(k):\r\n \"\"\"Assigns/adds values into throwshed arrays.\"\"\"\r\n # cycle going through every single cell of DEM\r\n for i in range(DA.shape[0]):\r\n for j in range(DA.shape[1]):\r\n\r\n\r\n\r\n start1 = timer()\r\n\r\n\r\n\r\n # with multiple shooting points nodata value can already be assigned to the cell, therefore the algorithm jumps to following cell\r\n if TA[0][i][j] == NDV:\r\n continue\r\n # nodata value is assigned to both arrays for both bands (ATF and DTF)\r\n if not k and DA[i][j] == NDV:\r\n TA[0][i][j] = TA[1][i][j] = NDV\r\n continue\r\n # for simple throwshed, if cell already has value 1, cycle continues with following cell, otherwise for cumulative throwshed, cell is assessed\r\n if k and CT == 0 and TA[0][i][j]:\r\n continue\r\n # if viewshed is incorporated and particular cell is not visible, nothing is added to throwshed cell, and for visible cells the algorithm proceeds with assessment of cells\r\n if UV:\r\n if not VA[i][j]:\r\n continue\r\n # calculate coordinates of cell's middle point and its horizontal distance from shooting point\r\n X_coor_cell = DGT[0]+(j+1/2)*DGT[1]\r\n Y_coor_cell = DGT[3]+(i+1/2)*DGT[5]\r\n cell_distance = ((SP.GetX() - X_coor_cell)**2 + (SP.GetY()-Y_coor_cell)**2)**(1/2)\r\n # create cell point with relative coordinates in the plane of trajectories and find out whether it's within the field, if so, further actions are conducted\r\n relative_cell = ogr.Geometry(ogr.wkbPoint)\r\n # also create cell point with absolute coordinates in the plane of projection plane, will be used in terrain comparison to calculate azimuth\r\n absolute_cell = ogr.Geometry(ogr.wkbPoint)\r\n 
relative_cell.AddPoint(cell_distance, float(DA[i][j]))\r\n absolute_cell.AddPoint(X_coor_cell, Y_coor_cell)\r\n # detect cell within the fields and call function to find cell intersecting trajectory and to determine whether the cell is reachable without any obstacles\r\n if ATF_polygon.Intersects(relative_cell):\r\n if TM:\r\n if find_intersecting_trajectory(1, relative_cell, absolute_cell):\r\n TA[0][i][j] += 1\r\n # for the case only cell's presence within the field is assessed\r\n else:\r\n TA[0][i][j] += 1\r\n # can be None\r\n if DTF_polygon:\r\n if DTF_polygon.Intersects(relative_cell):\r\n if TM:\r\n if find_intersecting_trajectory(-1, relative_cell, absolute_cell):\r\n TA[1][i][j] += 1\r\n # for the case only cell's presence within the field is assessed\r\n else:\r\n TA[1][i][j] += 1\r\n\r\n end1 = timer()\r\n print(f'Cell {i} {j} took {end1-start1} seconds.')\r\n print()\r\n\r\ndef find_intersecting_trajectory(dir, relative_cell, absolute_cell):\r\n \"\"\"Finds trajectory that intersects the cell (or is close enough, within allowed distance). For ATF cycle\r\n increments from start to end of trajectory set and viceversa for DTF. Returns True if the cell is accessible\r\n or False if the cell is not accessible without any obstacles - this is determined further function.\"\"\"\r\n\r\n\r\n start4 = timer()\r\n\r\n\r\n # Most Distant Trajectory Index\r\n MDTI = TS.index((max(TS, key=lambda x: x[1][0][-1])))\r\n if dir == -1:\r\n # Zooming Index List containing indexes of assessed trajectories, for descending direction, first index is the one of the trajectory with furthest reach\r\n ZIL = [MDTI, int(MDTI + (len(TS)-MDTI) / 2), len(TS) - 1]\r\n else:\r\n ZIL = [0, int(len(TS) / 2), len(TS) - 1]\r\n # cycle for zooming into the polygon of cell neighbouring trajectories\r\n while True:\r\n for j in [0, 1]:\r\n if dir == -1:\r\n # polygon also consists of envelope (starting and ending indexes of points of shared parts by trajectory and envelope are used)\r\n polygon = create_polygon_from_coords_list([envelope[0][TS[ZIL[j]][2][1][0]+1:TS[ZIL[j+1]][2][1][0]+1] + TS[ZIL[j + 1]][1][0][TS[ZIL[j+1]][2][0][0]+1:] + TS[ZIL[j]][1][0][-1:TS[ZIL[j]][2][0][0]-1:-1], envelope[1][TS[ZIL[j]][2][1][0]+1:TS[ZIL[j+1]][2][1][0]+1] + TS[ZIL[j + 1]][1][1][TS[ZIL[j+1]][2][0][0]+1:] + TS[ZIL[j]][1][1][-1:TS[ZIL[j]][2][0][0]-1:-1]])\r\n else:\r\n # very basic situation, envelope needs not to be used\r\n if ZIL[j+1] <= MDTI:\r\n polygon = create_polygon_from_coords_list([TS[ZIL[j]][1][0] + TS[ZIL[j + 1]][1][0][-1::-1], TS[ZIL[j]][1][1] + TS[ZIL[j + 1]][1][1][-1::-1]])\r\n # situation where at least second trajectory is already intersecting other trajectories with further reach\r\n else:\r\n # situation where first of the trajectories is the one with furthest reach or the ones following\r\n if ZIL[j] >= MDTI:\r\n polygon = create_polygon_from_coords_list([TS[ZIL[j]][1][0][:TS[ZIL[j]][2][0][1]] + envelope[0][TS[ZIL[j]][2][1][1]:TS[ZIL[j+1]][2][1][1]+1] + TS[ZIL[j + 1]][1][0][TS[ZIL[j+1]][2][0][1]-1::-1], TS[ZIL[j]][1][1][:TS[ZIL[j]][2][0][1]] + envelope[1][TS[ZIL[j]][2][1][1]:TS[ZIL[j+1]][2][1][1]+1] + TS[ZIL[j + 1]][1][1][TS[ZIL[j+1]][2][0][1]-1::-1]])\r\n # situation where first of the trajectories precedes trajectory with furthest reach\r\n else:\r\n polygon = create_polygon_from_coords_list([TS[ZIL[j]][1][0] + envelope[0][:TS[ZIL[j+1]][2][1][0]+1] + TS[ZIL[j + 1]][1][0][TS[ZIL[j+1]][2][0][0]::-1], TS[ZIL[j]][1][1] + envelope[1][:TS[ZIL[j+1]][2][1][0]+1] + TS[ZIL[j + 1]][1][1][TS[ZIL[j+1]][2][0][0]::-1]])\r\n if 
polygon.Intersects(relative_cell):\r\n break\r\n if abs(ZIL[j + 1] - ZIL[j]) == 1:\r\n i = ZIL[j]\r\n break\r\n ZIL = [ZIL[j], int(ZIL[j] + (ZIL[j + 1] - ZIL[j]) / 2), ZIL[j + 1]]\r\n\r\n\r\n end4 = timer()\r\n print(f'Zooming took {end4 - start4} seconds.')\r\n\r\n\r\n start5 = timer()\r\n\r\n # auxiliary indexes\r\n i1, i2 = -1, -1\r\n # Intersecting Trajectory Found list informing whether the previous and following intersecting trajectories were already found and compared with the terrain\r\n ITF = [1,1]\r\n # Inserted Trajectories Starting Index from which newly added trajectories will be removed right before returning from this function\r\n ITSI = -1\r\n # Inserted Trajectories Index Span across which the newly added trajectories will be removed\r\n ITIS = 0\r\n # at first, 2 surrounding trajectories are found by making polygon out of them and asking whether the cell lies within\r\n while True:\r\n # if the cell lies within, normals are computed to assess the smallest perpendicular distance from trajectory to cell\r\n if polygon.Intersects(relative_cell):\r\n # Inserted Trajectories Starting Index\r\n if ITSI == -1:\r\n ITSI = i+1\r\n # first condition is just to recycle normal1/2 computed in previous cycle that will be the same if first/second half of previous polygon is the one the cell lies within\r\n if i1 - i and ITF[0]:\r\n # compute normal from previous/following trajectory segment to cell\r\n normal1 = compute_normal(i, relative_cell.GetX(), relative_cell.GetY())\r\n # if cell is not close enough to trajectory to consider it as piercing trajectory, following trajectory is tested. If it is close enough, i will be used as the index of intersecting trajectory in the terrain comparison\r\n if not np.round(normal1 / TSW):\r\n if trajectory_terrain_comparison(i, relative_cell, absolute_cell):\r\n del TS[ITSI:ITSI + ITIS]\r\n\r\n\r\n end5 = timer()\r\n print(f'Intersecting took {end5 - start5} seconds.')\r\n\r\n\r\n return True\r\n # 1 is changed to 0 so next time the condition is eluded\r\n ITF[0] -= 1\r\n if i2 - i and ITF[1]:\r\n normal2 = compute_normal(i+1, relative_cell.GetX(), relative_cell.GetY())\r\n if not np.round(normal2 / TSW):\r\n if trajectory_terrain_comparison(i+1, relative_cell, absolute_cell):\r\n del TS[ITSI:ITSI + ITIS]\r\n\r\n\r\n end5 = timer()\r\n print(f'Intersecting took {end5 - start5} seconds.')\r\n\r\n\r\n return True\r\n ITF[1] -= 1\r\n # if trajectories from both sides are intersecting and none of them returned True meaning reachable cell, cell is considered unreachable\r\n if not any(ITF):\r\n del TS[ITSI:ITSI+ITIS]\r\n\r\n\r\n end5 = timer()\r\n print(f'Intersecting took {end5 - start5} seconds.')\r\n\r\n\r\n return False\r\n # ratio for angle addition to angle of previous trajectory, if one of the trajectories is already intersecting, second one is being searched by halving the angle difference of surrounding trajectories, because by normal ratio new trajectory can fall on wrong side of the cell, to the one that has already been assessed, which will slow down the computation\r\n ratio = 1 / 2 if not ITF[0] or not ITF[1] else normal1 / (normal1 + normal2)\r\n # new alpha calculated from the ratio and new trajectory is generated\r\n new_alpha = TS[i][0] + (TS[i + 1][0] - TS[i][0]) * ratio\r\n TS.insert(i + 1, [new_alpha, generate_trajectory(new_alpha)])\r\n # index i needs to be set one less to start again at the same trajectory\r\n # auxiliary index i1/2 to find out if the normal1/2 was already computed, will be used in next iteration\r\n i -= 1\r\n 
i1 = i\r\n i2 = i + 2\r\n # 1 trajectory added to the index span\r\n ITIS += 1\r\n i += 1\r\n # create polygon with inserted trajectory/ies\r\n polygon = create_polygon_from_coords_list([TS[i][1][0] + TS[i + 1][1][0][-1::-1], TS[i][1][1] + TS[i + 1][1][1][-1::-1]])\r\n\r\ndef compute_normal(i, X_relative_cell, Y_relative_cell):\r\n \"\"\"Computes perpendicular distance from closest segment of given trajectory and returns its size as well as index\r\n of first point of closest segment.\"\"\"\r\n\r\n start2 = timer()\r\n\r\n\r\n # Trajectory Point - Cell Distance List\r\n TPCDL = [((TS[i][1][0][j] - X_relative_cell) ** 2 + (TS[i][1][1][j] - Y_relative_cell) ** 2) ** (1 / 2) for j in range(len(TS[i][1][0]))]\r\n # index of closest point to cell is found\r\n j = TPCDL.index(min(TPCDL))\r\n # index of first of two closest points to cell is found\r\n if j != 0 and j != len(TPCDL) - 1:\r\n if TPCDL[j - 1] < TPCDL[j + 1]:\r\n j -= 1\r\n elif j == len(TPCDL) - 1:\r\n j -= 1\r\n # if closest point to cell mid point is closer than allowed distance, this distance is returned as there is no need to compute perpendicular distance to whole segment which can be only smaller than the point-cell distance\r\n if not np.round(min(TPCDL) / TSW):\r\n\r\n\r\n\r\n\r\n end2 = timer()\r\n print(f'Normal took {end2 - start2} seconds.')\r\n\r\n\r\n return min(TPCDL)\r\n # calculate perpendicular distance (normal) from closest trajectory segment\r\n a = TPCDL[j]\r\n b = TPCDL[j + 1]\r\n c = ((TS[i][1][0][j] - TS[i][1][0][j + 1]) ** 2 + (TS[i][1][1][j] - TS[i][1][1][j + 1]) ** 2) ** (1 / 2)\r\n s = (a + b + c) / 2\r\n area = (s * (s - a) * (s - b) * (s - c)) ** (1 / 2)\r\n\r\n\r\n end2 = timer()\r\n print(f'Normal took {end2-start2} seconds.')\r\n\r\n return area / c * 2\r\n\r\ndef trajectory_terrain_comparison(i, relative_cell, absolute_cell):\r\n \"\"\"Computes coordinates of terrain corresponding to each trajectory point and returns True or False depending\r\n on the result of terrain and trajectory point heights comparison.\"\"\"\r\n\r\n\r\n start3 = timer()\r\n\r\n\r\n # calculate azimuth of trajectory (shooting point to cell point), there is a chance of Y difference to be 0, therefore the exception\r\n dX = absolute_cell.GetX() - SP.GetX()\r\n dY = absolute_cell.GetY() - SP.GetY()\r\n # for the case where shooting point and middle point of assessed cell are same, automatically reachable\r\n if not dX and not dY:\r\n return True\r\n try:\r\n Azimuth = np.arctan(dX / dY)\r\n except ZeroDivisionError:\r\n # for the case of dY being 0, making the division impossible\r\n if dX > 0:\r\n Azimuth = np.radians(90)\r\n else:\r\n Azimuth = np.radians(270)\r\n # azimuth needs to be recalculated accordingly to correct quadrant\r\n if dY > 0:\r\n if dX < 0:\r\n Azimuth += np.radians(360)\r\n elif dY < 0:\r\n Azimuth += np.radians(180)\r\n # cycle iterates from first point of trajectory to the first point of segment closest to the cell point\r\n for X, Y in zip(TS[i][1][0],TS[i][1][1]):\r\n # if compared point is already above/below destination cell's area or beyond, cell is considered reachable\r\n if X >= relative_cell.GetX()-TSW/2:\r\n\r\n\r\n end3 = timer()\r\n print(f'Terrain comparison took {end3 - start3} seconds.')\r\n\r\n\r\n return True\r\n X_compare_point = SP.GetX() + X * np.sin(Azimuth)\r\n Y_compare_point = SP.GetY() + X * np.cos(Azimuth)\r\n Z_compare_point = int_function(X_compare_point, Y_compare_point)\r\n # if trajectory point is on or below terrain, False is returned\r\n if Y <= Z_compare_point:\r\n\r\n\r\n 
end3 = timer()\r\n print(f'Terrain comparison took {end3 - start3} seconds.')\r\n\r\n\r\n return False\r\n\r\ndef create_viewshed():\r\n \"\"\"Computes viewshed for one point which is saved temporarily, then loaded as an array.\"\"\"\r\n global VDS, VB, VA, VGT\r\n # generate viewshed and save it as temporary file to throwshed directory\r\n gdal.ViewshedGenerate(srcBand=DB, driverName='GTiff', targetRasterName=TOF + \"\\\\viewshed.tif\", creationOptions=[], observerX=SP.GetX(), observerY=SP.GetY(), observerHeight=EH, targetHeight=TH, visibleVal=1, invisibleVal=0, outOfRangeVal=0, noDataVal=NDV, dfCurvCoeff=0.85714, mode=2, maxDistance=0)\r\n # open viewshed raster, Viewshed Array will be crucial\r\n VDS, VB, VA, VGT, ndv = get_raster_from_file(TOF + \"\\\\viewshed.tif\")\r\n\r\ndef plot_trajectory(relative_cell, absolute_cell,jj,row,col,dir, poly_list, j, zoom):\r\n import matplotlib.pyplot as plt # for plotting graphs\r\n plt.figure(figsize=(32, 18))\r\n for i in range(len(TS)):\r\n # plotting the points\r\n #plt.plot(TS[i][1][0], TS[i][1][1], markersize=5, linewidth=1, label=TS[i][0]/np.pi*180)\r\n plt.plot(TS[i][1][0], TS[i][1][1], '.', markersize=1)\r\n\r\n #plt.plot(envelope[0], envelope[1], '-', linewidth=1)\r\n #plt.plot([0, max(TS, key=lambda x: x[1][0][-1])[1][0][-1]], [DMAXH, DMAXH], '-', linewidth=1)\r\n\r\n # end_cell = ogr.Geometry(ogr.wkbPoint)\r\n # end_cell.AddPoint(-488475.5,-1259010.5)\r\n profile = get_profile(absolute_cell)\r\n plt.plot(profile[0], profile[1], '-', linewidth=3)\r\n\r\n # plt.plot(TS[jj][1][0], TS[jj][1][1], '-', linewidth=1)\r\n # plt.plot(TS[jj+1][1][0], TS[jj+1][1][1], '-', linewidth=1)\r\n\r\n plt.plot(poly_list[0], poly_list[1], '-', linewidth=2)\r\n\r\n plt.plot(relative_cell.GetX(), relative_cell.GetY(), 'o', markersize=2)\r\n\r\n # plt.plot(ATF[0], ATF[1], '-', linewidth=2)\r\n # plt.plot(DTF[0], DTF[1], '-', linewidth=2)\r\n print(i)\r\n #plt.plot(temp_xyp[0], temp_xyp[1], 'r.', markersize=2)\r\n # xpar = max(TS, key=lambda x: x[1][0][-1])[1][0][-1]\r\n # ypar = max(TS[-1][1][1])\r\n # a = -ypar/xpar**2\r\n # b = 0\r\n # c = ypar\r\n # x = []\r\n # y = []\r\n # for i in range(0,int(xpar)+1):\r\n # x.append(i)\r\n # y.append(a*i**2+b*i+c)\r\n # plt.plot(x, y, 'b*', markersize=1)\r\n\r\n # axis limits, axis labels and equal scale in both axis directions\r\n plt.xlim(0, 155)\r\n plt.ylim(DMINH, max(TS[-1][1][1]))\r\n\r\n # plt.xlim(min(poly_list[0]), max(poly_list[0]))\r\n # plt.ylim(min(poly_list[1]), max(poly_list[1]))\r\n\r\n\r\n plt.xlabel(\"distance [m]\")\r\n plt.ylabel(\"height [m]\")\r\n plt.gca().set_aspect('equal', adjustable='box')\r\n\r\n # function to show the plot\r\n #plt.legend()\r\n\r\n\r\n plt.savefig(f'filename{row}_{col}_{dir}_zoom{zoom}_j{j}.png', dpi=300)\r\n\r\n start = timer()\r\n plt.show()\r\n end = timer()\r\n print('plotting time:', end-start)\r\n\r\ndef get_profile(end_cell):\r\n dX = end_cell.GetX() - SP.GetX()\r\n dY = end_cell.GetY() - SP.GetY()\r\n try:\r\n Azimuth = np.arctan(dX / dY)\r\n except ZeroDivisionError:\r\n # for the case of dY being 0, making the division impossible\r\n if dX > 0:\r\n Azimuth = np.radians(90)\r\n else:\r\n Azimuth = np.radians(270)\r\n # azimuth needs to be corrected according to the quadrant\r\n if dY > 0:\r\n if dX < 0:\r\n Azimuth += np.radians(360)\r\n elif dY < 0:\r\n Azimuth += np.radians(180)\r\n profile = [[0],[SP.GetZ()-IH]]\r\n s = 0\r\n cell_dist = ((SP.GetX() - end_cell.GetX()) ** 2 + (SP.GetY() - end_cell.GetY()) ** 2) ** (1 / 2)\r\n while True:\r\n s += 
0.5\r\n X_compare_point = SP.GetX() + s * np.sin(Azimuth)\r\n Y_compare_point = SP.GetY() + s * np.cos(Azimuth)\r\n Z_compare_point = int_function(X_compare_point, Y_compare_point)\r\n profile[0].append(s)\r\n profile[1].append(Z_compare_point)\r\n if s > cell_dist:\r\n break\r\n return profile\r\n\r\n#######################################################################\r\n## PATHS\r\ndem_path = r\"D:\\School\\STU_SvF_BA\\Term11\\Dizertacna_praca\\Throwshed2\\data\\dem\\dmr_clip.tif\" #path to DEM\r\npoint_layer_path = r\"D:\\School\\STU_SvF_BA\\Term11\\Dizertacna_praca\\Throwshed2\\data\\point\\points1.shp\" #path to point layer\r\nline_layer_path = r\"D:\\School\\STU_SvF_BA\\Term11\\Dizertacna_praca\\Throwshed2\\data\\line\\lines1.shp\" #path to line layer\r\nthrowshed_output_folder = r\"D:\\School\\STU_SvF_BA\\Term11\\Dizertacna_praca\\Throwshed2\\data\\throwshed\" #path to the folder where the file will be saved\r\nthrowshed_file = r\"throwshedtestnew\" #name of output throwshed file\r\n\r\n## SETTINGS\r\nthrowshed_mode = 1 #what type of throwshed will be calculated, simple safety zone (cells within safety field) = 0, regular throwshed with trajectory assessment = 1\r\nuse_viewshed = 0 #utilization of viewshed that will clip the throwshed, No = 0, Yes = 1\r\nuse_lines = 0 #utilization of line layer, where lines serve as obstacles or walls and will be burnt into DEM, No = 0, Yes = 1\r\nband_number = 1 #selected band from DEM, default = 1\r\ninterpolation = 0 #interpolation of DEM to calculate altitude of shooting point or compare points within the DEM-to-trajectory comparison function, Nearest neighbour = 0, Bilinear = 1\r\ncumulative_throwshed = 1 #Calculate cumulative throwshed? No = 0, Yes = 1 (Appropriate with more than 1 shooting place)\r\nEPSG = 8353 #EPSG code for CRS of output throwshed layer and other temporary results, must be same as DEM's EPSG\r\n\r\n## VARIABLES\r\ninitial_height = 1.7 #initial height of projectile above DEM when shot [m]\r\nalpha_min = -90.0 #minimum of vertical angle range at which the projectile is shot [°]\r\nalpha_max = 15.0 #maximum of vertical angle range at which the projectile is shot [°]\r\ngravitational_acceleration = -9.81 #gravitational acceleration [m/s^2]\r\ninitial_velocity = 50 #initial velocity of projectile when shot [m/s]\r\nair_density = 1.225 #air density [kg/m^3]\r\ndrag_coefficient = 0.47 #aerodynamic drag coefficient of projectile\r\ncross_sectional_area = 0.001963 #cross-sectional area of the projectile [m^2]\r\nmass = 0.100 #projectile mass [kg]\r\ndalpha = 5 #step in vertical angle range [°]\r\ntrajectory_segment_width = None #distance step, at which trajectory's points will be saved and compared to DEM [m], None = adjusted to DEM resolution (cell's size), any float/int value = customized distance step\r\neyes_height = 1.6 #shooter eye height above DEM for viewshed [m]\r\ntarget_height = 1.7 #target height for viewshed [m]\r\nwall_height = 4.0 #obstacle/wall height (if obstacle option is used) [m]\r\nconstant = 1 #constant multiplying the drag coefficient within wobble distance of an arrow\r\narea_addition = 0.0 #average addition to cross-sectional area of an arrow within wobble distance [m^2]\r\nwobble_distance = 40 #wobble distance - distance at which an arrow stops wobbling [m]\r\n\r\n\r\n\r\nstart = timer()\r\n\r\n\r\nmain(dem_path, point_layer_path, line_layer_path, throwshed_output_folder, throwshed_file, throwshed_mode, use_viewshed, use_lines, EPSG,\r\n cumulative_throwshed, initial_height, initial_velocity, drag_coefficient, 
cross_sectional_area, mass,\r\n eyes_height, target_height, wall_height, constant, area_addition, wobble_distance, band_number=band_number,\r\n interpolation=interpolation, alpha_min=alpha_min, alpha_max=alpha_max,\r\n gravitational_acceleration=gravitational_acceleration, air_density=air_density, dalpha=dalpha,\r\n trajectory_segment_width=trajectory_segment_width)\r\n\r\nend = timer()\r\nprint('Duration:', end - start)\r\n","repo_name":"Tadeo98/throwshed2","sub_path":"throwshed2.py","file_name":"throwshed2.py","file_ext":"py","file_size_in_byte":51344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
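The azimuth computation in trajectory_terrain_comparison() and get_profile() above duplicates, with a manual quadrant correction and a ZeroDivisionError branch, what numpy's arctan2 provides directly. A minimal sketch of the equivalence (azimuth() is a hypothetical helper, not part of the repository):

```python
import numpy as np

def azimuth(dX, dY):
    # Clockwise angle from the +Y axis in [0, 2*pi), matching the
    # arctan + quadrant-correction logic used in throwshed2.py.
    return np.arctan2(dX, dY) % (2 * np.pi)

# A target due west of the shooting point (dX < 0, dY == 0) -> 270 deg,
# the same value the except-branch in get_profile() assigns.
print(np.degrees(azimuth(-1.0, 0.0)))  # 270.0
```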
+{"seq_id":"32616053119","text":"#exec in python 3 shell: exec(open(\"infix1.py\").read())\n\ndef eval_infix_sum(expr, pos):\n\t\"\"\"evaluate a sum expression (zero or more additions and subtractions)\"\"\"\n\n\t#replaces the first value with the sum/difference\n\tif (expr[pos] == '+'):\n\t\texpr[pos-1] = int(expr[pos-1]) + int(expr[pos+1]) \n\n\telif (expr[pos] == '-'):\n\t\texpr[pos-1] = int(expr[pos-1]) - int(expr[pos+1])\n\n\t#deletes the operator sign and the second number\n\tdel expr[pos : pos+2]\n\treturn(expr) \n\ndef eval_infix_product(expr, pos):\n\t\"\"\"evaluate a product expression (zero or more multiplications/divisions)\"\"\"\n\n\t#replaces the first value with the product\n\tif (expr[pos] == '*'):\n\t\texpr[pos-1] = int(expr[pos-1]) * int(expr[pos+1]) \n\n\telif (expr[pos] == '/'):\t\t\n\t\texpr[pos-1] = int(expr[pos-1]) // int(expr[pos+1])\n\n\t#deletes the operator sign and the second number\n\tdel expr[pos : pos+2]\n\treturn(expr)\n\ndef eval_infix_factor(expr, pos):\n\t\"\"\"evaluate a factor (number or parenthesized sub-expression)\"\"\"\n\t#this function figures out whats inside the parentheses, creates a new string for that part and evaluates that substring\n\n\t#the number of open/closed parentheses. this is necessary to make sure nested parentheses work\n\topenparen = 1\n\tcloseparen = 0\n\t#position for the substring that will be evaluated\n\tendpos = pos\n\n\t#breaks when the currently open parentheses are closed\n\twhile (openparen != closeparen):\n\n\t\tendpos += 1\n\n\t\tif expr[endpos] == '(':\n\t\t\topenparen += 1\n\t\telif expr[endpos] == \")\":\n\t\t\tcloseparen += 1\n\n\t#gather the contents of the brackets into a new list which will be evaluated. should work with nested parentheses\n\tsubexpr = expr[(pos+1) : endpos]\n\tans = eval_infix_list(subexpr)\n\n\texpr[pos] = ans\n\tdel expr[pos+1 : endpos+1]\n\n\treturn expr\n\ndef eval_infix(expr): \n\n\texpr = eval_infix_list(expr.split())\n\treturn expr\n\ndef eval_infix_list(expr):\n\n\t#adds a semicolon making the string easier to work with\n\tif expr[-1] != ';':\n\t\texpr += ';'\n\t\n\t#i tried using a \"for pos in range\" thing and it didnt work so im doing this. \n\twhile ('(' in expr) or (')' in expr):\n\t\tpos = 0\n\t\twhile not(expr[pos] == '('):\n\t\t\tpos += 1\n\n\t\texpr = eval_infix_factor(expr, pos)\n\t\n\t#does all of the multiplcation and division operations until there are no more * or / signs\n\twhile ('*' in expr) or ('/' in expr):\n\t\tpos = 0\n\t\twhile not((expr[pos] == '*') or (expr[pos] == '/')):\n\t\t\tpos += 1\n\n\t\texpr = eval_infix_product (expr, pos)\n\n\n\t#does all of the add/sub operations until there are no more + or - signs\n\twhile ('+' in expr) or ('-' in expr):\n\t\tpos = 0\n\t\twhile not((expr[pos] == '+') or (expr[pos] == '-')):\n\t\t\t pos += 1\n\n\t\texpr = eval_infix_sum (expr, pos)\n\n\treturn (expr[0])","repo_name":"LeoLeoni/cmpsc122","sub_path":"previous/infix1.py","file_name":"infix1.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72776897225","text":"from time import strptime\nfrom desktopcouch.records.record import Record\n\nNOTE_RECORD_TYPE = 'http://www.freedesktop.org/wiki/Specifications/desktopcouch/note'\n# keep in sync with the above notes record type\nFIELDS = {\n # String fields\n 'note_format': 'string',\n 'title': 'string',\n 'content': 'string',\n # Date fields\n 'last_change_date': 'date',\n 'create_date': 'date'\n}\n\nclass NoteBase(Record):\n \"\"\"\n A base for Note records.\n\n Use make_note_class to create the Note class with the required fields.\n \"\"\"\n\n def __init__(self, data=None, record_id=None):\n super(NoteBase, self).__init__(\n record_id=record_id, data=data, record_type=NOTE_RECORD_TYPE)\n\ndef make_note_class(fields):\n \"\"\"Note class factory function. field_names is a list of strings.\"\"\"\n NoteClass = type('Note', (NoteBase,), {})\n for field_name in fields:\n\n def fget(self, _field_name=field_name):\n return self.get(_field_name)\n\n def fset(self, value, _field_name=field_name):\n field_type = fields[_field_name]\n if field_type == 'date':\n # Check that it is a date with the correct format\n date_value = strptime(value, \"%Y-%m-%dT%H:%M:%S\")\n if date_value is None:\n return\n self[_field_name] = value\n\n setattr(NoteClass, field_name, property(fget, fset))\n return NoteClass\n\nNote = make_note_class(FIELDS)\n","repo_name":"inxware/ert-contrib-middleware","sub_path":"target_libs/arm-linux-gnu-glibc-2.12.1-ti-blaze-ubuntu-10_10-gtk_gst/arm-linux-gnu-glibc-2.12.1-ti-blaze-ubuntu-10_10-gtk_gst/build/lib/python2.6/dist-packages/desktopcouch/notes/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"32883966449","text":"import numpy as np\nimport cv2\nimport depthai # deals with camera and its data packets\nfrom glob import glob\n\nfrom util import frame_norm\n\n\nfor device in depthai.Device.getAllAvailableDevices():\n print(f\"Device ID:{device.getMxId()}, Device state: {device.state}\")\n\nmobilenet_path = glob(\"models/*.blob\")[0] # mobileNet\n\n\ndef run():\n pipeline = depthai.Pipeline()\n\n cam_rgb = pipeline.createColorCamera()\n cam_rgb.setPreviewSize(300, 300)\n cam_rgb.setInterleaved(False)\n\n detection_nn = pipeline.createNeuralNetwork()\n detection_nn.setBlobPath(mobilenet_path)\n\n cam_rgb.preview.link(detection_nn.input)\n\n # output both rgb and nn inference to host device screen\n xout_rgb = pipeline.createXLinkOut()\n xout_rgb.setStreamName(\"rgb\")\n cam_rgb.preview.link(xout_rgb.input)\n\n xout_nn = pipeline.createXLinkOut()\n xout_nn.setStreamName(\"nn\")\n detection_nn.out.link(xout_nn.input)\n\n # intialize\n device = depthai.Device(pipeline)\n device.startPipeline()\n\n # host side queues to receive results\n q_rgb = device.getOutputQueue(\"rgb\")\n q_nn = device.getOutputQueue(\"nn\")\n\n # place-holders to consume the above result\n frame = None\n bboxes = []\n\n while True:\n in_rgb = q_rgb.tryGet()\n in_nn = q_nn.tryGet()\n\n # transform the input from rgb camera(1D array) into HWC format\n if in_rgb is not None:\n shape = (3, in_rgb.getHeight(), in_rgb.getWidth())\n frame = in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)\n frame = np.ascontiguousarray(frame)\n\n # transform the nn inputs too\n # (image_id, label, confidence, x_min, y_min, x_max, y_max)\n # the last four fields are the bouding boxes\n if in_nn is not None:\n bboxes = np.array(in_nn.getFirstLayerFp16())\n bboxes = bboxes[: np.where(bboxes == -1)[0][0]]\n bboxes = bboxes.reshape((bboxes.size // 7, 7))\n bboxes = bboxes[bboxes[:, 2] > 0.8][:, 3:7]\n\n # display the result\n if frame is not None:\n for raw_bbox in bboxes:\n bbox = frame_norm(frame, raw_bbox)\n # (image, point1, point2, color, thickness)\n cv2.rectangle(\n frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2\n )\n cv2.imshow(\"preview\", frame)\n\n if cv2.waitKey(1) == ord(\"q\"):\n break\n\n\nif __name__ == \"__main__\":\n run()","repo_name":"bhagone/depthai-OAKD","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"4317758840","text":"from utils import *\nfrom dataloader import *\nfrom rnn_crf import *\n\ndef load_model(args):\n\n cti = load_tkn_to_idx(args[1]) # char_to_idx\n wti = load_tkn_to_idx(args[2]) # word_to_idx\n itt = load_idx_to_tkn(args[3]) # idx_to_tag\n\n model = rnn_crf(cti, wti, len(itt))\n print(model)\n\n load_checkpoint(args[0], model)\n\n return model, cti, wti, itt\n\ndef run_model(model, data, itt):\n\n with torch.no_grad():\n model.eval()\n\n for batch in data.batchify(BATCH_SIZE):\n\n xc, xw, lens = batch.xc, batch.xw, batch.lens\n xc, xw = data.to_tensor(bc = xc, bw = xw, lens = lens)\n y1 = model.decode(xc, xw, lens)\n batch.y1 = [[itt[i] for i in y] for y in y1]\n\n for x0, y0, y1 in zip(batch.x0, batch.y0, batch.y1):\n if not HRE:\n x0, y0, y1 = [x0], [y0], [y1]\n for x0, y0, y1 in zip(x0, y0, y1):\n yield x0, y0, y1\n\ndef predict(model, cti, wti, itt, filename):\n\n data = dataloader(hre = HRE)\n with open(filename) as fo:\n text = fo.read().strip().split(\"\\n\" * (HRE + 1))\n\n for block in text:\n data.append_row()\n\n for line in block.split(\"\\n\"):\n\n if re.match(\"\\S+/[^ /]+( \\S+/[^ /]+)*$\", line): # word/tag\n x0, y0 = zip(*[re.split(\"/(?=[^/]+$)\", w) for w in line.split(\" \")])\n x1 = list(map(normalize, x0))\n else:\n x0, y0 = line, []\n if re.match(\"[^\\t]+\\t[^\\t]+$\", x0): # sentence \\t label\n x0, *y0 = x0.split(\"\\t\")\n x0 = tokenize(x0)\n x1 = list(map(normalize, x0))\n\n xc = [[cti[c] if c in cti else UNK_IDX for c in w] for w in x1]\n xw = [wti[w] if w in wti else UNK_IDX for w in x1]\n\n data.append_item(x0 = x0, x1 = x1, xc = xc, xw = xw, y0 = y0)\n\n return run_model(model, data, itt)\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) != 6:\n sys.exit(\"Usage: %s model char_to_idx word_to_idx tag_to_idx test_data\" % sys.argv[0])\n\n result = predict(*load_model(sys.argv[1:5]), sys.argv[5])\n func = tag_to_txt if TASK else lambda *x: x\n\n for x0, y0, y1 in result:\n if y0:\n print(func(x0, y0))\n print(func(x0, y1))\n","repo_name":"threelittlemonkeys/lstm-crf-pytorch","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":455,"dataset":"github-code","pt":"81"}
+{"seq_id":"25675626","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass EmbeddingLossFunctions(object):\n def __init__(self, loss_fn='xent', neg_sample_weights=1.0):\n \"\"\"\n Basic class that applies skip-gram-like loss\n (i.e., dot product of node+target and node and negative samples)\n Args:\n bilinear_weights: use a bilinear weight for affinity calculation: u^T A v. If set to\n false, it is assumed that input dimensions are the same and the affinity will be\n based on dot product.\n \"\"\"\n self.neg_sample_weights = neg_sample_weights\n self.output_dim = 1\n if loss_fn == 'xent':\n self.loss_fn = self._xent_loss\n else:\n print(\"Not implemented yet.\")\n\n\n def loss(self, inputs1, inputs2, neg_samples):\n \"\"\" negative sampling loss.\n Args:\n neg_samples: tensor of shape [num_neg_samples x input_dim2]. Negative samples for all\n inputs in batch inputs1.\n \"\"\"\n return self.loss_fn(inputs1, inputs2, neg_samples)\n\n def affinity(self, inputs1, inputs2):\n \"\"\" Affinity score between batch of inputs1 and inputs2.\n Args:\n inputs1: tensor of shape [n_batch_edges x feature_size].\n \"\"\"\n # shape: [n_batch_edges, input_dim1]\n result = torch.sum(inputs1 * inputs2, dim=1) # shape: (n_batch_edges,)\n return result\n\n def neg_cost(self, inputs1, neg_samples):\n \"\"\" For each input in batch, compute the sum of its affinity to negative samples.\n\n Returns:\n Tensor of shape [n_batch_edges x num_neg_samples]. For each node, a list of affinities to\n negative samples is computed.\n \"\"\"\n neg_aff = inputs1.mm(neg_samples.t()) #(n_batch_edges, num_neg_samples)\n return neg_aff\n\n\n def sigmoid_cross_entropy_with_logits(self, labels, logits):\n sig_aff = torch.sigmoid(logits)\n loss = labels * -torch.log(sig_aff) + (1 - labels) * -torch.log(1 - sig_aff)\n return loss\n\n def _xent_loss(self, inputs1, inputs2, neg_samples):\n \"\"\"\n inputs1: Tensor (512, 256), normalized vector\n inputs2: Tensor (512, 256), normalized vector\n neg_sample: Tensor (20, 256)\n \"\"\"\n cuda = inputs1.is_cuda\n true_aff = self.affinity(inputs1, inputs2)\n neg_aff = self.neg_cost(inputs1, neg_samples)\n true_labels = torch.ones(true_aff.shape) # (n_batch_edges,)\n if cuda:\n true_labels = true_labels.cuda()\n true_xent = self.sigmoid_cross_entropy_with_logits(labels=true_labels, logits=true_aff)\n neg_labels = torch.zeros(neg_aff.shape)\n if cuda:\n neg_labels = neg_labels.cuda()\n neg_xent = self.sigmoid_cross_entropy_with_logits(labels=neg_labels, logits=neg_aff)\n loss0 = true_xent.sum()\n loss1 = self.neg_sample_weights * neg_xent.sum()\n loss = loss0 + loss1\n return loss, loss0, loss1\n\n\nclass MappingLossFunctions(object):\n def __init__(self):\n self.loss_fn = self._euclidean_loss\n\n def loss(self, inputs1, inputs2):\n return self.loss_fn(inputs1, inputs2)\n\n def _euclidean_loss(self, inputs1, inputs2):\n sub = inputs2 - inputs1\n square_sub = sub**2\n loss = torch.sum(square_sub) \n return loss\n","repo_name":"thanhtrunghuynh93/networkAlignment","sub_path":"algorithms/PALE/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"81"}
+{"seq_id":"6778646070","text":"import re\n\nclass RegexpCase(object):\n def __init__(self, rules, prefix=None, suffix=None, default=None):\n offset = 0\n probes = []\n maycall = default is None or callable(default)\n\n # We use a single regular expression and use special probe\n # captures to figure out which one has actually matched.\n # Hopefully, the regular expression engine will make this run\n # fast.\n for (regexp, action) in rules:\n compiled = re.compile(regexp)\n probes.append((offset, offset + 1, offset + compiled.groups + 1,\n action))\n offset += compiled.groups + 1\n if action is not None:\n maycall = maycall and callable(action)\n self.probes = tuple(probes)\n self.maycall = maycall\n\n if not self.probes:\n raise ValueError(\"empty rule list\")\n if prefix is None:\n prefix = \"^(?:(\"\n else:\n if re.compile(prefix).groups > 0:\n raise ValueError(\"prefix must not contain captures\")\n prefix = \"^(?:\" + prefix + \")(?:(\"\n\n if suffix is None:\n suffix = \"))$\"\n else:\n if re.compile(suffix).groups > 0:\n raise ValueError(\"suffix must not contain captures\")\n suffix = \"))(?:\" + suffix + \")$\"\n\n self.regexp = re.compile(\n prefix + ')|('.join(regexp for (regexp, action) in rules)\n + suffix)\n\n self.default = default\n\n def match(self, key):\n match = self.regexp.match(key)\n if match is None:\n return (None, self.default)\n groups = match.groups()\n for (probe, i, j, action) in self.probes:\n if groups[probe] is not None:\n return (groups[i:j], action)\n raise AssertionError(\"pattern and offset list incongruent\")\n\n def __getitem__(self, key):\n return self.match(key)[1]\n\n def __call__(self, key, *args):\n if not self.maycall:\n raise TypeError(\"not all actions are callable\")\n (groups, action) = self.match(key)\n if action is None:\n return None\n if groups is None:\n groups = key\n return action(groups, *args)\n\ndef rule(regexp):\n \"\"\"Add a regular expression to the function, for the rule list\"\"\"\n return lambda f: (regexp, f)\n","repo_name":"CVEDB/security-tracker","sub_path":"lib/python/sectracker/regexpcase.py","file_name":"regexpcase.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71221140746","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\ndef plot_variable_pairs(df, target):\n for column in df.columns:\n if column != target:\n sns.lmplot(x=column, y=target, data=df)\n \ndef months_to_years(df):\n df_copy = df.copy()\n df_copy['tenure_years'] = round(df_copy.tenure / 12)\n \n return df_copy\n\ndef plot_categorical_and_continuous_vars(df, cont_vars, cat_vars, target, target_type=\"continuous\"):\n var_list = []\n \n if target_type == \"categorical\":\n for categorical in cat_vars:\n sns.heatmap(df[[target, categorical]])\n plt.show()\n \n var_list = cont_vars\n \n else:\n for continous in cont_vars:\n sns.relplot(x=continous, y=target, data=df)\n plt.show()\n \n var_list = cat_vars\n \n _generate_swarmplots(df, var_list, target)\n \ndef _generate_swarmplots(df, var_list, target):\n for var in var_list:\n sns.swarmplot(x=var, y=target, data=df)\n plt.show()","repo_name":"david-ryan-alviola/regression-exercises","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12022984081","text":"import boto3\nimport time\nimport json\n\n\ndef abortable_operation_upgrade(envName):\n\n client = boto3.client('elasticbeanstalk')\n print('\\n\\nConnecting')\n\n print('\\n\\nChecking for ready status on environment -', envName)\n\n while True:\n\n abortable_response = client.describe_environments(\n EnvironmentNames=[envName],\n IncludeDeleted=False\n )\n\n status = abortable_response['Environments'][0]['Status']\n\n if status == 'Ready':\n print(envName, 'is now ready!')\n break\n else:\n print('''\n -----------------------------------------------------------------------------------------\n Sleeping for four minutes... waiting for immutable upgrade to finish.\n -----------------------------------------------------------------------------------------''')\n time.sleep(240)\n","repo_name":"Johnny-Martinez/EBUpdate","sub_path":"lib/abortable_operation_upgrade.py","file_name":"abortable_operation_upgrade.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"15256065489","text":"import unittest\nfrom unittest.mock import patch, Mock\nfrom src.proxy_checker import ProxyChecker\n\n\nclass TestProxyChecker(unittest.TestCase):\n def setUp(self):\n self.proxies = [\"proxy1\", \"proxy2\", \"proxy3\"]\n self.checker = ProxyChecker(self.proxies)\n\n @patch(\"src.proxy_checker.requests.get\")\n def test_check_proxy(self, mock_get):\n mock_response_200 = Mock()\n mock_response_200.status_code = 200\n mock_response_404 = Mock()\n mock_response_404.status_code = 404\n mock_response_500 = Mock()\n mock_response_500.status_code = 500\n mock_get.side_effect = [mock_response_200, mock_response_404, mock_response_500]\n\n expected_results = {200: \"proxy1\", 404: \"proxy2\", 500: \"proxy3\"}\n for status_code, proxy in expected_results.items():\n result = self.checker.check_proxy(proxy)\n if status_code == 200:\n self.assertEqual(result, proxy)\n else:\n self.assertIsNone(result)\n mock_get.assert_called_with(\n \"http://google.com\", proxies={\"http\": proxy, \"https\": proxy}, timeout=5\n )\n\n @patch(\"src.proxy_checker.requests.get\")\n def test_filter_proxies(self, mock_get):\n mock_response = Mock()\n mock_response.status_code = 200\n mock_get.return_value = mock_response\n\n valid_proxies = self.checker.filter_proxies()\n self.assertEqual(valid_proxies, self.proxies)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"kimjongbing/JAPCproxychecker","sub_path":"test/test_proxy_checker.py","file_name":"test_proxy_checker.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"40326349096","text":"\nfile1 = open('puzzle4.txt', 'r')\nlines = file1.readlines()\n\ntotal = 0\n\nfor line in lines:\n stripped_line = line.strip()\n pairs = stripped_line.split(',')\n\n pair1 = pairs[0].split('-')\n\n start1 = int(pair1[0])\n end1 = int(pair1[1])\n\n pair2 = pairs[1].split('-')\n\n start2 = int(pair2[0])\n end2 = int(pair2[1])\n\n if start1 <= start2 and end1 >= start2:\n total += 1\n continue\n\n if start2 <= start1 and end2 >= start1:\n total += 1\n continue\n\nprint(total)","repo_name":"lopes22/aoc2022","sub_path":"puzzle4.py","file_name":"puzzle4.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"69860462984","text":"import os\nimport random\nimport re\n\nfrom nltk import WhitespaceTokenizer, bigrams\n\ntokens = []\nfile_bigrams = []\nvocabulary = {}\nt = WhitespaceTokenizer()\n\nfilename = input().strip()\n\n\ndef tokenize_line(line):\n global tokens\n for token in t.tokenize(line):\n tokens.append(token)\n\n\ndef create_file_birgrams(items):\n global file_bigrams\n file_bigrams = list(bigrams(items))\n\n\ndef create_freq_vocabulary(file_bigrams):\n global vocabulary\n for item in file_bigrams:\n vocabulary.setdefault(item[0], {})\n vocabulary[item[0]].setdefault(item[1], 0)\n vocabulary[item[0]][item[1]] += 1\n\n\nif not os.access(filename, os.F_OK):\n print('File does not exist.')\nelse:\n with open(filename, 'r', encoding='utf-8') as file:\n for line in file.readlines():\n tokenize_line(line)\n \n create_file_birgrams(tokens)\n create_freq_vocabulary(file_bigrams)\n \n possible_starts = []\n for token in vocabulary.keys():\n if re.match('^[A-Z].*[^.!?]$', token):\n possible_starts.append(token)\n \n selected_tokens = random.choices(population=possible_starts, k=10)\n \n for token in selected_tokens:\n sentence = [token]\n valid_length = 1\n isValid = False\n while not isValid:\n prev_token = sentence[-1]\n \n possible_tokens = {}\n for t in vocabulary[prev_token]:\n possible_tokens[t] = vocabulary[prev_token][t]\n \n choice = random.choices(k=1, population=list(possible_tokens.keys()), weights=list(possible_tokens.values()))\n sentence += choice\n valid_length += 1\n if re.match('.*[.!?]$', choice[0]) is not None:\n if valid_length >= 5:\n isValid = True\n print(\" \".join(sentence))\n","repo_name":"Dilmurod777/python_projects","sub_path":"text-generator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35112475342","text":"from models.BaseModel import BaseModel\nfrom db import DB\n\nclass ITehtava:\n jarjestysnumero: int\n tila: str\n nimi: str\n\nclass Tehtava(ITehtava):\n def __init__(self, jarjestysnumero: int, tila: str, nimi: str):\n self.jarjestysnumero = jarjestysnumero\n self.tila = tila\n self.nimi = nimi\n\nclass TodoModel(BaseModel):\n def __init__(self, db: DB) -> None:\n super().__init__(db)\n return None\n def lisaaTehtava(self, nimi: str) -> bool:\n onnistui = True\n try:\n lauseke = \"INSERT INTO todos (status, name) VALUES(?,?);\"\n tehtava = Tehtava(-1, ' ', nimi)\n tiedot = (tehtava.tila, tehtava.nimi,)\n print(tehtava)\n print(lauseke)\n print(tiedot)\n self.createRecord(lauseke, [tiedot])\n except Exception as err:\n onnistui = False\n print(\"TodoMalli - Virhe tehtävän luonnissa.\")\n print(err)\n return onnistui\n ","repo_name":"nikoroytio/python-and_sql-exercises","sub_path":"viikko10/L10-master/models/TodoModel.py","file_name":"TodoModel.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28555831593","text":"import os\n\n\ndef read_from_file(user_file):\n if os.path.isfile(user_file) and os.stat(user_file).st_size > 0:\n with open(user_file, 'r') as f:\n return f.read() #zwraca string\n else:\n print('Nie ma takiego pliku')\n return '' #pusty string - zwracamy ten sam typ\n\n\ndef save_to_file(user_file, user_data):\n with open(user_file, 'a') as f:\n f.write(user_data)\n","repo_name":"ritaly/kurs_python","sub_path":"09_moduly/zad3/module_file.py","file_name":"module_file.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4223568400","text":"from setuptools import setup\n\nlong_description = \"\"\"\nPlease view usage and document on https://github.com/RE-A/korean-geocoding. 사용법과 문서는 링크를 참조해 주세요.\n\"\"\"\n\nsetup(\n name='korean-geocoding',\n version='0.4.1',\n author='RE-A',\n author_email='skynine73@gmail.com',\n python_requires='>=3.8',\n install_requires=['requests', 'haversine', 'pyproj'],\n description='Korean geocoding library with Naver Geocoding API',\n long_description=long_description,\n url=\"https://github.com/RE-A/korean-geocoding\",\n packages=['korean_geocoding'],\n include_package_data=True\n)","repo_name":"RE-A/korean-geocoding","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
+{"seq_id":"2924645496","text":"import argparse\nimport os\nimport subprocess\nimport tempfile\n\nRUN_DIR = '/run/user/%d' % os.getuid()\n\ndef say(text, lang='en-US', volume=60, pitch=130, speed=100, device='default'):\n data = \"%s\" % \\\n (volume, pitch, speed, text)\n with tempfile.NamedTemporaryFile(suffix='.wav', dir=RUN_DIR) as f:\n cmd = 'pico2wave --wave %s --lang %s \"%s\" && aplay -q -D %s %s' % \\\n (f.name, lang, data, device, f.name)\n subprocess.check_call(cmd, shell=True)\n\n\ndef _main():\n parser = argparse.ArgumentParser(description='Text To Speech (pico2wave)')\n parser.add_argument('--lang', default='en-US')\n parser.add_argument('--volume', type=int, default=60)\n parser.add_argument('--pitch', type=int, default=130)\n parser.add_argument('--speed', type=int, default=100)\n parser.add_argument('--device', default='default')\n parser.add_argument('text', help='path to disk image file ')\n args = parser.parse_args()\n say(args.text, lang=args.lang, volume=args.volume, pitch=args.pitch, speed=args.speed,\n device=args.device)\n\n\nif __name__ == '__main__':\n _main()\n","repo_name":"eosurman/physicsc","sub_path":"src/aiy/voice/tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"30172831116","text":"# -*- coding: utf-8 -*-\n\nfrom ctypes import *\n\nfrom numpy import asarray\nimport cv2\n\n\n# Image type (IplImage)\nIPL_DEPTH_SIGN = 0x80000000\n\nIPL_DEPTH_1U = 1\nIPL_DEPTH_8U = 8\nIPL_DEPTH_16U = 16\nIPL_DEPTH_32F = 32\nIPL_DEPTH_64F = 64\n\nIPL_DEPTH_8S = IPL_DEPTH_SIGN + IPL_DEPTH_8U\nIPL_DEPTH_16S = IPL_DEPTH_SIGN + IPL_DEPTH_16U\nIPL_DEPTH_32S = IPL_DEPTH_SIGN + 32\n\nclass IplTileInfo(Structure):\n _fields_ = []\n\nclass IplROI(Structure):\n _fields_ = [\n # 0 - no COI (all channels are selected)\n # 1 - 0th channel is selected ...\n ('coi', c_int),\n ('xOffset', c_int),\n ('yOffset', c_int),\n ('width', c_int),\n ('height', c_int),\n ]\n\n# ipl image header\nclass IplImage(Structure):\n pass\n\nIplImage._fields_ = [\n (\"nSize\", c_int),\n (\"ID\", c_int),\n (\"nChannels\", c_int),\n (\"alphaChannel\", c_int),\n (\"depth\", c_int),\n (\"colorModel\", c_char * 4),\n (\"channelSeq\", c_char * 4),\n (\"dataOrder\", c_int),\n (\"origin\", c_int),\n (\"align\", c_int),\n (\"width\", c_int),\n (\"height\", c_int),\n (\"roi\", POINTER(IplROI)),\n (\"maskROI\", POINTER(IplImage)),\n (\"imageID\", c_void_p),\n (\"tileInfo\", POINTER(IplTileInfo)),\n (\"imageSize\", c_int),\n #be careful to use c_void_p!!\n #When making python type 2 IplImage* function, memory allocate is required\n (\"imageData\", c_void_p), \n (\"widthStep\", c_int),\n (\"BorderMode\", c_int * 4),\n (\"BorderConst\", c_int * 4),\n (\"imageDataOrigin\", c_char_p)\n ]\n\ndef ipl2iplimage(ipl_ptr, img_shape):\n \"\"\"get cv2.cv.iplimage from C's IplImage*\n\n ipl_ptr: POINTER(IplImage) that points to valid image\n img_shape: 3 element int tuple (height, width, n_channels)\n \"\"\"\n #allocate Python memory for image\n height, width, n_channels = img_shape\n cv_img = cv2.cv.CreateImageHeader((width, height), IPL_DEPTH_8U, n_channels)\n # getting the IplImage* and set memory\n iplimage = ipl_ptr.contents\n str_data = string_at(iplimage.imageData, iplimage.imageSize)\n cv2.cv.SetData(cv_img, str_data, iplimage.widthStep)\n return cv_img\n\ndef ipl2array(ipl_ptr, img_shape):\n \"\"\"get numpy.ndarray from IplImage*\n\n ipl_ptr: POINTER(IplImage) that points to valid image\n img_shape: 3 element int tuple (height, width, n_channels)\n \"\"\"\n cv_img = ipl2iplimage(ipl_ptr, img_shape)\n # building a CvMat image by slice operation([:,:]), \n # and build ndarray from CvMat\n return asarray(cv_img[:, :])\n\n","repo_name":"atsushisugiyama/phenox_python","sub_path":"library/cv_c2py.py","file_name":"cv_c2py.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"35673477525","text":"from shop.models import BillingAddress, Order, OrderItems,Product,ProductImage,Category, Review\nfrom django.shortcuts import redirect, render\nfrom http.client import HTTPResponse\nfrom django.http import HttpRequest, HttpResponse,HttpResponseRedirect\nfrom pprint import pp, pprint\nfrom django.core import serializers\nfrom django.http import JsonResponse\nimport json\n\nfrom django.db.models import Sum,Value,F\n\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db import transaction\n\nfrom django.forms import model_to_dict\n\n\ndef cartProduct(request): \n \n products = request.POST.get('data')\n \n\n if len(products) > 2:\n \n # the result is a Python dictionary:\n products = json.loads(products)\n\n prod_id = []\n prod_q = []\n for p in products:\n if len(p[\"id\"]) < 1:\n continue\n prod_id.append(int(p[\"id\"]))\n prod_q.append(int(p[\"quantity\"])) \n\n item_list = Product.objects.filter(pk__in=prod_id).values()\n\n cartItem = { }\n cart = []\n for item in list(item_list):\n for p in products:\n if p[\"id\"] == str(item['id']):\n id = item['id']\n for index in item.keys():\n cartItem[index] = item[index]\n\n cartItem['quantity'] = p[\"quantity\"]\n cartItem['image'] = ProductImage.objects.filter(product = Product.objects.get(pk = id)).values_list('product_img_src', flat=True).first()\n\n temp = cartItem.copy()\n cart.append(temp)\n \n cartItem.clear()\n\n json_items = json.dumps(cart,cls= DjangoJSONEncoder)\n\n return HttpResponse(json_items, content_type=\"application/json\")\n else:\n print(\"Data retrive problem!\")\n return HttpResponse(\"Data retrive problem!\")\n\ndef checkout(request):\n\n try:\n billing_address = BillingAddress.objects.get(customer = request.user.customer, status = 1)\n except:\n billing_address = None \n\n if request.method == 'POST':\n products = request.POST.get('data')\n comment = request.POST.get('comment')\n\n products = json.loads(products)\n comment = json.loads(comment)\n\n if request.user.is_anonymous:\n return HttpResponse(\"401\", content_type=\"application/json\")\n else:\n try:\n with transaction.atomic():\n order = Order()\n order.comment = comment\n order.billing_addr = billing_address\n order.customer = request.user.customer\n order.save()\n\n \n for p in products:\n if len(p[\"id\"]) < 1:\n continue\n items = OrderItems()\n prod = Product.objects.get(pk=p[\"id\"])\n items.order = order\n items.product = prod\n items.quantity = p[\"quantity\"]\n items.save()\n\n return HttpResponse(\"201\", content_type=\"application/json\")\n except:\n return HttpResponse(\"400\", content_type=\"application/json\")\n \n context = {\n 'meta_title' : 'The New Day a Cloud kitchen of Bangladesh',\n 'meta_description' : 'The New Day (TND) is a Cloud Kitchen of Fast Food & Restaurant with Multi Cuisine',\n 'title' : 'View Cart',\n 'h1_tag' : 'The New Day (TND) is a Cloud Kitchen of Fast Food & Restaurant with Multi Cuisine',\n 'class' : 'fastfood_1',\n 'billing_address' : billing_address\n }\n\n return render(request, 'order/checkout.html',context)","repo_name":"FahimDev/Django-ecommerce-website","sub_path":"shop/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4432083753","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 27 13:07:35 2017\n\n@author: levi\n\"\"\"\n\nimport rest_sgra, grad_sgra, numpy, copy, os\nimport matplotlib.pyplot as plt\nfrom utils import ddt\nfrom multiprocessing import Pool\n\nclass binFlagDict(dict):\n \"\"\"Class for binary flag dictionaries.\n Provides good grounding for any settings or options dictionary. \"\"\"\n\n def __init__(self,inpDict={},inpName='options'):\n self.name = inpName\n for key in inpDict.keys():\n self[key] = inpDict[key]\n\n def setAll(self,tf=True,opt={}):\n for key in self.keys():\n self[key] = (tf and opt.get(key,True))\n\n# self.log.printL(\"\\nSetting '\"+self.name+\"' as follows:\")\n# self.log.pprint(self)\n\nclass LMPBVPhelp():\n \"\"\"Class for processing the Linear Multipoint Boundary Value Problem.\n The biggest advantage in using an object is that the parallelization of\n the computations for each solution is much easier.\"\"\"\n\n def __init__(self,sol,rho):\n \"\"\"Initialization method. Comprises all the \"common\" calculations for\n each independent solution to be added over.\n\n According to the Miele (2003) convention,\n rho = 0 for rest, and rho = 1 for grad.\n \"\"\"\n\n # debug options...\n self.dbugOptGrad = sol.dbugOptGrad\n self.dbugOptRest = sol.dbugOptRest\n self.t = sol.t\n\n # get sizes\n Ns,N,m,n,p,q,s = sol.Ns,sol.N,sol.m,sol.n,sol.p,sol.q,sol.s\n self.Ns,self.N,self.m,self.n,self.p,self.q,self.s = Ns,N,m,n,p,q,s\n self.dt = 1.0/(N-1)\n self.rho = rho\n\n # calculate integration error (only if necessary)\n psi = sol.calcPsi()\n self.psi = psi\n if rho < .5:\n # calculate phi and psi\n phi = sol.calcPhi()\n err = phi - ddt(sol.x,N)\n else:\n err = numpy.zeros((N,n,s))\n\n self.err = err\n\n\n #######################################################################\n if rho < 0.5 and self.dbugOptRest['plotErr']:\n print(\"\\nThis is err:\")\n for arc in range(s):\n plt.plot(self.t,err[:,0,arc])\n plt.ylabel(\"errPos\")\n plt.grid(True)\n plt.show()\n plt.clf()\n plt.close('all')\n\n if n>1:\n plt.plot(self.t,err[:,1,arc])\n plt.ylabel(\"errVel\")\n plt.grid(True)\n plt.show()\n plt.clf()\n plt.close('all')\n #######################################################################\n\n # Get gradients\n Grads = sol.calcGrads(calcCostTerm=(rho>0.5))\n #dt6 = dt/6\n phix = Grads['phix']\n phiu = Grads['phiu']\n phip = Grads['phip']\n psiy = Grads['psiy']\n psip = Grads['psip']\n fx = Grads['fx']\n fu = Grads['fu']\n fp = Grads['fp']\n\n self.phip = phip\n self.psiy = psiy\n self.psip = psip\n self.fx = fx\n self.fu = fu\n self.fp = fp\n\n # Prepare matrices with derivatives:\n phixTr = numpy.empty_like(phix)\n phiuTr = numpy.empty((N,m,n,s))\n phipTr = numpy.empty((N,p,n,s))\n phiuFu = numpy.empty((N,n,s))\n for arc in range(s):\n for k in range(N):\n phixTr[k,:,:,arc] = phix[k,:,:,arc].transpose()\n phiuTr[k,:,:,arc] = phiu[k,:,:,arc].transpose()\n phipTr[k,:,:,arc] = phip[k,:,:,arc].transpose()\n phiuFu[k,:,arc] = phiu[k,:,:,arc].dot(fu[k,:,arc])\n self.phiuFu = phiuFu\n self.phiuTr = phiuTr\n self.phipTr = phipTr\n\n InitCondMat = numpy.eye(Ns,Ns+1)\n self.InitCondMat = InitCondMat\n\n # Dynamics matrix for propagating the LSODE:\n DynMat = numpy.zeros((N,2*n,2*n,s))\n for arc in range(s):\n for k in range(N):\n DynMat[k,:n,:n,arc] = phix[k,:,:,arc]\n DynMat[k,:n,n:,arc] = phiu[k,:,:,arc].dot(phiuTr[k,:,:,arc])\n DynMat[k,n:,n:,arc] = -phixTr[k,:,:,arc]\n self.DynMat = DynMat\n\n def propagate(self,j):\n \"\"\"This method 
computes each solution, via propagation of the\n applicable Linear System of Ordinary Differential Equations.\"\"\"\n\n # Load data (sizes, common matrices, etc)\n rho = self.rho\n rho1 = self.rho-1.0\n Ns,N,n,m,p,s = self.Ns,self.N,self.n,self.m,self.p,self.s\n dt = self.dt\n\n InitCondMat = self.InitCondMat\n phip = self.phip\n err = self.err\n phiuFu = self.phiuFu\n fx = self.fx\n if rho > .5:\n rhoFu = self.fu\n else:\n rhoFu = numpy.zeros((N,m,s))\n\n phiuTr = self.phiuTr\n phipTr = self.phipTr\n DynMat = self.DynMat\n I = numpy.eye(2*n)\n\n # Declare matrices for corrections\n phiLamIntCol = numpy.zeros(p)\n DtCol = numpy.empty(2*n*s)\n EtCol = numpy.empty(2*n*s)\n A = numpy.zeros((N,n,s))\n B = numpy.zeros((N,m,s))\n C = numpy.zeros((p,1))\n lam = numpy.zeros((N,n,s))\n\n # the vector that will be integrated is Xi = [A; lam]\n Xi = numpy.zeros((N,2*n,s))\n # Initial conditions for the LSODE:\n for arc in range(s):\n A[0,:,arc] = InitCondMat[2*n*arc:(2*n*arc+n) , j]\n lam[0,:,arc] = InitCondMat[(2*n*arc+n):(2*n*(arc+1)) , j]\n Xi[0,:n,arc],Xi[0,n:,arc] = A[0,:,arc],lam[0,:,arc]\n C = InitCondMat[(2*n*s):,j]\n\n # Non-homogeneous terms for the LSODE:\n nonHom = numpy.empty((N,2*n,s))\n for arc in range(s):\n for k in range(N):\n # minus sign in rho1 (rho-1) is on purpose!\n nonHA = phip[k,:,:,arc].dot(C) + \\\n -rho1*err[k,:,arc] - rho*phiuFu[k,:,arc]\n nonHL = rho * fx[k,:,arc]\n nonHom[k,:n,arc] = nonHA#.copy()\n nonHom[k,n:,arc] = nonHL#.copy()\n\n # Integrate the LSODE (by Heun's method):\n for arc in range(s):\n# B[0,:,arc] = -rhoFu[0,:,arc] + \\\n# phiuTr[0,:,:,arc].dot(lam[0,:,arc])\n# phiLamIntCol += .5 * (phipTr[0,:,:,arc].dot(lam[0,:,arc]))\n#\n# # First point: simple propagation\n# derXik = DynMat[0,:,:,arc].dot(Xi[0,:,arc]) + \\\n# nonHom[0,:,arc]\n# Xi[1,:,arc] = Xi[0,:,arc] + dt * derXik\n# #A[1,:,arc] = Xi[1,:n,arc]\n# lam[1,:,arc] = Xi[1,n:,arc]\n# B[1,:,arc] = -rhoFu[1,:,arc] + \\\n# phiuTr[1,:,:,arc].dot(lam[1,:,arc])\n# phiLamIntCol += phipTr[1,:,:,arc].dot(lam[1,:,arc])\n#\n# # \"Middle\" points: original Heun propagation\n# for k in range(1,N-2):\n# derXik = DynMat[k,:,:,arc].dot(Xi[k,:,arc]) + \\\n# nonHom[k,:,arc]\n# aux = Xi[k,:,arc] + dt * derXik\n# Xi[k+1,:,arc] = Xi[k,:,arc] + .5 * dt * (derXik + \\\n# DynMat[k+1,:,:,arc].dot(aux) + \\\n# nonHom[k+1,:,arc])\n# #A[k+1,:,arc] = Xi[k+1,:n,arc]\n# lam[k+1,:,arc] = Xi[k+1,n:,arc]\n# B[k+1,:,arc] = -rhoFu[k+1,:,arc] + \\\n# phiuTr[k+1,:,:,arc].dot(lam[k+1,:,arc])\n# phiLamIntCol += phipTr[k+1,:,:,arc].dot(lam[k+1,:,arc])\n# #\n#\n# # Last point: simple propagation, but based on the last point\n# derXik = DynMat[N-1,:,:,arc].dot(Xi[N-2,:,arc]) + \\\n# nonHom[N-1,:,arc]\n# Xi[N-1,:,arc] = Xi[N-2,:,arc] + dt * derXik\n# #A[N-1,:,arc] = Xi[N-1,:n,arc]\n# lam[N-1,:,arc] = Xi[N-1,n:,arc]\n# B[N-1,:,arc] = -rhoFu[N-1,:,arc] + \\\n# phiuTr[N-1,:,:,arc].dot(lam[N-1,:,arc])\n# phiLamIntCol += .5*phipTr[N-1,:,:,arc].dot(lam[N-1,:,arc])\n\n # Integrate the LSODE by Euler Backwards implicit\n B[0,:,arc] = -rhoFu[0,:,arc] + \\\n phiuTr[0,:,:,arc].dot(lam[0,:,arc])\n phiLamIntCol += .5 * (phipTr[0,:,:,arc].dot(lam[0,:,arc]))\n\n for k in range(N-1):\n Xi[k+1,:,arc] = numpy.linalg.solve(I - dt*DynMat[k+1,:,:,arc],\\\n Xi[k,:,arc] + dt*nonHom[k+1,:,arc])\n lam[k+1,:,arc] = Xi[k+1,n:,arc]\n B[k+1,:,arc] = -rhoFu[k+1,:,arc] + \\\n phiuTr[k+1,:,:,arc].dot(lam[k+1,:,arc])\n phiLamIntCol += phipTr[k+1,:,:,arc].dot(lam[k+1,:,arc])\n\n phiLamIntCol -= .5*phipTr[N-1,:,:,arc].dot(lam[N-1,:,arc])\n\n\n # Get the A values from Xi\n 
A[:,:,arc] = Xi[:,:n,arc]\n\n # Put initial and final conditions of A and Lambda into matrices\n # DtCol and EtCol, which represent the columns of Dtilde(Dt) and\n # Etilde(Et)\n DtCol[(2*arc)*n : (2*arc+1)*n] = A[0,:,arc] # eq (32a)\n DtCol[(2*arc+1)*n : (2*arc+2)*n] = A[N-1,:,arc] # eq (32a)\n EtCol[(2*arc)*n : (2*arc+1)*n] = -lam[0,:,arc] # eq (32b)\n EtCol[(2*arc+1)*n : (2*arc+2)*n] = lam[N-1,:,arc] # eq (32b)\n #\n\n # All integrations ready!\n phiLamIntCol *= dt\n\n###############################################################################\n if (rho > 0.5 and self.dbugOptGrad['plotCorr']) or \\\n (rho < 0.5 and self.dbugOptRest['plotCorr']):\n print(\"\\nHere are the corrections for iteration \" + str(j+1) + \\\n \" of \" + str(Ns+1) + \":\\n\")\n for arc in range(s):\n print(\"> Corrections for arc =\",arc)\n plt.plot(self.t,A[:,0,arc])\n plt.grid(True)\n plt.ylabel('A: pos')\n plt.show()\n plt.clf()\n plt.close('all')\n\n\n plt.plot(self.t,lam[:,0,arc])\n plt.grid(True)\n plt.ylabel('lambda: pos')\n plt.show()\n plt.clf()\n plt.close('all')\n\n if n>1:\n plt.plot(self.t,A[:,1,arc])\n plt.grid(True)\n plt.ylabel('A: vel')\n plt.show()\n plt.clf()\n plt.close('all')\n\n plt.plot(self.t,lam[:,1,arc])\n plt.grid(True)\n plt.ylabel('lambda: vel')\n plt.show()\n plt.clf()\n plt.close('all')\n\n if n>2:\n plt.plot(self.t,A[:,2,arc])\n plt.grid(True)\n plt.ylabel('A: gama')\n plt.show()\n plt.clf()\n plt.close('all')\n\n plt.plot(self.t,lam[:,2,arc])\n plt.grid(True)\n plt.ylabel('lambda: gamma')\n plt.show()\n plt.clf()\n plt.close('all')\n\n\n if n>3:\n plt.plot(self.t,A[:,3,arc])\n plt.grid(True)\n plt.ylabel('A: m')\n plt.show()\n plt.clf()\n plt.close('all')\n\n plt.plot(self.t,lam[:,3,arc])\n plt.grid(True)\n plt.ylabel('lambda: m')\n plt.show()\n plt.clf()\n plt.close('all')\n\n\n plt.plot(self.t,B[:,0,arc])\n plt.grid(True)\n plt.ylabel('B0')\n plt.show()\n plt.clf()\n plt.close('all')\n\n if m>1:\n plt.plot(self.t,B[:,1,arc])\n plt.grid(True)\n plt.ylabel('B1')\n plt.show()\n plt.clf()\n plt.close('all')\n\n print(\"C[arc] =\",C[arc])\n #input(\" > \")\n###############################################################################\n\n # All the outputs go to main output dictionary; the final solution is\n # computed by the next method, 'getCorr'.\n outp = {'A':A,'B':B,'C':C,'L':lam,'Dt':DtCol,'Et':EtCol,\n 'phiLam':phiLamIntCol}\n\n return outp\n\n\n def getCorr(self,res,log):\n \"\"\" Computes the actual correction for this grad/rest step, by linear\n combination of the solutions generated by method 'propagate'.\"\"\"\n\n # Get sizes\n Ns,N,n,m,p,q,s = self.Ns,self.N,self.n,self.m,self.p,self.q,self.s\n rho1 = self.rho - 1.0\n\n # Declare matrices Ctilde, Dtilde, Etilde, and the integral term\n Ct = numpy.empty((p,Ns+1))\n Dt = numpy.empty((2*n*s,Ns+1))\n Et = numpy.empty((2*n*s,Ns+1))\n phiLamInt = numpy.empty((p,Ns+1))\n\n # Unpack outputs from 'propagate' into proper matrices Ct, Dt, etc.\n for j in range(Ns+1):\n Ct[:,j] = res[j]['C']\n Dt[:,j] = res[j]['Dt']\n Et[:,j] = res[j]['Et']\n phiLamInt[:,j] = res[j]['phiLam']\n\n # Assembly of matrix M and column 'Col' for the linear system\n\n # Matrix for linear system involving k's and mu's\n M = numpy.zeros((Ns+q+1,Ns+q+1))\n # from eq (34d) - k term\n M[0,:(Ns+1)] = numpy.ones(Ns+1)\n # from eq (34b) - mu term\n M[(q+1):(q+1+p),(Ns+1):] = self.psip.transpose()\n # from eq (34c) - mu term\n M[(p+q+1):,(Ns+1):] = self.psiy.transpose()\n # from eq (34a) - k term\n M[1:(q+1),:(Ns+1)] = self.psiy.dot(Dt) + self.psip.dot(Ct)\n # 
from eq (34b) - k term\n M[(q+1):(q+p+1),:(Ns+1)] = Ct - phiLamInt\n # from eq (34c) - k term\n M[(q+p+1):,:(Ns+1)] = Et\n\n # column vector for linear system involving k's and mu's [eqs (34)]\n col = numpy.zeros(Ns+q+1)\n col[0] = 1.0 # eq (34d)\n\n # Integral term\n if self.rho > 0.5:\n # eq (34b) - only applicable for grad\n sumIntFpi = numpy.zeros(p)\n for arc in range(s):\n for ind in range(p):\n sumIntFpi[ind] += self.fp[:,ind,arc].sum()\n sumIntFpi[ind] -= .5 * ( self.fp[0,ind,arc] + \\\n self.fp[-1,ind,arc])\n sumIntFpi *= self.dt\n col[(q+1):(q+p+1)] = -self.rho * sumIntFpi\n else:\n # eq (34a) - only applicable for rest\n col[1:(q+1)] = rho1 * self.psi\n\n\n # Calculations of weights k:\n KMi = numpy.linalg.solve(M,col)\n Res = M.dot(KMi)-col\n log.printL(\"Residual of the Linear System: \" + \\\n str(Res.transpose().dot(Res)))\n K,mu = KMi[:(Ns+1)], KMi[(Ns+1):]\n\n # summing up linear combinations\n A = numpy.zeros((N,n,s))\n B = numpy.zeros((N,m,s))\n C = numpy.zeros(p)\n lam = numpy.zeros((N,n,s))\n\n for j in range(Ns+1):\n A += K[j] * res[j]['A']#self.arrayA[j,:,:,:]\n B += K[j] * res[j]['B']#self.arrayB[j,:,:,:]\n C += K[j] * res[j]['C']#self.arrayC[j,:]\n lam += K[j] * res[j]['L']#self.arrayL[j,:,:,:]\n\n###############################################################################\n if (self.rho > 0.5 and self.dbugOptGrad['plotCorrFin']) or \\\n (self.rho < 0.5 and self.dbugOptRest['plotCorrFin']):\n log.printL(\"\\n------------------------------------------------------------\")\n log.printL(\"Final corrections:\\n\")\n for arc in range(s):\n log.printL(\"> Corrections for arc =\",arc)\n plt.plot(self.t,A[:,0,arc])\n plt.grid(True)\n plt.ylabel('A: pos')\n plt.show()\n plt.clf()\n plt.close('all')\n\n if n>1:\n plt.plot(self.t,A[:,1,arc])\n plt.grid(True)\n plt.ylabel('A: vel')\n plt.show()\n plt.clf()\n plt.close('all')\n\n if n>2:\n plt.plot(self.t,A[:,2,arc])\n plt.grid(True)\n plt.ylabel('A: gama')\n plt.show()\n plt.clf()\n plt.close('all')\n\n if n>3:\n plt.plot(self.t,A[:,3,arc])\n plt.grid(True)\n plt.ylabel('A: m')\n plt.show()\n plt.clf()\n plt.close('all')\n\n plt.plot(self.t,B[:,0,arc])\n plt.grid(True)\n plt.ylabel('B0')\n plt.show()\n plt.clf()\n plt.close('all')\n\n if m>1:\n plt.plot(self.t,B[:,1,arc])\n plt.grid(True)\n plt.ylabel('B1')\n plt.show()\n plt.clf()\n plt.close('all')\n\n\n log.printL(\"C[arc] =\",C[arc])\n\n #input(\" > \")\n###############################################################################\n return A,B,C,lam,mu\n\nclass sgra():\n \"\"\"Class for a general instance of the SGRA problem.\n\n Here are all the methods and variables that are independent of a specific\n instance of a problem.\n\n Each instance of an optimization problem must then inherit these methods\n and properties. 
\"\"\"\n\n probName = 'probSGRA'\n\n def __init__(self,parallel={}):\n # these numbers should not make any sense;\n # they should change with the problem\n N,n,m,p,q,s = 50000,4,2,1,3,2\n\n self.N = N\n self.n = n\n self.m = m\n self.p = p\n self.q = q\n self.s = s\n\n self.x = numpy.zeros((N,n))\n self.u = numpy.zeros((N,m))\n self.pi = numpy.zeros(p)\n self.lam = numpy.zeros((N,n))\n self.mu = numpy.zeros(q)\n\n self.boundary = {}\n self.constants = {}\n self.restrictions = {}\n\n self.P = 1.0\n self.Q = 1.0\n self.I = 1.0\n\n # Basic maximum number of iterations for grad/rest.\n # May be overriden in the problem definition\n MaxIterRest = 100000\n self.MaxIterRest = MaxIterRest\n self.NIterRest = 0\n self.histStepRest = numpy.zeros(MaxIterRest)\n self.histP = numpy.zeros(MaxIterRest)\n self.histPint = numpy.zeros(MaxIterRest)\n self.histPpsi = numpy.zeros(MaxIterRest)\n\n MaxIterGrad = 10000\n self.MaxIterGrad = MaxIterGrad\n self.NIterGrad = 0\n\n self.histStepGrad = numpy.zeros(MaxIterGrad)\n self.histQ = numpy.zeros(MaxIterGrad)\n self.histQx = numpy.zeros(MaxIterGrad)\n self.histQu = numpy.zeros(MaxIterGrad)\n self.histQp = numpy.zeros(MaxIterGrad)\n self.histQt = numpy.zeros(MaxIterGrad)\n\n self.histI = numpy.zeros(MaxIterGrad)\n self.histIorig = numpy.zeros(MaxIterGrad)\n self.histIpf = numpy.zeros(MaxIterGrad)\n\n self.histGRrate = numpy.zeros(MaxIterGrad)\n\n self.tol = {'P':1e-7,'Q':1e-7}\n\n # Gradient-Restoration EVent List (1-grad, 0-rest)\n self.GREvList = numpy.ones(MaxIterRest+MaxIterGrad,dtype='bool')\n self.GREvIndx = -1\n\n # Debugging options\n tf = False\n self.dbugOptRest = binFlagDict(inpDict={'pausRest':tf,\n 'pausCalcP':tf,\n 'plotP_int':tf,\n 'plotP_intZoom':tf,\n 'plotIntP_int':tf,\n 'plotSolMaxP':tf,\n 'plotRsidMaxP':tf,\n 'plotErr':tf,\n 'plotCorr':tf,\n 'plotCorrFin':tf},\\\n inpName='Debug options for Rest')\n tf = False#True#\n self.dbugOptGrad = binFlagDict(inpDict={'pausGrad':tf,\n 'pausCalcQ':tf,\n 'prntCalcStepGrad':tf,\n 'plotCalcStepGrad': tf,\n 'pausCalcStepGrad':tf,\n 'plotQx':tf,\n 'plotQu':tf,\n 'plotLam':tf,\n 'plotQxZoom':tf,\n 'plotQuZoom':tf,\n 'plotQuComp':tf,\n 'plotQuCompZoom':tf,\n 'plotSolQxMax':tf,\n 'plotSolQuMax':tf,\n 'plotCorr':tf,\n 'plotCorrFin':tf,\n 'plotF':tf,\n 'plotFint':tf},\\\n inpName='Debug options for Grad')\n\n # Solution plot saving status:\n self.save = binFlagDict(inpDict={'currSol':True,\n 'histP':True,\n 'histQ':True,\n 'histI':True,\n 'histGradStep':True,\n 'traj':True,\n 'comp':True},\\\n inpName='Plot saving options')\n\n # Paralellism options\n self.isParallel = dict()\n self.isParallel['gradLMPBVP'] = parallel.get('gradLMPBVP',False)\n self.isParallel['restLMPBVP'] = parallel.get('restLMPBVP',False)\n\n\n def updtGRrate(self):\n\n # find last gradient step index in event list\n LastGradIndx = self.GREvIndx - 1\n\n while self.GREvList[LastGradIndx] == False and LastGradIndx > 0:\n LastGradIndx -= 1\n\n # number of restorations after last gradient step\n if LastGradIndx == 0:\n nRest = self.GREvIndx-1\n else:\n nRest = self.GREvIndx - 1 - LastGradIndx\n\n self.histGRrate[self.NIterGrad] = nRest\n\n def copy(self):\n \"\"\"Copy the solution. 
It is useful for applying corrections, generating\n baselines for comparison, etc.\n Special care must be given for the logging object, however.\"\"\"\n\n # Get logging object (always by reference)\n log = self.log\n # Clear the reference in the solution\n self.log = None\n # Do the copy\n newSol = copy.deepcopy(self)\n # Point the logging object back into the solutions (original and copy)\n newSol.log = log\n self.log = log\n return newSol\n\n def aplyCorr(self,alfa,corr):\n self.log.printL(\"\\nApplying alfa = \"+str(alfa))\n self.x += alfa * corr['x']\n self.u += alfa * corr['u']\n self.pi += alfa * corr['pi']\n\n def initGues(self):\n # implemented by child classes\n pass\n\n def printPars(self):\n dPars = self.__dict__\n# keyList = dPars.keys()\n self.log.printL(\"These are the attributes for the current solution:\\n\")\n self.log.pprint(dPars)\n\n def plotCat(self,func,mark='',color='b',labl='',piIsTime=True,intv=[]):\n \"\"\"Plot a given function with several subarcs.\n Since this function should serve all the SGRA instances, the pi\n parameters (if exist!) are not necessarily the times for each\n subarc. Hence, the optional parameter \"piIsTime\".\n\n However, this function does consider the arcs to be concatenated.\n If this property does not hold for any future problems to be\n considered, then the function must be rewritten.\n \"\"\"\n\n s = self.s\n t = self.t\n pi = self.pi\n N = self.N\n dt = 1.0/(N-1)\n dtd = dt\n\n # Set upper and lower bounds\n lowrBnd = 0.0\n if piIsTime:\n uperBnd = pi.sum()\n else:\n TimeDur = t[-1]\n uperBnd = TimeDur * s\n\n if len(intv)==0:\n intv = [lowrBnd,uperBnd]\n\n # Check consistency of requested time interval, override if necessary\n if intv[0] < lowrBnd or intv[1] > uperBnd:\n self.log.printL(\"plotCat: inadequate time interval used! Ignoring...\")\n if intv[0] < lowrBnd:\n intv[0] = lowrBnd\n if intv[1] > uperBnd:\n intv[1] = uperBnd\n\n accTime = 0.0\n mustLabl = True\n isBgin = True\n\n for arc in range(s):\n if piIsTime:\n TimeDur = pi[arc]\n dtd = dt * TimeDur\n\n # check if this arc gets plotted at all\n #print(\"\\narc =\",arc)\n #print(\"accTime =\",accTime)\n #print(\"TimeDur =\",TimeDur)\n #print(\"intv =\",intv)\n #print(\"condition1:\",accTime <= intv[1])\n #print(\"condition2:\",accTime + TimeDur >= intv[0])\n if (accTime <= intv[1]) and (accTime + TimeDur >= intv[0]):\n\n # From this point on, the arc will be plotted.\n # Find the index for the first point to be plotted:\n\n if isBgin:\n # accTime + ind * dtd = intv[0]\n indBgin = int((intv[0] - accTime)/dtd)\n isBgin = False\n if intv[0] <= accTime:\n plt.plot(accTime + TimeDur*t[0],func[0,arc],'o'+color)\n else:\n indBgin = 0\n # arc beginning with circle\n plt.plot(accTime + TimeDur*t[0],func[0,arc],'o'+color)\n\n #print(\"indBgin =\",indBgin)\n if accTime + TimeDur > intv[1]:\n indEnd = int((intv[1] - accTime)/dtd)\n if indEnd == (N-1):\n plt.plot(accTime + TimeDur*t[-1], \\\n func[-1,arc],'s'+color)\n else:\n indEnd = N-1\n # arc end with square\n plt.plot(accTime + TimeDur*t[-1],func[-1,arc],'s'+color)\n\n #print(\"indEnd =\",indEnd)\n\n # Plot the function at each arc. 
Label only the first drawn arc\n if mustLabl:\n plt.plot(accTime + TimeDur * t[indBgin:indEnd], \\\n func[indBgin:indEnd,arc],\\\n mark+color,label=labl)\n mustLabl = False\n else:\n plt.plot(accTime + TimeDur * t[indBgin:indEnd], \\\n func[indBgin:indEnd,arc],\\\n mark+color)\n #\n #\n accTime += TimeDur\n\n def savefig(self,keyName='',fullName=''):\n # default must be the boolean False; the old string 'False' was always truthy\n if self.save.get(keyName,False):\n# fileName = self.log.folderName + '/' + self.probName + '_' + \\\n# keyName + '.pdf'\n fileName = self.log.folderName + os.sep + keyName + '.pdf'\n self.log.printL('Saving ' + fullName + ' plot to ' + fileName + \\\n '!')\n try:\n plt.savefig(fileName, bbox_inches='tight', pad_inches=0.1)\n except Exception:\n self.log.printL(\"Sorry, pdf saving failed... Are you using Windows?\")\n self.log.printL(\"Anyway, you can always load the object and use some \"+ \\\n \"of its plotting methods later, I guess.\")\n else:\n plt.show()\n #\n plt.clf()\n plt.close('all')\n\n#%% Just for avoiding compatibility issues with other problems\n # These methods are all properly implemented in probRock class.\n\n def plotTraj(self):\n self.log.printL(\"plotTraj: unimplemented method.\")\n pass\n\n def compWith(self,*args,**kwargs):\n self.log.printL(\"compWith: unimplemented method.\")\n pass\n\n def plotSol(self,*args,**kwargs):\n titlStr = \"Current solution\"\n\n plt.subplots_adjust(0.0125,0.0,0.9,2.5,0.2,0.2)\n Np = self.n + self.m\n\n\n # First state\n plt.subplot2grid((Np,1),(0,0))\n self.plotCat(self.x[:,0,:],piIsTime=False)\n plt.grid(True)\n plt.ylabel(\"State #1\")\n plt.title(titlStr)\n\n ind = 1\n for i in range(1,self.n):\n plt.subplot2grid((Np,1),(ind,0))\n ind+=1\n self.plotCat(self.x[:,i,:],piIsTime=False)\n plt.grid(True)\n plt.ylabel(\"State #\"+str(i+1))\n\n # Controls\n for i in range(self.m):\n plt.subplot2grid((Np,1),(ind,0))\n ind+=1\n self.plotCat(self.u[:,i,:],piIsTime=False)\n plt.grid(True)\n plt.ylabel(\"Control #\"+str(i+1))\n\n self.savefig(keyName='currSol',fullName='solution')\n\n def calcI(self,*args,**kwargs):\n pass\n\n def calcF(self,*args,**kwargs):\n pass\n\n#%% RESTORATION-WISE METHODS\n\n def rest(self,*args,**kwargs):\n rest_sgra.rest(self,*args,**kwargs)\n\n def calcStepRest(self,*args,**kwargs):\n return rest_sgra.calcStepRest(self,*args,**kwargs)\n\n def calcP(self,*args,**kwargs):\n return rest_sgra.calcP(self,*args,**kwargs)\n\n def updtHistP(self,alfa,mustPlotPint=False):\n\n NIterRest = self.NIterRest+1\n\n P,Pint,Ppsi = self.calcP(mustPlotPint=mustPlotPint)\n self.P = P\n self.histP[NIterRest] = P\n self.histPint[NIterRest] = Pint\n self.histPpsi[NIterRest] = Ppsi\n self.histStepRest[NIterRest] = alfa\n self.NIterRest = NIterRest\n\n def showHistP(self):\n IterRest = numpy.arange(0,self.NIterRest+1,1)\n\n if (self.histP[IterRest] > 0).any():\n plt.semilogy(IterRest,self.histP[IterRest],'b',label='P')\n\n if (self.histPint[IterRest] > 0).any():\n plt.semilogy(IterRest,self.histPint[IterRest],'k',label='P_int')\n\n if (self.histPpsi[IterRest] > 0).any():\n plt.semilogy(IterRest,self.histPpsi[IterRest],'r',label='P_psi')\n\n plt.plot(IterRest,self.tol['P']+0.0*IterRest,'-.b',label='tolP')\n plt.title(\"Convergence report on P\")\n plt.grid(True)\n plt.xlabel(\"Rest iterations\")\n plt.ylabel(\"P values\")\n plt.legend(loc=\"upper left\", bbox_to_anchor=(1,1))\n\n self.savefig(keyName='histP',fullName='P')\n\n#%% GRADIENT-WISE METHODS\n\n def grad(self,*args,**kwargs):\n grad_sgra.grad(self,*args,**kwargs)\n\n def calcStepGrad(self,*args,**kwargs):\n return grad_sgra.calcStepGrad(self,*args,**kwargs)\n\n def calcQ(self,*args,**kwargs):\n return grad_sgra.calcQ(self,*args,**kwargs)\n\n def plotQRes(self,args):\n return grad_sgra.plotQRes(self,args)\n\n def plotF(self,*args,**kwargs):\n return grad_sgra.plotF(self,*args,**kwargs)\n\n def updtHistQ(self,alfa,mustPlotQs=False):\n \"\"\" Updates the history of Qs, Is and gradStep. \"\"\"\n\n NIterGrad = self.NIterGrad+1\n\n Q,Qx,Qu,Qp,Qt = self.calcQ(mustPlotQs=mustPlotQs)\n self.Q = Q\n self.histQ[NIterGrad] = Q\n self.histQx[NIterGrad] = Qx\n self.histQu[NIterGrad] = Qu\n self.histQp[NIterGrad] = Qp\n self.histQt[NIterGrad] = Qt\n self.histStepGrad[NIterGrad] = alfa\n\n I,Iorig,Ipf = self.calcI()\n self.histI[NIterGrad] = I\n self.histIorig[NIterGrad] = Iorig\n self.histIpf[NIterGrad] = Ipf\n self.I = I\n\n self.NIterGrad = NIterGrad\n\n\n def showHistQ(self):\n IterGrad = numpy.arange(1,self.NIterGrad+1,1)\n\n if (self.histQ[IterGrad] > 0).any():\n plt.semilogy(IterGrad,self.histQ[IterGrad],'b',label='Q')\n\n if (self.histQx[IterGrad] > 0).any():\n plt.semilogy(IterGrad,self.histQx[IterGrad],'k',label='Qx')\n\n if (self.histQu[IterGrad] > 0).any():\n plt.semilogy(IterGrad,self.histQu[IterGrad],'r',label='Qu')\n\n if (self.histQp[IterGrad] > 0).any():\n plt.semilogy(IterGrad,self.histQp[IterGrad],'g',label='Qp')\n\n if (self.histQt[IterGrad] > 0).any():\n plt.semilogy(IterGrad,self.histQt[IterGrad],'y',label='Qt')\n\n plt.plot(IterGrad,self.tol['Q']+0.0*IterGrad,'-.b',label='tolQ')\n plt.title(\"Convergence report on Q\")\n plt.grid(True)\n plt.xlabel(\"Grad iterations\")\n plt.ylabel(\"Q values\")\n plt.legend(loc=\"upper left\", bbox_to_anchor=(1,1))\n\n self.savefig(keyName='histQ',fullName='Q convergence history')\n\n def showHistI(self):\n IterGrad = numpy.arange(1,self.NIterGrad+1,1)\n\n plt.title(\"Convergence report on I\")\n plt.semilogy(IterGrad,self.histI[IterGrad],label='I')\n plt.semilogy(IterGrad,self.histIorig[IterGrad],label='Iorig')\n plt.semilogy(IterGrad,self.histIpf[IterGrad],label='Ipf')\n plt.grid(True)\n plt.xlabel(\"Grad iterations\")\n plt.ylabel(\"I values\")\n plt.legend()\n\n self.savefig(keyName='histI',fullName='I convergence history')\n\n def showHistGradStep(self):\n IterGrad = numpy.arange(1,self.NIterGrad+1,1)\n\n plt.title(\"Gradient step history\")\n plt.semilogy(IterGrad,self.histStepGrad[IterGrad])\n plt.grid(True)\n plt.xlabel(\"Grad iterations\")\n plt.ylabel(\"Step values\")\n\n self.savefig(keyName='histGradStep',fullName='GradStep convergence history')\n\n def showHistGRrate(self):\n IterGrad = numpy.arange(1,self.NIterGrad+1,1)\n\n if (self.histGRrate[IterGrad] > 0).any():\n plt.title(\"Gradient-restoration rate history\")\n plt.semilogy(IterGrad,self.histGRrate[IterGrad])\n plt.grid(True)\n plt.xlabel(\"Grad iterations\")\n plt.ylabel(\"Restorations per grad step\")\n\n self.savefig(keyName='histGRrate',fullName='Grad-Rest rate history')\n\n#%% LMPBVP\n\n def LMPBVP(self,rho=0.0,isParallel=False):\n\n helper = LMPBVPhelp(self,rho)\n\n if isParallel:\n pool = Pool()\n res = pool.map(helper.propagate,range(self.Ns+1))\n pool.close()\n pool.join()\n else:\n if rho>0.5:\n self.log.printL(\"\\nRunning GRAD in sequential (non-parallel) mode...\\n\")\n else:\n self.log.printL(\"\\nRunning REST in sequential (non-parallel) mode...\\n\")\n res = list()\n for j in range(self.Ns+1):\n res.append(helper.propagate(j))\n\n A,B,C,lam,mu = helper.getCorr(res,self.log)\n\n return 
A,B,C,lam,mu\n","repo_name":"munizlgmn/SOAR","sub_path":"sgra.py","file_name":"sgra.py","file_ext":"py","file_size_in_byte":34431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"1864831226","text":"from te import tvm\nfrom .util import Compare\nfrom .util import ceil_div\n\n\nclass AttachMap:\n \"\"\"docstring for AttachMap\"\"\"\n def __init__(self):\n # axis:stage\n self._parent_stages = dict()\n self._attached_path = dict()\n\n def record_attach(self, stage, scope):\n \"\"\"\n record the attach situation of stage\n\n Parameters\n ----------\n stage : the processing of compute\n scope : means axis\n\n Returns\n -------\n None\n \"\"\"\n if self._attached_path.get(scope) is None:\n self._attached_path[scope] = [stage]\n else:\n self._attached_path[scope].append(stage)\n\n def follow_with(self, stage, parent_stage, scope):\n \"\"\"\n update the _parent_stages, record the attach\n\n Parameters\n ----------\n stage : the processing of compute\n parent_stage : the parent tensor\n scope : means axis\n\n Returns\n -------\n scope : the attached axis\n \"\"\"\n self._parent_stages[scope] = parent_stage\n self.record_attach(stage, scope)\n return scope\n\n def record_same_attach(self, stage, ref_stage):\n \"\"\"\n attach the stage same as ref_stage\n\n Parameters\n ----------\n stage : the processing of compute\n ref_stage : the reference tensor\n\n Returns\n -------\n None\n \"\"\"\n for scope, s_list in self._attached_path.items():\n if ref_stage in s_list:\n self.record_attach(stage, scope)\n return scope\n return None\n\n def update_scope(self, scope, new_scope):\n \"\"\"\n replace the scope by new_scope\n\n Parameters\n ----------\n scope : the old axis\n new_scope : the new axis\n\n Returns\n -------\n None\n \"\"\"\n if scope == new_scope:\n return\n child_stages = self._attached_path.get(scope)\n if child_stages is None:\n return\n\n attached_path = self._attached_path\n if attached_path.get(new_scope) is None:\n attached_path[new_scope] = child_stages\n else:\n attached_path[new_scope].extend(child_stages)\n\n self._attached_path.pop(scope)\n\n self._parent_stages[new_scope] = self._parent_stages[scope]\n return\n\n def apply(self):\n \"\"\"\n begin the compute_at operation\n according to _parent_stages and _attached_path\n\n Parameters\n ----------\n\n Returns\n -------\n None\n \"\"\"\n for scope, array_stages in self._attached_path.items():\n parent = self._parent_stages[scope]\n pre_scope = None\n for axis in parent.leaf_iter_vars:\n if axis == scope:\n break\n pre_scope = axis\n if pre_scope is not None:\n for stage in array_stages:\n stage.compute_at(parent, pre_scope)\n\n @property\n def attached_path(self):\n \"\"\"\n get the _attached_path\n\n Parameters\n ----------\n\n Returns\n -------\n None\n \"\"\"\n return self._attached_path\n\n @property\n def parent_stages(self):\n \"\"\"\n get the _parent_stages\n\n Parameters\n ----------\n\n Returns\n -------\n None\n \"\"\"\n return self._parent_stages\n\n\nclass ScopeManager:\n \"\"\"docstring for ScopeManager\n keep active tensor same in whole schedule\n \"\"\"\n\n def __init__(self, stage):\n # super(ScopeManager, self).__init__()\n self._stage = stage\n self._axis_unit = dict()\n self._active_scopes = list()\n self._origin_axis = list()\n self._last_attached = None\n self._scope_intrinsic = None\n if len(stage.leaf_iter_vars) != len(stage.all_iter_vars):\n raise RuntimeError(\"Op should be init before schedule\")\n for axis in stage.leaf_iter_vars:\n self._axis_unit[axis] = [1, axis.dom.extent.value]\n self._active_scopes.append(axis)\n self._origin_axis.append(axis)\n\n @property\n def op(self):\n \"\"\"\n get stage's op\n \"\"\"\n return self._stage.op\n\n @property\n def 
origin_axis(self):\n \"\"\"\n get stage's _origin_axis\n \"\"\"\n return self._origin_axis\n\n @property\n def scope_intrinsic(self):\n \"\"\"\n get stage's _scope_intrinsic\n \"\"\"\n return self._scope_intrinsic\n\n @property\n def last_attached(self):\n \"\"\"\n get stage's _last_attached\n \"\"\"\n return self._last_attached\n\n def reused_by(self, *args):\n \"\"\"\n stage's original reused_by function\n \"\"\"\n self._stage.reused_by(*args)\n\n def split(self, parent, factor=None, nparts=None):\n \"\"\"\n apply split to scope\n\n Parameters\n ----------\n parent : Itervar\n factor : int\n nparts : int\n\n Returns\n -------\n outer : Itervar\n inner : Itervar\n\n \"\"\"\n\n if factor is None and nparts is None:\n raise RuntimeError(\"factor and nparts can not both be None\")\n\n if self._axis_unit.get(parent) is None:\n raise RuntimeError(\"parent scope can not be None\")\n\n unit, extent = self._axis_unit[parent]\n if nparts is not None:\n outer, inner = self._stage.split(parent, nparts=nparts)\n factor = ceil_div(extent, nparts)\n self._axis_unit[inner] = [unit, factor]\n self._axis_unit[outer] = [factor * unit, nparts]\n if parent in self._active_scopes: # not else\n self._update_active_scope(parent, inner)\n else:\n outer, inner = self._stage.split(parent, factor=factor)\n self._axis_unit[inner] = [unit, factor]\n self._axis_unit[outer] = [unit * factor, ceil_div(extent, factor)]\n if parent in self._active_scopes: # not else\n self._update_active_scope(parent, outer)\n return outer, inner\n\n def reorder(self, *args):\n \"\"\"\n stage's reorder function\n checks that the axes in args are valid;\n args does not need to contain every axis in this scope\n \"\"\"\n visited_scope = set()\n scopes = list(args)\n leaf_ivars = self._stage.leaf_iter_vars\n valid_scopes = list()\n scopes.reverse()\n for axis in scopes:\n if axis is not None \\\n and axis not in visited_scope \\\n and axis in leaf_ivars:\n valid_scopes.append(axis)\n visited_scope.add(axis)\n if len(valid_scopes) <= 1:\n return\n valid_scopes.reverse()\n self._stage.reorder(*valid_scopes)\n\n def double_buffer(self):\n \"\"\"\n stage's original double_buffer function\n \"\"\"\n self._stage.double_buffer()\n\n def unroll(self, var):\n \"\"\"\n stage's original unroll function\n \"\"\"\n self._stage.unroll(var)\n\n def buffer_align(self, *arg):\n \"\"\"\n stage's original buffer_align function\n \"\"\"\n self._stage.buffer_align(*arg)\n\n def buffer_tile(self, *arg):\n \"\"\"\n stage's original buffer_tile function\n \"\"\"\n self._stage.buffer_tile(*arg)\n\n def pragma(self, var, pragma_type, pragma_value=None):\n \"\"\"\n stage's original pragma function\n \"\"\"\n self._stage.pragma(var, pragma_type, pragma_value)\n\n def storage_align(self, axis, factor, offset):\n \"\"\"\n stage's original storage_align function\n \"\"\"\n # fixed: this used to call pragma() by mistake\n self._stage.storage_align(axis, factor, offset)\n\n def get_active_scope_and_unit(self):\n \"\"\"\n get _active_scopes and unit_list\n\n Returns\n -------\n _active_scopes : the scopes(axis) now used\n unit_list : the split part of axis\n \"\"\"\n if not self._check_active_scopes(self._active_scopes):\n raise RuntimeError(\"active itervar should be in leaf_iter_vars\")\n unit_list = list()\n for axis in self._active_scopes:\n unit, _ = self._axis_unit[axis]\n unit_list.append(unit)\n\n return self._active_scopes, unit_list\n\n def get_active_scopes(self):\n \"\"\"\n get _active_scopes\n _active_scopes: the scopes(axis) now used\n \"\"\"\n if not self._check_active_scopes(self._active_scopes):\n raise RuntimeError(\"active itervar should be in leaf_iter_vars\")\n return self._active_scopes\n\n def _update_active_scope(self, ax_before, ax_after):\n active_scopes = self._active_scopes\n index = active_scopes.index(ax_before)\n active_scopes[index] = ax_after\n\n def nlast_scopes(self, n_scope):\n \"\"\"\n get the n last axes of this stage\n \"\"\"\n if n_scope <= 0:\n raise ValueError(\"n_scope must be > 0\")\n\n leaf_ivars = list(self._stage.leaf_iter_vars)\n if n_scope > len(leaf_ivars):\n raise ValueError(\"n_scope must be less than or equal to len(leaf_ivars)\")\n return leaf_ivars[-n_scope::]\n\n def intrin_scopes(self, nlast=0):\n \"\"\"\n developed for mmad:\n split the scope axes, reorder them,\n and return the nlast axes for emit_insn\n \"\"\"\n n_scope_intrin = len(self._origin_axis)\n nlast = n_scope_intrin if nlast == 0 else nlast\n\n if nlast < 0 or nlast > len(self._origin_axis):\n raise RuntimeError(\"nlast must be >= 0 and <= %d\" %\n len(self._origin_axis))\n # find the first split parent-child scope\n axis_mapping = dict()\n for relation in self._stage.relations:\n if not isinstance(relation, tvm.schedule.Split):\n continue\n if relation.parent in self._origin_axis:\n axis_mapping[relation.inner] = relation.parent\n # get the order of outer and intrin scopes\n leaf_ivars = list(self._stage.leaf_iter_vars)\n outer_ivars = list()\n inner_ivars = list(self._origin_axis)\n for scope in leaf_ivars:\n if scope in inner_ivars:\n continue\n parent = axis_mapping.get(scope)\n if parent is None: # not an intrin scope\n outer_ivars.append(scope)\n else: # scope is an intrin scope\n if parent not in inner_ivars:\n raise RuntimeError(\"parent scope should be in inner_ivars\")\n offset = inner_ivars.index(parent)\n inner_ivars[offset] = scope\n order_kept_axis = outer_ivars + inner_ivars\n self._stage.reorder(*order_kept_axis)\n self._scope_intrinsic = inner_ivars[0]\n return order_kept_axis[-nlast::]\n\n def bind_core(self, scope_list, core_num_list):\n \"\"\"\n bind core: make better use of the chip's cores;\n finally fuse all the outer axes\n\n Parameters\n ----------\n scope_list : the list of axes to bind\n core_num_list : the list of core counts to use\n\n Returns\n -------\n axis_to_bind : the axis that was bound\n\n \"\"\"\n if not isinstance(scope_list, (list, tuple)):\n scope_list = [scope_list]\n if not isinstance(core_num_list, (list, tuple)):\n core_num_list = [core_num_list]\n if not scope_list: # len(scope_list) == 0\n raise RuntimeError(\"at least one axis is needed to bind\")\n if len(scope_list) != len(core_num_list):\n raise RuntimeError(\n \"len of scope_list and core_num_list should be same\")\n\n if not self._check_active_scopes(scope_list):\n raise RuntimeError(\"axis should be in leaf_iter_vars\")\n\n old_leaf_ivars = list(self._stage.leaf_iter_vars)\n axis_outers = list()\n max_index = 0\n for axis, core_num in zip(scope_list, core_num_list):\n axo, axi = self.split(axis, nparts=core_num)\n index = old_leaf_ivars.index(axis)\n old_leaf_ivars[index] = axi\n max_index = max(max_index, index)\n axis_outers.append(axo)\n\n reorder_list = axis_outers + old_leaf_ivars[0:max_index + 1:]\n self._stage.reorder(*reorder_list)\n\n block = tvm.thread_axis(\"blockIdx.x\")\n if len(axis_outers) > 1:\n axis_to_bind = self._stage.fuse(*axis_outers)\n else: # len(axis_outers) is 1\n axis_to_bind = axis_outers[0]\n self._stage.bind(axis_to_bind, block)\n return axis_to_bind\n\n def get_superkernel_axis_pragma(self):\n \"\"\"\n get the axis that superkernel used to pragma\n \"\"\"\n leaf_ivars = self._stage.leaf_iter_vars\n return leaf_ivars[1]\n\n def get_relate_scope(self, scope_key, scope_end=None):\n \"\"\"\n get the axes whose names contain scope_key\n \"\"\"\n scope_list = list()\n for scope in self._stage.leaf_iter_vars:\n if (scope_end is not None) and (scope == scope_end):\n break\n if scope.var.name.find('{}{}'\\\n .format(scope_key.var.name, '.')) == 0:\n scope_list.append(scope)\n return scope_list\n\n def emit_insn(self, scope, value, attrs=None):\n \"\"\"\n stage's original emit_insn function;\n difference is the default axis to emit insn\n \"\"\"\n if self._scope_intrinsic is None:\n self._scope_intrinsic = scope\n self._stage.emit_insn(scope, value, attrs)\n\n def set_last_attached(self, scope):\n \"\"\"\n set stage's _last_attached\n \"\"\"\n self._last_attached = scope\n\n def _check_active_scopes(self, ax_list):\n \"\"\"\n check if every axis is in the stage's leaf_iter_vars\n \"\"\"\n leaf_ivars = list(self._stage.leaf_iter_vars)\n for axis in ax_list:\n if axis not in leaf_ivars:\n return False\n return True\n\n\nclass ScheduleAgent:\n \"\"\"docstring for ScheduleAgent\"\"\"\n\n def __init__(self, sch):\n self._sch = sch\n self._attach_map = AttachMap()\n # key=op, active op other than origin_ops\n self._scope_managers = dict()\n\n def __getitem__(self, tensor):\n \"\"\"\n get scope manager of input tensor\n\n Parameters\n ----------\n tensor : Tensor\n\n Returns\n -------\n scope_manager\n\n \"\"\"\n if isinstance(tensor, tvm.tensor.Tensor):\n key = tensor.op\n else:\n key = tensor\n if self._scope_managers.get(key) is None:\n self._scope_managers[key] = ScopeManager(self._sch[key])\n return self._scope_managers[key]\n\n def same_attach(self, tensor_a, tensor_b):\n \"\"\"\n attach tensor_a at the same scope where tensor_b is attached\n\n Parameters\n ----------\n tensor_a : Tensor\n tensor_b : Tensor\n\n Returns\n -------\n\n \"\"\"\n\n sch = self._sch\n return self._attach_map.record_same_attach(sch[tensor_a],\n sch[tensor_b])\n\n def attach_at(self, tensor, parent, affine_shape):\n \"\"\"\n attach tensor to parent according to the affine_shape\n\n Parameters\n ----------\n tensor : Tensor\n parent : Tensor\n affine_shape: shape of tensor affine to parent\n\n Returns\n -------\n the scope that tensor follow with\n \"\"\"\n # attach_map = self._attach_map\n scopes = self[parent]\n ax_list, unit = scopes.get_active_scope_and_unit()\n if len(affine_shape) != len(ax_list):\n raise RuntimeError(\"len(affine_shape) should be equal to \"\n \"len(shape)+len(reduce_axis) of {} \"\\\n .format(parent))\n factor_list = list(\n ceil_div(i, j) if i is not None else None\n for i, j in zip(affine_shape, unit)\n )\n axis_outer = list()\n axis_intrinsic = list()\n axis_ori_unrelate = list()\n\n def start_attach(factor_list, ax_list):\n origin_axis = scopes.origin_axis\n for factor, axis in zip(factor_list, ax_list):\n if factor is not None and (factor > 1 or axis in origin_axis):\n axo, axi = scopes.split(axis, factor=factor)\n self._attach_map.update_scope(axis, axi)\n axis_outer.append(axo)\n axis_intrinsic.append(axi)\n elif axis in origin_axis:\n axis_ori_unrelate.append(axis)\n else:\n axis_outer.append(axis)\n start_attach(factor_list, ax_list)\n scope_attach = None\n if axis_intrinsic: # len(axis_intrinsic) > 0:\n reorder_list = axis_outer + axis_ori_unrelate + axis_intrinsic\n self._sch[parent].reorder(*reorder_list)\n scope_attach = axis_intrinsic[0]\n self._attach_map.follow_with(self._sch[tensor],\n self._sch[parent],\n scope_attach)\n self[parent].set_last_attached(scope_attach)\n elif axis_outer: # len(axis_outer) > 0:\n 
scope_attach = self[parent].last_attached\n if scope_attach is not None: # no else\n self._attach_map.record_attach(self._sch[tensor],\n scope_attach)\n else:\n pass\n return scope_attach\n\n def root_stage_at(self, parent, scope):\n \"\"\"\n parent: parent stage\n scope: scope\n \"\"\"\n stage_array = self._sch.stages\n parent_stage = self._sch[parent.op]\n for stage in stage_array:\n if stage == parent_stage:\n continue\n if stage.attach_type == 1:\n stage.compute_at(parent_stage, scope)\n\n def apply(self):\n '''\n apply the attach path\n '''\n\n attach_map = self._attach_map\n parent_stages = list(set(attach_map.parent_stages.values()))\n remain_scopes = set(attach_map.attached_path.keys())\n for parent in parent_stages:\n scope_intrinsic = self[parent.origin_op].scope_intrinsic\n if scope_intrinsic is None:\n continue\n leaf_ivars = list(parent.leaf_iter_vars)\n index = leaf_ivars.index(scope_intrinsic)\n un_attachable_scopes = leaf_ivars[index + 1:]\n for scope in list(remain_scopes):\n if scope in un_attachable_scopes:\n attach_map.update_scope(scope, scope_intrinsic)\n remain_scopes.remove(scope)\n self._attach_map.apply()\n\n def pattern_abc(self,\n status,\n tensor_a,\n tensor_b,\n affine_shape_to_b,\n tensor_c,\n affine_shape_to_c):\n \"\"\"\n attach tensor_a to tensor_b\n or\n attach tensor_a to tensor_c\n according to the status\n \"\"\"\n if status is None:\n return None\n if isinstance(status, (list, tuple)):\n return None\n\n attach = None\n if status == Compare.EQUAL:\n attach = self.same_attach(tensor_a, tensor_b)\n elif status == Compare.LESS_EQ:\n attach = self.attach_at(tensor_a, tensor_b, affine_shape_to_b)\n elif status == Compare.GREATE_EQ:\n attach = self.attach_at(tensor_a, tensor_c, affine_shape_to_c)\n else:\n raise RuntimeError(\"tiling shape of {} shouldn't be both less \"\n \"and greater than {}\"\\\n .format(tensor_a, tensor_b))\n return attach\n","repo_name":"jizhuoran/caffe-huawei-atlas-convertor","sub_path":"convertor/huawei/te/lang/cce/boost_schedule_kit/schedule_agent.py","file_name":"schedule_agent.py","file_ext":"py","file_size_in_byte":19882,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
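AttachMap.apply() above performs compute_at one axis before each recorded scope, found by walking parent.leaf_iter_vars. A dependency-free sketch of that "previous leaf" lookup, with plain strings standing in for TVM itervars (illustrative only):

def prev_leaf(leaf_iter_vars, scope):
    # Mirrors the loop in apply(): stop at `scope`, remember the axis just before it.
    pre = None
    for axis in leaf_iter_vars:
        if axis == scope:
            break
        pre = axis
    return pre

leaves = ["b.outer", "b.inner", "k"]
assert prev_leaf(leaves, "b.inner") == "b.outer"
assert prev_leaf(leaves, "b.outer") is None  # nothing before the first axis, so no compute_at happens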
+{"seq_id":"41115169984","text":"import sys\nimport zmq\nimport asyncio\n\nport = \"5556\" # for now default port for publishers is 5556\n\n# if len(sys.argv) > 1:\n# port = sys.argv[1]\n# int(port)\n\n\n# put() publishes a message on a topic\n\n#subscribe() subscribes a topic\n#unsubscribe() unsubscribes a topic\n\ndef put(message, topic = \"NULL\"):\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://localhost:{}\".format(port))\n socket.send(\"{}\".format(message).encode())\n # socket.close() is called when garbage collection happens but we should still call it\n return\n\ndef main_loop():\n #this loop will listen to keyboard inputs and proceed accordingly\n put(\"teste\")\n\n \n return\n\nmain_loop()\n","repo_name":"GustavoSena/SDLE-FEUP","sub_path":"publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12165903781","text":"\nfrom caliop.core.raw import RawMail\nfrom caliop.core.contact import ContactLookup, Recipient\nfrom caliop.core.user import User, UserMessage\nfrom caliop.core.message import Message\nfrom caliop.core.mail import MailMessage\n\nfrom caliop.helpers.log import log\n\n\nclass UserMessageDelivery(object):\n\n def _resolve_user_contacts(self, user, msg):\n \"\"\"Find all contacts known in the mail\"\"\"\n contacts = []\n for type, recips in msg.recipients.iteritems():\n for addr, real_addr in recips:\n if addr != user.user_id:\n log.debug('Try to resolve contact %s' % addr)\n contact = ContactLookup.get(user, addr)\n contacts.append(Recipient(contact, real_addr, type))\n return contacts\n\n def _get_tags(self, user, mail):\n \"\"\"We eval user rules to get all tags for a mail\"\"\"\n tags = []\n if not user.rules:\n return []\n for rule in user.rules:\n res, stop = rule.eval(mail)\n if res:\n tags.extend(res)\n if stop:\n break\n return tags\n\n def process(self, user_id, message_id):\n user = User.get(user_id)\n msg = RawMail.get(message_id)\n mail = MailMessage(msg.data)\n contacts = self._resolve_user_contacts(user, mail)\n tags = self._get_tags(user, mail)\n sec_level = (mail.transport_security_level + \\\n mail.content_security_level) / 2\n user_msg = UserMessage(user, mail, sec_level, contacts, tags, [])\n return Message.from_user_message(user_msg)\n","repo_name":"LaurentChemla/CaliOpen","sub_path":"caliop/caliop/core/deliver.py","file_name":"deliver.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"74665404104","text":"from fastapi.testclient import TestClient\nfrom main import app\nfrom queries.accounts import AccountRepository\n\nclient = TestClient(app)\n\naccount_out = {\n \"id\": 1,\n \"username\": \"user\"\n}\n\naccounts = [account_out]\n\n\nclass MockAccount:\n def get_all_accounts(self):\n return accounts\n\n\ndef test_accounts_list():\n app.dependency_overrides[AccountRepository] = MockAccount\n response = client.get('/accounts')\n\n assert response.status_code == 200\n assert response.json() == {'accounts': accounts}\n\n\napp.dependency_overrides = {}\n","repo_name":"michellexg/pan-plan","sub_path":"panplan-service/tests/test_accounts.py","file_name":"test_accounts.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41792288804","text":"import cv2\nimport numpy as np\n\nprint(\"hello world\")\n\ntesting = 12\n\n\nparams = cv2.SimpleBlobDetector_Params()\n\n# Change thresholds\nparams.minThreshold = 10\nparams.maxThreshold = 200\n\n\n# Filter by Area.\nparams.filterByArea = True\nparams.minArea = 3000\nparams.maxArea = 100000\n\n# Filter by Circularity\nparams.filterByCircularity = True\nparams.minCircularity = 0.1\n\n# Filter by Convexity\nparams.filterByConvexity = False\nparams.minConvexity = 0.87\n\n# Filter by Inertia\nparams.filterByInertia = False\nparams.minInertiaRatio = 0.01\n\ncamera = cv2.VideoCapture(0)\nprint(camera.isOpened())\ndetector = cv2.SimpleBlobDetector_create(params)\n\n\n\nfor x in range(1000):\n f = None\n ret, image = camera.read(cv2.IMREAD_GRAYSCALE)\n \n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # cv2.imwrite(\"trial.png\",image) \n # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n \n blobs = detector.detect(image)\n\n\n mask = cv2.inRange(hsv, (0, 20, 0), (90, 255,90))\n\n ## slice the green\n imask = mask>0\n green = np.zeros_like(image, np.uint8)\n green[imask] = image[imask]\n # green = image\n\n keypoints = detector.detect(green)\n \n print(keypoints)\n for x in keypoints:\n print(x.size)\n\n im_with_keypoints = cv2.drawKeypoints(green, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n \n cv2.imshow(\"image\", im_with_keypoints) \n ## save \n # cv2.imshow(\"green.png\", gray)\n\n\n\n \n\n \n \n # circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 20)\n\n # green = cv2.drawKeypoints(green, blobs, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n # cv2.imshow(\"green.png\", green)\n\n cv2.waitKey(1)\n\n","repo_name":"justafakeusername/racecar_localizer","sub_path":"localizer.py","file_name":"localizer.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20651863858","text":"import seaborn as sns\nimport colorsys\nfrom flask import Flask\nfrom flask import request\n\nimport ast\n\napp = Flask(__name__)\n\n\n@app.route(\"/backend/time\")\ndef getTime():\n import time\n\n # No need to jsonify\n return {\"time\": time.time()}\n\n\n@app.route(\"/backend/formAPI\", methods=[\"GET\", \"POST\"])\ndef formAPI():\n if request.method == \"GET\":\n print(\"request.args =\", request.args)\n dim = request.args.get(\"dim\")\n size = request.args.get(\"size\")\n sV = request.args.get(\"sV\")\n\n # Implement backend logic here\n\n # Implement SQL logic here\n\n # No need to jsonify\n return {\"dim\": dim, \"size\": size, \"sV\": sV}\n\n elif request.method == \"POST\":\n dict_str = request.data.decode(\"UTF-8\")\n postData = ast.literal_eval(dict_str)\n\n inputParams = postData.get(\"params\")\n\n dim = inputParams.get(\"dim\")\n size = inputParams.get(\"size\")\n sV = inputParams.get(\"sV\")\n\n # Implement backend logic here\n\n # Implement SQL logic here\n\n # No need to jsonify\n return {\"dim\": dim, \"size\": size, \"sV\": sV}\n\n\n###########################\n## Generate Shift Vector ##\n###########################\n@app.route('/backend/getSV', methods=['GET', 'POST'])\ndef getSV():\n if request.method == 'GET':\n dim = int(request.args.get('dim'))\n sC = float(request.args.get('sC'))\n sM = (request.args.get('sM'))\n\n sV = genShiftVector(sM, dim, sC)\n\n return {'sV': sV}\n\n elif request.method == 'POST':\n\n dict_str = request.data.decode(\"UTF-8\")\n postData = ast.literal_eval(dict_str)\n\n inputParams = postData.get(\"params\")\n\n dim = int(inputParams.get(\"dim\"))\n sC = float(inputParams.get(\"sC\"))\n sM = inputParams.get(\"sM\")\n\n sV = genShiftVector(sM, dim, sC)\n\n return {'sV': sV}\n\n\n#######################\n## Generate Vertices ##\n#######################\n@app.route('/backend/getV', methods=['GET', 'POST'])\ndef getV():\n if request.method == 'GET':\n dim = float(request.args.get('dim'))\n size = float(request.args.get('size'))\n sV = list(request.args.get('sV'))\n\n # Clean up invalid shift vector inputs\n # convert from string to list\n sV = sVStrToList(sV)\n\n # If too long remove until right length, if too short remove until right length\n # while len(sV) > dim:\n # sV.pop()\n # while len(sV) < dim:\n # sV.append(0)\n vertices = dict()\n for r in range(dim):\n for s in range(r+1, dim):\n for a in range(-size, size+1):\n for b in range(-size, size+1):\n vertices[f\"{r} {s} {a} {b}\"] = genVert(\n dim, sV, r, a, s, b)\n return {'vertices': vertices}\n\n elif request.method == 'POST':\n dict_str = request.data.decode(\"UTF-8\")\n\n postData = ast.literal_eval(dict_str)\n inputParams = postData.get(\"params\")\n\n sC = float(inputParams.get(\"sC\"))\n dim = int(inputParams.get(\"dim\"))\n size = int(inputParams.get(\"size\"))\n tileSize = int(inputParams.get('tileSize'))\n # tileSize = int(inputParams.get(\"tileSize\"))\n sM = inputParams.get(\"sM\")\n sV = genShiftVector(sM, dim, sC)\n\n # sV = list(inputParams.get(\"sV\"))\n # sV = sVStrToList(sV)\n\n vertices = {}\n for r in range(dim-1):\n for s in range(r+1, dim):\n for a in range(-size, size+1):\n for b in range(-size, size+1):\n vertices[f\"{r} {s} {a} {b}\"] = genVert(\n dim, sV, tileSize, r, a, s, b)\n return {'vertices': vertices}\n\n\n@app.route('/backend/getTV', methods=['GET', 'POST'])\ndef getTV():\n if request.method == 'GET':\n dim = float(request.args.get('dim'))\n sV = list(request.args.get('sV'))\n r = int(request.args.get('r'))\n s = 
int(request.args.get('s'))\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n\n # the GET caller does not send tileSize, so assume 1 (POST sends it explicitly)\n tileSize = int(request.args.get('tileSize', 1))\n\n # cast dim to int; the GET parser above leaves it as a float\n vert = genVert(int(dim), sV, tileSize, r, a, s, b)\n return {'vert': vert}\n\n elif request.method == 'POST':\n\n dict_str = request.data.decode(\"UTF-8\")\n postData = ast.literal_eval(dict_str)\n\n inputParams = postData.get(\"params\")\n\n dim = int(inputParams.get(\"dim\"))\n tileSize = int(inputParams.get('tileSize'))\n\n sV = [0]*dim\n\n r = int(inputParams.get('r'))\n s = int(inputParams.get('s'))\n a = int(inputParams.get('a'))\n b = int(inputParams.get('b'))\n\n vert = genVert(dim, sV, tileSize, r, a, s, b)\n return {'vert': vert}\n\n\ndef sVStrToList(sV):\n # First let's clean up the [] strings around the sV\n if '[' in sV:\n sV.remove('[')\n if ']' in sV:\n sV.remove(']')\n\n if type(sV) is list:\n # We currently have a list, so join it into a string and re-split on the ',' delimiter\n sV = ''.join(sV)\n sV = sV.split(',')\n sV = [cleanShiftStr(shift) for shift in sV]\n return sV\n return [69]*100 # fallback sentinel for malformed input\n\n\ndef cleanShiftStr(shiftStr):\n while shiftStr[0] == ' ':\n shiftStr = shiftStr[1:]\n while shiftStr[-1] == ' ':\n shiftStr = shiftStr[:-1]\n return float(shiftStr)\n\n\ndef genVert(dim, sV, tileSize, r, a, s, b):\n nV = [(-1)**((2/dim)*i) for i in range(dim)]\n if nV[s-r].imag == 0:\n kp = 1j*(nV[r]*(b-sV[s]) - nV[s]*(a-sV[r])) / 0.00001\n else:\n kp = 1j*(nV[r]*(b-sV[s]) - nV[s]*(a-sV[r])) / nV[s-r].imag\n\n k = [1+((kp/i).real+t)//1 for i, t in zip(nV, sV)]\n\n vertices = []\n for k[r], k[s] in [(a, b), (a+1, b), (a+1, b+1), (a, b+1)]:\n vSum = sum(x*t for t, x in zip(nV, k))\n vertices.append(vSum)\n return imagToReal(vertices, tileSize)\n\n\ndef imagToReal(vertices, tileSize):\n newVertices = []\n for vertex in vertices:\n scaledVert = tileSize*vertex\n newVertices.append((scaledVert.real, scaledVert.imag))\n return newVertices\n\n\ndef genShiftVector(sM, dim, sC):\n import math\n import random as rd\n bZ, bR, bH = False, False, False\n if sM == \"sZ\":\n bZ = True\n elif sM == \"sR\":\n bR = True\n elif sM == \"sH\":\n bH = True\n\n # All shifts set to zero\n if bZ:\n shiftVect = [0.0000001 for i in range(dim-1)]\n shiftVect.append(sC)\n return shiftVect\n # Take care of the 3-dimensional edge case\n if dim == 3:\n samplePopulation = list(range(1, 1000, 1))\n sample = rd.sample(samplePopulation, 2)\n samp1, samp2 = sample[0]/2, sample[1]/2\n final = sC - samp1 - samp2\n return [samp1, samp2, final]\n popMultiplier = 1\n samplePopulation = list(range(1, popMultiplier*dim))\n # Even dimensions (author note: something is going wrong here)\n if dim % 2 == 0:\n lB = math.floor((dim/2)-1)\n samp = rd.sample(samplePopulation, lB)\n samp.sort()\n sV = []\n for i in range(lB):\n if bH:\n if bR:\n sV.append(samp[i]/2)\n sV.append(-samp[i]/2)\n else:\n sV.append((i+1)/2)\n sV.append(-(i+1)/2)\n else:\n if bR:\n sV.append(samp[i]/(dim+5))\n sV.append(-samp[i]/(dim+5))\n else:\n sV.append((i+1)/dim)\n sV.append(-(i+1)/dim)\n sV.append((1/3)*sC)\n sV.append((2/3)*sC)\n if bH:\n sV = [sv*0.2 for sv in sV]\n sV = [sv-0.000001 for sv in sV]\n return sV\n # Odd dimensions\n else:\n lB = math.floor((dim-1)/2)\n uB = math.ceil((dim-1)/2)\n samp = rd.sample(samplePopulation, lB)\n samp.sort()\n sV = []\n for i in range(lB):\n if bH:\n if not bR:\n sV.append((i+1)/2)\n sV.append(-(i+1)/2)\n else:\n sV.append(samp[i]/2)\n sV.append(-samp[i]/2)\n else:\n if not bR:\n sV.append((i+1)/dim)\n sV.append(-(i+1)/dim)\n else:\n sV.append(samp[i]/(dim+5))\n sV.append(-samp[i]/(dim+5))\n if lB != uB:\n sV += [0.0]\n sV += [sC]\n return sV\n\n\n#############################################\n## Generate Adjacency Matrix Of Tile Types ##\n#############################################\n@app.route(\"/backend/getttm\", methods=[\"GET\", \"POST\"])\ndef getttm():\n if request.method == \"GET\":\n dim = int(request.args.get(\"dim\"))\n\n ttm = genttm(dim)\n\n # Implement backend logic here\n\n # Implement SQL logic here\n\n # No need to jsonify\n return {\"ttm\": ttm}\n\n elif request.method == \"POST\":\n dict_str = request.data.decode(\"UTF-8\")\n postData = ast.literal_eval(dict_str)\n\n inputParams = postData.get(\"params\")\n\n dim = int(inputParams.get(\"dim\"))\n\n ttm = genttm(dim)\n\n # Implement backend logic here\n\n # Implement SQL logic here\n\n # No need to jsonify\n return {\"ttm\": ttm}\n\n\ndef genttm(dim):\n # Intent: Find the tile type of all tiles\n if dim % 2 == 0:\n numTileTypes = int((dim/2)-1)\n else:\n numTileTypes = int((dim-1)/2)\n # create tile type adjacency matrix\n ttm = [[0 for x in range(dim)] for y in range(dim)]\n for x in range(dim):\n for y in range(1, numTileTypes+1):\n if x+y >= dim:\n ttm[x][(x+y)-dim] = y-1\n else:\n ttm[x][x+y] = y-1\n ttm[x][x-y] = y-1\n return ttm\n\n\n#####################\n## Generate Colors ##\n#####################\n@app.route(\"/backend/getColors\", methods=[\"GET\", \"POST\"])\ndef getColors():\n if request.method == \"GET\":\n # bool() on a query string is always True for non-empty values, so parse explicitly\n manualCols = request.args.get(\"manualCols\", \"\") in (\"1\", \"true\", \"True\")\n numCols = int(request.args.get(\"numCols\"))\n\n colors = genColors(manualCols, numCols)\n\n # Implement backend logic here\n\n # Implement SQL logic here\n\n # No need to jsonify\n return {\"colors\": colors}\n\n elif request.method == \"POST\":\n dict_str = request.data.decode(\"UTF-8\")\n postData = ast.literal_eval(dict_str)\n\n inputParams = postData.get(\"params\")\n\n numCols = int(inputParams.get(\"numCols\"))\n\n curCols = (inputParams.get(\"curCols\"))\n\n colors = genSpecColors(numCols, curCols)\n\n # Implement backend logic here\n\n # Implement SQL logic here\n\n # No need to jsonify\n return {\"colors\": colors}\n\n\ndef genColors(manualCols, numCols):\n # if manualCols or numCols > 19:\n if manualCols:\n # Manually create colors\n # (hue, saturation, value)\n hsvCols = [(x/numCols, 1, 0.75) for x in range(numCols)]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsvCols))\n colors = [[255*color[0], 255*color[1], 255*color[2]]\n for color in colors]\n else:\n # Classic colors\n # colors = sns.color_palette(\"bright\", numCols)\n # colors = sns.color_palette(\"husl\", numCols)\n # colors = sns.color_palette(\"cubehelix\", numCols)\n\n # Divergent colors\n colors = sns.color_palette(\"BrBG\", numCols)\n # colors = sns.color_palette(\"coolwarm\", numCols)\n\n # Gradient colors\n # colors = sns.cubehelix_palette(numCols, dark=0.1, light=0.9)\n return colors\n\n\ndef genSpecColors(numCols, colType):\n # if manualCols or numCols > 19:\n if colType == \"mc\":\n hsvCols = [(x/numCols, 1, 0.75) for x in range(numCols)]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsvCols))\n colors = [[255*color[0], 255*color[1], 255*color[2]]\n for color in colors]\n # CHP\n elif colType == \"chp\":\n colors = sns.cubehelix_palette(numCols)\n elif colType == \"chp_rnd4\":\n colors = sns.cubehelix_palette(numCols, rot=-.4)\n elif colType == \"chp_s2d8_rd1\":\n colors = sns.cubehelix_palette(numCols, start=2.8, rot=.1)\n # MPLP\n elif colType == \"mplp_GnBu_d\":\n colors = sns.mpl_palette(\"GnBu_d\", numCols)\n elif colType == \"mplp_seismic\":\n colors = 
sns.mpl_palette(\"seismic\", numCols)\n # CP_Misc\n elif colType == \"cp\":\n colors = sns.color_palette(n_colors=numCols)\n elif colType == \"cp_Accent\":\n colors = sns.color_palette(\"Accent\", n_colors=numCols)\n elif colType == \"cp_cubehelix\":\n colors = sns.color_palette(\"cubehelix\", n_colors=numCols)\n elif colType == \"cp_flag\":\n colors = sns.color_palette(\"flag\", n_colors=numCols)\n elif colType == \"cp_Paired\":\n colors = sns.color_palette(\"Paired\", n_colors=numCols)\n elif colType == \"cp_Pastel1\":\n colors = sns.color_palette(\"Pastel1\", n_colors=numCols)\n elif colType == \"cp_Pastel2\":\n colors = sns.color_palette(\"Pastel2\", n_colors=numCols)\n elif colType == \"cp_tab10\":\n colors = sns.color_palette(\"tab10\", n_colors=numCols)\n elif colType == \"cp_tab20\":\n colors = sns.color_palette(\"tab20\", n_colors=numCols)\n elif colType == \"cp_tab20c\":\n colors = sns.color_palette(\"tab20c\", n_colors=numCols)\n # CP_Rainbow\n elif colType == \"cp_gistncar\":\n colors = sns.color_palette(\"gist_ncar\", n_colors=numCols)\n elif colType == \"cp_gistrainbow\":\n colors = sns.color_palette(\"gist_rainbow\", n_colors=numCols)\n elif colType == \"cp_hsv\":\n colors = sns.color_palette(\"hsv\", n_colors=numCols)\n elif colType == \"cp_nipyspectral\":\n colors = sns.color_palette(\"nipy_spectral\", n_colors=numCols)\n elif colType == \"cp_rainbow\":\n colors = sns.color_palette(\"rainbow\", n_colors=numCols)\n # CP_Grad2\n elif colType == \"cp_afmhot\":\n colors = sns.color_palette(\"afmhot\", n_colors=numCols)\n elif colType == \"cp_autumn\":\n colors = sns.color_palette(\"autumn\", n_colors=numCols)\n elif colType == \"cp_binary\":\n colors = sns.color_palette(\"binary\", n_colors=numCols)\n elif colType == \"cp_bone\":\n colors = sns.color_palette(\"bone\", n_colors=numCols)\n elif colType == \"cp_cividis\":\n colors = sns.color_palette(\"cividis\", n_colors=numCols)\n elif colType == \"cp_cool\":\n colors = sns.color_palette(\"cool\", n_colors=numCols)\n elif colType == \"cp_copper\":\n colors = sns.color_palette(\"copper\", n_colors=numCols)\n elif colType == \"cp_hot\":\n colors = sns.color_palette(\"hot\", n_colors=numCols)\n elif colType == \"cp_inferno\":\n colors = sns.color_palette(\"inferno\", n_colors=numCols)\n elif colType == \"cp_magma\":\n colors = sns.color_palette(\"magma\", n_colors=numCols)\n elif colType == \"cp_mako\":\n colors = sns.color_palette(\"mako\", n_colors=numCols)\n elif colType == \"cp_plasma\":\n colors = sns.color_palette(\"plasma\", n_colors=numCols)\n elif colType == \"cp_PuBuGn\":\n colors = sns.color_palette(\"PuBuGn\", n_colors=numCols)\n elif colType == \"cp_Purples\":\n colors = sns.color_palette(\"Purples\", n_colors=numCols)\n elif colType == \"cp_RdPu\":\n colors = sns.color_palette(\"RdPu\", n_colors=numCols)\n elif colType == \"cp_rocket\":\n colors = sns.color_palette(\"rocket\", n_colors=numCols)\n elif colType == \"cp_spring\":\n colors = sns.color_palette(\"spring\", n_colors=numCols)\n elif colType == \"cp_summer\":\n colors = sns.color_palette(\"summer\", n_colors=numCols)\n elif colType == \"cp_viridis\":\n colors = sns.color_palette(\"viridis\", n_colors=numCols)\n elif colType == \"cp_winter\":\n colors = sns.color_palette(\"winter\", n_colors=numCols)\n elif colType == \"cp_Wistia\":\n colors = sns.color_palette(\"Wistia\", n_colors=numCols)\n elif colType == \"cp_YlOrRd\":\n colors = sns.color_palette(\"YlOrRd\", n_colors=numCols)\n # CP_Grad3\n elif colType == \"cp_BrBG\":\n colors = 
sns.color_palette(\"BrBG\", n_colors=numCols)\n elif colType == \"cp_brg\":\n colors = sns.color_palette(\"brg\", n_colors=numCols)\n elif colType == \"cp_bwr\":\n colors = sns.color_palette(\"bwr\", n_colors=numCols)\n elif colType == \"cp_CMRmap\":\n colors = sns.color_palette(\"CMRmap\", n_colors=numCols)\n elif colType == \"cp_gistearth\":\n colors = sns.color_palette(\"gist_earth\", n_colors=numCols)\n elif colType == \"cp_giststern\":\n colors = sns.color_palette(\"gist_stern\", n_colors=numCols)\n elif colType == \"cp_gnuplot\":\n colors = sns.color_palette(\"gnuplot\", n_colors=numCols)\n elif colType == \"cp_gnuplot2\":\n colors = sns.color_palette(\"gnuplot2\", n_colors=numCols)\n elif colType == \"cp_icefire\":\n colors = sns.color_palette(\"icefire\", n_colors=numCols)\n elif colType == \"cp_ocean\":\n colors = sns.color_palette(\"ocean\", n_colors=numCols)\n elif colType == \"cp_PiYG\":\n colors = sns.color_palette(\"PiYG\", n_colors=numCols)\n elif colType == \"cp_PRGn\":\n colors = sns.color_palette(\"PRGn\", n_colors=numCols)\n elif colType == \"cp_prism\":\n colors = sns.color_palette(\"prism\", n_colors=numCols)\n elif colType == \"cp_RdBu\":\n colors = sns.color_palette(\"RdBu\", n_colors=numCols)\n elif colType == \"cp_RdGy\":\n colors = sns.color_palette(\"RdGy\", n_colors=numCols)\n elif colType == \"cp_RdYlBu\":\n colors = sns.color_palette(\"RdYlBu\", n_colors=numCols)\n elif colType == \"cp_RdYlGn\":\n colors = sns.color_palette(\"RdYlGn\", n_colors=numCols)\n elif colType == \"cp_seismic\":\n colors = sns.color_palette(\"seismic\", n_colors=numCols)\n elif colType == \"cp_Spectral\":\n colors = sns.color_palette(\"Spectral\", n_colors=numCols)\n elif colType == \"cp_terrein\":\n colors = sns.color_palette(\"terrein\", n_colors=numCols)\n elif colType == \"cp_vlag\":\n colors = sns.color_palette(\"vlag\", n_colors=numCols)\n else:\n hsvCols = [(x/numCols, 1, 0.75) for x in range(numCols)]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsvCols))\n colors = [[255*color[0], 255*color[1], 255*color[2]]\n for color in colors]\n\n return colors\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"jcanedo279/ptv","sub_path":"backend/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":18275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"2811294282","text":"from mathutil import prime_under\r\n\r\n\r\ndef pow_mod(power, p):\r\n \"\"\"\r\n power mod for base 10\r\n in case power is too big\r\n\r\n given that base is 10, and p and 10 are coprime\r\n \"\"\"\r\n e = power % (p - 1)\r\n return pow(10, e, p)\r\n\r\n\r\ndef repunit_mod(p, degree=9):\r\n \"\"\"\r\n compute R(10 ** degree) % p\r\n \"\"\"\r\n curr_mod = 1111111111 % p\r\n for i in range(1, degree):\r\n curr_mod = curr_mod * sum([pow_mod(10 ** i * j, p) for j in range(10)]) % p\r\n return curr_mod\r\n\r\n\r\ndef main():\r\n first40 = []\r\n count = 0\r\n for p in prime_under(10 ** 6)[3:]: # exclude 2 and 5, which are not coprime with 10\r\n if repunit_mod(p) == 0:\r\n count += 1\r\n # print(count, p)\r\n first40.append(p)\r\n if count == 40:\r\n break\r\n # print(first40)\r\n print(sum(first40))\r\n\r\n\r\nif __name__ == '__main__':\r\n from time import time\r\n starting_time = time()\r\n main()\r\n print(\"Time elapsed:\", time() - starting_time, \"seconds\")\r\n","repo_name":"colinxy/ProjectEuler","sub_path":"Python/project_euler132.py","file_name":"project_euler132.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"31726405470","text":"from stats import run_algo\nfilename = 'result2.txt'\nimport os\n\nfrom collections import defaultdict\ndef parse_path_file(path_file_content):\n agents_path = defaultdict(list)\n agent = None\n lines = path_file_content.split('\\n')[1:]\n for line in lines:\n if line == '':\n break\n nums = list(map(int, line.split(' ')))\n if len(nums) == 1:\n agent = int(nums[0])\n else:\n x, y, time = nums\n agents_path[agent].append((x, y, time))\n return agents_path\n\ndef read_paths(filename=filename):\n with open(filename, 'r') as path_file:\n path_file_content = path_file.read()\n agents_path = parse_path_file(path_file_content)\n return dict(agents_path)\n\nimport tqdm\n# test num takes index in range(0, num_repetitions)\ndef success_rate(alg_name, map_name, scen_name, num_agents, num_repetitions, w=None):\n rate = 0\n for i in tqdm.tqdm(range(num_repetitions)):\n done = run_algo(alg=alg_name, dest_file=filename,\n map_path=os.path.join(os.curdir, 'data', 'maps', 'mapf', map_name),\n scen_path=os.path.join(os.curdir, 'data', 'scens', 'mapf', scen_name),\n tasks_count=num_agents,\n test_num=i, w=w)\n rate += int(done)\n return rate / num_repetitions\n\n# build success_rate stats\nrandom_map, random_scen = 'maze-32-32-2.map', 'maze-32-32-2-even-1.scen'\nrate_list = []\nN = 10\nfor num_actors in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:\n rate = success_rate('ECBS', random_map, random_scen, num_actors, N, w=1.1)\n print(f'Num actors :{num_actors}, Rate: {rate:.2f}%')\n if rate <= 0.1:\n print(\"too low rate, stopping here\")\n break\n rate_list.append(rate)\n\nprint(rate_list)\n","repo_name":"neckbosov/CBS","sub_path":"success.py","file_name":"success.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"24180683871","text":"# -*- coding: utf-8 -*-\nfrom dataclasses import dataclass\nfrom typing import Annotated, Union\n\nfrom mtpylon import int128\n\n\n@dataclass\nclass DHGenOk:\n nonce: int128\n server_nonce: int128\n new_nonce_hash1: int128\n\n class Meta:\n name = 'dh_gen_ok'\n order = (\n 'nonce',\n 'server_nonce',\n 'new_nonce_hash1'\n )\n\n\n@dataclass\nclass DHGenRetry:\n nonce: int128\n server_nonce: int128\n new_nonce_hash2: int128\n\n class Meta:\n name = 'dh_gen_retry'\n order = (\n 'nonce',\n 'server_nonce',\n 'new_nonce_hash2'\n )\n\n\n@dataclass\nclass DHGenFail:\n nonce: int128\n server_nonce: int128\n new_nonce_hash3: int128\n\n class Meta:\n name = 'dh_gen_fail'\n order = (\n 'nonce',\n 'server_nonce',\n 'new_nonce_hash3'\n )\n\n\nSet_client_DH_params_answer = Annotated[\n Union[\n DHGenOk,\n DHGenRetry,\n DHGenFail\n ],\n 'Set_client_DH_params_answer'\n]\n","repo_name":"Zapix/mtpylon","sub_path":"mtpylon/service_schema/constructors/set_client_dh_params_answer.py","file_name":"set_client_dh_params_answer.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"}
+{"seq_id":"71082972106","text":"############## PiCamera video stream creator ###############\n#\n# Author: Evan Juras (heavily copying from Adrian Rosebrock)\n# Date: 9/5/17\n# Description: Defines the PiVideoStream object, which controls\n# acquisition of frames from the PiCamera. The object uses multi-threading to\n# aquire camera frames in a separate thread from the main program. This allows\n# the main thread to grab the most recent camera frame without having to take \n# it directly from the camera feed, reducing I/O time, which slightly improves \n# framerate.\n#\n# See http://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/\n# for a full explanation of the source code.\n\n# Import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom threading import Thread\nimport cv2\n\n\nclass PiVideoStream:\n \"\"\"Camera object\"\"\"\n def __init__(self,resolution=(640,480),framerate=30):\n\t\t# Initialize the camera and the camera image stream\n self.camera = PiCamera()\n self.camera.resolution = resolution\n self.camera.framerate = framerate\n self.rawCapture = PiRGBArray(self.camera,size=resolution)\n self.stream = self.camera.capture_continuous(\n self.rawCapture, format = \"bgr\", use_video_port = True)\n\n\t\t# Create a variable to store the camera frame and to control\n\t\t# when the camera is stopped\n self.frame = []\n self.stopped = False\n\n def start(self):\n\t\t# Start the thread to read frames from the video stream\n Thread(target=self.update,args=()).start()\n return self\n\n def update(self):\n\t\t# Keep looping indefinitely until the thread is stopped\n for f in self.stream:\n\t\t\t# Grab the frame from the stream and clear the stream\n\t\t\t# in preparation for the next frame\n self.frame = f.array\n self.rawCapture.truncate(0)\n\n\t\t\t# If the camera is stopped, stop the thread and close\n\t\t\t# the camera resources\n if self.stopped:\n self.stream.close()\n self.rawCapture.close()\n self.camera.close()\n\n def read(self):\n\t\t# Return the most recent frame\n return self.frame\n\n def stop(self):\n\t\t# Indicate that the camera and thread should be stopped\n self.stopped = True\n","repo_name":"dandrews7396/buggyBot","sub_path":"PiVideoStream.py","file_name":"PiVideoStream.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"42131965691","text":"class Solution:\n def removeOuterParentheses(self, s: str) -> str:\n res = \"\"\n count = 0\n\n for char in s:\n if char == \"(\":\n count += 1\n if count > 1:\n res += char\n if char == \")\":\n count -= 1\n if count > 0:\n res += char\n\n return res\n # Lessons learned\n # we can solve the problem in linear time by keeping a count of the amount of closed and open brackets that we see, by doing so we can ignore the outer parentheses based on how balanced things are\n","repo_name":"mdiallo98/python-dataStructures-Algos","sub_path":"LeetcodeQuestions/String_Problems/remove_outermost_parenthesis.py","file_name":"remove_outermost_parenthesis.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"24340908253","text":"# vim: set ts=8 sts=4 sw=4 tw=99 et:\n#\n# This file is part of AMBuild.\n#\n# AMBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# AMBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with AMBuild. If not, see .\nimport os, errno\nimport uuid as uuids\nfrom ambuild2 import util\nfrom ambuild2 import nodetypes\nfrom ambuild2.frontend import paths\nfrom ambuild2.frontend.vs import nodes\nfrom ambuild2.frontend.base_generator import BaseGenerator\n\nSupportedVersions = ['10', '11', '12', '14', '15', '16', '17']\nYearMap = {\n '2010': 10,\n '2012': 11,\n '2013': 12,\n '2015': 14,\n '2017': 15,\n '2019': 16,\n '2022': 17,\n}\n\nclass Generator(BaseGenerator):\n def __init__(self, cm):\n super(Generator, self).__init__(cm)\n self.compiler = None\n self.vs_version = None\n self.files_ = {}\n self.projects_ = set()\n\n if self.cm.options.vs_version in SupportedVersions:\n self.vs_version = int(self.cm.options.vs_version)\n else:\n if self.cm.options.vs_version not in YearMap:\n util.con_err(\n util.ConsoleRed,\n 'Unsupported Visual Studio version: {0}'.format(self.cm.options.vs_version),\n util.ConsoleNormal)\n raise Exception('Unsupported Visual Studio version: {0}'.format(\n self.cm.options.vs_version))\n self.vs_version = YearMap[self.cm.options.vs_version]\n\n self.cacheFile = os.path.join(self.cm.buildPath, '.cache')\n try:\n with open(self.cacheFile, 'rb') as fp:\n self.vars_ = util.pickle.load(fp)\n except:\n self.vars_ = {}\n\n if 'uuids' not in self.vars_:\n self.vars_['uuids'] = {}\n\n self.target_platform = 'windows'\n\n # Overridden.\n @property\n def backend(self):\n return 'vs'\n\n # Overridden.\n def preGenerate(self):\n pass\n\n # Overriden.\n def postGenerate(self):\n self.generateProjects()\n with open(self.cacheFile, 'wb') as fp:\n util.DiskPickle(self.vars_, fp)\n\n def generateProjects(self):\n for node in self.projects_:\n # We cache uuids across runs to keep them consistent.\n node.uuid = self.vars_['uuids'].get(node.path)\n if node.uuid is None:\n node.uuid = str(uuids.uuid1()).upper()\n self.vars_['uuids'][node.path] = node.uuid\n node.project.export(self.cm, node)\n\n # Overridden.\n #\n # We don't support reconfiguring in this frontend.\n def addConfigureFile(self, cx, path):\n pass\n\n def detectCompilers(self):\n raise Exception('Implement me!')\n\n # Overridden.\n def enterContext(self, cx):\n cx.vs_nodes = []\n\n # Overridden.\n def leaveContext(self, cx):\n pass\n\n def ensureUnique(self, path):\n if path in self.files_:\n entry = self.files_[path]\n util.con_err(util.ConsoleRed,\n 'Path {0} already exists as: {1}'.format(path,\n entry.kind), util.ConsoleNormal)\n raise Exception('Path {0} already exists as: {1}'.format(path, entry.kind))\n\n # Overridden.\n def getLocalFolder(self, context):\n if type(context.localFolder_) is nodes.FolderNode or context.localFolder_ is None:\n return context.localFolder_\n\n if not len(context.buildFolder):\n context.localFolder_ = None\n else:\n context.localFolder_ = self.addFolder(context.parent, context.buildFolder)\n\n return 
context.localFolder_\n\n # Overridden.\n def addFolder(self, cx, folder):\n parentFolderNode = None\n if cx is not None:\n parentFolderNode = cx.localFolder\n\n _, path = paths.ResolveFolder(parentFolderNode, folder)\n if path in self.files_:\n entry = self.files_[path]\n if type(entry) is not nodes.FolderNode:\n self.ensureUnique(path) # Will always throw.\n return entry\n\n try:\n os.makedirs(path)\n except OSError as exn:\n if not (exn.errno == errno.EEXIST and os.path.isdir(path)):\n raise\n\n obj = nodes.FolderNode(path)\n self.files_[path] = obj\n return obj\n\n # Overridden.\n def addCopy(self, context, source, output_path):\n return (None, (None,))\n\n # Overridden.\n def addShellCommand(self,\n context,\n inputs,\n argv,\n outputs,\n folder = -1,\n dep_type = None,\n weak_inputs = None,\n shared_outputs = None):\n print(inputs, argv, outputs, folder, dep_type, weak_inputs, shared_outputs)\n\n def addOutput(self, context, path, parent):\n self.ensureUnique(path)\n\n node = nodes.OutputNode(context, path, parent)\n self.files_[path] = node\n return node\n\n def addProjectNode(self, context, project):\n self.ensureUnique(project.path)\n self.projects_.add(project)\n self.files_[project.path] = project\n","repo_name":"alliedmodders/ambuild","sub_path":"ambuild2/frontend/vs/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"81"}
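+The generator above keeps Visual Studio project UUIDs stable across runs by caching them in a pickled dict keyed on project path. A self-contained sketch of that pattern (the cache file name and helper names here are hypothetical, not AMBuild's):
+
+    import pickle
+    import uuid
+
+    CACHE_PATH = '.uuid_cache'   # hypothetical; AMBuild uses '<buildPath>/.cache'
+
+    def load_cache(path=CACHE_PATH):
+        try:
+            with open(path, 'rb') as fp:
+                return pickle.load(fp)
+        except (OSError, EOFError, pickle.UnpicklingError):
+            return {}
+
+    def stable_uuid(cache, project_path):
+        # Reuse the UUID assigned on a previous run so regenerated project
+        # files keep their identity; mint a new one only for new paths.
+        if project_path not in cache:
+            cache[project_path] = str(uuid.uuid1()).upper()
+        return cache[project_path]
+
+    def save_cache(cache, path=CACHE_PATH):
+        with open(path, 'wb') as fp:
+            pickle.dump(cache, fp)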
+{"seq_id":"41030443061","text":"import self as self\r\nfrom selenium import webdriver\r\nfrom os import system\r\nfrom googletrans import Translator\r\nfrom time import sleep\r\nimport json\r\n\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nservice_urls=['translate.google.com','translate.google.co.kr']\r\ntranslator = Translator(service_urls)\r\n\r\ncount = 0\r\n\r\nwith open(r\"Context/title.txt\",\"r\") as title,open(r\"Context/short.txt\",\"r\") as short:\r\n title = title.read()\r\n short = short.read()\r\n long = open(r\"Context/long.txt\",\"r\")\r\n\r\nwith open(r'Config/playstore_language_config.json') as f:\r\n PLAYSTORE = json.load(f)\r\n\r\nwith open(r'Config/playstore_language_config.json') as f:\r\n LANGUAGES = json.load(f)\r\n\r\n\r\n# create a new Firefox session\r\ndriver = webdriver.Firefox(executable_path=r'Runtime/geckodriver')\r\ndriver.maximize_window()\r\n\r\n\r\n# navigate to the application home page\r\nURL = driver.get(\"https://accounts.google.com/signin/v2/identifier?service=androiddeveloper&passive=1209600&continue=https%3A%2F%2Fplay.google.com%2Fapps%2Fpublish%2F%23&followup=https%3A%2F%2Fplay.google.com%2Fapps%2Fpublish%2F&flowName=GlifWebSignIn&flowEntry=ServiceLogin\")\r\nConfirm = input(\"Login with User & Pass (Enter)\")\r\n\r\nNew = driver.find_element_by_css_selector(\"button[class='IF2W6TD-f-a IF2W6TD-f-r']\").click()\r\ncommand = 'echo ' + title.strip() + '| clip'\r\ncase = system(command)\r\nConfirm = input(\"Paste project name : \" + title + \" (Enter)\")\r\n\r\nConfirm = input(\"Select add languages (Enter)\")\r\n\r\nlanguage_button = driver.find_element_by_css_selector(\"button[class='IF2W6TD-f-a IF2W6TD-f-p IF2W6TD-f-n IF2W6TD-f-b IF2W6TD-f-c IF2W6TD-f-s']\")\r\n\r\ntitle_box = driver.find_element_by_css_selector(\"input[class='gwt-TextBox IF2W6TD-rn-d']\")\r\nshort_box = driver.find_element_by_css_selector(\"textarea[class='gwt-TextArea IF2W6TD-di-d IF2W6TD-rn-d']\")\r\nlong_box = driver.find_element_by_css_selector(\"textarea[class='gwt-TextArea IF2W6TD-rn-d']\")\r\n\r\nsystem('cls')\r\nprint(\"SR.\" + \" | \" + \"LANGUAGES\" + \" - \" + \"Encode\" + \" | Function Processing\" + \"................... 
[Status]\")\r\n\r\nfor l in PLAYSTORE:\r\n count=count+1\r\n long.seek(0)\r\n sleep(1)\r\n\r\n update_language = driver.execute_script(\"arguments[0].setAttribute('data-lang-code','\" + l + \"')\", language_button)\r\n script = driver.execute_script(\"arguments[0].setAttribute('aria-pressed','false')\", language_button)\r\n sleep(5)\r\n language_button.click()\r\n\r\n title_box.clear()\r\n short_box.clear()\r\n long_box.clear()\r\n\r\n translate_title = translator.translate(title, dest=PLAYSTORE[l]).text\r\n translate_short = translator.translate(short, dest=PLAYSTORE[l]).text\r\n translate_long = translator.translate(long.read(), dest=PLAYSTORE[l]).text\r\n\r\n if PLAYSTORE[l]=='bg': translate_short=translate_title\r\n if PLAYSTORE[l]=='mk': translate_short=translate_title\r\n\r\n title_box.send_keys(translate_title[:50])\r\n sleep(1)\r\n short_box.send_keys(translate_short[:80])\r\n sleep(1)\r\n long_box.send_keys(translate_long[:4000])\r\n sleep(2)\r\n\r\n print(str(count) + \" | \" + LANGUAGES[PLAYSTORE[l]] + \" - \" + l + \" | Init Operation Sleep Time 10 Seconds\" + \"...................[Done]\")\r\n\r\n#Draft = driver.find_element_by_css_selector(\"button[class='JIM1JT-f-a JIM1JT-f-r JIM1JT-f-f']\").click()\r\nVerfiy = input(\"Verify inputs\")\r\nlong.close()\r\ndriver.quit()\r\n","repo_name":"RahulRathi46/Automating-Play-Console-Apk-Publication-Using-Selenium-Webdriver","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"9584644255","text":"rta = True\r\nestudiante = []\r\nwhile rta == True:\r\n print(\"1).registro de estudiante\\n2).registro de quices\\n3).registro de trabajos\\n4).registro de parciales\\nEnter para salir\")\r\n rta=input(\":)_\")\r\n if rta == \"1\":\r\n nombre = input(\"ingrese nombre : \")\r\n codigo = input(\"ingrese codigo : \")\r\n estudiante.append([nombre,codigo,[],[],[]])\r\n elif rta == \"2\":\r\n palabra = input (\"ingrese codigo del estudiante : \")\r\n for item in estudiante:\r\n if estudiante in item:\r\n notaq = float(input(\"ingrese nota del quiz : \")) \r\n print(estudiante)\r\n\r\nrta = bool(input(\"desea ingresar otro estudiante (si) presione S de lo contrario presione Enter\"))\r\nprint(estudiante)\r\n\r\n \r\n\r\n\r\n \r\n ","repo_name":"rosver96/CAMPUS-PYTHON-2023","sub_path":"CAMPUS-PYTHON23/EJERCICIOS-PYTHON-ALGORITMOS/agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5712437845","text":"import pygame\r\npygame.init()\r\nwin = pygame.display.set_mode((999,999))\r\npygame.display.set_caption(\"Ultimate Tic-Tac-Toe\")\r\nmyfont = pygame.font.SysFont('Times New Roman', 30)\r\nxdisp = myfont.render('X', False, (0, 0, 0))\r\nodisp = myfont.render('O', False, (0, 0, 0))\r\no_win = pygame.image.load(\"owin.png\").convert()\r\nx_win = pygame.image.load(\"xwin.png\").convert()\r\no_end = pygame.image.load(\"oend.png\").convert()\r\nx_end = pygame.image.load(\"xend.png\").convert()\r\ntie_image = pygame.image.load(\"xend.png\").convert()\r\ncounter = \"O\"\r\nindex = []\r\nwins = [[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8],[0,4,8],[2,4,6]]\r\nxcount = 0\r\nocount = 0\r\ngrandowin = False\r\ngrandxwin = False\r\ndef clicked(rect):\r\n if pygame.mouse.get_pressed()[0] and rect.collidepoint(pygame.mouse.get_pos()):\r\n return True\r\nclass Board(object):\r\n def __init__(self,x,y):\r\n self.rect = (x,y,333,333)\r\n self.x = x\r\n self.y = y\r\n self.width = 333\r\n self.clickable = True\r\n self.done = False\r\n self.x0 = self.x\r\n self.x1 = self.x +111\r\n self.x2 = self.x+222\r\n self.y0 = self.y\r\n self.y1 = self.y +111\r\n self.y2 = self.y+222\r\n self.squares = [[Square(self.x0,self.y0),Square(self.x1,self.y0),Square(self.x2,self.y0)],\r\n [Square(self.x0,self.y1),Square(self.x1,self.y1), Square(self.x2,self.y1)],\r\n [Square(self.x0,self.y2), Square(self.x1,self.y2),Square(self.x2,self.y2)]]\r\n self.long = [Square(self.x0,self.y0),Square(self.x1,self.y0),Square(self.x2,self.y0),\r\n Square(self.x0,self.y1),Square(self.x1,self.y1), Square(self.x2,self.y1),\r\n Square(self.x0,self.y2), Square(self.x1,self.y2),Square(self.x2,self.y2)]\r\n self.owin = False\r\n self.xwin = False\r\n self.tie = False\r\n def draw1(self):\r\n global grandxwin\r\n global grandowin\r\n for y in range(3):\r\n for x in range(3):\r\n self.squares[y][x].draw()\r\n pygame.draw.line(win,(0,0,0),(self.x1-6,self.y0),(self.x1-6,self.y2+100),11)\r\n pygame.draw.line(win,(0,0,0),(self.x2-6,self.y0),(self.x2-6,self.y2+100),11)\r\n pygame.draw.line(win,(0,0,0),(self.x0,self.y1-6),(self.x2+100,self.y1-6),11)\r\n pygame.draw.line(win,(0,0,0),(self.x0,self.y2-6),(self.x2+100,self.y2-6),11)\r\n if self.xwin == True:\r\n win.blit(x_win,(self.x0,self.y0))\r\n if self.owin == True:\r\n win.blit(o_win,(self.x0,self.y0))\r\n if self.tie:\r\n win.blit(tie_image,(self.x0,self.y0))\r\n if grandowin == True:\r\n win.blit(o_end,(0,0))\r\n if grandxwin == True:\r\n win.blit(x_end,(0,0))\r\n def update1(self):\r\n global index\r\n global counter\r\n global grandowin\r\n global grandxwin\r\n if self.tie or self.owin or self.xwin:\r\n for row in self.squares:\r\n for square in row:\r\n square.clickable = False\r\n for y in range(3):\r\n for x in range(3):\r\n if self.clickable == True:\r\n self.squares[y][x].clickable = True\r\n if self.clickable == False:\r\n self.squares[y][x].clickable = False\r\n if clicked(self.squares[y][x].rect) and self.squares[y][x].clickable == True and self.squares[y][x].done == False and self.done == False:\r\n self.squares[y][x].value = counter\r\n if counter == \"X\":\r\n counter = \"O\"\r\n else:\r\n counter = \"X\"\r\n self.squares[y][x].value = counter\r\n self.squares[y][x].done = True\r\n self.squares[y][x].clickable = False\r\n index = [x,y]\r\n return\r\n def check_win(self):\r\n global wins\r\n global xcount\r\n global ocount\r\n global index\r\n self.long = []\r\n for row in self.squares:\r\n for item in row:\r\n self.long.append(item)\r\n for test in 
wins:\r\n xcount = 0\r\n ocount = 0\r\n for squares in test:\r\n if self.long[squares].value == \"X\":\r\n xcount += 1\r\n if self.long[squares].value == \"O\":\r\n ocount +=1\r\n if ocount == 3:\r\n self.owin = True\r\n self.done = True\r\n return\r\n if xcount == 3:\r\n self.xwin = True\r\n self.done = True\r\n return\r\n for square in self.long:\r\n if square.value == \"\":\r\n return\r\n else:\r\n self.tie = True\r\n index = []\r\nclass Square(object):\r\n def __init__(self,x,y):\r\n self.rect = (x,y,111,111)\r\n self.y = y\r\n self.x = x\r\n self.clickable = True\r\n self.done = False\r\n self.value = \"\"\r\n self.rect = pygame.Rect(self.x,self.y,111,111)\r\n def draw(self):\r\n if self.clickable == True:\r\n pygame.draw.rect(win,(0,200,0),(self.x,self.y,100,100))\r\n if self.clickable == False or self.done == True:\r\n pygame.draw.rect(win,(140,200,250),(self.x,self.y,100,100))\r\n if self.done == True:\r\n pygame.draw.rect(win,(255,100,100),(self.x,self.y,100,100))\r\n if self.value == \"X\":\r\n win.blit(xdisp,(self.x+36,self.y+36))\r\n if self.value == \"O\":\r\n win.blit(odisp,(self.x+36,self.y+36))\r\nscreen = [[Board(0*333,0*333),Board(1*333,0*333),Board(2*333,0*333)],\r\n[Board(0*333,1*333),Board(1*333,1*333),Board(2*333,1*333)],\r\n[Board(0*333,2*333),Board(1*333,2*333),Board(2*333,2*333)]]\r\nscreenlong = [Board(0*333,0*333),Board(1*333,0*333),Board(2*333,0*333),\r\nBoard(0*333,1*333),Board(1*333,1*333),Board(2*333,1*333),\r\nBoard(0*333,2*333),Board(1*333,2*333),Board(2*333,2*333)]\r\nwin.fill((255,255,255))\r\nrun_game = True\r\nwhile run_game:\r\n if index == []:\r\n for row in screen:\r\n for space in row:\r\n space.clickable = True\r\n elif screen[index[1]][index[0]].xwin or screen[index[1]][index[0]].owin or screen[index[1]][index[0]].tie:\r\n index = []\r\n else:\r\n screen[index[1]][index[0]].clickable = True\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run_game = False\r\n for row in screen:\r\n for space in row:\r\n space.check_win()\r\n space.update1()\r\n space.draw1()\r\n space.clickable = False\r\n screenlong = []\r\n for x in screen:\r\n for y in x:\r\n screenlong.append(y)\r\n for board in screenlong:\r\n for test in wins:\r\n xcount = 0\r\n ocount = 0\r\n for squares in test:\r\n if screenlong[squares].xwin == True:\r\n xcount += 1\r\n if screenlong[squares].owin == True:\r\n ocount +=1\r\n if ocount == 3:\r\n grandowin = True\r\n if xcount == 3:\r\n grandxwin = True\r\n pygame.display.update()\r\npygame.quit()\r\n","repo_name":"g-w1/ultimate-tic-tac-toe","sub_path":"ultimate_tic_tac_toe.py","file_name":"ultimate_tic_tac_toe.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
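+The game above checks wins by scanning eight index triples over a flattened 3x3 board; a self-contained version of that rule (hypothetical helper, simplified from check_win):
+
+    WINS = [[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8],[0,4,8],[2,4,6]]
+
+    def winner(cells):
+        # cells: list of 9 strings, each "X", "O" or ""
+        for a, b, c in WINS:
+            if cells[a] and cells[a] == cells[b] == cells[c]:
+                return cells[a]
+        return "tie" if "" not in cells else None
+
+    assert winner(["X","X","X","O","O","","","",""]) == "X"
+    assert winner(["X","O","X","X","O","O","O","X","X"]) == "tie"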
+{"seq_id":"25605619997","text":"import sys\nfrom Pyro4 import naming, locateNS\n\nif __name__ == \"__main__\":\n args = []\n flags = []\n if len(sys.argv)>1:\n for arg in sys.argv[1:]:\n if arg.startswith('--'): flags.append(arg[2:])\n elif arg=='-f': flags.append('force_restart')\n else: args.append(arg)\n try:\n ns = locateNS()\n sys.exit(-1)\n except:\n sys.exit(naming.main())\n","repo_name":"grnydawn/Act3","sub_path":"name/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"14534152796","text":"from io import BytesIO\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import List\nfrom EPCPyYes.core.SBDH import sbdh\nfrom EPCPyYes.core.SBDH import template_sbdh\nfrom EPCPyYes.core.v1_2 import template_events\nfrom EPCPyYes.core.v1_2.events import EPCISBusinessEvent\nfrom EPCPyYes.core.v1_2.CBV.dispositions import Disposition\nfrom EPCPyYes.core.v1_2.CBV import business_steps, source_destination\nfrom quartet_capture import rules, models\nfrom quartet_capture.rules import RuleContext\nfrom quartet_integrations.frequentz.environment import get_default_environment\nfrom quartet_integrations.generic import mixins\nfrom quartet_integrations.gs1ushc.parsing import SimpleOutputParser, \\\n BusinessOutputParser\nfrom quartet_masterdata.models import Company, Location, TradeItem\nfrom quartet_output.steps import ContextKeys as OutputKeys, \\\n EPCPyYesOutputStep as EPYOS\nfrom quartet_output.steps import OutputParsingStep as QOPS\nfrom EPCPyYes.core.v1_2.events import Source, Destination\nimport copy\nfrom quartet_integrations.optel.steps import \\\n ContextKeys as OptelContextKeys\n\nEventList = List[EPCISBusinessEvent]\n\n\nclass ContextKeys(Enum):\n \"\"\"\n RECEIVER_COMPANY\n ----------------\n A masterdata Company (or location)\n record for the receiving company. This is derived\n via company prefix information in filtered events.\n\n SENDER_COMPANY\n --------------\n This is a masterdate Company (or location) record for the sender. This\n is pulled from the Sender data in the EPCIS message.\n \"\"\"\n RECEIVER_COMPANY = 'RECEIVER_COMPANY'\n SENDER_COMPANY = 'SENDER_COMPANY'\n\n\nclass OutputParsingStep(mixins.ObserveChildrenMixin, QOPS):\n\n def get_parser_type(self, *args):\n \"\"\"\n Override to provide a different parser type.\n :return: The `type` of parser to use.\n \"\"\"\n parser_type = SimpleOutputParser if self.loose_enforcement \\\n else BusinessOutputParser\n return parser_type\n\n @property\n def declared_parameters(self):\n params = super().declared_parameters\n params['Create Child Observation'] = ('Whether or not to take any '\n 'inbound parents and creat an '\n 'Object event of action '\n 'OBSERVE with their children.')\n params['Use Sources'] = (\n 'Whether or not to pass the source event source list to the '\n 'created object/observe event. Only applicable if the '\n 'Create Child Observation step parameter is set to True.'\n )\n params['Use Destinations'] = (\n 'Whether or not to pass the source event destination list to the '\n 'created object/observe event. 
Only applicable if the '\n 'Create Child Observation step parameter is set to True.'\n )\n return params\n\n def execute(self, data, rule_context: rules.RuleContext):\n super().execute(data, rule_context)\n if self.get_boolean_parameter('Create Child Observation', False):\n self.info('Create Child Observation step parameter was set to '\n 'True...checking filtered events to create '\n 'object/observe events.')\n use_sources = self.get_boolean_parameter('Use Sources', True)\n use_destinations = self.get_boolean_parameter('Use Destinations',\n True)\n filtered_events = rule_context.context[\n OutputKeys.FILTERED_EVENTS_KEY.value]\n doc = template_events.EPCISDocument()\n for event in filtered_events:\n objEvent = self.create_observation_event(event, use_sources,\n use_destinations)\n objEvent.biz_step = business_steps.BusinessSteps.other.value\n doc.object_events.append(objEvent)\n if len(doc.object_events) > 0:\n parser = self.get_parser_type()\n parser(BytesIO(doc.render().encode()),\n self.epc_output_criteria).parse()\n\n\nclass EPCPyYesOutputStep(EPYOS, mixins.CompanyFromURNMixin,\n mixins.OutboundMappingMixin,\n mixins.CompanyLocationMixin):\n \"\"\"\n Provides a new template for object events that includes gs1ushc\n ILMD data instead of CBV ILMD.\n \"\"\"\n\n def __init__(self, db_task: models.Task, **kwargs):\n super().__init__(db_task, **kwargs)\n self.template = self._get_new_template()\n self.add_sbdh = self.get_or_create_parameter(\n 'Add SBDH',\n 'True',\n self.declared_parameters.get('Add SBDH')\n ) in ['True', 'true']\n self.header = template_sbdh.StandardBusinessDocumentHeader()\n self.header.partners = []\n\n def _get_new_template(self):\n \"\"\"\n Grabs the jinja environment and creates a jinja template object and\n returns\n :return: A new Jinja template.\n \"\"\"\n env = get_default_environment()\n template = env.get_template('gs1ushc/object_event.xml')\n return template\n\n def execute(self, data, rule_context: RuleContext):\n # two events need new templates - object and shipping\n # the overall document needs a new template get that below\n # if filtered events has more than one event then you know\n # the event in filtered events is a shipping event so grab that\n # and give it a new template\n append_data = self.get_or_create_parameter(\n 'Append Data', 'True',\n 'Whether or not to call the append data function of the step for '\n 'events prior to rendering.') in ['True', 'true']\n modify_date = self.get_or_create_parameter(\n 'Modify Date', 'True',\n 'Whether or not to call the modify date function on teh step'\n ' for events prior to rendering.'\n ) in ['True', 'true']\n self.append_mapping = self.get_or_create_parameter(\n 'Add Partners to Shipping Events', 'True').lower() == 'true'\n self.use_glns = self.get_or_create_parameter('Use GLNs for Owners',\n 'True').lower() == 'true'\n ilmd = None\n schema_version = self.get_or_create_parameter('Schema Version', '1',\n self.declared_parameters.get(\n 'Schema Version'))\n self.info('Setting the schema version to %s', schema_version)\n rule_context.context['schema_version'] = schema_version\n filtered_events = rule_context.context.get(\n OutputKeys.FILTERED_EVENTS_KEY.value)\n if len(filtered_events) > 0:\n # get the object events from the context - these are added by\n # the AddCommissioningDataStep step in the rule.\n if modify_date: self.modify_date(filtered_events)\n object_events = rule_context.context.get(\n OutputKeys.OBJECT_EVENTS_KEY.value, [])\n if len(object_events) > 0:\n if modify_date: 
self.modify_date(object_events)\n for object_event in object_events:\n if len(object_event.ilmd) > 0:\n ilmd = object_event.ilmd\n break\n self.info(\n 'Found some filtered object events.'\n ' Looking up the receiver company by urn value/'\n 'company prefix.')\n copied_event = copy.copy(filtered_events[0])\n mapping_applied = False\n if self.append_mapping:\n mapping_applied = self.append_mapping_info(filtered_events[0])\n if self.add_sbdh:\n self.add_header(copied_event, rule_context,\n mapping_applied)\n # self.sbdh.partners.append(receiver)\n for event in object_events:\n event._template = self.template\n if len(event.ilmd) == 0:\n event.ilmd = ilmd\n agg_events = rule_context.context.get(\n OutputKeys.AGGREGATION_EVENTS_KEY.value, []\n )\n if append_data: self.append_event_data(agg_events)\n if modify_date: self.modify_date(agg_events)\n\n super().execute(data, rule_context)\n\n def modify_date(self, epcis_events: EventList):\n \"\"\"\n Some systems don't like timezone info so remove it. Override to\n provide different behavior.\n \"\"\"\n for epcis_event in epcis_events:\n epcis_event.event_time = epcis_event.event_time.replace('+00:00',\n 'Z')\n epcis_event.record_time = epcis_event.record_time.replace('+00:00',\n 'Z')\n\n def append_event_data(self, epcis_events: EventList):\n \"\"\"\n If set, will append data to the event, in this case will\n append disposition information if it is missing. Override to provide\n different behavior.\n \"\"\"\n disposition = self.get_or_create_parameter(\n 'Added Disposition', Disposition.in_progress.value,\n 'The disposition to add to events that do not have one.'\n )\n for epcis_event in epcis_events:\n if not epcis_event.disposition:\n epcis_event.disposition = disposition\n\n def add_header(self, filtered_event: EPCISBusinessEvent, rule_context,\n mapping_applied: bool):\n \"\"\"\n Adds the SBDH data.\n :param object_events:\n :param rule_context:\n :param mapping_applied: Whether or not an outbound mapping has\n already been applied.\n :return:\n \"\"\"\n # first get the receiver by the company prefix\n # noinspection PyTypeChecker\n sender_location = self.get_sender_location(filtered_event)\n self.add_sender_partner(sender_location, rule_context)\n receiver_company = self.get_company_by_urn(filtered_event,\n rule_context)\n self.add_receiver_partner(receiver_company, rule_context)\n # next get the receiving location by the receiving party in the event\n receiver_location = self.get_receiver_location(filtered_event)\n\n if not mapping_applied:\n owner_source = Source(\n source_destination.SourceDestinationTypes.owning_party.value,\n receiver_company.SGLN)\n owner_destination = Destination(\n source_destination.SourceDestinationTypes.owning_party.value,\n receiver_company.SGLN)\n source_location = Source(\n source_destination.SourceDestinationTypes.location.value,\n sender_location.SGLN)\n destination_location = Destination(\n source_destination.SourceDestinationTypes.location.value,\n receiver_location.SGLN)\n filtered_event.source_list = [owner_source, source_location]\n filtered_event.destination_list = [owner_destination,\n destination_location]\n rule_context.context['masterdata'] = {\n receiver_company.SGLN: receiver_company,\n receiver_location.SGLN: receiver_location,\n sender_location.SGLN: sender_location\n }\n\n def get_receiver_location(self, filtered_event):\n \"\"\"\n Will grab either the Company or Location quartet_masterdata.models\n instance that correlates to the filtered event's receiver location\n data.\n :param 
filtered_event: The event that met the output criteria and\n was filtered.\n :return: The Company or Location model instance.\n \"\"\"\n try:\n receiver_location = self.get_company_by_identifier(\n epcis_event=filtered_event, source_list=False\n )\n except Company.DoesNotExist:\n receiver_location = self.get_location_by_identifier(\n filtered_event, source_list=False\n )\n return receiver_location\n\n def get_sender_location(self, filtered_event):\n \"\"\"\n The name is slightly misleading, this will return either a\n quartet_masterdata.models Company or Location django model instance\n if it can be found by the SGLN or GLN value in the message.\n :param filtered_event: The event that has met the output criteria.\n :return: The Company or Location model instance.\n \"\"\"\n try:\n sender_location = self.get_company_by_identifier(\n filtered_event,\n source_destination.SourceDestinationTypes.possessing_party.value\n )\n except Company.DoesNotExist:\n sender_location = self.get_location_by_identifier(\n filtered_event,\n source_destination.SourceDestinationTypes.possessing_party.value\n )\n return sender_location\n\n def append_mapping_info(self, filtered_event: EPCISBusinessEvent,\n use_receiver=True):\n \"\"\"\n If the append partner step parameter is set, this will look for an\n outbound mapping in the system's master data and append the mappings\n if they are present. This will only take place if the filtered event\n is has a shipping business step.\n :param company: The company in question.\n :return: None\n \"\"\"\n biz_step = getattr(filtered_event, 'biz_step', None)\n if use_receiver:\n company = self.get_receiver_location(filtered_event)\n else:\n company = self.get_sender_location(filtered_event)\n if biz_step and 'shipping' in biz_step:\n mapping = self.get_outbound_mapping_by_company(company)\n if mapping:\n senders = []\n receivers = []\n # from business\n if mapping.from_business:\n from_business = mapping.from_business.GLN13 if self.use_glns else mapping.from_business.SGLN\n if from_business:\n senders.append(\n Source(\n source_destination.SourceDestinationTypes.owning_party.value,\n from_business\n )\n )\n self.info('Appending %s as the posessing party.',\n from_business)\n # from location\n if mapping.ship_from:\n ship_from = mapping.ship_from.SGLN\n if ship_from:\n senders.append(\n Source(\n source_destination.SourceDestinationTypes.location.value,\n ship_from\n )\n )\n self.info('Appending %s as the ship from party.',\n ship_from)\n # to business\n if mapping.to_business:\n to_business = mapping.to_business.GLN13 if self.use_glns else mapping.to_business.SGLN\n if to_business:\n receivers.append(\n Destination(\n source_destination.SourceDestinationTypes.owning_party.value,\n to_business\n )\n )\n self.info(\n 'Appending %s as the to business owning party.',\n to_business)\n # to location\n if mapping.ship_to:\n ship_to = mapping.ship_to.SGLN\n if ship_to:\n receivers.append(\n Destination(\n source_destination.SourceDestinationTypes.location.value,\n ship_to\n )\n )\n self.info('Appending %s as the ship to location.',\n ship_to)\n filtered_event.source_list = senders\n filtered_event.destination_list = receivers\n return True\n\n\n def add_receiver_location(self, receiver):\n pass\n\n def add_receiver_partner(self, receiver_company, rule_context):\n \"\"\"\n Adds the receiver partner to the header and the receiver company\n to the context.\n :param receiver_company: The masterdata Company model instance.\n :param rule_context: The RuleContext passed to execute.\n 
:return: None\n \"\"\"\n receiver = sbdh.Partner(\n sbdh.PartnerType.RECEIVER,\n partner_id=sbdh.PartnerIdentification('GLN',\n receiver_company.GLN13)\n )\n self.header.partners.append(receiver)\n self.header.document_identification.creation_date_and_time = \\\n datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n rule_context.context[\n ContextKeys.RECEIVER_COMPANY.value] = receiver\n\n def add_sender_partner(self, sender_company, rule_context):\n \"\"\"\n Adds the receiver partner to the header and the sender company\n to the context.\n :param sender_company: The masterdata Company model instance.\n :param rule_context: The RuleContext passed to execute.\n :return: None\n \"\"\"\n sender = sbdh.Partner(\n sbdh.PartnerType.SENDER,\n partner_id=sbdh.PartnerIdentification('GLN',\n sender_company.GLN13)\n )\n self.header.partners.append(sender)\n rule_context.context[\n ContextKeys.SENDER_COMPANY.value] = sender\n\n def get_epcis_document_class(self,\n all_events\n ) -> template_events.EPCISEventListDocument:\n \"\"\"\n This function will override the default 1.2 EPCIS doc with a 1.0\n template\n :param all_events: The events to add to the document\n :return: The EPCPyYes event list document to render\n \"\"\"\n doc_class = template_events.EPCISEventListDocument(all_events,\n self.header)\n env = get_default_environment()\n template = env.get_template('gs1ushc/epcis_document.xml')\n doc_class.additional_context = {\n 'masterdata': self.rule_context.context['masterdata']}\n doc_class._template = template\n return doc_class\n\n @property\n def declared_parameters(self):\n ret = super().declared_parameters()\n ret['Schema Version'] = 'The schema version to include in the header. ' \\\n 'default is 1'\n ret['Add SBDH'] = 'Whether or not to add a Standard Business Document' \\\n ' Header to the EPCIS message. Default is true.'\n ret['Add Partners to Shipping Events'] = 'Whether or not to add any ' \\\n 'mapped partner info to ' \\\n 'shipping events.'\n ret['Use GLNs for Owners'] = 'Whether or not to use SGLNs or GLNs for ' \\\n 'owning parties in the shipping event ' \\\n 'source destinations. If true, GLNs will ' \\\n 'be used. Default is False.'\n return ret\n\n\nclass EPCPyYesMasterDataOutputStep(EPCPyYesOutputStep):\n \"\"\"\n Additionally provides a way to add TradeItem masterdata to the \n \"\"\"\n \n def get_trade_items_mastedata(self, items):\n items_dict = {}\n \n for item in items:\n try:\n if len(item) == 14:\n trade_item = TradeItem.objects.get(GTIN14=item)\n items_dict[item] = trade_item.__dict__\n items_dict[item]['company'] = trade_item.company\n else:\n sscc = item[1:] + '-' + item[0]\n items_dict[sscc] = {\n 'id_type': 'SSCC',\n 'GTIN14': sscc,\n 'company': Company.objects.get(gs1_company_prefix=item[1:]),\n 'regulated_product_name': 'The Pallet',\n 'dosage_form': '-',\n 'strength': '-'\n }\n except TradeItem.DoesNotExist:\n raise self.TradeItemMasterdataDoesNotExist(\n 'Create Trade Item for GTIN %s' % item\n )\n except Company.DoesNotExist:\n raise self.CompanyMasterdataDoesNotExist(\n 'Create company for prefix %s' % item[1:]\n )\n return items_dict\n\n def get_epcis_document_class(self,\n all_events\n ) -> template_events.EPCISEventListDocument:\n \"\"\"\n This function will override the default 1.2 EPCIS doc with a 1.0\n template and additionally it provides a trade items masterdata. 
\n\n :param all_events: The events to add to the document\n :return: The EPCPyYes event list document to render\n \"\"\"\n\n # code from base method + changed template\n doc_class = template_events.EPCISEventListDocument(all_events,\n self.header)\n env = get_default_environment()\n template = env.get_template('gs1ushc/epcis_document_complete_masterdata.xml')\n doc_class.additional_context = {\n 'masterdata': self.rule_context.context['masterdata']}\n doc_class._template = template\n \n # adding a TradeItems masterdata to the template\n self.info('Trying to extract TradeItem GTINs from the rule context '\n 'using OptelContextKey - TRADE_ITEMS_MASTERDATA key.')\n trade_items = self.rule_context.context.get(\n OptelContextKeys.TRADE_ITEMS_MASTERDATA.value)\n self.info('Found %d trade items in the rule context. Adding masterdata'\n ' to the EPCIS message.' % len(trade_items))\n trade_items_masterdata = self.get_trade_items_mastedata(trade_items)\n doc_class.additional_context['trade_items'] = trade_items_masterdata\n \n return doc_class\n \n class TradeItemMasterdataDoesNotExist(Exception):\n pass\n\n class CompanyMasterdataDoesNotExist(Exception):\n pass\n","repo_name":"rmagee/quartet_integrations","sub_path":"quartet_integrations/gs1ushc/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":23089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"32122013014","text":"import pygame\nfrom towers.tower import Tower, menu_bg, upgrade_btn\nfrom menu.menu import Menu\nimport os\nimport math\nimport time\n\n#loads long range tower images\ntower_imgs_1 = []\ntower_imgs_1.append(pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets/Towers\", \"long_range_tower.png\")), (90,180)))\n\n#loads short range tower images\ntower_imgs_2 = []\ntower_imgs_2.append(pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets/Towers\", \"short_range_tower.png\")), (90,180)))\n\n\n#loads archer images\narcher_1_img = []\narcher_2_img = []\narcher_3_img = []\n\nfor x in range(6):\n archer_1_img.append(pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets/Towers/level_1\", \"a_Attack00\" + str(x) + \".png\")), (72,72)))\nfor x in range(6):\n archer_2_img.append(pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets/Towers/level_2\", \"a_Attack00\" + str(x) + \".png\")), (72,72)))\nfor x in range(6):\n archer_3_img.append(pygame.transform.scale(pygame.image.load(os.path.join(\"game_assets/Towers/level_3\", \"a_Attack00\" + str(x) + \".png\")), (72,72)))\n\n\nclass ArcherTowerLong(Tower):\n def __init__(self,x,y):\n super().__init__(x,y)\n \n self.tower_imgs = tower_imgs_1[:]\n self.archer_1_img = archer_1_img[:]\n self.archer_2_img = archer_2_img[:]\n self.archer_3_img = archer_3_img[:]\n self.archer_imgs = []\n self.upg_price = [1000,5000,\"MAX\"]\n self.archer_count = 0\n self.range = 250\n self.original_range = self.range\n self.inRange = False\n self.right = False\n self.damage = 1\n self.original_damage = self.damage\n self.width = self.tower_imgs[0].get_width()\n self.height = self.tower_imgs[0].get_height()\n #def menu\n self.menu = Menu(self, self.x, self.y, menu_bg)\n self.menu.add_btn(upgrade_btn, \"Upgrade\")\n self.name = \"archer\"\n \n \n def draw(self, win):\n super().draw_radius(win, self.range,128,128,128)\n super().draw(win)\n \n # self.width = self.tower_imgs[0].get_width()\n # self.height = self.tower_imgs[0].get_height()\n \n if self.level == 1:\n self.archer_imgs = self.archer_1_img\n elif self.level == 2:\n self.archer_imgs = self.archer_2_img\n elif self.level == 3:\n self.archer_imgs = self.archer_3_img\n \n\n archer = self.archer_imgs[self.archer_count//10 - 1]\n win.blit(archer, ((self.x) - 35, (self.y - archer.get_height() + 35)))\n \n\n \n def change_range(self, r):\n \"\"\"\n change range of archer tower\n :param r: int\n :return: None\n \"\"\"\n self.range = r\n \n def attack(self, enemies):\n \"\"\"\n attacks an enemy in the enemy list, modifies the list\n :param enemies: enemy list\n :return: None\n \"\"\"\n if self.inRange and not self.moving:\n self.archer_count += 1\n else:\n self.archer_count = 0\n\n if self.archer_count >= len(self.archer_imgs)*10:\n self.archer_count = 0\n \n money = 0\n self.inRange = False\n enemy_closest = []\n for enemy in enemies:\n x = enemy.x\n y = enemy.y\n \n dis = math.sqrt((self.x - x)**2 + (self.y - y)**2)\n if dis < self.range:\n self.inRange = True\n enemy_closest.append(enemy)\n \n enemy_closest.sort(key=lambda x: x.x)\n if len(enemy_closest) > 0:\n first_enemy = enemy_closest[0]\n if self.archer_count == 50:\n if first_enemy.hit(self.damage) == True:\n enemies.remove(first_enemy)\n money = first_enemy.money\n \n if first_enemy.x > self.x and not(self.right):\n self.right = True\n for x, img in enumerate(self.archer_imgs):\n self.archer_imgs[x] = pygame.transform.flip(img, True, False)\n elif self.right and first_enemy.x < 
self.x:\n self.right = False\n for x, img in enumerate(self.archer_imgs):\n self.archer_imgs[x] = pygame.transform.flip(img, True, False)\n return money\n \n \nclass ArcherTowerShort(ArcherTowerLong):\n def __init__(self, x, y):\n super().__init__(x, y)\n \n self.archer_1_img = archer_1_img[:]\n self.archer_2_img = archer_2_img[:]\n self.archer_3_img = archer_3_img[:]\n self.tower_imgs = tower_imgs_2[:]\n self.archer_imgs = []\n self.upg_price = [2000,6000,\"MAX\"]\n self.archer_count = 0\n self.range = 150\n self.inRange = False\n self.right = False\n self.damage = 2\n self.original_range = self.range\n self.original_damage = self.damage\n self.width = self.tower_imgs[0].get_width()\n self.height = self.tower_imgs[0].get_height()\n #def menu\n self.menu = Menu(self, self.x, self.y, menu_bg)\n self.menu.add_btn(upgrade_btn, \"Upgrade\")\n self.name = \"archer2\"\n","repo_name":"KiedusCracknell/L3Game-RealmGuard-Tower-Wars","sub_path":"towers/archerTower.py","file_name":"archerTower.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
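+The attack() method above keeps enemies whose Euclidean distance is inside the tower's range, then shoots the left-most one (enemy_closest.sort(key=lambda x: x.x)). That selection rule in isolation, as a hypothetical standalone helper over (x, y) tuples:
+
+    import math
+
+    def pick_target(tower_xy, attack_range, enemies):
+        tx, ty = tower_xy
+        in_range = [e for e in enemies
+                    if math.hypot(tx - e[0], ty - e[1]) < attack_range]
+        # left-most enemy first, mirroring the sort in attack()
+        return min(in_range, key=lambda e: e[0]) if in_range else None
+
+    enemies = [(120, 40), (300, 10), (90, 80)]
+    print(pick_target((100, 60), 250, enemies))   # -> (90, 80)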
+{"seq_id":"2558999265","text":"#!/usr/bin/env python\n\nfrom ast import Str\nfrom cmath import sin\nimport rospy\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Float64, String, Int16\n\nimport os\nimport cv2 as cv\nimport copy\nimport itertools\nimport mediapipe as mp\nimport math\nimport json\nimport sys\n# print(sys.path)\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom gesture_recognition.keypoint_classifier import KeyPointClassifier\n\nORIGIN = (0.5,0.5)\n\ndef draw_bounding_rect(use_brect, image, brect, label):\n if use_brect:\n\n cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),\n (0, 0, 0), 1)\n\n return image\n\ndef calc_bounding_rect(image_shape, landmarks):\n image_width, image_height = image_shape\n\n landmark_array = np.empty((0, 2), int)\n\n for _, landmark in enumerate(landmarks.landmark):\n landmark_x = min(int(landmark.x * image_width), image_width - 1)\n landmark_y = min(int(landmark.y * image_height), image_height - 1)\n\n landmark_point = [np.array((landmark_x, landmark_y))]\n\n landmark_array = np.append(landmark_array, landmark_point, axis=0)\n\n x, y, w, h = cv.boundingRect(landmark_array)\n\n return [x, y, x + w, y + h]\n\ndef calc_landmark_list(image, landmarks):\n image_width, image_height = image\n\n landmark_point = []\n\n\n for _, landmark in enumerate(landmarks):\n landmark_x = min(int(landmark['x'] * image_width), image_width - 1)\n landmark_y = min(int(landmark['y'] * image_height), image_height - 1)\n # landmark_z = landmark.z\n\n landmark_point.append([landmark_x, landmark_y])\n return landmark_point\n\ndef pre_process_landmark(landmark_list):\n temp_landmark_list = copy.deepcopy(landmark_list)\n\n\n base_x, base_y = 0, 0\n for index, landmark_point in enumerate(temp_landmark_list):\n if index == 0:\n base_x, base_y = landmark_point[0], landmark_point[1]\n\n temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x\n temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y\n\n\n temp_landmark_list = list(\n itertools.chain.from_iterable(temp_landmark_list))\n\n\n max_value = max(list(map(abs, temp_landmark_list)))\n\n def normalize_(n):\n return n / max_value\n\n temp_landmark_list = list(map(normalize_, temp_landmark_list))\n\n return temp_landmark_list\n\n\ndef pre_process_point_history(image, point_history):\n image_width, image_height = image.shape[1], image.shape[0]\n\n temp_point_history = copy.deepcopy(point_history)\n\n\n base_x, base_y = 0, 0\n for index, point in enumerate(temp_point_history):\n if index == 0:\n base_x, base_y = point[0], point[1]\n\n temp_point_history[index][0] = (temp_point_history[index][0] -\n base_x) / image_width\n temp_point_history[index][1] = (temp_point_history[index][1] -\n base_y) / image_height\n\n\n temp_point_history = list(\n itertools.chain.from_iterable(temp_point_history))\n\n return temp_point_history\n\ndef callback(data:String):\n landmarks = json.loads(data.data)\n landmark_list = calc_landmark_list((1280,720), landmarks)\n pre_processed_landmark_list = pre_process_landmark(landmark_list)\n hand_sign_id = keypoint_classifier(pre_processed_landmark_list)\n\n x = landmarks[0]['x']-ORIGIN[0]\n y = landmarks[0]['y']-ORIGIN[1]\n \n # print(x, y , hand_sign_id)\n if hand_sign_id == 1:\n tracker_pub.publish(-y*4)\n pusher_pub.publish(-x*4)\n elif hand_sign_id == 0:\n tracker_pub.publish(10000)\n else:\n hand_sign_pub.publish(hand_sign_id)\n \n\ndef talker():\n # global tracker_pub\n # global pusher_pub\n global tracker_pub\n 
global pusher_pub\n global hand_sign_pub\n tracker_pub = rospy.Publisher('hockey_robot/joint3_position_controller/command', Float64, queue_size=10)\n pusher_pub = rospy.Publisher('hockey_robot/joint4_position_controller/command', Float64, queue_size=10)\n # tracker_vel_pub = rospy.Publisher('hockey_robot/joint5_position_controller/command', Float64, queue_size=10)\n hand_sign_pub = rospy.Publisher('/hockey_robot/gest_controller/hand_sign', Int16, queue_size=10)\n rospy.init_node('gest_controller', anonymous=True)\n rospy.Subscriber('/landmark', String, callback)\n rospy.spin()\n # cap = cv.VideoCapture(0)\n # # pos = get_hand_position(cap,(0,0))\n \n # # while not rospy.is_shutdown():\n # while True:\n # hand,hand_sign_id, image_shape = get_hand_position(cap,orgin)\n # if hand:\n # x = hand.landmark[mp_hands.HandLandmark.WRIST].x-orgin[0]\n # y = (hand.landmark[mp_hands.HandLandmark.WRIST].y-orgin[1])\n # else:\n # x,y = x,y\n # if hand_sign_id == 1:\n # tracker_pub.publish(-y*4)\n # pusher_pub.publish(-x*4)\n # elif hand_sign_id == 0:\n # tracker_pub.publish(10000)\n # # pusher_pub.publish(-x*4)\n\nif __name__ == '__main__':\n\n DIRECTION = []\n VELECITY = [50, 100, 500]\n\n model_save_path = os.path.join('/home/xumingjie/catkin_ws/src/air_hockey_robot/hockey_robot_gazebo/scripts/model/keypoint_classifier/keypoint_classifier.tflite') \n keypoint_classifier = KeyPointClassifier(model_path=model_save_path)\n\n mp_drawing = mp.solutions.drawing_utils\n mp_drawing_styles = mp.solutions.drawing_styles\n mp_hands = mp.solutions.hands\n br = CvBridge()\n try:\n talker()\n except rospy.ROSInterruptException:\n pass","repo_name":"Xumj82/air_hockey_robot","sub_path":"hockey_robot_gazebo/scripts/gesture_controller_web.py","file_name":"gesture_controller_web.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
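+The callback above recenters the normalized wrist landmark on ORIGIN and scales it onto two joint position topics. That mapping in isolation (topic wiring omitted; the factor 4 is the script's hard-coded gain):
+
+    ORIGIN = (0.5, 0.5)
+    GAIN = 4   # the scale factor hard-coded in callback()
+
+    def hand_to_joint_targets(wrist_x, wrist_y):
+        # MediaPipe landmarks are normalized to [0, 1]; recenter on ORIGIN,
+        # flip, and scale onto the joint position commands.
+        x = wrist_x - ORIGIN[0]
+        y = wrist_y - ORIGIN[1]
+        return -y * GAIN, -x * GAIN   # (tracker command, pusher command)
+
+    print(hand_to_joint_targets(0.25, 0.75))   # -> (-1.0, 1.0)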
+{"seq_id":"17746096267","text":"import zencad.util\nfrom OCC.Core.Geom import Geom_Line\nfrom OCC.Core.gp import gp_Lin, gp_Pnt, gp_Dir, gp_XYZ\n\n\nclass Axis:\n def __init__(self, *xyz):\n self._coords = zencad.util.as_indexed(xyz)\n\n def to_Geom_Line(self):\n return Geom_Line(\n gp_Lin(\n gp_Pnt(0, 0, 0),\n gp_Dir(\n gp_XYZ(\n self._coords[0],\n self._coords[1],\n self._coords[2]))))\n","repo_name":"mirmik/zencad","sub_path":"zencad/axis.py","file_name":"axis.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"81"}
+{"seq_id":"71329488904","text":"from bs4 import BeautifulSoup\nimport urllib.request\nfrom os import system\nfrom time import sleep\n\nclass Main():\n\n def __init__(self, site_name, season, episode, *args, **kwargs):\n self.season = season # начальный сезон\n self.episode = episode # начальная серия\n self.link = site_name + \"/season-{}/episode-{}\".format(season, episode)\n\n with urllib.request.urlopen(self.link) as response:\n # Запись в переменную html код страницы\n html = response.read()\n\n soup = BeautifulSoup(html, 'html.parser')\n\n # Поиск тега с классом .empty\n search_empty_tag = soup.select(\".empty\")\n\n check_new_video(search_empty_tag) # Проверка новых серий\n\n check_new_season()\n\n # Проверка нового сезона\n def check_new_season(self):\n season = self.season + 1\n episode = 1\n link = self.link\n print(link)\n\n # Отправка аудио сигнала\n def send_audio(count, time_loop):\n i = count # кол-во уведомлений\n while i >= 0:\n system('beep.mp3')\n sleep(time_loop) # интервал уведомлений в секундах\n i = i - 1\n\n # Проверка новой серии\n def check_new_video(empty_tag):\n if not empty_tag:\n # Если такого тега нет, то есть новые серии.\n print(\"Новые серии!\")\n send_audio(3, 60*5) # Звуковые сигналы (5 минут интервал)\n return True\n # Затем проверить 1 серию нового сезона, если ее нет,\n # тогда проверить следующую текущего сезона, если она есть,\n # тогда подать сигнал и написать в телеграмм.\n else:\n print(\"Нет новых серий\")\n return False\n\n\nwrecked = Main(\"http://www.torrentino.me/serial/888489-wrecked\", 2, 5)\n\n","repo_name":"dns2316/python-experience","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"42918094067","text":"import pytest, sys, os\nfrom src.data import *\nfrom src.figures import *\nfrom src.utils import *\nfrom src.pci_model import *\n\ndef test_create_plotly():\n fig = create_plotly_figure(input = \"./figures/pci.csv\")\n\ndef test_proc_data():\n print(os.getcwd())\n\n if not os.path.exists(\"./tests/Data/Output/\"):\n os.makedirs(\"./tests/Data/Output/\")\n\n proc_embedding(\n input_file = \"./tests/Data/Input/embedding_example.txt\",\n output_path = \"./tests/Data/Output/\"\n )\n\n with open(\"./tests/Data/Output/embedding.pkl\" , 'rb') as f:\n embedding = pickle.load(f)\n \n words = list( embedding.keys())\n\n example_data_2000 = gen_testing_data(words = words, from_year = 2000, to_year = 2010, type = 0, seed = 1 )\n example_data_2000.to_pickle(\"./tests/Data/Output/2000_2010.pkl\")\n\n example_data_2011 = gen_testing_data(words = words, from_year = 2011, to_year = 2011, type = 1, seed = 2 )\n example_data_2011.to_pickle(\"./tests/Data/Output/2011.pkl\")\n\n\n proc_pd(\n input = \"./tests/Data/Output/2000_2010.pkl\",\n create = 1,\n seed = 1,\n k_fold = 10,\n output = \"./tests/Data/Output/database.db\",\n embedding = \"./tests/Data/Output/\"\n )\n\n proc_pd(\n input = \"./tests/Data/Output/2011.pkl\",\n create = 0,\n seed = 2,\n k_fold = 10,\n output = \"./tests/Data/Output/database.db\",\n embedding = \"./tests/Data/Output/\"\n )\n\n run_pci_model(year_target=2011, mt_target=1, i=1, gpu=-1, model=\"window_2_years_quarterly\", root = \"./tests/\", T=0.01, discount=0.05, bandwidth = 0.2 )\n run_pci_model(year_target=2011, mt_target=1, i=2, gpu=-1, model=\"window_2_years_quarterly\", root = \"./tests/\", T=0.01, discount=0.05, bandwidth = 0.2 )\n\n compile_model_results(\"window_2_years_quarterly\", root = \"./tests\")\n\n create_text_output(\"window_2_years_quarterly\", \"2011_M1\", gpu=-1, root =\"./tests/\")\n","repo_name":"hehuanshu96/PCI","sub_path":"PCI-China/tests/test_01.py","file_name":"test_01.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"11703689151","text":"from Scripts.UI.Menu import Menu, DefaultButton, AnimationTextEdit, FileSystemWatching, ListWidget, TextEdit\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nimport Scripts.Settings.Settings as Settings\nimport Scripts.Settings.StyleSheets as StyleSheets\nfrom Scripts.API.OctoPrintAPI import OctoPrintAPI, Preset\n\nclass PresetsMenu(Menu):\n name = \"PresetsMenu\"\n\n def __init__(self, parent):\n super(PresetsMenu, self).__init__(parent)\n\n self.setObjectName(self.name)\n self.setFixedSize(Settings.WINDOW_SIZE[0], Settings.WINDOW_SIZE[1])\n\n self.menu = DefaultButton(self, \"Back\", 300, 200, 100, 100, \"Back\", lambda: parent.ChangeMenu(\"MainPage\"))\n self.addPreset = DefaultButton(self, \"Add\", 300, 100, 100, 100, \"Add\", lambda: parent.ChangeMenu(\"CreatePresetsMenu\"))\n self.updatePresets = DefaultButton(self, \"Update\", 300, 0, 100, 100, \"Update\", self.UpdatePreset)\n\n self.files = ListWidget(self, \"Files\", 0, 0, 300, 300, self.SetPreset)\n\n self.Update()\n self.UpdatePreset()\n # self.files = FileSystemWatching(self, 200, 0, 200, 100)\n\n def UpdatePreset(self):\n OctoPrintAPI.LoadPresets(OctoPrintAPI)\n self.files.SetItems(OctoPrintAPI.PRESETS.keys())\n\n def SetPreset(self, signal):\n key = self.files.model.itemFromIndex(signal).text()\n OctoPrintAPI.SetToolTemperature(OctoPrintAPI, OctoPrintAPI.PRESETS[key].temperatureT0, OctoPrintAPI.PRESETS[key].temperatureT1, False)\n\n def Update(self):\n pass\n # self.statusBar.SetText(f\"[STATUS] {OctoPrintAPI.JOB.state}\")\n\n\nclass CreatePresetsMenu(Menu):\n name = \"CreatePresetsMenu\"\n\n def __init__(self, parent):\n super(CreatePresetsMenu, self).__init__(parent)\n\n self.setObjectName(self.name)\n self.setFixedSize(Settings.WINDOW_SIZE[0], Settings.WINDOW_SIZE[1])\n\n self.i_name = TextEdit(self, 0, 20, 250, 50, \"Name: str\", \"Preset\")\n self.i_temperatureBed = TextEdit(self, 0, 90, 250, 50, \"Temperature Bed: null\")\n self.i_temperatureT0 = TextEdit(self, 0, 160, 250, 50, \"Temperature T0: int\", \"200\")\n self.i_temperatureT1 = TextEdit(self, 0, 230, 250, 50, \"Temperature T1: null\")\n\n self.menu = DefaultButton(self, \"Back\", 300, 180, 100, 100, \"Back\", lambda: parent.ChangeMenu(\"PresetsMenu\"))\n\n self.createPreset = DefaultButton(self, \"Save\", 300, 20, 100, 100, \"Save\", self.SavePreset)\n\n self.Update()\n # self.files = FileSystemWatching(self, 200, 0, 200, 100)\n\n def SavePreset(self):\n try:\n t_name = self.i_name.label.text()\n t_tempBed = None if (self.i_temperatureBed.label.text() == \"\") else float(self.i_temperatureBed.label.text())\n t_tempT0 = None if (self.i_temperatureT0.label.text() == \"\") else float(self.i_temperatureT0.label.text())\n t_tempT1 = None if (self.i_temperatureT1.label.text() == \"\") else float(self.i_temperatureT1.label.text())\n\n OctoPrintAPI.CreatePreset(OctoPrintAPI, t_name, t_tempBed, t_tempT0, t_tempT1)\n except Exception as exception: print(exception)\n\n def Update(self):\n pass\n # self.statusBar.SetText(f\"[STATUS] {OctoPrintAPI.JOB.state}\")","repo_name":"shogo-makishima/PyQT5_OctoScreen","sub_path":"Scripts/UI/PresetsMenu.py","file_name":"PresetsMenu.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"41419269109","text":"from functions import *\n\ndef test():\n filename = \"winequality.csv\"\n atributo = \"alcohol\"\n try:\n diccionario = read_data(filename)\n diccionarios = split(read_data(filename))\n print(diccionarios)\n # print(reduce(diccionarios[1], atributo))\n\n except ValueError as err:\n print(\"Ha ocurrido la excepcion {}\".format(err))\n\ntest()","repo_name":"begimode/examenPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"13583348259","text":"import math\nimport collections\nfrom hellonlp.ChineseWordSegmentation.hyperparameters import Hyperparamters as hp\nfrom hellonlp.ChineseWordSegmentation.modules import (\n get_ngram_frequence_infomation,\n calcul_ngram_entropy,\n calcul_ngram_pmi,\n)\n\n\nclass SeriesQun:\n def __init__(self) -> None:\n self.min_n = 2\n self.max_n = 5\n self.min_freq = 3\n\n def cal_score(self, ngram_freq, ngram_keys):\n\n # Get left and right ngram entropy\n left_right_entropy = calcul_ngram_entropy(\n ngram_freq, ngram_keys, range(self.min_n, self.max_n + 1)\n )\n # Get pmi ngram entropy\n mi = calcul_ngram_pmi(ngram_freq, ngram_keys, range(self.min_n, self.max_n + 1))\n # Join keys of entropy and keys of pmi\n joint_phrase = mi.keys() & left_right_entropy.keys()\n\n # Word liberalization\n def word_liberalization(el, er):\n return math.log(\n (el * hp.e**er + 0.00001) / (abs(el - er) + 1), hp.e\n ) + math.log((er * hp.e**el + 0.00001) / (abs(el - er) + 1), hp.e)\n\n word_info_scores = {\n word: (\n mi[word][0],\n mi[word][1],\n left_right_entropy[word][0],\n left_right_entropy[word][1],\n min(left_right_entropy[word][0], left_right_entropy[word][1]),\n word_liberalization(\n left_right_entropy[word][0], left_right_entropy[word][1]\n )\n + mi[word][1],\n )\n for word in joint_phrase\n }\n\n # Drop some special word that end with \"的\" like \"XX的,美丽的,漂亮的\"\n target_ngrams = word_info_scores.keys()\n start_chars = collections.Counter([n[0] for n in target_ngrams])\n end_chars = collections.Counter([n[-1] for n in target_ngrams])\n threshold = int(len(target_ngrams) * 0.004)\n threshold = max(50, threshold)\n invalid_start_chars = set(\n [char for char, count in start_chars.items() if count > threshold]\n )\n invalid_end_chars = set(\n [char for char, count in end_chars.items() if count > threshold]\n )\n invalid_target_ngrams = set(\n [\n n\n for n in target_ngrams\n if (n[0] in invalid_start_chars or n[-1] in invalid_end_chars)\n ]\n )\n # Remove some words invalids\n for n in invalid_target_ngrams:\n word_info_scores.pop(n)\n return word_info_scores\n\n def series_name(self, data):\n\n ngram_freq, ngram_keys = get_ngram_frequence_infomation(\n data,\n self.min_n,\n self.max_n,\n chunk_size=hp.chunk_size,\n min_freq=self.min_freq,\n )\n word_info_scores = self.cal_score(ngram_freq, ngram_keys)\n new_words = self.drop_repeat_short_term(ngram_freq, word_info_scores)\n series_qun = self.group_qun(new_words, data)\n\n return series_qun\n\n def drop_repeat_short_term(self, ngram_freq, word_info_scores):\n ngram_keys = [(\"\".join(x), x) for x in ngram_freq.keys()]\n ngram_keys = sorted(ngram_keys, key=lambda x: len(x[0]))\n delete_key = set()\n for i, (key, key_tuple) in enumerate(ngram_keys):\n if len(key) < self.min_n:\n continue\n for j in range(i + 1, len(ngram_keys)):\n compare_key, compare_key_tuple = ngram_keys[j]\n if len(compare_key) == len(key):\n continue\n if (\n key in compare_key\n and ngram_freq[key_tuple] == ngram_freq[compare_key_tuple]\n ):\n delete_key.add(key_tuple)\n new_words = set()\n for key in word_info_scores:\n if key not in delete_key:\n new_words.add(\n (\n \"\".join(key),\n tuple(list(word_info_scores[key]) + [ngram_freq[key]]),\n )\n )\n return new_words\n\n def group_qun(self, new_words, data):\n new_words = sorted(new_words, key=lambda x: x[1][-1], reverse=True)\n group_qun = {}\n for d in data:\n for new_entry in new_words:\n new_word = new_entry[0]\n if new_word in d:\n if new_word not in group_qun:\n group_qun[new_word] = []\n 
group_qun[new_word].append(d)\n break\n series_qun = {}\n for key in group_qun.keys():\n if len(group_qun[key]) >= self.min_n:\n series_qun[key] = group_qun[key]\n return series_qun\n\n\nif __name__ == \"__main__\":\n\n data = [\n \"【乌市】青春交友群\",\n \"乌市🍒灯红酒绿\",\n \"乌市🍒美术馆2\",\n \"乌市🍒汇珊园\",\n \"乌市🍒幸福花园\",\n \"乌市🍒美术馆3\",\n \"乌市🍒美术馆1\",\n \"乌市🍒️恒大之星\",\n \"乌市🍒️美琳花源\",\n \"乌市💕️桃花岛3\",\n \"乌市💋夜色撩人\",\n \"乌鲁木齐🈷️百味小\",\n \"乌鲁木齐🈷️仙女聚\",\n \"乌鲁木齐🈷️仙女聚\",\n \"乌鲁木齐🈷️仙女聚\",\n \"乌鲁木齐🈷️仙女聚\",\n \"乌鲁木齐之夜6\",\n \"乌鲁木齐之夜5\",\n \"乌鲁木齐之夜10\",\n \"乌鲁木齐都是聚会1\",\n \"乌鲁木齐都市大聚\",\n \"同城交友(1)\",\n \"同城交友(3)\",\n \"乌市✔美女大本营\",\n \"\t乌鲁木齐💋可a2女神\",\n \"乌市欢乐汇\",\n \"乌市大风吹\",\n \"乌鲁木齐都市大聚会10\",\n \"乌鲁木齐2020JP(2)\",\n \"乌鲁木齐2020JP(3)\",\n \"乌鲁木齐2020JP(4)\",\n \"乌鲁木齐2020JP(7)\",\n \"乌市新市区交友群(5\",\n \"乌市高端\",\n ]\n data2 = [\n \"a辽鞍海🌼荔枝红茶4️\",\n \"a辽鞍海🌼荷叶冬瓜6️\",\n \"a辽鞍海🌼蜜桃乌龙7️\",\n \"A✡️辽鞍海✡️大眼\",\n \"A✡️辽鞍海✡️大眼\",\n \"A✡️辽鞍海✡️大眼\",\n \"A✡️辽鞍海✡️大眼\",\n \"🍓鞍山'漫漫长夜😍无\",\n \"🍓鞍山'💮浪漫满屋\",\n \"🍓鞍山'风花雪月,梦\",\n \"辽阳鞍山Vip贵族❹\",\n \"辽阳鞍山Vip贵族❺\",\n \"辽鞍娱乐{5}\",\n \"辽鞍娱乐{3}\",\n \"花儿朵朵③\",\n \"花儿朵朵⑩\",\n \"花儿朵朵⑨\",\n \"花儿朵朵⑦\",\n \"花儿朵朵④\",\n \"花儿朵朵⑥\",\n \"梅兰竹菊\",\n \"小天王经典娱乐83\",\n \"a小鞍@小海@小辽2000\",\n \"辽阳阳光永在风雨后二\",\n \"JS小聚酒馆2000\",\n \"人间烟火🎇2000\",\n \"小天王娱乐经典\",\n \"大灰狼\",\n \"\t下辈子不让你孤单\",\n \"襄平娱乐①\",\n \"\t鞍❤️辽江南小蛮腰\",\n \"沈阳夜未央⑤\",\n \"辽鞍鹊桥相会1群2000人\",\n \"孟泰后山练功🤛🤜\",\n \"需要点咸味\",\n \"拉人给福利5个人一部\",\n \"鞍山交友娱乐群\",\n \"辽鞍皇家贵族人(1)\",\n \"同城🉑娱乐共享群\",\n \"后宫佳丽三千\",\n \"小天王经典娱乐\",\n \"辽鞍☃️炫彩斑斓\",\n \"沈阳夜未央②\",\n \"鞍辽☞你的笑真的很美\",\n \"🛏️上快乐大本营\",\n \"天下至尊舍我其谁1000\",\n \"一二三四五\",\n \"✨我的女孩你别碰✨\",\n \"辽阳鞍山学生VIP\",\n \"后宫佳丽三千\",\n ]\n data3 = [\n \"沈阳美女雷霆交友群\",\n \"南京💐师范职业技术\",\n \"雷霆二年一班\",\n \"南京女神🌺交友\",\n \"王者沈阳醉酒当歌\",\n \"《昆明飞飞鱼》\",\n \"雷霆世纪家园\",\n \"沈阳美女雷霆丘比特\",\n \"沈阳同城学生交友1k\",\n \"沈阳美女🎀验证群\",\n \"沈阳摩天大厦\",\n \"十里桃花2群\",\n \"雷霆世纪佳缘\",\n \"沈阳后宫佳丽三千人\",\n \"王者沈阳美女交友群⑤\",\n \"上海闵行灰领达人\",\n \"沈阳酒醉金迷夜逍遥4\",\n \"苏州~此妖、未成精\",\n \"雷霆二年二班\",\n \"雷霆初一二班\",\n \"沈阳旺仔俱乐部\",\n \"南京🍀大学生👗论坛\",\n \"王者王者荣耀(苏城)\",\n \"南京💖紫罗兰之夜\",\n \"沈阳聊天交友大群A(\",\n \"成都💋耍💄耍💗💗\",\n \"辽宁沈阳不夜城1000\",\n \"南京小姐姐群\",\n ]\n data4 = [\n \"一二次200k🈲️躺群\",\n \"一二次已开🈲 纯三 陆瑀\",\n \"一二次klq200k🚫躺群\",\n \"一二次klq60k🈲️纯三\",\n \"一二次100k🈲纯三躺群TNT\",\n \"一二次200k扩列群🈲️纯三\",\n \"一二次扩列群188k🈲纯三tnt躺群\",\n \"国乙已开200k🈲纯三 不懂群名\",\n \"国乙自由扩150k🈲️看不懂群名\",\n \"国乙klq已开🈲️TNT、纯三\",\n ]\n sq = SeriesQun()\n series_quns = sq.series_name(data4)\n print(series_quns)\n","repo_name":"baicao/wxy_model_toolbox","sub_path":"common_model/series_qun.py","file_name":"series_qun.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
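+A worked sketch of the word_liberalization() combination used in cal_score above, taking hp.e as math.e (an assumption; the module reads it from its hyperparameters). Balanced left/right entropies score far higher than lopsided ones:
+
+    import math
+
+    E = math.e
+
+    def word_liberalization(el, er):
+        return (math.log((el * E ** er + 0.00001) / (abs(el - er) + 1), E)
+                + math.log((er * E ** el + 0.00001) / (abs(el - er) + 1), E))
+
+    print(word_liberalization(2.0, 2.0))   # ~5.39
+    print(word_liberalization(4.0, 0.1))   # ~0.005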
+{"seq_id":"3772915398","text":"import statistics\r\nimport math\r\nimport json\r\ntry:\r\n from .common import first_float\r\nexcept ImportError:\r\n from common import first_float\r\n\r\ndef score_list_to_score(score_list):\r\n all_keys = \"ZXCVBNMASDFGHJQWERTYUzxcvbnmasdfghjqwertyu\"\r\n all_time = [i for i in score_list if type(i) == float]\r\n try:\r\n average_time = statistics.mode(all_time) #mode\r\n except statistics.StatisticsError: #if no mode or 2 mode\r\n average_time = None\r\n if not average_time or all_time.count(average_time) < len(all_time)/4:\r\n average_time = sorted(all_time)[round(len(all_time) / 2)] # use median\r\n\r\n # edit average dashes here: ↓\r\n lowest_lim = average_time / 16\r\n lowest_time = min([i for i in all_time if i >= lowest_lim])\r\n\r\n score = \"\"\r\n for msg in score_list:\r\n if type(msg) == float:\r\n msg = (round(msg / lowest_time)) * \"-\"\r\n elif len(msg) > 1:\r\n msg = [key for key in msg if key in all_keys]\r\n msg = f\"({''.join(set(msg))})\"\r\n score += msg\r\n\r\n # score has been created but have no line break\r\n # so it is created here\r\n # edit length of each line below:\r\n line_break = 70\r\n to_break = line_break\r\n score = score.replace(\"-\", \" \").strip().replace(\" \", \"-\")\r\n score = list(score)\r\n for idx, key in enumerate(score):\r\n if idx > to_break and key == \"-\" and idx != len(score) - 1 and score[idx + 1] != '-':\r\n score[idx] = \"\\n\"\r\n to_break += line_break\r\n # 0.0059 is delay by time.sleep() function\r\n score = f\"(beat{round(lowest_time - 0.0059, 3)})-\\n{''.join(score)}\"\r\n return score\r\n\r\ndef key_list_to_score_list(key_list:list):\r\n score_list = [0.0]\r\n temp_beat = 0.15\r\n for key in key_list:\r\n if \"beat\" in key.lower():\r\n temp_beat = first_float(key)\r\n elif key == \"-\":\r\n score_list[-1] += temp_beat + 0.0059\r\n elif key.isnumeric():\r\n score_list[-1] += (temp_beat * (float(key)/10)) + 0.0059\r\n else:\r\n score_list.append(key)\r\n score_list.append(0.109)\r\n return score_list\r\n\r\ndef score_list_to_nightly(score_list:list, name=\"Undefined\"):\r\n all_time = [i for i in score_list if type(i) == float]\r\n try:\r\n average_time = statistics.mode(all_time) # mode\r\n except statistics.StatisticsError: # if no mode or 2 mode\r\n average_time = None\r\n if not average_time or all_time.count(average_time) < len(all_time) / 4:\r\n average_time = sorted(all_time)[round(len(all_time) / 2)] # use median\r\n\r\n # edit average dashes here: ↓\r\n lowest_time = average_time / 4\r\n\r\n bpm = round(60 / lowest_time)\r\n jsonFile = {\"data\": {\"isComposed\": True, \"isComposedVersion\": True, \"appName\": \"Genshin\"}, \"name\": name, \"bpm\": bpm,\r\n \"pitch\": \"C\", \"breakpoints\": [0], \"instruments\": [\"Lyre\", \"Lyre\", \"Lyre\"]}\r\n KEYS = ['Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'A', 'S', 'D', 'F', 'G', 'H', 'J', 'Z', 'X', 'C', 'V', 'B', 'N', 'M']\r\n NOTES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\r\n KN = dict(zip(KEYS, NOTES))\r\n def get_representative_key(note:list,ratio:float)->list:\r\n if ratio == 1:\r\n return [[0, note]]\r\n elif ratio < 0.125:\r\n return [[3, note]]\r\n noOf125Left = ratio / 0.125\r\n ratio_key = {8: 0, 4: 1, 2: 2, 1: 3}\r\n final_keys = []\r\n for value, note_type in ratio_key.items():\r\n needed_no = math.floor(noOf125Left / value)\r\n if needed_no:\r\n final_keys += [note_type] * needed_no\r\n noOf125Left %= value\r\n final_keys.sort()\r\n self_note = final_keys[0]\r\n extra_note = [[note_type, []] for 
note_type in final_keys[1:]]\r\n\r\n        return [[self_note,note]] + extra_note\r\n\r\n    columns = []\r\n    idx = 0\r\n    while idx < len(score_list):\r\n        if isinstance(score_list[idx], float):\r\n            ratio = score_list[idx] / lowest_time\r\n            columns += get_representative_key([], ratio)\r\n            idx += 1\r\n            continue\r\n        # current note is str\r\n        note = [[KN[key], \"100\"] for key in score_list[idx].upper() if key in KN]\r\n        try:\r\n            length = score_list[idx + 1]\r\n            ratio = length / lowest_time\r\n            idx += 2\r\n        except IndexError:\r\n            ratio = 1\r\n            idx += 2\r\n        except TypeError:\r\n            # next entry is another note (str), not a duration; dividing a str\r\n            # raises TypeError, so fall back to the default 0.109s length and\r\n            # only advance by one so the following note is not skipped\r\n            ratio = 0.109 / lowest_time\r\n            idx += 1\r\n        notes_list = get_representative_key(note, ratio)\r\n        columns += notes_list\r\n    jsonFile[\"columns\"] = columns\r\n    jsonFile = json.dumps([jsonFile])\r\n    return jsonFile","repo_name":"stanX19/genshin_lyre","sub_path":"utils/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
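The timing quantization in score_list_to_score above is easiest to follow with numbers; a minimal standalone sketch of the same mode-with-median-fallback logic (the gap values are hypothetical):

import statistics

# Hypothetical key-press gaps in seconds; 0.3 is the most common gap.
all_time = [0.3, 0.3, 0.3, 0.6, 0.9, 0.31]
try:
    average_time = statistics.mode(all_time)        # 0.3
except statistics.StatisticsError:
    average_time = None
if not average_time or all_time.count(average_time) < len(all_time) / 4:
    average_time = sorted(all_time)[round(len(all_time) / 2)]  # median fallback

lowest_lim = average_time / 16                      # gaps shorter than this are ignored
lowest_time = min(t for t in all_time if t >= lowest_lim)
print(round(0.9 / lowest_time))                     # 3 -> rendered as "---"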
+{"seq_id":"12558608899","text":"import math\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.contrib.image.python.ops import image_ops\n\ndef one_hot( indices, depth, on_value=1, off_value=0, axis=-1, dtype=None, name=None):\n # only valid for 1-D indices\n one_hot_matrix = np.eye(depth)[np.array(indices)]\n if(axis == 0):\n one_hot_matrix = np.transpose(one_hot_matrix)\n if not dtype == None:\n one_hot_matrix = one_hot_matrix.astype(dtype)\n\n return one_hot_matrix\n\n\n\n\ndef areas(gt_bboxes):\n ymin, xmin, ymax, xmax = np.split(gt_bboxes, 4, axis=1)\n return (xmax - xmin) * (ymax - ymin)\n\ndef intersection(gt_bboxes, default_bboxes):\n # num_anchors x 1\n ymin, xmin, ymax, xmax = np.split(gt_bboxes, 4, axis=1)\n # 1 x num_anchors\n gt_ymin, gt_xmin, gt_ymax, gt_xmax = [np.transpose(b, perm=[1, 0]) for b in np.split(default_bboxes, 4, axis=1)]\n # broadcast here to generate the full matrix\n int_ymin = max(ymin, gt_ymin)\n int_xmin = max(xmin, gt_xmin)\n int_ymax = min(ymax, gt_ymax)\n int_xmax = min(xmax, gt_xmax)\n h = max(int_ymax - int_ymin, 0.)\n w = max(int_xmax - int_xmin, 0.)\n return h * w\n\ndef iou_matrix(gt_bboxes, default_bboxes):\n inter_vol = intersection(gt_bboxes, default_bboxes)\n # broadcast\n union_vol = areas(gt_bboxes) + np.transpose(areas(default_bboxes)) - inter_vol\n return np.where(np.equal(union_vol, 0.0),\n np.zeros_like(inter_vol), np.true_divide(inter_vol, union_vol))\n\ndef do_dual_max_match(overlap_matrix, low_thres, high_thres, ignore_between=True, gt_max_first=True):\n '''\n overlap_matrix: num_gt * num_anchors\n '''\n # first match from anchors' side\n anchors_to_gt = np.argmax(overlap_matrix, axis=0)\n # the matching degree\n match_values = np.amax(overlap_matrix, axis=0)\n\n #positive_mask = tf.greater(match_values, high_thres)\n less_mask = np.less(match_values, low_thres)\n between_mask = np.logical_and(np.less(match_values, high_thres), np.greater_equal(match_values, low_thres))\n negative_mask = less_mask if ignore_between else between_mask\n ignore_mask = between_mask if ignore_between else less_mask\n # fill all negative positions with -1, all ignore positions is -2\n match_indices = np.where(negative_mask, -1 * np.ones_like(anchors_to_gt), anchors_to_gt)\n match_indices = np.where(ignore_mask, -2 * np.ones_like(match_indices), match_indices)\n\n # negtive values has no effect in tf.one_hot, that means all zeros along that axis\n # so all positive match positions in anchors_to_gt_mask is 1, all others are 0\n anchors_to_gt_mask = one_hot(np.clip(match_indices, -1, np.shape(overlap_matrix)[0].astype(np.int64)),\n np.shape(overlap_matrix)[0], on_value=1, off_value=0, axis=0, dtype=np.int32)\n # match from ground truth's side\n gt_to_anchors = np.argmax(overlap_matrix, axis=1)\n\n if gt_max_first:\n # the max match from ground truth's side has higher priority\n left_gt_to_anchors_mask = one_hot(gt_to_anchors, np.shape(overlap_matrix)[1], on_value=1, off_value=0, axis=1, dtype=np.int32)\n else:\n # the max match from anchors' side has higher priority\n # use match result from ground truth's side only when the the matching degree from anchors' side is lower than position threshold\n left_gt_to_anchors_mask = np.logical_and(np.amax(anchors_to_gt_mask, axis=1, keep_dims=True) < 1,\n one_hot(gt_to_anchors, np.shape(overlap_matrix)[1], on_value=True, off_value=False, axis=1, dtype=np.bool_)\n ).astype(np.int64)\n # can not use left_gt_to_anchors_mask here, because there are many ground truthes match to one anchor, we should pick the 
highest one even when we are merging matching from ground truth side\n    left_gt_to_anchors_scores = overlap_matrix * left_gt_to_anchors_mask.astype(np.float32)\n    # merge matching results from ground truth's side with the original matching results from anchors' side\n    # then select all the overlap scores of those matching pairs\n    gt_indices = np.where(np.amax(left_gt_to_anchors_mask, axis=0) > 0,\n                          np.argmax(left_gt_to_anchors_scores, axis=0),\n                          anchors_to_gt)\n    selected_scores = overlap_matrix[gt_indices, np.arange(np.shape(overlap_matrix)[1])]\n    # return the matching results for both foreground anchors and background anchors, also with overlap scores\n    return np.where(np.amax(left_gt_to_anchors_mask, axis=0) > 0,\n                    np.argmax(left_gt_to_anchors_scores, axis=0),\n                    match_indices), selected_scores\n\n# def save_anchors(bboxes, labels, anchors_point):\n#     if not hasattr(save_image_with_bbox, \"counter\"):\n#         save_image_with_bbox.counter = 0  # it doesn't exist yet, so initialize it\n#     save_image_with_bbox.counter += 1\n\n#     np.save('./debug/bboxes_{}.npy'.format(save_image_with_bbox.counter), np.copy(bboxes))\n#     np.save('./debug/labels_{}.npy'.format(save_image_with_bbox.counter), np.copy(labels))\n#     np.save('./debug/anchors_{}.npy'.format(save_image_with_bbox.counter), np.copy(anchors_point))\n#     return save_image_with_bbox.counter\n\nclass AnchorEncoder(object):\n    def __init__(self, allowed_borders, positive_threshold, ignore_threshold, prior_scaling, clip=False):\n        super(AnchorEncoder, self).__init__()\n        self._all_anchors = None\n        self._allowed_borders = allowed_borders\n        self._positive_threshold = positive_threshold\n        self._ignore_threshold = ignore_threshold\n        self._prior_scaling = prior_scaling\n        self._clip = clip\n\n    def center2point(self, center_y, center_x, height, width):\n        return center_y - height / 2., center_x - width / 2., center_y + height / 2., center_x + width / 2.,\n\n    def point2center(self, ymin, xmin, ymax, xmax):\n        height, width = (ymax - ymin), (xmax - xmin)\n        return ymin + height / 2., xmin + width / 2., height, width\n\n    def encode_all_anchors(self, labels, bboxes, all_anchors, all_num_anchors_depth, all_num_anchors_spatial, debug=False):\n        # y, x, h, w are all in range [0, 1] relative to the original image size\n        # shape info:\n        # y_on_image, x_on_image: layers_shapes[0] * layers_shapes[1]\n        # h_on_image, w_on_image: num_anchors\n        assert (len(all_num_anchors_depth)==len(all_num_anchors_spatial)) and (len(all_num_anchors_depth)==len(all_anchors)), 'inconsistent num layers for anchors.'\n        \n        num_layers = len(all_num_anchors_depth)\n        list_anchors_ymin = []\n        list_anchors_xmin = []\n        list_anchors_ymax = []\n        list_anchors_xmax = []\n        tiled_allowed_borders = []\n        for ind, anchor in enumerate(all_anchors):\n            anchors_ymin_, anchors_xmin_, anchors_ymax_, anchors_xmax_ = self.center2point(anchor[0], anchor[1], anchor[2], anchor[3])\n\n            list_anchors_ymin.append(np.reshape(anchors_ymin_, [-1]))\n            list_anchors_xmin.append(np.reshape(anchors_xmin_, [-1]))\n            list_anchors_ymax.append(np.reshape(anchors_ymax_, [-1]))\n            list_anchors_xmax.append(np.reshape(anchors_xmax_, [-1]))\n\n            tiled_allowed_borders.extend([self._allowed_borders[ind]] * all_num_anchors_depth[ind] * all_num_anchors_spatial[ind])\n\n        anchors_ymin = np.concatenate(list_anchors_ymin, 0)\n        anchors_xmin = np.concatenate(list_anchors_xmin, 0)\n        anchors_ymax = np.concatenate(list_anchors_ymax, 0)\n        anchors_xmax = np.concatenate(list_anchors_xmax, 0)\n\n        if self._clip:\n            anchors_ymin = np.clip(anchors_ymin, 0., 1.)\n            
anchors_xmin = np.clip(anchors_xmin, 0., 1.)\n            anchors_ymax = np.clip(anchors_ymax, 0., 1.)\n            anchors_xmax = np.clip(anchors_xmax, 0., 1.)\n\n        anchor_allowed_borders = np.stack(tiled_allowed_borders, 0)\n\n        inside_mask = np.logical_and(np.logical_and(anchors_ymin > -anchor_allowed_borders * 1.,\n                                                    anchors_xmin > -anchor_allowed_borders * 1.),\n                                    np.logical_and(anchors_ymax < (1. + anchor_allowed_borders * 1.),\n                                                    anchors_xmax < (1. + anchor_allowed_borders * 1.)))\n\n        anchors_point = np.stack([anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax], axis=-1)\n\n        # save_anchors_op = tf.py_func(save_anchors,\n        #                 [bboxes,\n        #                 labels,\n        #                 anchors_point],\n        #                 tf.int64, stateful=True)\n\n        # with tf.control_dependencies([save_anchors_op]):\n        overlap_matrix = iou_matrix(bboxes, anchors_point) * np.expand_dims(inside_mask, 0).astype(np.float32)\n        matched_gt, gt_scores = do_dual_max_match(overlap_matrix, self._ignore_threshold, self._positive_threshold)\n        # get all positive matching positions\n        matched_gt_mask = matched_gt > -1\n        matched_indices = np.clip(matched_gt, 0, np.iinfo(np.int64).max)\n        # the labels here may be chaotic at those non-positive positions\n        gt_labels = np.take(labels, matched_indices)\n        # filter the invalid labels\n        gt_labels = gt_labels * matched_gt_mask.astype(np.int64)\n        # set those ignored positions to -1\n        gt_labels = gt_labels + (-1 * (matched_gt < -1).astype(np.int64))\n\n        # gather the matched boxes row-wise, then unpack their four coordinates\n        gt_ymin, gt_xmin, gt_ymax, gt_xmax = np.take(bboxes, matched_indices, axis=0).T\n\n        # transform to center / size.\n        gt_cy, gt_cx, gt_h, gt_w = self.point2center(gt_ymin, gt_xmin, gt_ymax, gt_xmax)\n        anchor_cy, anchor_cx, anchor_h, anchor_w = self.point2center(anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax)\n        # encode features.\n        # the prior_scaling (in fact 5 and 10) is used to balance the regression loss of the center and the width (or height)\n        gt_cy = (gt_cy - anchor_cy) / anchor_h / self._prior_scaling[0]\n        gt_cx = (gt_cx - anchor_cx) / anchor_w / self._prior_scaling[1]\n        gt_h = np.log(gt_h / anchor_h) / self._prior_scaling[2]\n        gt_w = np.log(gt_w / anchor_w) / self._prior_scaling[3]\n        # now gt_localizations is our regression target, but may also be chaotic at those non-positive positions\n        if debug:\n            gt_targets = np.stack([anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax], axis=-1)\n        else:\n            gt_targets = np.stack([gt_cy, gt_cx, gt_h, gt_w], axis=-1)\n        # set all targets of non-positive positions to 0\n        gt_targets = np.expand_dims(matched_gt_mask.astype(np.float32), -1) * gt_targets\n        self._all_anchors = (anchor_cy, anchor_cx, anchor_h, anchor_w)\n        return gt_targets, gt_labels, gt_scores\n\n    # return a list, of which each is:\n    #   shape: [feature_h, feature_w, num_anchors, 4]\n    #   order: ymin, xmin, ymax, xmax\n    def decode_all_anchors(self, pred_location, num_anchors_per_layer):\n        assert self._all_anchors is not None, 'no anchors to decode.'\n        anchor_cy, anchor_cx, anchor_h, anchor_w = self._all_anchors\n\n        pred_h = np.exp(pred_location[:, -2] * self._prior_scaling[2]) * anchor_h\n        pred_w = np.exp(pred_location[:, -1] * self._prior_scaling[3]) * anchor_w\n        pred_cy = pred_location[:, 0] * self._prior_scaling[0] * anchor_h + anchor_cy\n        pred_cx = pred_location[:, 1] * self._prior_scaling[1] * anchor_w + anchor_cx\n\n        return np.split(np.stack(self.center2point(pred_cy, pred_cx, pred_h, pred_w), axis=-1), num_anchors_per_layer, axis=0)\n\n    def ext_decode_all_anchors(self, pred_location, all_anchors, all_num_anchors_depth, all_num_anchors_spatial):\n        assert 
(len(all_num_anchors_depth)==len(all_num_anchors_spatial)) and (len(all_num_anchors_depth)==len(all_anchors)), 'inconsistent num layers for anchors.'\n        num_anchors_per_layer = []\n        for ind in range(len(all_anchors)):\n            num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind])\n\n        num_layers = len(all_num_anchors_depth)\n        list_anchors_ymin = []\n        list_anchors_xmin = []\n        list_anchors_ymax = []\n        list_anchors_xmax = []\n        tiled_allowed_borders = []\n        for ind, anchor in enumerate(all_anchors):\n\n            anchors_ymin_, anchors_xmin_, anchors_ymax_, anchors_xmax_ = self.center2point(anchor[0], anchor[1], anchor[2], anchor[3])\n\n            list_anchors_ymin.append(np.reshape(anchors_ymin_, [-1]))\n            list_anchors_xmin.append(np.reshape(anchors_xmin_, [-1]))\n            list_anchors_ymax.append(np.reshape(anchors_ymax_, [-1]))\n            list_anchors_xmax.append(np.reshape(anchors_xmax_, [-1]))\n            # if(ind > 1):\n            #     print('\\n==\\n anchors_ymin_ = {}, anchors_ymax_ = {}'.format(np.reshape(anchors_ymin_, [-1]),np.reshape(anchors_ymax_, [-1])))\n        anchors_ymin = np.concatenate(list_anchors_ymin, 0)\n        anchors_xmin = np.concatenate(list_anchors_xmin, 0)\n        anchors_ymax = np.concatenate(list_anchors_ymax, 0)\n        anchors_xmax = np.concatenate(list_anchors_xmax, 0)\n\n        anchor_cy, anchor_cx, anchor_h, anchor_w = self.point2center(anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax)\n        selected = [318, 330, 368, 369, 379]\n        # print(\"\\n==\\n selected anchor = \\n{}\\n{}\\n{}\\n{}\".format(np.take(anchor_cy, selected), np.take(anchor_cx, selected), \n        #                                 np.take(anchor_h, selected), np.take(anchor_w, selected)))\n        # print(\"\\n==\\n pred_offset = \\n{}\".format(np.take(pred_location, selected, axis=0)))\n\n        pred_h = np.exp(pred_location[:,-2] * self._prior_scaling[2]) * anchor_h\n        pred_w = np.exp(pred_location[:, -1] * self._prior_scaling[3]) * anchor_w\n        pred_cy = pred_location[:, 0] * self._prior_scaling[0] * anchor_h + anchor_cy\n        pred_cx = pred_location[:, 1] * self._prior_scaling[1] * anchor_w + anchor_cx\n        # # print(\"\\n==\\n pred cy cx h w = \\n{}\\n{}\\n{}\\n{}\".format(np.take(pred_cy, selected), np.take(pred_cx, selected), \n        #                                 np.take(pred_h, selected), np.take(pred_w, selected)))    \n        split_param = []\n        split_param.append(num_anchors_per_layer[0])\n        for layer_num in num_anchors_per_layer[1:-1]:\n            split_param.append(layer_num + split_param[-1])\n\n        return np.split(np.stack(self.center2point(pred_cy, pred_cx, pred_h, pred_w), axis=-1), split_param, axis=0)\n\nclass AnchorCreator(object):\n    def __init__(self, img_shape, layers_shapes, anchor_scales, extra_anchor_scales, anchor_ratios, layer_steps):\n        super(AnchorCreator, self).__init__()\n        # img_shape -> (height, width)\n        self._img_shape = img_shape\n        self._layers_shapes = layers_shapes\n        self._anchor_scales = anchor_scales\n        self._extra_anchor_scales = extra_anchor_scales\n        self._anchor_ratios = anchor_ratios\n        self._layer_steps = layer_steps\n        self._anchor_offset = [0.5] * len(self._layers_shapes)\n\n    def get_layer_anchors(self, layer_shape, anchor_scale, extra_anchor_scale, anchor_ratio, layer_step, offset = 0.5):\n        ''' assume layer_shape[0] = 6, layer_shape[1] = 5\n        x_on_layer = [[0, 1, 2, 3, 4],\n                      [0, 1, 2, 3, 4],\n                      [0, 1, 2, 3, 4],\n                      [0, 1, 2, 3, 4],\n                      [0, 1, 2, 3, 4],\n                      [0, 1, 2, 3, 4]]\n        y_on_layer = [[0, 0, 0, 0, 0],\n                      [1, 1, 1, 1, 1],\n                      [2, 2, 2, 2, 2],\n                      [3, 3, 3, 3, 3],\n                      [4, 4, 4, 4, 4],\n                      [5, 5, 5, 5, 5]]\n        '''\n        x_on_layer, y_on_layer = np.meshgrid(range(layer_shape[1]), range(layer_shape[0]))\n\n        y_on_image = (y_on_layer.astype(np.float32) + 
offset) * layer_step / self._img_shape[0]\n x_on_image = (x_on_layer.astype(np.float32) + offset) * layer_step / self._img_shape[1]\n # print('anchor_scale = {}, {}'.format(len(anchor_scale), anchor_scale))\n # print('anchor_ratio = {}, {}'.format(len(anchor_ratio), anchor_ratio))\n\n num_anchors_along_depth = len(anchor_scale) * len(anchor_ratio) + len(extra_anchor_scale)\n num_anchors_along_spatial = layer_shape[1] * layer_shape[0]\n\n list_h_on_image = []\n list_w_on_image = []\n\n global_index = 0\n # for square anchors\n for _, scale in enumerate(extra_anchor_scale):\n list_h_on_image.append(scale)\n list_w_on_image.append(scale)\n global_index += 1\n # for other aspect ratio anchors\n for scale_index, scale in enumerate(anchor_scale):\n for ratio_index, ratio in enumerate(anchor_ratio):\n list_h_on_image.append(scale / math.sqrt(ratio))\n list_w_on_image.append(scale * math.sqrt(ratio))\n global_index += 1\n # shape info:\n # y_on_image, x_on_image: layers_shapes[0] * layers_shapes[1]\n # h_on_image, w_on_image: num_anchors_along_depth\n\n print('list_h_on_image = {}, list_w_on_image = {}'.format(list_h_on_image, list_w_on_image))\n return np.expand_dims(y_on_image, axis=-1), np.expand_dims(x_on_image, axis=-1), \\\n np.array(list_h_on_image), \\\n np.array(list_w_on_image), num_anchors_along_depth, num_anchors_along_spatial\n\n def get_all_anchors(self):\n all_anchors = []\n all_num_anchors_depth = []\n all_num_anchors_spatial = []\n for layer_index, layer_shape in enumerate(self._layers_shapes):\n anchors_this_layer = self.get_layer_anchors(layer_shape,\n self._anchor_scales[layer_index],\n self._extra_anchor_scales[layer_index],\n self._anchor_ratios[layer_index],\n self._layer_steps[layer_index],\n self._anchor_offset[layer_index])\n all_anchors.append(anchors_this_layer[:-2])\n all_num_anchors_depth.append(anchors_this_layer[-2])\n all_num_anchors_spatial.append(anchors_this_layer[-1])\n return all_anchors, all_num_anchors_depth, all_num_anchors_spatial\n\n","repo_name":"roychen97/detect_and_track","sub_path":"ssdface_freeze/anchor_manipulator_np.py","file_name":"anchor_manipulator_np.py","file_ext":"py","file_size_in_byte":18519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
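A toy run of the first stage of do_dual_max_match makes the -1/-2 filling concrete; the overlap matrix and thresholds below are hypothetical:

import numpy as np

# Hypothetical 2 ground truths x 3 anchors overlap (IoU) matrix.
overlap = np.array([[0.8, 0.3, 0.1],
                    [0.2, 0.4, 0.7]])
anchors_to_gt = np.argmax(overlap, axis=0)   # [0, 1, 1]: best gt per anchor
match_values = np.amax(overlap, axis=0)      # [0.8, 0.4, 0.7]

low_thres, high_thres = 0.3, 0.5
match = np.where(match_values < low_thres, -1, anchors_to_gt)   # negatives -> -1
match = np.where((match_values >= low_thres) & (match_values < high_thres), -2, match)  # ignored -> -2
print(match)  # [ 0 -2  1]: anchor 1 sits in the ignore band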
+{"seq_id":"24959325885","text":"import serial\nimport serial.tools.list_ports\nimport logging\n\n\nclass BldcDebugIf:\n def __init__(self, comnum='auto') -> None:\n self.C_USB_DESC = \"STMicroelectronics STLink Virtual COM Port\"\n self.ser = serial.Serial()\n self.ser.baudrate = 115200\n self.comnum = comnum\n\n # ログ設定\n self.loglevel = logging.INFO\n self.logger = logging.getLogger('BldcSerial')\n self.logger.setLevel(self.loglevel) # ログレベル\n\n formatter = logging.Formatter('[%(name)s][%(levelname)s]:%(message)s')\n self.ch = logging.StreamHandler()\n self.ch.setLevel(self.loglevel)\n self.ch.setFormatter(formatter)\n self.logger.addHandler(self.ch)\n\n def __enter__(self):\n self.open()\n return self\n \n def __exit__(self, exception_type, exception_value, traceback):\n self.close()\n\n def open(self):\n if self.comnum == 'auto':\n self.auto_connect()\n else:\n self.ser.port = self.comnum\n \n try:\n self.ser.open()\n self.logger.debug('Success!')\n except:\n self.logger.debug('Failure...')\n \n def close(self):\n if self.ser.is_open:\n self.ser.close()\n self.logger.debug('Serial Close')\n \n def auto_connect(self):\n devices = serial.tools.list_ports.comports()\n for device in devices:\n if self.C_USB_DESC in device.description:\n self.logger.debug(f'Connecting {device.description}')\n self.ser.port = device.name\n break\n\n def readline(self):\n rxstr = str(self.ser.readline().decode(encoding='utf-8').strip().replace(\"\\x00\",\"\"))\n self.logger.debug(':'+rxstr)\n return rxstr\n\n def print_ver(self):\n self.ser.write(b'b')\n print('**********')\n print(self.readline())\n print(self.readline())\n print(self.readline())\n print('**********')\n \n\ndef test():\n with BldcDebugIf() as bldc_if:\n bldc_if.print_ver()\n\nif __name__ == '__main__':\n test()\n","repo_name":"Moryu-Io/Bldc_Servo_Driver_Prj","sub_path":"python/test_mod_interface.py","file_name":"test_mod_interface.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"4259555312","text":"from typing import Sequence\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models import Model\nfrom django.utils.timezone import now\n\nfrom datahub.core.exceptions import DataHubError\n\n# Registry of all defined reports (mapping report IDs to Report instances)\n_registry = {}\n\n\nclass Report:\n \"\"\"\n Base class for reports.\n\n See QuerySetReport for reports based on a QuerySet.\n \"\"\"\n\n id: str = None\n name: str = None\n model: Model = None\n permissions_required: Sequence = None\n field_titles: dict = None\n filename_template = '{name} - {timestamp}'\n\n _required_attrs = (\n 'id',\n 'name',\n 'model',\n 'permissions_required',\n 'field_titles',\n )\n\n @classmethod\n def __init_subclass__(cls, is_abstract=False, **kwargs):\n \"\"\"Called on class declaration to register the report.\"\"\"\n super().__init_subclass__(**kwargs)\n if not is_abstract:\n cls._validate_attrs()\n _registry[cls.id] = cls()\n\n def check_permission(self, user):\n \"\"\"Checks whether the user has permission for this report.\"\"\"\n return user.has_perms(self.permissions_required)\n\n def get_filename(self):\n \"\"\"Gets the filename (excluding extension) to use for the report.\"\"\"\n timestamp = now().strftime('%Y-%m-%d-%H-%M-%S')\n return self.filename_template.format(name=self.name, timestamp=timestamp)\n\n def rows(self):\n \"\"\"Returns an iterator of the rows for this report.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def _validate_attrs(cls):\n missing_attrs = [attr for attr in cls._required_attrs if getattr(cls, attr, None) is None]\n if missing_attrs:\n raise DataHubError(f'Required report attributes {missing_attrs} not set')\n if 'ID' in cls.field_titles.values():\n raise DataHubError(\n 'ID cannot be used as a column title due to the potential confusion with SYLK '\n 'files in e.g. Excel',\n )\n\n\nclass QuerySetReport(Report, is_abstract=True):\n \"\"\"Base class for reports based on a QuerySet.\"\"\"\n\n queryset = None\n\n _required_attrs = (\n *Report._required_attrs,\n 'queryset',\n )\n\n def rows(self):\n \"\"\"Returns an iterator of the rows for this report.\"\"\"\n return self.queryset.values(*self.field_titles.keys()).iterator()\n\n\ndef get_reports_by_model(user):\n \"\"\"\n Returns a dictionary mapping models to list of reports.\n\n Only reports that the user is allowed to access are returned.\n \"\"\"\n reports_by_model = {}\n\n for report in _registry.values():\n if report.check_permission(user):\n reports_for_model = reports_by_model.setdefault(report.model, [])\n reports_for_model.append(report)\n\n return reports_by_model\n\n\ndef report_exists(report_id):\n \"\"\"Checks if a report exists.\"\"\"\n return report_id in _registry\n\n\ndef get_report_by_id(report_id, user):\n \"\"\"\n Gets a report instance for using its ID.\n\n If the user does not have the correct permission for the report, PermissionDenied is raised.\n \"\"\"\n report = _registry[report_id]\n if not report.check_permission(user):\n raise PermissionDenied\n return report\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/admin_report/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"}
+{"seq_id":"18914508532","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\n\n# weight initializesrs\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n# convolution and pooling\n\ndef conv2d(x, W):\n return tf.nn.conv2d(\n x,\n W,\n strides=[1, 1, 1, 1],\n padding='SAME'\n )\n\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(\n x,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME'\n )\n\n\nif __name__ == \"__main__\":\n\n DEEP_LAYERS = 1024\n LEARNING_RATE = 1e-4 # 1e-3 for GD, 1e-4 for ADAM\n\n ###### SETUP ######\n\n # dataset, because we need data\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n # tensorflow interactive session, the connection to the C++ backend\n sess = tf.InteractiveSession()\n\n x = tf.placeholder(tf.float32, shape=[None, 784]) #input placeholder\n y_ = tf.placeholder(tf.float32, shape=[None, 10]) #output placeholder\n\n ###### SHAPE AND CONVULSE ON INPUT DATA TO REDUCE DIMENSIONALITY TO 7X7 ######\n\n # we must reshape the image (x) to a 4d tensor\n # QUESTION: what does the negative one represent??\n # second and third dimensions are image width x height\n # third dimension corresponds to number of channels (1, black (something) or white (nothing))\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n # FIRST CONVULSION LAYER\n\n # convulse\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n\n # convolve the image\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # max pool to a 14x14\n h_pool1 = max_pool_2x2(h_conv1)\n\n # SECOND CONVULSION LAYER\n\n # convulse\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n\n # pool\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n\n #for dropout and draining accuracy\n keep_prob = tf.placeholder(tf.float32) #only have one instance of this line\n\n ##\n ######DONT FIDDLE ABOVE HERE\n ##\n\n ###### HIDDEN LAYERS ######\n\n # Densely connected Layer 1\n W_fc1 = weight_variable([7 * 7 * 64, DEEP_LAYERS])\n b_fc1 = bias_variable([DEEP_LAYERS])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n h_fc1 = tf.nn.relu6(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) #our densly connected layer\n # h_fc16 = tf.nn.sigmoid(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) #our densly connected layer\n\n ###### Interlayer DROPOUT ######\n # h_fc1_drop1 = tf.nn.dropout(h_fc1, keep_prob)\n\n ###### MORE HIDDEN LAYERS ######\n # W_fc12 = weight_variable([1024, 1024])\n # b_fc12 = bias_variable([1024])\n # h_fc12 = tf.nn.relu(tf.matmul(h_fc1, W_fc12) + b_fc12) # our densly connected layer\n\n # DROPOUT\n # h_fc1_drop2 = tf.nn.dropout(h_fc12, keep_prob)\n\n ###### EVEN MORE ######\n # W_fc13 = weight_variable([1024, 1024])\n # b_fc13 = bias_variable([1024])\n # h_fc13 = tf.nn.relu(tf.matmul(h_fc1_drop2, W_fc13) + b_fc13) # our densly connected layer\n\n # DROPOUT AGAIN\n # h_fc1_drop3 = tf.nn.dropout(h_fc13, keep_prob)\n\n # BATCH NORMALIZATION\n # epsilon = 1e-3 # Small epsilon value for the BN transform\n # scale = tf.Variable(tf.ones([1024]))\n # beta = tf.Variable(tf.zeros([1024]))\n # batch_mean, batch_var = tf.nn.moments(h_fc1, [0])\n # h_fcl_batch_norm = tf.nn.batch_normalization(h_fc1, batch_mean, batch_var, beta, scale, epsilon)\n\n ##\n ######DONT FIDDLE BELLOW HERE\n ##\n\n ###### READOUT AND VECTORIZE 
######\n\n    # READOUT LAYER\n    W_fc2 = weight_variable([DEEP_LAYERS, 10])\n    b_fc2 = bias_variable([10])\n\n    # vectorize\n    # vectorized image, times weights, plus bias\n    ###\n    ### Modify based on the final layer used\n    ###\n    y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2 #Basic Single dense layer\n    # y_conv = tf.matmul(h_fc1_drop1, W_fc2) + b_fc2 #With Dropout\n    # y_conv = tf.matmul(h_fcl_batch_norm, W_fc2) + b_fc2 #With Batch Normalization\n    # y_conv = tf.matmul(h_fc12, W_fc2) + b_fc2 #second strongly connected layer\n    # y_conv = tf.matmul(h_fc1_drop3, W_fc2) + b_fc2 #third strongly connected layer\n    # y_conv = tf.matmul(h_fc16, W_fc2) + b_fc2 #Basic Single dense layer with relu6\n\n    ###### TRAIN AND EVALUATE ######\n    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n    train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)\n    # train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)\n    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n    sess.run(tf.global_variables_initializer())\n\n    for i in range(20000):\n        batch = mnist.train.next_batch(50)\n        if i % 100 == 0:\n            train_accuracy = accuracy.eval(feed_dict={\n                x: batch[0], y_: batch[1], keep_prob: 1.0})\n            print(\"step %d, training accuracy %g\" % (i, train_accuracy))\n        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\n    print(\"test accuracy %g\" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\n\n","repo_name":"falkzach/CSCI595-Deep-Knowledge-Transfer","sub_path":"HW3/mnist_deep.py","file_name":"mnist_deep.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
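The 7 * 7 * 64 input size of the first dense layer above follows from the two 2x2 max-pools; a quick check of the shape arithmetic:

# 'SAME' padding with stride-1 convolutions preserves spatial size,
# so only the two 2x2/stride-2 max-pools shrink the image: 28 -> 14 -> 7.
side = 28
for _ in range(2):          # two conv + pool stages
    side //= 2
print(side, side * side * 64)  # 7 3136, i.e. the flattened 7 * 7 * 64 size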
+{"seq_id":"21383855987","text":"from app import *\n\ndef memberpaynow():\n try:\n email = request.json['email']\n parking_id = request.json['parking_id']\n except:\n return jsonify({'message' : 'BAD_PAYLOAD'}) , 400\n cur = mysql.connection.cursor()\n token = generate_token(email)\n checkValue = cur.execute('SELECT * FROM Parking_record WHERE parking_id = %s and parking_id NOT IN (select parking_id from Invoice)', (parking_id,))\n if checkValue > 0:\n result = cur.fetchone()\n now = datetime.datetime.now()\n time_delta = (now - result['entry_datetime'])\n total_seconds = time_delta.total_seconds()\n minutes = total_seconds/60\n if minutes < 15:\n parking_cost = 0\n else:\n hours = math.ceil(minutes/60)\n parking_cost = 15 * hours\n cur.fetchall()\n cur.execute('SELECT main_payment_method,wallet FROM Account WHERE email = %s', (email,))\n token = cur.fetchone()\n if token['main_payment_method'] == 'VISA':\n cur.execute('INSERT INTO Invoice(amount,method,payment_datetime,parking_id) VALUES(%s,%s,%s,%s)',(parking_cost,token['main_payment_method'],now,parking_id,))\n mysql.connection.commit()\n cur.close()\n return jsonify({'payment_datetime' : now, 'amount' : parking_cost, 'method' : token['main_payment_method'], 'wallet' : token['wallet']}) ,201\n else:\n if token['wallet'] < parking_cost:\n mysql.connection.commit()\n cur.close()\n return jsonify({'message' : 'WALLET_MONEY_NOT_SUFFICIENT'}) ,409\n else:\n token['wallet'] = token['wallet'] - parking_cost\n cur.execute('UPDATE Account SET wallet = %s WHERE email = %s' , (token['wallet'],email,))\n cur.execute('INSERT INTO Invoice(amount,method,payment_datetime,parking_id) VALUES(%s,%s,%s,%s)',(parking_cost,token['main_payment_method'],now,parking_id,))\n mysql.connection.commit()\n cur.close()\n return jsonify({'payment_datetime' : now, 'amount' : parking_cost, 'method' : token['main_payment_method'], 'wallet' : token['wallet']}) ,201\n else:\n mysql.connection.commit()\n cur.close()\n return jsonify({'message' : 'PARKING_FEE_IS_PAID'}) ,409","repo_name":"kevctae/park-sa-mart-BackEnd","sub_path":"payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41457050070","text":"\"\"\"Utility ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport tensorflow as tf\nimport numpy as np\n\nfrom os import path\n\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nfrom tools.anchor import locate_anchors as py_locate_anchors\nfrom proposals_connector.connect_proposals import \\\n connect_proposals as py_connect_proposals\n\n\ndef get_patches(images, patch_dim):\n \"\"\"Extract patches from images and put them in \"depth\" output dimension.\n\n Args:\n images: 4-D Tensor with shape [batch, in_rows, in_cols, depth].\n patch_dim: An integer number representing the dimension of patch.\n\n Returns:\n patches: 3-D Tensor with shape\n [batch, out_rows, in_cols, patch_dim**2 * depth].\n \"\"\"\n in_filters = images.get_shape().as_list()[3]\n out_filters = patch_dim**2 * in_filters\n kernel = tf.constant(np.eye(out_filters).\n reshape(patch_dim, patch_dim,\n in_filters, out_filters),\n tf.float32)\n\n patches = tf.nn.conv2d(images, kernel,\n [1, 1, 1, 1], \"SAME\")\n\n return patches\n\n\ndef generate_anchors(feat_map_size, feat_stride, anchor_num):\n \"\"\"Generate anchors on feature map of an image.\n\n Args:\n feat_map_size: Int32 1-D Tensor containing the sizes of feature map\n [feat_map_height, feat_map_width].\n feat_stride: Int32 Constant Tensor containing the stride of kernel on\n feature maps.\n anchor_num: Int32 Constant Tensor containing the number of generated\n anchors.\n\n Returns:\n anchors: 3-D Tensor with shape [height, width, 4] containing the\n generated anchors.\n \"\"\"\n anchors = tf.py_func(py_locate_anchors,\n [feat_map_size, feat_stride, anchor_num],\n tf.int32, name=\"generate_anchors\")\n\n anchors = tf.to_float(anchors)\n\n return anchors\n\n\ndef apply_vertical_deltas_to_anchors(boxes_delta, anchors):\n \"\"\"Compute coordinates of boxes based on its predicted relative coordianates\n (boxes_deltas) and anchors.\n\n Args:\n boxes_delta: Float 2-D Tensor, [num_anchors, 2], containing\n the boxes deltas.\n anchors: Float 2-D Tensor, [num_anchors, 4] containing the\n coordinates of anchors.\n\n Returns:\n boxes: Float 2-D Tensor, [num_anchors, 4], containing the real\n coordinates of boxes.\n \"\"\"\n anchor_y_ctr = (anchors[:, 1] + anchors[:, 3]) / 2.\n anchor_h = anchors[:, 3] - anchors[:, 1] + 1\n vertical_heights = tf.exp(boxes_delta[:, 1]) * anchor_h\n vertical_centers = boxes_delta[:, 0] * anchor_h + anchor_y_ctr\n\n vertical_tops = vertical_centers - vertical_heights / 2.\n vertical_bottoms = vertical_centers + vertical_heights / 2.\n\n boxes = tf.stack([anchors[:, 0], vertical_tops,\n anchors[:, 2], vertical_bottoms ], axis=1)\n\n return boxes\n\n\ndef apply_horizontal_deltas_to_anchors(offset_delta, anchors):\n \"\"\"Compute horizontal coordinates of boxes based on its predicted\n relative coordianates (boxes_deltas) and anchors.\n\n Args:\n offset_delta: Float 2-D Tensor, [num_anchors, 1], containing\n the boxes deltas.\n anchors: Float 2-D Tensor, [num_anchors, 4] containing the\n coordinates of anchors.\n\n Returns:\n offset: Float 2-D Tensor, [num_anchors, 1], containing the real\n offset of boxes.\n \"\"\"\n anchor_x_ctr = (anchors[:, 0] + anchors[:, 2]) / 2.\n anchor_w = anchors[:, 2] - anchors[:, 0] + 1\n offsets = offset_delta[:, 0] * anchor_w + anchor_x_ctr\n\n return offsets\n\n\ndef convert_to_vertical_deltas(coords, anchors):\n \"\"\"Compute relative coordianates of boxes (deltas) based on 
anchors.\n\n    Args:\n        coords: Float 2-D Tensor, [num_boxes, 2], containing the real\n            coordinates of boxes.\n        anchors: Float 2-D Tensor, [num_boxes, 4] containing the\n            coordinates of anchors.\n\n    Returns:\n        boxes_deltas: Float 2-D Tensor, [num_boxes, 2], containing\n            the boxes deltas.\n    \"\"\"\n    coords_y_ctr = coords[:, 1]\n    coords_h = coords[:, 0]\n    anchor_y_ctr = (anchors[:, 1] + anchors[:, 3]) / 2.\n    anchor_h = anchors[:, 3] - anchors[:, 1] + 1\n\n    vertical_ctr = (coords_y_ctr - anchor_y_ctr) / anchor_h\n    vertical_h = tf.log(coords_h / anchor_h)\n\n    boxes_deltas = tf.stack([vertical_ctr, vertical_h], axis=1)\n\n    return boxes_deltas\n\n\ndef convert_to_horizontal_deltas(offsets, anchors):\n    \"\"\"Compute relative coordinates of boxes (deltas) based on anchors.\n\n    Args:\n        offsets: Float 2-D Tensor, [num_offsets, 1], containing offsets.\n        anchors: Float 2-D Tensor, [num_offsets, 4] containing the\n            coordinates of anchors.\n\n    Returns:\n        horizontal_ctr: Float 2-D Tensor, [num_offsets, 1], containing\n            the offsets deltas.\n    \"\"\"\n    x_sides = tf.reshape(offsets, [-1])\n    anchor_x_ctr = (anchors[:, 0] + anchors[:, 2]) / 2.\n    anchor_w = anchors[:, 2] - anchors[:, 0] + 1\n    horizontal_ctr = (x_sides - anchor_x_ctr) / anchor_w\n    horizontal_ctr = tf.reshape(horizontal_ctr, [-1, 1])\n\n    return horizontal_ctr\n\n\ndef convert_box_to_veritical_coords(boxes):\n    \"\"\"Convert the coordinates of proposal boxes to vertical coordinates\n    including the height and y-axis center of the proposal boxes.\n\n    Args:\n        boxes: Float 3-D Tensor, [height, width, 4], containing the coordinates\n            of the proposal boxes.\n\n    Returns:\n        vertical_coords: Float 3-D Tensor, [height, width, 2] containing the\n            vertical coordinates of the proposal boxes.\n    \"\"\"\n    y_ctr = (boxes[:, 1] + boxes[:, 3]) / 2.\n    h = boxes[:, 3] - boxes[:, 1] + 1\n\n    vertical_coords = tf.stack([y_ctr, h], axis=1)\n\n    return vertical_coords\n\ndef rescale_bboxes(bboxes, old_h, old_w, new_h, new_w):\n    \"\"\"Rescale bounding boxes to be suitable for the original image.\n\n    Args:\n        bboxes: A Float32 Tensor with shape [num_bboxes, 5] contains the\n            bounding boxes corresponding to the scaled image.\n        old_h: A Int32 scalar Tensor contains the height of the original image.\n        old_w: A Int32 scalar Tensor contains the width of the original image.\n        new_h: A Int32 scalar Tensor contains the height of the scaled image.\n        new_w: A Int32 scalar Tensor contains the width of the scaled image.\n\n    Returns:\n        scaled_bboxes: A Float32 Tensor with shape [num_bboxes, 5] contains the\n            bounding boxes corresponding to the original image.\n    \"\"\"\n    # Scale coordinates (left, top, right, bottom) of bounding boxes\n    l = bboxes[:, 0] * (tf.to_float(old_w) / tf.to_float(new_w))\n    t = bboxes[:, 1] * (tf.to_float(old_h) / tf.to_float(new_h))\n    r = bboxes[:, 2] * (tf.to_float(old_w) / tf.to_float(new_w))\n    b = bboxes[:, 3] * (tf.to_float(old_h) / tf.to_float(new_h))\n    scores = bboxes[:, 4]\n\n    # Build the bounding boxes corresponding to the original image\n    scaled_bboxes = tf.stack([l, t, r, b, scores], axis=1)\n\n    return scaled_bboxes\n\ndef connect_proposals(proposals, scores, offsets,\n                      offset_scores, img_shape):\n    \"\"\"Connect text proposals returned by neural nets to build the final text\n    bounding boxes. 
This function is a TensorFlow operation built from\n    a python function.\n\n    Args:\n        proposals: A Float32 Tensor with shape [num_proposals, 4] contains the\n            coordinates of each text proposal.\n        scores: A Float32 Tensor with shape [num_proposals, 1] contains the\n            predicted confidence of each text proposal.\n        offsets: A Float32 Tensor with shape [num_proposals, 1] contains the\n            horizontal offset of each text proposal.\n        offset_scores: A Float32 Tensor with shape [num_proposals, 1] contains\n            the predicted confidence of each text proposal.\n    \"\"\"\n    text_lines = tf.py_func(py_connect_proposals,\n                            [proposals, scores, offsets, offset_scores, img_shape],\n                            tf.float32)\n\n    return text_lines\n\n\ndef get_intersect(x, y):\n    \"\"\"Find the intersection of 2 1-D arrays.\n\n    Args:\n        x: The first Int 1-D Tensor.\n        y: The second Int 1-D Tensor.\n\n    Returns:\n        inter_ids: The Int 1-D Tensor containing intersection ids.\n        inter_values: The Int 1-D Tensor containing intersection elements.\n    \"\"\"\n    def py_get_intersect(x, y):\n        inter_mask = np.in1d(x, y)\n        inter_values = x[inter_mask]\n        inter_ids = np.where(inter_mask)[0]\n\n        return inter_ids, inter_values\n\n    x = tf.to_int64(x)\n    y = tf.to_int64(y)\n    inter_ids, inter_values = tf.py_func(py_get_intersect,\n                                         [x, y], [tf.int64, tf.int64],\n                                         name=\"get_intersect\")\n    inter_ids = tf.to_int32(inter_ids)\n    inter_values = tf.to_int32(inter_values)\n\n    return inter_ids, inter_values\n\n\ndef smooth_l1(pred, targets):\n    \"\"\"Calculate the smooth L1 value for loss of regression models.\n            smooth_l1(x) = 0.5 * x^2, if |x| < 1\n                           |x| - 0.5, otherwise\n    Args:\n        pred: A float32 tensor contains the predicted values.\n        targets: A float32 tensor contains the target values.\n    \"\"\"\n    term = pred - targets\n    sign = tf.cast(tf.less(tf.abs(term), 1.0), tf.float32)\n    pos_result = 0.5 * (term * term)\n    neg_result = tf.abs(term) - 0.5\n    result = pos_result * sign + neg_result * tf.abs(1 - sign)\n\n    return result\n","repo_name":"tranbahien/CTPN-TensorFlow","sub_path":"ops/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9662,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"}
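A quick numeric round trip of the vertical delta encoding used above, outside TensorFlow (the anchor and box values are hypothetical):

import numpy as np

anchor_y_ctr, anchor_h = 50.0, 20.0   # hypothetical anchor
gt_y_ctr, gt_h = 55.0, 30.0           # hypothetical ground-truth box

# encode, as in convert_to_vertical_deltas
dy = (gt_y_ctr - anchor_y_ctr) / anchor_h   # 0.25
dh = np.log(gt_h / anchor_h)                # log(1.5)

# decode, as in apply_vertical_deltas_to_anchors, recovers the box
assert np.isclose(dy * anchor_h + anchor_y_ctr, gt_y_ctr)
assert np.isclose(np.exp(dh) * anchor_h, gt_h)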
+{"seq_id":"40129981698","text":"#! python\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom urllib.parse import urlparse\nfrom sys import argv\nfrom collections import namedtuple\nfrom os import path\n\nclass MimeType:\n html = 'text/html'\n js = 'text/javascript'\n css = 'text/css'\n jpg = 'image/jpeg'\n gif = 'image/gif'\n image = ('image/jpeg', 'image/gif')\n text = ('text/html', 'text/css', 'text/javascript')\n ico = 'NotImplemented'\n\n\nclass MungoServer(BaseHTTPRequestHandler):\n\n content_path = path.dirname(path.realpath(__file__))\n collection = ''\n\n def __set_headers(self, response):\n self.send_response(response.code)\n self.send_header('Content-type', response.type)\n self.end_headers()\n\n def do_GET(self):\n path = urlparse(self.path).path\n mime = getattr(MimeType, path.split('.')[-1], 'text/html')\n if mime in MimeType.image:\n try:\n data_fp = open(f'{MungoServer.content_path}/{path}', 'rb')\n except IOError:\n print(f'Not found {path}')\n self.__set_headers(namedtuple('response', ('code', 'type'))('404', 'text/html'))\n self.wfile.write(bytes(f'File not found {MungoServer.content_path}/{path.split(\"/\")[-1]}'))\n else:\n self.__set_headers(namedtuple('response', ('code', 'type'))(200, mime))\n self.wfile.write(data_fp.read())\n data_fp.close()\n elif mime in MimeType.text:\n try:\n if not path or path == '/':\n file = f'{MungoServer.content_path}/{MungoServer.collection}.html'\n else:\n file = f'{MungoServer.content_path}/{path}'\n data_fp = open(file, 'rb')\n except IOError:\n self.__set_headers(namedtuple('response', ('code', 'type'))('404', 'text/html'))\n self.wfile.write(bytes(f'File not found{file}'))\n data_fp.close()\n else:\n data = data_fp.read()\n data_fp.close()\n self.__set_headers(namedtuple('response', ('code', 'type'))(200, mime))\n self.wfile.write(data)\n\n def do_POST(self):\n pass\n\n @staticmethod\n def run(host='', port=9090):\n if port is None:\n port = 9090\n http_server = (host, port)\n print(http_server)\n httpd = HTTPServer(http_server, MungoServer)\n try:\n print(f'Starting server on: {host or \"http://localhost\"}{\":\"+str(port) if port != 80 else \"\"}')\n httpd.serve_forever()\n except KeyboardInterrupt:\n print('Bye!')\n httpd.shutdown()\n exit()\n\n\nif __name__ == '__main__':\n if len(argv) <2 or len(argv) > 3:\n print('Usage: MungoServer []')\n exit(1)\n MungoServer.collection += argv[1]\n port = int(dict(enumerate(argv)).get(2))\n MungoServer.run(port=port)\n","repo_name":"jukny/jsexperiments","sub_path":"NoServer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"3164187224","text":"# from conda import iteritems\n\nfrom Generator.Autoencoder import add_noise, convert_to_integer, load_model, create_auto_encoder, auto_encoder_3d\nfrom Generator.Constraints import *\nfrom Generator.Delenox_Config import *\nfrom Generator.Visualization import *\n\n\nclass NeatGenerator:\n \"\"\"\n NEAT module of the Delenox pipeline, responsible for creating and evolving CPPN's which are\n used to generate lattices (genotype) that represent buildings (phenotype). Evolution uses\n novelty search as the heuristic, which computes the average Euclidean distance to the k-nearest\n neighbors as well as to an archive of unique novel individuals from past generations.\n \"\"\"\n def __init__(self, config, population_id):\n self.experiment = \"\"\n self.population_id = population_id\n self.config = config\n self.config.__setattr__(\"pop_size\", population_size)\n self.population = neat.Population(self.config)\n self.population.add_reporter(neat.StatisticsReporter())\n self.current_phase = 0\n self.current_gen = 0\n self.encoder = None\n self.decoder = None\n self.pool = None\n self.noise = False\n self.archive = []\n self.phase_best_fit = []\n self.archive_lattices = []\n self.neat_metrics = {'Experiment': None, 'Mean Novelty': [], 'Best Novelty': [], 'Node Complexity': [],\n 'Infeasible Size': [], 'Connection Complexity': [], 'Archive Size': [],\n 'Species Count': [], 'Mean Genetic Diversity': [],\n 'Minimum Species Size': [], 'Maximum Species Size': [], 'Mean Species Size': []}\n\n def run_neat(self, phase_number, p_experiment, static=False, noise=False, persistent_archive=True, train_on_archive=True):\n \"\"\"\n Executes one \"exploration\" phase of the Delenox pipeline. A set number of independent evolutionary runs\n are completed and the top N most novel individuals are taken and inserted into a population. 
At the end of\n        the phase we look at the distribution of individuals in the population according to numerous metrics and\n        statistics regarding the evolution of the populations such as the speciation, novelty scores etc.\n        \"\"\"\n        # Check to see if we should clear the novelty archive before starting the next phase.\n        if not persistent_archive:\n            self.archive = []\n            self.archive_lattices.clear()\n\n        if phase_number == 0:\n            self.config = load_config_file()\n            self.__init__(self.config, self.population_id)\n\n        # Re-initialize phase variables accordingly\n        self.phase_best_fit.clear()\n        self.current_gen = 0\n        self.current_phase = phase_number\n        self.experiment = p_experiment\n        self.noise = noise\n\n        # Load last phase's autoencoder, or the seed autoencoder if this is the first phase.\n        if phase_number > 0 and static is False:\n            self.encoder = load_model(\n                \"./Results/{}/Phase{:d}/encoder\".format(p_experiment, phase_number - 1))\n            self.decoder = load_model(\n                \"./Results/{}/Phase{:d}/decoder\".format(p_experiment, phase_number - 1))\n        else:\n            # If the experiment uses a de-noising autoencoder, load the appropriate model.\n            if not noise:\n                self.encoder = load_model(\"./Results/Seed/encoder\")\n                self.decoder = load_model(\"./Results/Seed/decoder\")\n            else:\n                self.encoder = load_model(\"./Results/Seed/encoder_noisy\")\n                self.decoder = load_model(\"./Results/Seed/decoder_noisy\")\n\n        # Update the archive of latent vectors with the current encoder's interpretation of the lattices\n        self.archive = []\n        for lattice in self.archive_lattices:\n            self.archive.append(self.encoder.predict(lattice[None])[0])\n\n        # Initialize the processes used for the NEAT run and execute the phase.\n        self.pool = Pool(thread_count)\n        self.population.run(self.run_one_generation, generations_per_run)\n        self.pool.close()\n        self.pool.join()\n\n        # Update the NEAT metrics with the end-of-phase statistics.\n        self.neat_metrics['Experiment'] = p_experiment\n        self.neat_metrics['Mean Novelty'] = self.population.reporters.reporters[0].get_fitness_mean()\n        self.neat_metrics['Best Novelty'] = self.population.reporters.reporters[0].get_fitness_stat(max)\n\n        # Clearing the pool variable and auto-encoder as these cannot be saved to a pickle file.\n        self.pool = None\n        self.encoder = None\n        self.decoder = None\n\n        if train_on_archive:\n            return self, self.archive_lattices, self.neat_metrics\n        else:\n            return self, self.phase_best_fit, self.neat_metrics\n\n    def run_one_generation(self, genomes, config):\n        \"\"\"\n        Multi-process fitness function for the NEAT module of the project. Implements novelty search and\n        scales the workload across the thread count given in the experiment parameters. 
Assigns a novelty\n value to each genome and keeps the feasible population separate, discarding and randomly regenerating\n the infeasible individuals.\n \"\"\"\n start = time.time()\n compressed_population = {}\n lattices = {}\n remove = 0\n jobs = []\n\n for genome_id, genome in genomes:\n jobs.append(self.pool.apply_async(generate_lattice, (genome, config, False, None)))\n for job, (genome_id, genome) in zip(jobs, genomes):\n lattice, _, feasible = job.get()\n if not feasible:\n del self.population.population[genome_id]\n genome.fitness = 0\n remove += 1\n else:\n lattices.update({genome_id: lattice})\n\n for genome_id, lattice in lattices.items():\n to_compress = lattice\n if self.noise:\n to_compress = add_noise(lattice)\n compressed_population.update({genome_id: self.encoder.predict(to_compress[None])[0]})\n\n jobs.clear()\n for genome_id in compressed_population.keys():\n parameters = (compressed_population[genome_id], compressed_population, self.archive)\n jobs.append(self.pool.apply_async(novelty_search, parameters))\n for job, genome_id in zip(jobs, compressed_population.keys()):\n self.population.population[genome_id].fitness = job.get()\n\n fitness = {genome_id: fitness.fitness for genome_id, fitness in self.population.population.items() if\n fitness.fitness > 0}\n sorted_keys = [k for k, _ in sorted(fitness.items(), key=lambda item: item[1])]\n\n for individual in range(1, np.min([add_to_archive, len(lattices)])):\n lattice = lattices[sorted_keys[-individual]]\n self.archive_lattices.append(lattice)\n vector = self.encoder.predict(lattice[None])[0]\n if len(self.archive) == 0 or not (vector == self.archive).all(1).any():\n self.archive.append(vector)\n\n if self.current_gen % 100 == 0 or self.current_gen + 1 == generations_per_run:\n most_novel_lattice = lattices[sorted_keys[-1]]\n least = lattices[sorted_keys[0]]\n mid = lattices[sorted_keys[int(len(sorted_keys) / 2)]]\n novelty_voxel_plot(\n [convert_to_integer(least), convert_to_integer(mid), convert_to_integer(most_novel_lattice)],\n self.current_gen + 1, self.population_id, self.current_phase, self.experiment)\n\n if self.current_gen + 1 == generations_per_run:\n np.savez_compressed(\"./Results/{}/Phase{:d}/Population_{:d}.npz\".format(self.experiment, self.current_phase, self.population_id), lattices)\n for individual in range(1, np.min([best_fit_count, len(sorted_keys)])):\n self.phase_best_fit.append(lattices[sorted_keys[-individual]])\n\n node_complexity = 0\n connection_complexity = 0\n\n for individual in self.population.population.values():\n node_complexity += individual.size()[0]\n connection_complexity += individual.size()[1]\n\n node_complexity /= len(self.population.population)\n connection_complexity /= len(self.population.population)\n species_sizes = [len(specie.members) for specie in self.population.species.species.values() ]\n\n self.neat_metrics['Node Complexity'].append(node_complexity)\n self.neat_metrics['Connection Complexity'].append(connection_complexity)\n self.neat_metrics['Archive Size'].append(len(self.archive))\n self.neat_metrics['Species Count'].append(len(self.population.species.species))\n self.neat_metrics['Infeasible Size'].append(remove)\n self.neat_metrics['Minimum Species Size'].append(np.min(species_sizes))\n self.neat_metrics['Maximum Species Size'].append(np.max(species_sizes))\n self.neat_metrics['Mean Species Size'].append(np.mean(species_sizes))\n\n print(\"[Population {:d}]: Generation {:d} took {:2f} seconds.\".format(self.population_id, self.current_gen, time.time() - 
start))\n print(\"Average Hidden Layer Size: {:2.2f}\".format(node_complexity))\n print(\"Average Connection Count: {:2.2f}\".format(connection_complexity))\n print(\"Size of the Novelty Archive: {:d}\".format(len(self.archive)))\n print(\"Number of Infeasible Buildings:\", remove)\n print(\"Number of Species:\", len(self.population.species.species))\n print(\"Species Sizes:\", species_sizes)\n print(\"Max Novelty:\", fitness[sorted_keys[-1]])\n print(\"Mean Novelty:\", np.mean(list(fitness.values())), \"\\n\")\n\n current_compatibility = self.config.species_set_config.compatibility_threshold\n if len(self.population.species.species) < target_species_count:\n self.config.species_set_config.__setattr__(\"compatibility_threshold\", current_compatibility - 0.05)\n elif len(self.population.species.species) > target_species_count:\n self.config.species_set_config.__setattr__(\"compatibility_threshold\", current_compatibility + 0.05)\n\n self.current_gen += 1\n\n\ndef novelty_search(genome, compressed_population, archive):\n \"\"\"\n Computes the novelty score for the given genome with respect to the current population and\n an archive of past novel individuals for this run. The score is the average euclidean distance\n to the nearest K neighbors (taken from the population and archive).\n :param genome: the ID of the genome being assessed.\n :param compressed_population: the population of latent vectors to compare to.\n :param archive: the archive of past novel individuals for this run.\n :return: the novelty score for this genome.\n \"\"\"\n distances = []\n for neighbour in list(compressed_population.values()) + archive:\n distance = 0\n if (genome == neighbour).all():\n continue\n for element in range(len(neighbour)):\n distance += np.square(genome[element] - neighbour[element])\n distances.append(np.sqrt(distance))\n distances = np.sort(distances)\n return np.round(np.average(distances[:k_nearest_neighbors]), 2)\n\n\ndef generate_lattice(genome, config, noise_flag=True, plot=None):\n \"\"\"\n Generates a lattice using the given CPPN genome and NEAT configuration file. 
May also generate\n    a noisy variant of the lattice (if a DAE is being used) and may plot the lattice if required.\n    :param plot: Title of the figure for the plot\n    :param noise_flag: Boolean value for adding noise.\n    :param genome: CPPN object used to generate lattices.\n    :param config: CPPN-NEAT config file specifying the parameters for the genomes.\n    :return: generated lattice, noisy variant, and feasibility status\n    \"\"\"\n    net = neat.nn.FeedForwardNetwork.create(genome, config)\n    lattice = np.zeros(lattice_dimensions)\n    noisy = np.zeros(lattice_dimensions)\n\n    for (x, y, z) in value_range:\n        lattice[x][y][z] = np.round(\n            net.activate((x / lattice_dimensions[0], y / lattice_dimensions[0], z / lattice_dimensions[0]))[0])\n    feasible, lattice = apply_constraints(lattice)\n    if noise_flag:\n        noisy = add_noise(lattice)\n    if plot is not None:\n        voxel_plot(lattice, plot)\n\n    lattice = to_categorical(lattice, num_classes=5)\n    noisy = to_categorical(noisy, num_classes=5)\n\n    return np.asarray(lattice, dtype=bool), np.asarray(noisy, dtype=bool), feasible\n\n\ndef generate_lattices(genomes, config, noise_flag=True):\n    pool = Pool(thread_count)\n    jobs = []\n    lattices = []\n    noisy = []\n    for genome in genomes:\n        jobs.append(pool.apply_async(generate_lattice, (genome, config, noise_flag)))\n    for job in jobs:\n        lattice, noisy_lattice, valid = job.get()\n        if valid:\n            lattices.append(lattice)\n            if noise_flag:\n                noisy.append(noisy_lattice)\n    pool.close()\n    pool.join()\n    return noisy, lattices\n\n\ndef create_population_lattices(config, noise_flag=True):\n    \"\"\"\n    Generates a population of lattices and their noisy counterparts.\n    :param noise_flag: boolean which determines whether a noised copy of the dataset should be created.\n    :param config: CPPN-NEAT config file specifying the parameters for the genomes.\n    :return lattices, noisy: the population of generated lattices and their noisy counterparts\n    \"\"\"\n    lattices = []\n    noisy = []\n    while len(lattices) < best_fit_count * runs_per_phase:\n        population = create_population(config, round((best_fit_count * runs_per_phase - len(lattices)) * 2))\n        noisy_batch, lattice_batch = generate_lattices(population.population.values(), config, noise_flag)\n        lattices += lattice_batch\n        if noise_flag:\n            noisy += noisy_batch\n        print(\"{:d} Lattices created!\".format(len(lattices)))\n    return np.asarray(lattices[:1000], dtype=bool), np.asarray(noisy[:1000], dtype=bool)\n\n\ndef create_population(config, pop_size=population_size):\n    \"\"\"\n    Generates a population of CPPN genomes according to the given CPPN-NEAT config file and population size.\n    :param config: CPPN-NEAT config file specifying the parameters for the genomes.\n    :param pop_size: Number of genomes to create.\n    :return population: Population object containing a dictionary in the form {genome_id: genome_object}.\n    \"\"\"\n    config.__setattr__(\"pop_size\", pop_size)\n    population = neat.Population(config)\n    return population\n\n\ndef create_seed_files(config):\n    \"\"\"\n    Initializes the NEAT populations which act as the seed for our experiments, and trains an auto-encoder\n    on randomly initialized CPPNs to be used in the first phase of the experiments. 
This way all the experiments\n    start with the same populations and the same auto-encoder, eliminating potential randomness interfering with\n    results.\n    :param config: CPPN-NEAT config file specifying the parameters for the genomes.\n    \"\"\"\n    training_population, _ = create_population_lattices(config, False)\n    np.savez_compressed(\"Results/Seed/Initial_Training_Set.npz\", np.asarray(training_population))\n    _ = create_auto_encoder(model_type=auto_encoder_3d,\n                            phase=-1,\n                            population=np.asarray(training_population),\n                            noisy=None,\n                            experiment=\"Seed\")\n    for runs in range(runs_per_phase):\n        generator = NeatGenerator(\n            config=config,\n            population_id=runs\n        )\n        with open(\"./Results/Seed/Neat_Population_{:d}.pkl\".format(runs), \"wb+\") as f:\n            pickle.dump(generator, f)\n\n\ndef load_config_file():\n    local_dir = os.path.dirname(__file__)\n    config_path = os.path.join(local_dir, 'neat.cfg')\n    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet,\n                         neat.DefaultStagnation, config_path)\n    config.genome_config.add_activation('sin_adjusted', sinc)\n    return config\n\n\nif __name__ == \"__main__\":\n\n    config = load_config_file()\n\n    pool = Pool(16)\n    for pop in range(10):\n        with open(\"../Generator/Results/Seed/Neat_Population_{:d}.pkl\".format(pop), \"rb\") as file:\n            generator = pickle.load(file)\n        jobs = []\n        lattices = []\n        for genome_id, genome in list(generator.population.population.items()):\n            jobs.append(pool.apply_async(generate_lattice, (genome, config, False, None)))\n        for job in jobs:\n            result = job.get()\n            if result[2]:\n                lattices.append(result[0])\n        np.save(\"../Generator/Results/Seed/Neat_Population_{:d}.npy\".format(pop), lattices)\n    pool.close()\n    pool.join()\n\n\n","repo_name":"Matt-Barthet/M.Sc.-Thesis---Open-Ended-Minecraft-Buildings","sub_path":"Generator/NeatGenerator.py","file_name":"NeatGenerator.py","file_ext":"py","file_size_in_byte":16800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
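The k-nearest-neighbour novelty score in novelty_search, reduced to a standalone check (the vectors and k below are illustrative):

import numpy as np

def novelty(vector, neighbours, k=2):
    # Average Euclidean distance to the k nearest neighbours,
    # as in novelty_search above.
    dists = sorted(np.linalg.norm(vector - n) for n in neighbours)
    return round(float(np.average(dists[:k])), 2)

population = [np.array([0., 0.]), np.array([1., 0.]), np.array([5., 5.])]
print(novelty(np.array([0., 1.]), population))  # ~1.21: close neighbours, low novelty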
+{"seq_id":"630766918","text":"from allImports import * # goes on top\n#decorator\n@app.route(\"/customer/\" , methods = [\"GET\"])\ndef viewCustomer(c_id):\n \"\"\"This function will get data from the database\"\"\"\n viewCustomer = Customer.get(Customer.c_id == c_id) \n return render_template( \"viewCustomer.html\",\n cfg = cfg, \n viewCustomer=viewCustomer)\n # return viewOrders.status.status","repo_name":"cstadlock/TunnelVision","sub_path":"app/viewCustomer.py","file_name":"viewCustomer.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"69916915785","text":"import dgl\nimport torch\n\nfrom torch import nn\n\n\nclass TextLevelGCN(torch.nn.Module):\n def __init__(\n self,\n vocab_size,\n node_embed_size,\n edge_num,\n class_num=2,\n pre_node_embed=False,\n node_embed_vectors=0,\n is_node_freeze=False,\n pre_edge_embed=False,\n edge_embed_vectors=0,\n is_edge_freeze=False,\n fc_dropout_rate=0.5\n ):\n super(TextLevelGCN, self).__init__()\n\n self.class_num = class_num\n\n self.vocab_size = vocab_size\n self.node_embed_size = node_embed_size\n\n self.pre_node_embed = pre_node_embed\n self.node_freeze = is_node_freeze\n\n if self.pre_node_embed:\n self.node_embed = nn.Embedding.from_pretrained(\n embeddings=node_embed_vectors,\n freeze=self.node_freeze\n )\n else:\n self.node_embed = nn.Embedding(\n self.vocab_size,\n self.node_embed_size\n )\n nn.init.uniform_(self.node_embed.weight.data)\n\n self.pre_edge_embed = pre_edge_embed\n self.edge_num = edge_num\n self.edge_freeze = is_edge_freeze\n\n if self.pre_edge_embed:\n self.edge_embed = torch.nn.Embedding.from_pretrained(\n embeddings=edge_embed_vectors,\n freeze=self.edge_freeze\n )\n else:\n self.edge_embed = torch.nn.Embedding.from_pretrained(\n torch.ones(edge_num, 1),\n freeze=self.edge_freeze\n )\n\n self.dropout = torch.nn.Dropout(p=fc_dropout_rate)\n\n self.activation = torch.nn.ReLU()\n\n self.classify = torch.nn.Linear(self.node_embed_size, self.class_num, bias=True)\n\n self.init_weights()\n\n def init_weights(self):\n nn.init.xavier_uniform_(self.classify.weight)\n\n def forward(\n self,\n sub_graph,\n **kwargs\n ):\n sub_graph.update_all(\n message_func=dgl.function.src_mul_edge('h', 'w', 'weighted_message'),\n reduce_func=dgl.function.max('weighted_message', 'h')\n )\n graph_feature = dgl.sum_nodes(sub_graph, feat='h')\n\n graph_feature = self.dropout(graph_feature)\n graph_feature = self.activation(graph_feature)\n\n out = self.classify(graph_feature)\n\n return out\n","repo_name":"xiangking/ark-nlp","sub_path":"ark_nlp/nn/text_level_gcn.py","file_name":"text_level_gcn.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"81"}
+{"seq_id":"34065411671","text":"import requests\nfrom tabulate import tabulate\nfrom pathlib import Path\n\n\ndef giveURL(x):\n base_url = \"https://api.themoviedb.org/3/discover/movie?api_key=87a9f28a9fa52eae1eebdb5012de2c95&with_genres=\"\n main_url = base_url + x\n giveTitles(main_url)\n\n\ndef giveTitles(url):\n r = requests.get(url)\n data = r.json()\n length_dict = len(data[\"results\"])\n\n # grabbing the movie data\n movies = [data[\"results\"][i][\"title\"] for i in range(length_dict)]\n ratings = [data[\"results\"][i][\"vote_average\"] for i in range(length_dict)]\n user_votes = [data[\"results\"][i][\"vote_count\"] for i in range(length_dict)]\n\n # creating dictionary containing movie data\n finale = {\"Movie Name\": movies, \"Ratings\": ratings, \"Votes\": user_votes}\n\n printResults(finale)\n\n\ndef printResults(finale):\n file_name = input(\"Enter file name:\") + \".txt\"\n file_path = str(Path.cwd()) + \"/\" + file_name\n\n # using tabulate to print it to a table\n table = tabulate(finale, headers=\"keys\", showindex=False, tablefmt=\"pretty\")\n with open(file_path, \"w\") as f:\n f.write(f\"Displaying results for emotion {emotion}\\n\")\n f.write(table)\n print(\"File Saved as \" + file_name)\n\n\n# main function\ndef main():\n global emotion\n emotion = str(\n input(\n \"\"\"Enter an emotion:\n List of emotions----->sad,anger,fear/horror,joy,thrill,suspense and love<-----\\n\"\"\"\n )\n ).lower()\n # Drama\n if emotion == \"sad\":\n giveURL(\"18\")\n # Action\n elif emotion in \"anger\":\n giveURL(\"28\")\n # Horror\n elif emotion == [\"fear\", \"horror\"]:\n giveURL(\"27\")\n # Comedy\n elif emotion == \"joy\":\n giveURL(\"35\")\n # Thriller\n elif emotion == \"thrill\":\n giveURL(\"53\")\n # Mystery\n elif emotion in \"suspense\":\n giveURL(\"9648\")\n # Romance\n elif emotion == \"love\":\n giveURL(\"10749\")\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"Floo-zy/EmotionBasedMovieRecommender-","sub_path":"OldVersions/MovieRecommender3.9.py","file_name":"MovieRecommender3.9.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"27156022929","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPubmed related utilities\n\nGiven PMID - collect Pubmed data and Pubtator Bioconcepts used for the BELMgr\nor enhancing BEL Nanopubs\n\"\"\"\n\n# Standard Library\nimport asyncio\nimport copy\nimport datetime\nimport re\nfrom typing import Any, Mapping, MutableMapping\n\n# Third Party\nimport cachetools\nimport httpx\nfrom loguru import logger\nfrom lxml import etree\n\n# Local\nimport bel.core.settings as settings\nimport bel.terms.terms\nfrom bel.core.utils import http_client, url_path_param_quoting\n\n# Replace PMID\nPUBMED_TMPL = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&retmode=xml&id=\"\n\n# https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/biocjson?pmids=28483577,28483578,28483579\n\nPUBTATOR_URL = (\n \"https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/biocjson?pmids=\"\n)\n\npubtator_ns_convert = {\n \"CHEBI\": \"CHEBI\",\n \"Species\": \"TAX\",\n \"Gene\": \"EG\",\n \"Chemical\": \"MESH\",\n \"Disease\": \"MESH\",\n}\n\npubtator_entity_convert = {\"Chemical\": \"Abundance\", \"Gene\": \"Gene\", \"Disease\": \"Pathology\"}\npubtator_annotation_convert = {\"Disease\": \"Pathology\"}\npubtator_known_types = [key for key in pubtator_ns_convert.keys()]\n\n\ndef node_text(node):\n \"\"\"Needed for things like abstracts which have internal tags (see PMID:27822475)\"\"\"\n\n if node.text:\n result = node.text\n else:\n result = \"\"\n for child in node:\n if child.tail is not None:\n result += child.tail\n return result\n\n\n@cachetools.cached(cachetools.TTLCache(maxsize=200, ttl=3600))\ndef get_pubtator_url(pmid):\n \"\"\"Get pubtator content from url\"\"\"\n\n pubtator = None\n\n url = f\"{PUBTATOR_URL}{pmid}\"\n\n r = http_client.get(url, timeout=10)\n\n if r and r.status_code == 200:\n pubtator = r.json()\n\n else:\n logger.error(f\"Cannot access Pubtator, status: {r.status_code} url: {url}\")\n\n return pubtator\n\n\ndef pubtator_convert_to_key(annotation: dict) -> str:\n \"\"\"Convert pubtator annotation info to key (NS:ID)\"\"\"\n\n ns = pubtator_ns_convert.get(annotation[\"infons\"][\"type\"], None)\n id_ = annotation[\"infons\"][\"identifier\"]\n id_ = id_.replace(\"MESH:\", \"\")\n\n if ns is None:\n logger.warning(\"\")\n return f\"{ns}:{id_}\"\n\n\ndef get_pubtator(pmid):\n \"\"\"Get Pubtator Bioconcepts from Pubmed Abstract\n\n Re-configure the denotations into an annotation dictionary format\n and collapse duplicate terms so that their spans are in a list.\n \"\"\"\n\n annotations = []\n\n pubtator = get_pubtator_url(pmid)\n if pubtator is None:\n return annotations\n\n known_types = [\"CHEBI\", \"Chemical\", \"Disease\", \"Gene\", \"Species\"]\n\n for passage in pubtator[\"passages\"]:\n for annotation in passage[\"annotations\"]:\n if annotation[\"infons\"][\"type\"] not in known_types:\n continue\n\n key = pubtator_convert_to_key(annotation)\n\n annotations.append(\n {\n \"key\": key,\n \"text\": annotation[\"text\"],\n \"locations\": copy.copy(annotation[\"locations\"]),\n }\n )\n\n return annotations\n\n\ndef process_pub_date(year, mon, day, medline_date):\n \"\"\"Create pub_date from what Pubmed provides in Journal PubDate entry\"\"\"\n\n if medline_date:\n year = \"0000\"\n match = re.search(r\"\\d{4,4}\", medline_date)\n if match:\n year = match.group(0)\n\n if year and re.match(\"[a-zA-Z]+\", mon):\n pub_date = datetime.datetime.strptime(f\"{year}-{mon}-{day}\", \"%Y-%b-%d\").strftime(\n \"%Y-%m-%d\"\n )\n 
elif year:\n pub_date = f\"{year}-{mon}-{day}\"\n\n else:\n pub_date = None\n if year and re.match(\"[a-zA-Z]+\", mon):\n pub_date = datetime.datetime.strptime(f\"{year}-{mon}-{day}\", \"%Y-%b-%d\").strftime(\n \"%Y-%m-%d\"\n )\n elif year:\n pub_date = f\"{year}-{mon}-{day}\"\n\n return pub_date\n\n\ndef parse_book_record(doc: dict, root) -> dict:\n \"\"\"Parse Pubmed Book entry\"\"\"\n\n doc[\"title\"] = next(iter(root.xpath(\"//BookTitle/text()\")))\n\n doc[\"authors\"] = []\n for author in root.xpath(\"//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\"//Book/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\"//Book/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\"//Book/PubDate/Day/text()\")), \"01\")\n medline_date = next(iter(root.xpath(\"//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None)\n\n pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n\n for abstracttext in root.xpath(\"//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n return doc\n\n\ndef parse_journal_article_record(doc: dict, root) -> dict:\n \"\"\"Parse Pubmed Journal Article record\"\"\"\n\n doc[\"title\"] = next(iter(root.xpath(\"//ArticleTitle/text()\")), \"\")\n\n # TODO https://stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element\n atext = next(iter(root.xpath(\"//Abstract/AbstractText/text()\")), \"\")\n\n for abstracttext in root.xpath(\"//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n doc[\"authors\"] = []\n for author in root.xpath(\"//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\"//Journal/JournalIssue/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\"//Journal/JournalIssue/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\"//Journal/JournalIssue/PubDate/Day/text()\")), \"01\")\n medline_date = next(iter(root.xpath(\"//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None)\n\n pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n doc[\"journal_title\"] = next(iter(root.xpath(\"//Journal/Title/text()\")), \"\")\n doc[\"joural_iso_title\"] = next(iter(root.xpath(\"//Journal/ISOAbbreviation/text()\")), \"\")\n doc[\"doi\"] = next(iter(root.xpath('//ArticleId[@IdType=\"doi\"]/text()')), None)\n\n doc[\"compounds\"] = []\n for chem in root.xpath(\"//ChemicalList/Chemical/NameOfSubstance\"):\n chem_id = 
chem.get(\"UI\")\n doc[\"compounds\"].append({\"key\": f\"MESH:{chem_id}\", \"label\": chem.text})\n\n compounds = [cmpd[\"key\"] for cmpd in doc[\"compounds\"]]\n doc[\"mesh\"] = []\n for mesh in root.xpath(\"//MeshHeading/DescriptorName\"):\n mesh_id = f\"MESH:{mesh.get('UI')}\"\n if mesh_id in compounds:\n continue\n doc[\"mesh\"].append({\"key\": mesh_id, \"label\": mesh.text})\n\n return doc\n\n\n@cachetools.cached(cachetools.TTLCache(maxsize=200, ttl=3600))\ndef get_pubmed_url(pmid):\n \"\"\"Get pubmed url\"\"\"\n\n root = None\n\n try:\n pubmed_url = f\"{PUBMED_TMPL}{str(pmid)}\"\n\n r = http_client.get(pubmed_url)\n\n logger.info(f\"Status {r.status_code} URL: {pubmed_url}\")\n\n if r.status_code == 200:\n content = r.content\n root = etree.fromstring(content)\n else:\n logger.warning(f\"Could not download pubmed url: {pubmed_url}\")\n\n except Exception as e:\n logger.warning(\n f\"Bad Pubmed request, error: {str(e)}\",\n url=f'{PUBMED_TMPL.replace(\"PMID\", pmid)}',\n )\n\n return root\n\n\ndef get_pubmed(pmid: str) -> Mapping[str, Any]:\n \"\"\"Get pubmed xml for pmid and convert to JSON\n\n Remove MESH terms if they are duplicated in the compound term set\n\n ArticleDate vs PubDate gets complicated: https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html see and \n Only getting pub_year at this point from the element.\n\n Args:\n pmid: pubmed id number as a string\n\n Returns:\n pubmed json\n \"\"\"\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": pmid,\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n root = get_pubmed_url(pmid)\n\n if root is None:\n return None\n\n try:\n doc[\"pmid\"] = root.xpath(\"//PMID/text()\")[0]\n except Exception as e:\n return None\n\n if doc[\"pmid\"] != pmid:\n logger.error(f\"Requested PMID {doc['pmid']}doesn't match record PMID {pmid}\")\n\n if root.find(\"PubmedArticle\") is not None:\n doc = parse_journal_article_record(doc, root)\n elif root.find(\"PubmedBookArticle\") is not None:\n doc = parse_book_record(doc, root)\n\n return doc\n\n\nasync def async_get_normalized_terms_for_annotations(term_keys):\n \"\"\"Async collection of normalized terms for annotations\"\"\"\n\n normalized = asyncio.gather(\n *[bel.terms.terms.async_get_normalized_terms(term_key) for term_key in term_keys]\n )\n\n return normalized\n\n\ndef get_normalized_terms_for_annotations(term_keys):\n\n return [bel.terms.terms.get_normalized_terms(term_key) for term_key in term_keys]\n\n\ndef add_annotations(pubmed):\n \"\"\"Add nanopub annotations to pubmed doc\n\n Enhance MESH terms etc as full-fledged nanopub annotations for use by the BEL Nanopub editor\n \"\"\"\n\n term_keys = (\n [entry[\"key\"] for entry in pubmed.get(\"compounds\", [])]\n + [entry[\"key\"] for entry in pubmed.get(\"mesh\", [])]\n + [entry[\"key\"] for entry in pubmed.get(\"pubtator\", [])]\n )\n term_keys = list(set(term_keys))\n\n terms = {}\n\n for entry in pubmed.get(\"pubtator\", []):\n terms[entry[\"key\"]] = {\"key\": entry[\"key\"], \"label\": entry[\"text\"]}\n\n for entry in pubmed.get(\"compounds\", []):\n terms[entry[\"key\"]] = {\"key\": entry[\"key\"], \"label\": entry[\"label\"]}\n\n for entry in pubmed.get(\"mesh\", []):\n terms[entry[\"key\"]] = {\"key\": entry[\"key\"], \"label\": entry[\"label\"]}\n\n # loop = asyncio.get_event_loop()\n # normalized = loop.run_until_complete(async_get_normalized_terms_for_annotations(term_keys))\n\n normalized = 
get_normalized_terms_for_annotations(terms.keys())\n\n normalized = sorted(normalized, key=lambda x: x[\"annotation_types\"], reverse=True)\n\n pubmed[\"annotations\"] = []\n\n for annotation in normalized:\n\n # HACK - only show first annotation type\n if len(annotation[\"annotation_types\"]) > 0:\n annotation_type = annotation[\"annotation_types\"][0]\n else:\n annotation_type = \"\"\n\n if annotation.get(\"label\", False):\n terms[annotation[\"original\"]][\"key\"] = annotation[\"decanonical\"]\n terms[annotation[\"original\"]][\"label\"] = annotation[\"label\"]\n terms[annotation[\"original\"]][\"annotation_types\"] = [annotation_type]\n\n pubmed[\"annotations\"] = copy.deepcopy(\n sorted(terms.values(), key=lambda x: x.get(\"annotation_types\", []), reverse=True)\n )\n\n # Add missing\n for idx, annotation in enumerate(pubmed[\"annotations\"]):\n if annotation[\"label\"] == \"\":\n pubmed[\"annotations\"][idx][\"label\"] = annotation[\"key\"]\n\n return pubmed\n\n\ndef get_pubmed_for_beleditor(pmid: str, pubmed_only: bool = False) -> Mapping[str, Any]:\n \"\"\"Get fully annotated pubmed doc with Pubtator and full entity/annotation_types\n\n Args:\n pmid: Pubmed PMID\n\n Returns:\n Mapping[str, Any]: pubmed dictionary\n \"\"\"\n\n pubmed = get_pubmed(pmid)\n\n if pubmed is None:\n return pubmed\n\n if not pubmed_only:\n pubmed[\"pubtator\"] = get_pubtator(pmid)\n\n # Add entity types and annotation types to annotations\n pubmed = add_annotations(pubmed)\n\n return pubmed\n\n\ndef main():\n\n pmid = \"19894120\"\n\n pubmed = get_pubmed_for_beleditor(pmid)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"belbio/bel","sub_path":"bel/nanopub/pubmed.py","file_name":"pubmed.py","file_ext":"py","file_size_in_byte":12766,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"}
+{"seq_id":"32174556956","text":"import numpy as np\nimport cv2\n\n\nclass Compressor:\n \"\"\"\n Compressor allow you to serialize and compress an image using either JPEG\n or PNG compression.\n \"\"\"\n\n _format = \".png\"\n\n @property\n def format(self):\n return self._format\n\n @format.setter\n def format(self, fmt):\n \"\"\"\n Set format to either .jpg jpg .png png\n \"\"\"\n if fmt not in [\".jpg\", \"jpg\", \".png\", \"png\"]:\n raise ValueError(f\"Invalid format: {fmt}\")\n if fmt.find(\".\") != 0:\n fmt = \".\" + fmt\n self._format = fmt\n\n def compress(self, img):\n ok, cb = cv2.imencode(self._format, img)\n if ok:\n cb = cb.tobytes()\n else:\n cb = None\n return cb\n\n def uncompress(self, img_bytes, shape):\n img = np.frombuffer(img_bytes, dtype=np.uint8)\n\n if len(shape) == 3:\n img = cv2.imdecode(img, cv2.IMREAD_COLOR)\n else:\n img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)\n\n img = img.reshape(shape)\n return img\n","repo_name":"MomsFriendlyRobotCompany/opencv_camera","sub_path":"opencv_camera/compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"}
+{"seq_id":"24564778127","text":"\r\n\r\nimport geocoder\r\nimport time\r\nfrom PMV_Elevation import elevation\r\n\r\n# **********************************************************************\r\n# Ajouter aux enregistrements du fichier des stations Bixi: l'altitude,\r\n# le code postal et la ville.\r\n#\r\n# Input: Fichier Bixi a traiter\r\n# Output: Fichier avec l'imformation ajoutee\r\n# **********************************************************************\r\n\r\ndef loadBixiStation(filenameIn, filenameOut):\r\n fichierIn = open(filenameIn)\r\n\r\n # lire le contenu du fichier (toutes les lignes\r\n lignes = fichierIn.readlines()\r\n\r\n # fermer le fichier\r\n fichierIn.close()\r\n\r\n # Ouvrir le fichier de sortie\r\n fichierOut = open(filenameOut, \"w\")\r\n\r\n # ecrire l'entete\r\n fichierOut.writelines([\"code,name,latitude,longitude,altitude,postal,city\\n\"])\r\n\r\n ligneIter = iter(lignes)\r\n\r\n # on saute la première ligne (l'entete)\r\n next(ligneIter)\r\n\r\n nbLignes = 0\r\n # Traiter chaque enregistrement du fichier\r\n for ligne in ligneIter:\r\n\r\n # Compter le nombre de lignes pour montrer l'evolution du traitement\r\n nbLignes += 1\r\n if nbLignes %10 == 0:\r\n print(nbLignes)\r\n\r\n # extraire chaque donnée, séparée par une virgule\r\n stationCSV = ligne.strip().split(\",\")\r\n code = int(stationCSV[0])\r\n name = stationCSV[1]\r\n latitude = stationCSV[2]\r\n longitude = stationCSV[3]\r\n\r\n # Le fichier traitee est soit le fichier d'origine de Bixi ou le resultat de ce programme (mais incomplet)\r\n # Le fichier comprend deja l'altitude\r\n if len(stationCSV) > 4:\r\n altitude = float(stationCSV[4])\r\n else:\r\n altitude = 0.0\r\n\r\n # Le fichier comprend deja le code postal\r\n if len(stationCSV) > 5:\r\n postal = stationCSV[5]\r\n else:\r\n postal = None\r\n\r\n # Le fichier comprend dela la ville\r\n if len(stationCSV) > 6:\r\n city = stationCSV[6]\r\n else:\r\n city = None\r\n\r\n # trouver l'altitude de la station\r\n if altitude == 0.0:\r\n altitude = elevation(latitude, longitude)\r\n\r\n # Trouver le code postal et la ville (a partit de la latitude et de la longitude)\r\n if postal == \"Nonex\":\r\n g = geocoder.google([latitude, longitude], method='reverse')\r\n\r\n # le code postal n'a pas ete trouve, on reessaye.\r\n if (g.postal == None):\r\n # on re essaye avec une pause\r\n nbTry = 0\r\n while nbTry < 5:\r\n print(\"Tentative\",nbTry)\r\n nbTry += 1 # un essai de plus\r\n time.sleep(6-nbTry)\r\n g = geocoder.google([latitude, longitude], method='reverse')\r\n if g.postal != None:\r\n nbTry = 500 # on arrete\r\n\r\n postal = str(g.postal)\r\n city = str(g.city)\r\n\r\n #print(postal, city)\r\n # Sauver le tout dans le fichier de sortie\r\n fichierOut.write(str(code)+\",\"+name+\",\"+str(latitude)+\",\"+str(longitude)+\",\"+ '%.0f'%float(altitude) + \",\" + str(postal)+\",\"+str(city)+\"\\n\")\r\n\r\n fichierOut.close()\r\n\r\n# **********************************************************************\r\n# Debut du programme\r\n# **********************************************************************\r\n\r\ninputFilename = \"Stations_2017.csv\" # Nom du fichuier en imput\r\noutputFilename = \"Stations_2017-5.csv\" # Nom du fichier en output\r\nloadBixiStation(inputFilename, outputFilename) # Lancer le 
traitement\r\n\r\n","repo_name":"TessierDominique/PMV","sub_path":"1-Datasets/Scripts/PMV_GeoCoder.py","file_name":"PMV_GeoCoder.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"25593716575","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\nfrom selenium import webdriver\n\nclass Switch_To_iFRAME():\n def Chrome_Method(self):\n driver = webdriver.Chrome()\n driver.maximize_window()\n driver.get(\"https://letskodeit.teachable.com/pages/practice\")\n driver.implicitly_wait(3)\n driver.execute_script(\"window.scrollBy(0, 1000);\") # takes 2 args, SCROLL down\n\n #TO AVOID ISSUES, WHEN USING EITHER OF THE 3 SEARCH OPTIONS\n #(ID, NAME, NUMBERS), PLS, COMMENT THE OTHERS\n\n\n #Switch to the frame using ID\n #driver.switch_to.frame(\"courses-iframe\")# this is the command to switch to the IFRAME, with id.\n\n # Switch to the frame using NAME\n #driver.switch_to.frame(\"iframe-name\") # this is the command to switch to the IFRAME, with name.\n\n # Switch to the frame using NUMBERS\n driver.switch_to.frame(0) # this is the command to switch to the IFRAME, with number, like the index on the list.\n\n time.sleep(3)\n #Search course\n searchBox = driver.find_element(By.ID, \"search-courses\")\n searchBox.send_keys(\"Python\")\n time.sleep(3)\n\n #Switch back to the parent frame\n driver.switch_to.default_content() # switching back to the PARENT IFRAME\n driver.execute_script(\"window.scrollBy(0, -1000);\") # takes 2 args, SCROLL UP\n time.sleep(5)\n\n #Type something on the field on the PARENT frame\n driver.find_element_by_id(\"name\").send_keys(\"SUCCESS.\")\n time.sleep(5)\n\n\n\n\n\n\n#driver.quit() closes EVERYTHING\n#driver.close() closes the CURRENT window with the focus.\n\nChromee = Switch_To_iFRAME()\nChromee.Chrome_Method()\n\n#windows are used with \"handles\" in selenium, a string","repo_name":"juancoan/python","sub_path":"Browser_Tests(System Variable 4 WebDriver Path)/Switch_To_IFRAME.py","file_name":"Switch_To_IFRAME.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"43672558367","text":"\"\"\"\nDate:\n 22.10.28\nTitle:\n BAEKJOON 10871번\nProject:\n X보다 작은 수\nLevel:\n Bronze 5\nName:\n thelight0804\n\"\"\"\n\n# standard_input = \"\"\"\n# 10 5\n# 1 10 4 9 2 3 8 5 7 6\n# \"\"\"\n#input\ntest, n = map(int, input().split(\" \"))\nlist = list(map(int, input().split(\" \")))\n\n#print less than n\nfor i in range(len(list)):\n if list[i] < n:\n print(list[i], end = ' ')","repo_name":"GDSC-DEU/PSS-Study","sub_path":"thelight0804/BAEKJOON/Num10871.py","file_name":"Num10871.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"19889655051","text":"import os\nimport shutil\nfrom cati.cmdline.BaseCommand import BaseCommand\nfrom cati.cmdline import pr, ansi\nfrom cati.cmdline.components import DownloadProgress\nfrom cati.dotcati.Pkg import Pkg\n\nclass DownloadCommand(BaseCommand):\n \"\"\" Download command \"\"\"\n def help(self):\n \"\"\"\n download packages\n\n Usage: cati download pkg1 pkg1 ... [options]\n ...... cati download pkg= pkg2= pkg3 ... [options]\n ...... cati download pkg== ... [options]\n\n Options:\n --output=[output file path]: set file download path\n -q|--quiet: quiet output\n \"\"\"\n pass\n\n def config(self) -> dict:\n \"\"\" Define and config this command \"\"\"\n return {\n 'name': 'download',\n 'options': {\n '-q': [False, False],\n '--quiet': [False, False],\n '--output': [False, True],\n },\n 'max_args_count': None,\n 'min_args_count': 1,\n }\n\n def download_once(self, pkg, output=None):\n \"\"\" Download once package \"\"\"\n\n try:\n file_path = pkg.data['file_path']\n except:\n self.message('package \"' + pkg.data['name'] + '\" is local and cannot be downloaded' + ansi.reset, is_error=True, before=ansi.red)\n return False\n if not self.is_quiet():\n pr.p('Downloading ' + pkg.data['name'] + ':' + pkg.data['version'] + ':' + pkg.data['arch'] + '...')\n\n if output == None:\n output = file_path.split('/')[-1]\n\n if file_path[:7] == 'http://' or file_path[:8] == 'https://':\n i = 0\n while i < 5:\n res = DownloadProgress.download(file_path, output)\n if res == True:\n break\n else:\n pr.e(ansi.red + str(res) + ansi.reset)\n i += 1\n if i == 5:\n return False\n else:\n if not os.path.isfile(file_path):\n return False\n shutil.copy(file_path, output)\n\n return True\n\n def run(self):\n \"\"\" Run command \"\"\"\n\n if not self.is_quiet():\n pr.p('Loading packages list...')\n pr.p('========================')\n\n loaded_packages = []\n\n for argument in self.arguments:\n arg_parts = argument.split('=')\n if len(arg_parts) == 1:\n # load last version as default\n pkg = Pkg.load_last(argument)\n elif len(arg_parts) == 2:\n # load specify version\n pkg = Pkg.load_version(arg_parts[0], arg_parts[1])\n if pkg == 1:\n pkg = False\n elif pkg == 2:\n self.message('package \"' + arg_parts[0] + '\" has not version \"' + arg_parts[1] + '\"' + ansi.reset, before=ansi.red)\n continue\n else:\n # load specify version and specify arch\n pkg = Pkg.load_version(arg_parts[0], arg_parts[1], arg_parts[2])\n if pkg == 1:\n pkg = False\n elif pkg == 2:\n self.message('package \"' + arg_parts[0] + '\" has not version or arch \"' + arg_parts[1] + ':' + arg_parts[2] + '\"' + ansi.reset, before=ansi.red)\n continue\n if pkg:\n loaded_packages.append(pkg)\n else:\n self.message('unknow package \"' + argument + '\"' + ansi.reset, before=ansi.red)\n\n if not loaded_packages:\n return 1\n\n # download loaded packages\n is_any_success = False\n output_path = self.option_value('--output')\n for pkg in loaded_packages:\n if len(loaded_packages) > 1:\n tmp = self.download_once(pkg)\n else:\n tmp = self.download_once(pkg, output_path)\n if tmp:\n is_any_success = True\n\n if not is_any_success:\n return 1\n","repo_name":"catios/cati","sub_path":"cati/cmdline/commands/DownloadCommand.py","file_name":"DownloadCommand.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"}
+{"seq_id":"73072085706","text":"import torch\nimport argparse\nfrom IO import DataLoader\nimport models\nfrom torch.autograd import Variable\nfrom util import args\nimport pickle\nimport time\nimport numpy as np\n\n\ndef decode_dialog(ind2word, seq):\n return \" \".join([ind2word[int(x)] for x in seq])\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-save_model', type=str, default='visdialog.pt',\n help='trained parameters')\nparser.add_argument('-cuda', action='store_true',\n help='enable training with cuda')\nparser.add_argument('-batch_size', default=1, type=int,\n help='batch_size (1 for now)')\nparser.add_argument('-decoding', default='greedy', type=str,\n choices=['greedy', 'sampling'],\n help='Greedy or softmax sampling')\nparser.add_argument('-eval_size', default=10000, type=int,\n help=\"\"\"Size of validation set to evaluate.\n Default is 10K, must be an integer <= 40K\"\"\")\nparser.add_argument('-intervention', default='none', type=str,\n choices=['none', 'answer', 'question', 'image',\n 'caption', 'negation'],\n help='Whether to intervene or do standard decoding.')\nparser.add_argument('-round', default=0, type=int,\n help='Intervene from this specific round')\nparser.add_argument('-pnoise', default=1.0, type=float,\n help=\"\"\"Percentage of random noise.\n 1 means complete noise\"\"\")\nopt = parser.parse_args()\n\n# Load checkpoint\nprint(\"=> loading checkpoint '{}'\".format(opt.save_model))\ncheckpoint = torch.load(opt.save_model)\n\n# Merge two argparse namespaces with priority to args from this script\nopt = {**vars(checkpoint['opt']), **vars(opt)}\nopt = args(opt) # wrapper around a dict to object with fields\n\nprint(opt.__dict__)\n\nprint(\"=> loaded checkpoint '{}'\".format(opt.save_model))\n\n# Construct dataloader\nloader = DataLoader(opt, ['val'])\nvocab_size = loader.vocab_size\nopt.vocab_size = vocab_size\nbos_idx = loader.data['word2ind']['']\neos_idx = loader.data['word2ind']['']\n\n# Load models\nqbot = models.QBot(opt)\nabot = models.ABot(opt)\nif opt.share_qa_embeddings:\n qbot.embeddings.weight = abot.embeddings.weight\n\nbots = models.QABots(qbot, abot)\n\nif opt.cuda:\n qbot.cuda()\n abot.cuda()\n bots.cuda()\nbots.load_state_dict(checkpoint['params'])\n\n# Params\nbatch_size = opt.batch_size # TODO change routine for parallel evaluation\nnum_rounds = 10\nmax_decode_len = 20\nvalid_data = loader.batchify(subset='val',\n batch_size=batch_size,\n subset_size=opt.eval_size)\n\nbots.eval() # eval mode\n\n\n# not sure what's going on here, but let try\ndef truncate2d(x, padding_idx=0):\n # x (batch, length)\n masked_pad = x.ne(padding_idx).sum(-1)\n max_length = masked_pad.int().max()\n return x[:, :max_length].contiguous()\n\n\nintervention = opt.intervention\nvocab_tokens = list(loader.data['ind2word'].keys())\nprint('Running evaluation ')\n\n\n# negation interventions\nyes_idx = loader.data['word2ind']['yes']\nno_idx = loader.data['word2ind']['no']\n\n\ndef pertube(x, p):\n \"\"\"x is a Variable of LongTensor\"\"\"\n new_x = torch.from_numpy(\n np.random.choice(a=vocab_tokens, size=tuple(x.size()))).long().cuda()\n mask = torch.FloatTensor(new_x.size()).fill_(p).bernoulli().long().cuda()\n new_x = new_x * mask + (1-mask) * x.data\n return Variable(new_x, volatile=True)\n\n\ngt_img_features = loader.data['val_img_fv'][:40000, :]\n# convert to pytorch\nval_img_fv = gt_img_features.float().cuda()\n\n\ndef rank(fv, true_idx):\n d = torch.norm(val_img_fv - fv, 2, 1)\n _, idx = torch.sort(d)\n _, r = torch.sort(idx)\n return (r[true_idx] + 0.5) / 
40000\n\n\nreports = {}\nfor i in range(num_rounds):\n reports[i] = []\n\nn_negs = 0\n# start playing game\nfor i, batch in enumerate(valid_data):\n t = time.time()\n if (i % 500) == 0 and i > 0:\n print('| Evaluating dialogue at {}/{}'.format(i + 1, opt.eval_size))\n for k in range(num_rounds):\n mpr = (1-torch.FloatTensor(reports[k]).mean()) * 100\n print(\"|\\tround %d | MPR %.2f\" % (k, mpr))\n\n if n_negs > 0:\n print('num negations: %d' % n_negs)\n print('-' * 12)\n\n caption, img_feats, *_, img_idx = batch\n if opt.cuda:\n caption = caption.cuda()\n img_feats = img_feats.cuda()\n\n caption = truncate2d(caption) # should truncate first\n # For storing inference results\n dialogue_idx = img_idx.numpy()[0]\n caption = Variable(caption, volatile=True)\n if intervention == 'caption':\n caption = pertube(caption, opt.pnoise)\n caption = caption.transpose(0, 1)\n img = Variable(img_feats, volatile=True)\n\n # encode caption\n # qbot also updates history\n q_fact_enc = qbot.enc_fact(caption)\n # print('check caption encoded %.3f' % q_fact_enc.data.norm(2))\n _, q_hist_state = qbot.history_encoder(q_fact_enc[None, :, :])\n # print('initial q', q_hist_state[0].data.norm(2))\n\n a_fact_enc = abot.enc_fact(caption)\n a_hist_state = None # empty at first\n\n for r in range(num_rounds):\n # (1) qbot ask question\n # q_i (length, 1)\n q_i = qbot.generate(q_hist_state, bos_idx, eos_idx,\n max_decode_len, opt.decoding == 'greedy')\n\n if intervention == 'question' and r >= opt.round:\n q_i = pertube(q_i, opt.pnoise)\n\n # (2) abot answer\n # (2.1) encode the question First\n a_question_enc = abot.enc_question(q_i)\n\n # Intervention for image\n if intervention == 'image' and r == opt.round:\n # Replace image with random noise\n img.data.uniform_()\n\n # concatenate img, a_q, Fa to pass to history encoder\n # important: preserve order of concatenation as done in training\n inpt = torch.cat([img, a_question_enc, a_fact_enc], -1)\n _, a_hist_state = abot.history_encoder(inpt[None, :, :], a_hist_state)\n\n # (2.2) generate answer\n a_i = abot.generate(a_hist_state, bos_idx, eos_idx,\n max_decode_len, opt.decoding == 'greedy')\n # checking\n # print('q: %s' % loader.tensor2string(q_i.data))\n # print('a: %s' % loader.tensor2string(a_i.data))\n # print('----')\n\n if intervention == 'answer' and r >= opt.round:\n a_i = pertube(a_i, opt.pnoise)\n if intervention == 'negation' and r >= opt.round:\n if a_i.numel() == 2:\n _tmp = a_i.view(-1)\n _idx = _tmp.data[0]\n if _idx == yes_idx or _idx == no_idx:\n if _idx == yes_idx:\n _tmp.data[0] = no_idx\n else:\n _tmp.data[0] = yes_idx\n n_negs += 1\n\n # (3) both bots encode fact fi\n # concatenate q and a (both are tok1...tokN EOS)\n f_i = torch.cat([q_i[:-1], a_i[:-1]], dim=0)\n\n q_fact_enc = qbot.enc_fact(f_i)\n # print(f_i)\n _, q_hist_state = qbot.history_encoder(q_fact_enc[None, :, :],\n q_hist_state)\n # print('q_hist_state', q_hist_state[0].sum().data[0])\n\n a_fact_enc = abot.enc_fact(f_i)\n\n # (4) qbot makes image prediction\n y = qbot.pred_img(q_hist_state[0][-1])\n pr = rank(y.data, dialogue_idx)\n reports[r] += [pr]\n if i % 500 == 0 and i > 0:\n save_file = 'report_{}_{}_{}.pkl'.format(intervention,\n opt.round,\n opt.pnoise)\n with open(save_file, 'wb') as f:\n pickle.dump(reports, f)\n\n\n# save finals results\nsave_file = 'report_{}_{}_{}.pkl'.format(intervention,\n opt.round,\n opt.pnoise)\n\nwith open(save_file, 'wb') as f:\n pickle.dump(reports, 
f)\n","repo_name":"danakianfar/Examining-Cooperation-in-VDM","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"44071829692","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog as fd\r\nfrom tkinter import messagebox\r\n\r\nimport pickle\r\nimport numpy as np\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\nroot = tk.Tk()\r\nroot.title('Image Steganography')\r\nroot.resizable(False, False)\r\nroot.geometry('400x450')\r\nframe = tk.Frame(master=root, relief=tk.RAISED)\r\n\r\ndef get_crop(frame,N,P,K,Temp,Humidity,PH,Rainfall,mdl_typ):\r\n N=float(N)\r\n P=float(P)\r\n K=float(K)\r\n Temp=float(Temp)\r\n Humidity=float(Humidity)\r\n PH=float(PH)\r\n Rainfall=float(Rainfall)\r\n\r\n data = np.array([[N, P, K, Temp, Humidity, PH, Rainfall]])\r\n\r\n mdl=\"DecisionTree.pkl\"\r\n if mdl_typ==\"Decision Tree\":\r\n mdl=\"DecisionTree.pkl\"\r\n elif mdl_typ==\"Guassian Naive Bayes\":\r\n mdl=\"NBClassifier.pkl\"\r\n elif mdl_typ==\"Logistic Regression\":\r\n mdl=\"LogisticRegression.pkl\"\r\n elif mdl_typ==\"Random Forest\":\r\n mdl=\"RandomForest.pkl\"\r\n\r\n rcmd_model=pickle.load(open(mdl, \"rb\"))\r\n prediction = rcmd_model.predict(data)\r\n print(prediction)\r\n txt=\"Recommended Crop for this Soil is \"+prediction[0]\r\n label1 = tk.Label(master=frame, text=txt,font=('Helvetica bold',12))\r\n label1.grid(row=7,column=0,padx=5,pady=5,columnspan=3)\r\n\r\ndef show_frm(model_type):\r\n global frame\r\n if (frame.winfo_exists()):\r\n frame.destroy()\r\n frame = tk.Frame(master=root, relief=tk.RAISED)\r\n N = tk.Entry(master=frame, width=15)\r\n P = tk.Entry(master=frame, width=15)\r\n K = tk.Entry(master=frame, width=15)\r\n temp = tk.Entry(master=frame, width=15)\r\n humidity = tk.Entry(master=frame, width=15)\r\n ph = tk.Entry(master=frame, width=15)\r\n rainfall = tk.Entry(master=frame, width=15)\r\n frame.grid(row=2,column=0,padx=5,pady=5,columnspan=3)\r\n label1 = tk.Label(master=frame, text=\"Nitrogen\")\r\n label2 = tk.Label(master=frame, text=\"Phosphorous\")\r\n label3 = tk.Label(master=frame, text=\"Pottasium\")\r\n label4 = tk.Label(master=frame, text=\"Temperature \")\r\n label5 = tk.Label(master=frame, text=\"Humidity\")\r\n label6 = tk.Label(master=frame, text=\"pH\")\r\n label7 = tk.Label(master=frame, text=\"Rainfall \")\r\n\r\n label1.grid(row=0,column=0,padx=5,pady=5)\r\n N.grid(row=1,column=0,padx=5,pady=5)\r\n label2.grid(row=0, column=1, padx=5, pady=5)\r\n P.grid(row=1, column=1, padx=5, pady=5)\r\n label3.grid(row=0, column=2, padx=5, pady=5)\r\n K.grid(row=1, column=2, padx=5, pady=5)\r\n label4.grid(row=2, column=0, padx=5, pady=5)\r\n temp.grid(row=3, column=0, padx=5, pady=5)\r\n label5.grid(row=2, column=2, padx=5, pady=5)\r\n humidity.grid(row=3, column=2, padx=5, pady=5)\r\n label6.grid(row=4, column=0, padx=5, pady=5)\r\n ph.grid(row=5, column=0, padx=5, pady=5)\r\n label7.grid(row=4, column=2, padx=5, pady=5)\r\n rainfall.grid(row=5, column=2, padx=5, pady=5)\r\n rcmd_btn = ttk.Button(\r\n frame,\r\n width=15,\r\n text='Crop',\r\n command=lambda: get_crop(frame,N.get(),P.get(),K.get(),temp.get(),humidity.get(),ph.get(),rainfall.get(),model_type)\r\n )\r\n rcmd_btn.grid(row=6,column=1,padx=5, pady=5)\r\n print(model_type)\r\n\r\n\r\n# Press the green button in the gutter to run the script.\r\nif __name__ == '__main__':\r\n label = tk.Label(master=root, text=\"Choose a Model\")\r\n options = [\r\n \"Decision Tree\",\r\n \"Guassian Naive Bayes\",\r\n \"Logistic Regression\",\r\n \"Random Forest\",\r\n ]\r\n clicked = tk.StringVar()\r\n clicked.set(\"Decision Tree\")\r\n drop = tk.OptionMenu(root, clicked, 
*options)\r\n    Reco = ttk.Button(\r\n        root,\r\n        width = 15,\r\n        text = 'Recommend',\r\n        command = lambda: show_frm(clicked.get())\r\n    )\r\n    label.grid(row=0,column=0,padx=5,pady=5)\r\n    drop.grid(row=0,column=1,padx=5,pady=5)\r\n    Reco.grid(row=1,column=1,padx=5,pady=5)\r\n    root.mainloop()\r\n\r\n\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n","repo_name":"Sandeep-x/Crop-Recommendation-Using-Machine-Learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"15740962200","text":"from xml.dom import minidom\nthiene = minidom.parse('data/Thiene.osm')\nuseful_nodes = minidom.parse('data/useful_nodes.osm')\n\n# Extract the way and the nodes from the source files\nwaylist = thiene.getElementsByTagName('way')\nselected_nodes = useful_nodes.getElementsByTagName('node')\n\n# Opens the document where will be written the data\nosm_file = open('data/final_file.osm', 'w')\nosm_file.write(\"\"+\"\\n\")\nosm_file.write(\"\"+\"\\n\")\n\n# Writes all the relevant nodes to the file\nfor node in selected_nodes:\n osm_file.write(node.toxml()+\"\\n\")\n\n# For way in waylist:\nfor way in waylist:\n # Extract the tags to check if the way is a building\n tags_child_nodes = way.getElementsByTagName('tag')\n\n # Boolean to check if one tag is \"building\"\n there_is_building = True\n for tags_child in tags_child_nodes:\n if(tags_child.getAttribute(\"k\") == \"highway\"):\n there_is_building = False\n \n # If it's a building skip the way\n if(there_is_building):\n continue\n \n # Get all the way's nodes\n child_nodes = way.getElementsByTagName(\"nd\")\n\n # Check all the possible match\n for node in child_nodes:\n # Boolean value used to see if the node is in the checked ones\n is_absent = True\n for check_node in selected_nodes:\n # If the id is present continue\n if(node.getAttribute(\"ref\")==check_node.getAttribute(\"id\")):\n is_absent = False\n break\n \n # If the nodes isn't in the checked ones it\n # remove the child from the way\n if(is_absent):\n way.removeChild(node)\n\n # Print the way into the final file\n osm_file.write(way.toxml() + \"\\n\")\n\n# Close the file\nosm_file.write(\"\")\nosm_file.close()","repo_name":"thomasporro/PloggingRouteCalculator","sub_path":"node_generator.py","file_name":"node_generator.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"3959622913","text":"from abc import ABC, abstractmethod\n\nimport torch\nfrom torchvision.transforms import functional as TVF\nimport numpy as np\n\n\nclass Policy(ABC):\n @abstractmethod\n def action(self, screen, observation):\n \"\"\"\n\n :param screen: batch, channels, height, width\n :param observation: batch, channels, height, width\n :return: an action in the embedding space, will need to be converted to the simulator space\n \"\"\"\n raise NotImplementedError\n\n\nclass RandomPolicy(Policy):\n def __init__(self, env):\n self.env = env\n\n # todo decide if action will be in embedded or simulator space\n # todo if so then embedding should be part of the policy\n def action(self, screen, observation):\n return self.env.action_space.sample()\n\n\nclass ActionEmbedding:\n \"\"\"\n Simple one-hot embedding of the action space\n \"\"\"\n\n def __init__(self, env):\n self.env = env\n\n def tensor(self, action):\n action_t = torch.zeros(self.env.action_space.n)\n action_t[action] = 1.0\n return action_t\n\n def numpy(self, action):\n action_n = np.zeros(self.env.action_space.n)\n action_n[action] = 1.0\n return action_n\n\n def embedding_to_action(self, index):\n return index\n\n def start_tensor(self):\n return torch.zeros(self.env.action_space.n)\n\n def start_numpy(self):\n return np.zeros(self.env.action_space.n)\n\n\nclass ToTensor(object):\n def __init__(self, action_embedding):\n self.embed_action = action_embedding\n\n def __call__(self, screen, observation, reward, done, info, action):\n screen_t = TVF.to_tensor(screen)\n observation_t = torch.Tensor(observation)\n reward_t = torch.Tensor([reward])\n done_t = torch.Tensor([done])\n action_t = self.embed_action.tensor(action)\n return screen_t, observation_t, reward_t, done_t, info, action_t\n\n\nclass Rollout:\n def __init__(self, env):\n self.env = env\n\n def rollout(self, policy, episode, max_timesteps=100):\n observation = self.env.reset()\n screen = self.env.render(mode='rgb_array')\n\n for t in range(max_timesteps):\n\n action = policy.action(screen, observation)\n observation, reward, done, info = self.env.step(action)\n screen = self.env.render(mode='rgb_array')\n\n if done:\n print(\"Episode finished after {} timesteps\".format(t + 1))\n break\n\n\nclass RolloutGen(object):\n \"\"\"\n Wrap gym in a generator object\n \"\"\"\n\n def __init__(self, env, policy, action_embedding, populate_screen=True, render_to_window=False):\n \"\"\"\n\n :param env: gym environment\n :param policy: policy to select actions in the environment\n :param populate_screen: populates the screen return parameter with numpy array of RGB data\n :param render_to_window: render the output to a window\n \"\"\"\n self.env = env\n self.policy = policy\n self.done = True\n self.action = None\n self.populate_screen = populate_screen\n self.render_to_window = render_to_window\n self.to_tensor = ToTensor(action_embedding)\n\n def __iter__(self):\n return self\n\n # Python 3 compatibility\n def __next__(self):\n return self.next()\n\n def render(self):\n screen = None\n if self.populate_screen:\n screen = self.env.render(mode='rgb_array')\n if self.render_to_window:\n self.env.render()\n return screen\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n screen = self.render()\n screen_t, observation_t, reward_t, done_t, info, action_t = \\\n self.to_tensor(screen, observation, reward, done, info, action)\n action = self.policy.action(screen_t.unsqueeze(0), observation_t.unsqueeze(0))\n return screen, observation, 
reward, done, info, action\n\n    def next(self):\n\n        if self.done:\n            observation = self.env.reset()\n            screen = self.render()\n            reward = 0\n            self.done = False\n            info = {}\n            screen_t, observation_t, reward_t, done, info, action = \\\n                self.to_tensor(screen, observation, reward, self.done, info, 0)\n            self.action = self.policy.action(screen_t.unsqueeze(0), observation_t.unsqueeze(0))\n            return screen, observation, reward, self.done, info, self.action\n\n        else:\n            screen, observation, reward, done, info, action = self.step(self.action)\n            self.action = action\n            self.done = done\n            return screen, observation, reward, done, info, action\n\n\nclass GymSimulatorDataset(torch.utils.data.Dataset):\n    def __init__(self, env, policy, length, action_embedding, output_in_numpy_format=False, render_to_window=False):\n        torch.utils.data.Dataset.__init__(self)\n        self.length = length\n        self.count = 0\n        self.policy = policy\n        self.rollout = RolloutGen(env, policy, action_embedding, render_to_window=render_to_window).__iter__()\n        self.output_in_numpy_format = output_in_numpy_format\n        self.to_tensor = ToTensor(action_embedding=action_embedding)\n\n    def __getitem__(self, index):\n        screen, observation, reward, done, info, action = self.rollout.next()\n\n        if not self.output_in_numpy_format:\n            screen, observation, reward, done, info, action = \\\n                self.to_tensor(screen, observation, reward, done, info, action)\n\n        self.count += 1\n\n        return screen, observation, action, reward, done\n\n    def __len__(self):\n        return self.length\n","repo_name":"DuaneNielsen/rnd","sub_path":"gym_data.py","file_name":"gym_data.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"}
+{"seq_id":"11304840247","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, HttpRequest\nfrom django.urls import reverse\nfrom urllib.parse import urlencode, quote_plus\nfrom django.shortcuts import redirect\nfrom .models import User\n\n\n\n# Create your views here.\n\ndef home(request):\n \n if request.method == 'POST':\n name = request.POST.get('name')\n print(f\"Input value: {name}\")\n user = User(username=name)\n \n \n return render(request, 'start.html', {'user':user})\n \n \n return render(request, 'index.html') \n \n \ndef start(request):\n \n username = request.session.get('username')\n username = request.POST.get('username')\n print(username)\n return render(request, 'start.html', {'username':username})\n \n\n\ndef quiz1(request):\n \n username = request.POST.get('username')\n name1 = request.POST.get('name1')\n print(f'Input value: {username}')\n print(f'Input value: {name1}')\n sum = request.POST.get('sum')\n print(f'input :{sum}')\n sum = 0\n resultDiv = ''\n\n next_url = request.build_absolute_uri(reverse('quiz2'))\n next_url_with_username = next_url + f'?name1={name1}'\n share_url = f'https://api.whatsapp.com/send?text={next_url}'\n context = {'share_url':share_url,'sum':sum,'name1':name1,'username':username,'resultDiv':resultDiv}\n\n \n response = render(request,'quiz1.html',context)\n return response\n \n \n \n \n\ndef quiz2(request):\n \n if request.method == 'POST':\n name1 = request.POST.get('name1')\n print(f\"Input value: {name1}\")\n sum = request.POST.get('sum')\n print(f'input :{sum}')\n context = {'name1':name1, 'username': 'username', 'sum':sum}\n return render(request, 'quiz3.html', context)\n return render(request,'quiz2.html')\n \n\n\ndef quiz3(request):\n\n if request.method == 'POST':\n name1 = request.POST.get('name1')\n print(name1)\n context = {'name1':name1,'data2':'Quiz3 Data'}\n return render(request,'quiz3.html',context)\n\n return render(request,'quiz3.html')\n\ndef final(request):\n\n if request.method == 'POST':\n name1 = request.POST.get('name1')\n\n context = {'name1':name1, 'data2':'Final Data'}\n return render(request, 'final.html', context)\n\n return render(request,'final.html')\n\ndef first(request):\n if request.method == 'POST':\n name1 = request.POST.get('name1')\n username = request.POST.get('username')\n name1 = request.POST.get('name1')\n request.session['name1'] = name1\n print(f'Input value: {name1}')\n sum = 0\n resultDiv = ''\n next_url = request.build_absolute_uri(reverse('quiz2'))\n share_url = f'https://api.whatsapp.com/send?text={next_url}'\n\n context = {'name1':'name1','sum':'sum', 'data2':'First Data', 'username':username}\n return redirect('home')\n\n return redirect('home')\n\n\n\n ","repo_name":"Khagesh000/Online-quiz","sub_path":"online/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"6776943475","text":"# -*- coding: utf-8 -*-\nimport copy\n\n__author__ = 'nietaki'\n\nfrom BaseBot import BaseBot\nfrom alg import *\nimport physics\nimport random\n\nclass PhysicsTester(BaseBot):\n def __init__(self, sock, name, key):\n super(PhysicsTester, self).__init__(sock, name, key)\n #self.radius_speed_dict = {40: 4.8, 60: 5.5, 90: 6.6, 110: 7.3}\n self.radius_speed_dict = {40: 4.7, 60: 5.4, 90: 6.5, 110: 7.2}\n #self.radius_speed_dict = {40: 4.5, 60: 5.1, 90: 6.3, 110: 7.0}\n\n def on_car_positions(self, data, tick):\n piece_index = self.my_car().track_piece_index\n lane = self.my_car().lane()\n radius = self.track.true_radius(piece_index, lane)\n \"\"\"\n game plan: sprawdzamy, czy trzeba zwalniać przed następnym zakrętem:\n jak tak, to zwalniamy\n jak nie, to sprawdzamy, czy jesteśmy w zakręcie:\n jak tak, to utrzymujemy prędkość adekwatną do zakrętu\n jak nie, to pełen gaz\n \"\"\"\n if (self.my_car().slip_angle > 50 and self.my_car().angle_velocity > 1.0) or \\\n (self.my_car().slip_angle < -50 and self.my_car().angle_velocity < -1.0):\n #print(\"That's too dangerous, brother!\")\n self.throttle(0.3)\n return\n next_turn_id = self.track.next_bend_id(piece_index, min(radius, 150))\n distance_until_sharp_turn = self.track.distance_until_index(piece_index, self.my_car().in_piece_distance, next_turn_id, lane)\n if not distance_until_sharp_turn is None:\n target_velocity = self.radius_speed_dict[self.track.true_radius(next_turn_id, lane)]\n minimal_distance_to_break = physics.distance_to_break(self.my_car().velocity, target_velocity)\n if minimal_distance_to_break >= distance_until_sharp_turn and (self.my_car().velocity - target_velocity) > 0.1:\n self.throttle(0.0)\n #print(\"gotta slow down to {0}!\".format(target_velocity))\n return\n else:\n 0 == 0\n #print('there is a turn, but nothing serious yet, breaking distance is {0}, {1} available'.format(minimal_distance_to_break, distance_until_sharp_turn))\n\n if radius < 150:\n target_speed = self.radius_speed_dict[radius]\n #print(\"gonna set throttle to {0}\".format(target_speed / 10))\n self.throttle(target_speed / 10) # TODO rely on physics\n else:\n #print(\"full ahead, cap'n\")\n self.throttle(1.0)\n\n\nclass PhysicsBisector(BaseBot):\n def __init__(self, sock, name, key):\n super(PhysicsBisector, self).__init__(sock, name, key)\n self.piece_look_ahead = 5\n\n def on_car_positions(self, data, tick):\n car = self.my_car()\n\n if not car.crashed:\n the_until = (car.track_piece_index + self.piece_look_ahead) % car.track.track_piece_count\n deduced_throttle = my_bisect(0.0, 1.0, 6, lambda t: physics.is_safe_until_simple(car, t, the_until, 0.0))\n print(\"decided to go on throttle {0} from {1} to {2}\".format(deduced_throttle, car.track_piece_index, the_until))\n self.throttle(deduced_throttle, tick)\n else:\n self.ping()\n\nclass AdvancedBisector(BaseBot):\n def __init__(self, sock, name, key):\n super(AdvancedBisector, self).__init__(sock, name, key)\n\n def on_car_positions(self, data, tick):\n car = self.my_car()\n\n if not car.crashed:\n cur_index = self.my_car().track_piece_index\n macro_index = self.track.macro_piece_map[cur_index]\n next_macro_beginning = self.track.reverse_macro_map[(macro_index + 1) % len(self.track.reverse_macro_map)]\n lane = car.lane()\n next_macro_beginning_piece = car.track.track_pieces[next_macro_beginning]\n next_macro_radius = next_macro_beginning_piece.true_radius(lane)\n #next_macro_target_speed = physics.estimate_stable_speed_at_angle(next_macro_radius, 
physics.crash_angle_buffered())\n next_macro_target_speed = physics.estimate_safe_speed_at_angle(next_macro_radius, physics.crash_angle_buffered())\n\n if car.current_track_piece().is_straight:\n if physics.distance_to_break(car.velocity, next_macro_target_speed) >= \\\n car.track.distance_until_index(car.track_piece_index,\n car.in_piece_distance,\n next_macro_beginning,\n lane):\n self.throttle(physics.throttle_to_reach_velocity(car.velocity, next_macro_target_speed), tick)\n else:\n self.throttle(1.0, tick)\n else:\n # it's a bend!\n #FIXME: do the safe ending in a more intelligent way than adding one to the next piece index\n #FIXME: this is just copied from the other bot, we want velocities, not throttles\n the_until = (next_macro_beginning + 1) % car.track.track_piece_count\n deduced_throttle = my_bisect(0.0, 1.0, 6, lambda t: physics.is_safe_until_simple(car, t, the_until, 0.0))\n self.throttle(deduced_throttle, tick)\n else:\n self.ping()\n\n\n\nclass Cruiser(BaseBot):\n def __init__(self, sock, name, key):\n super(Cruiser, self).__init__(sock, name, key)\n\n def on_car_positions(self, data, tick):\n car = self.my_car()\n cur_index = car.track_piece_index\n\n if not car.crashed:\n # turning on Turbo at the beginning of the longest straight:\n if cur_index == self.track.index_of_the_beginning_of_the_longest_straight_piece and self.turbo_available:\n self.turbo(\"Buckle up!\", tick)\n return\n\n macro_index = self.track.macro_piece_map[cur_index]\n next_macro_beginning = self.track.reverse_macro_map[(macro_index + 1) % len(self.track.reverse_macro_map)]\n lane = car.lane()\n distance_until_next_macro = car.track.distance_until_index(car.track_piece_index,\n car.in_piece_distance,\n next_macro_beginning,\n lane)\n\n next_macro_beginning_piece = car.track.track_pieces[next_macro_beginning]\n next_macro_radius = next_macro_beginning_piece.true_radius(lane)\n next_macro_target_speed = physics.estimate_stable_speed_at_angle(next_macro_radius, physics.crash_angle_buffered())\n\n ### SWITCH ###\n # should we consider switching? 
- is next piece a switch and is it legal now?\n if not car.is_switching() and not self.switch_initiated and self.track.next_piece(cur_index).switch and \\\n len(self.track.lanes) > 1 and car.velocity > physics.safe_speed:\n same_lane_and_close = self.other_cars_on_lane_within_distance(lane, 150)\n if len(same_lane_and_close):\n #there is somebody to go around\n\n dirs = car.possible_lane_switch_directions()\n switch_direction = random.choice(dirs)\n\n target_lane = lane + switch_direction\n print('checking if there is somebody blocking the other lane - {0}, after going in {1} direction'.format(target_lane, switch_direction))\n opponents_on_target_lane = self.other_cars_on_lane_within_distance(target_lane, 250)\n if not opponents_on_target_lane:\n print(\"other lane is clear, let's see if physics says it is safe\")\n car_other_lane = copy.copy(car)\n car_other_lane.start_lane_index += switch_direction\n car_other_lane.end_lane_index += switch_direction\n if physics.check_with_annealing(car_other_lane):\n print(\"going to switch lane to in the {0} direction\".format(switch_direction))\n self.switch_lane_int(switch_direction, tick)\n return\n else:\n print(\"staying here, switching is not safe!\")\n else:\n print(\"somebody on the other lane, not switching now\")\n\n if self.switch_initiated:\n print('reducing speed to be safe')\n self.throttle(0.0, tick)\n return\n ### end SWITCH ###\n\n if car.current_track_piece().is_straight:\n # straight!\n should_run_like_hell = self.is_race() and \\\n car.lap == self.lap_count() - 1 and \\\n macro_index == len(self.track.reverse_macro_map) - 1 and \\\n car.current_track_piece().is_straight\n if should_run_like_hell:\n print(\"gotta go fast!\")\n if self.turbo_available:\n self.turbo(\"Strap yourselves in boys!\", tick)\n else:\n self.throttle(1.0, tick)\n return\n\n\n next_macro_target_speed = physics.estimate_safe_speed_at_angle(next_macro_radius, physics.crash_angle_buffered())\n\n # simulating the car braking to the safe speed\n car_at_next_macro = physics.simulate_straight_with_breaking_to_speed(car, distance_until_next_macro, next_macro_target_speed)\n\n # end of the simulation with annealing\n macro_plus_1 = (macro_index + 1) % len(self.track.reverse_macro_map)\n # getting the best speed for the bend\n deduced_speed = physics.estimate_optimal_speed_at_bend_with_annealing(car_at_next_macro,\n car.track.reverse_macro_map[macro_plus_1],\n True)\n\n # planning the breaking, now with better values\n # checking if we can afford it\n # velocity and travelled distance in one step of full throttle\n next_velocity, next_distance = physics.velocity_and_distance_step(car.velocity, 1.0)\n # distance needed to break\n breaking_distance = physics.distance_to_break(next_velocity, max(deduced_speed, next_macro_target_speed))\n if breaking_distance + next_distance < distance_until_next_macro:\n self.throttle(1.0, tick)\n else:\n self.throttle(0.0, tick)\n\n else:\n # it's a bend!\n the_until = (next_macro_beginning + 1) % car.track.track_piece_count\n deduced_speed = physics.estimate_optimal_speed_at_bend_with_annealing(car, the_until)\n self.throttle(physics.throttle_to_reach_velocity(car.velocity, deduced_speed), tick)\n else:\n self.ping(tick)\n\n\n\n","repo_name":"nietaki/HWO-2014---the-What-What-What","sub_path":"python/heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":11269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20251212480","text":"import typing\nfrom enum import Enum\n\nclass FieldType(Enum):\n INTEGER = 'INTEGER'\n FLOAT = 'FLOAT'\n STRING = 'STRING'\n BOOLEAN = 'BOOLEAN'\n DATE = 'DATE'\n DATETIME = 'DATETIME'\n TIME = 'TIME'\n RECORD = 'RECORD'\n NULL = 'NULL'\n\ndef addDataTypes(new_data_types: typing.Dict[str, str]):\n global FieldType\n cur_field_types = dict()\n for attr in dir(FieldType):\n if not attr.startswith('__'):\n cur_field_types[attr] = attr\n cur_field_types.update(new_data_types)\n FieldType = Enum('FieldType', cur_field_types)\n\ndef removeDataTypes(removed_data_types: typing.Dict[str, str]):\n global FieldType\n cur_field_types = dict()\n for attr in dir(FieldType):\n if not attr.startswith('__') and attr not in removed_data_types:\n cur_field_types[attr] = attr\n FieldType = Enum('FieldType', cur_field_types)\n\nclass Field(object):\n __slots__ = {\n 'name': \"-> string [REQUIRED]\",\n 'type': \"-> string [REQUIRED] integer|float|string|boolean|date|datetime|time|record\",\n 'mode': \"-> string [OPTIONAL] REQUIRED|NULLABLE|REPEATED: default NULLABLE\",\n 'fields': \"-> list [OPTIONAL IF type = RECORD]\",\n 'schema_name': \"-> string [OPTIONAL]\"\n }\n\n def __init__(self, **attrs):\n self.name = attrs['name']\n self.type = attrs['type']\n self.mode = attrs.get('mode', 'NULLABLE')\n self.schema_name = attrs.get('schema_name')\n self.fields = [\n f if isinstance(f, Field) else Field(**f)\n for f in attrs.get('fields', [])\n ]\n\n\n def __repr__(self):\n return \"\".format(\n id=id(self),\n name=self.name,\n type=self.type\n )\n \n\n def __eq__(self, other):\n\n return (\n self.name == other.name\n and self.type == other.type\n and self.mode == other.mode\n and self.fields == other.fields\n and self.schema_name == other.schema_name\n )\n\n @property\n def path(self):\n if self.schema_name:\n return self.schema_name + \".\" + self.name\n else:\n return self.name\n \n\n def new(self, **parts):\n attrs = {attr:getattr(self, attr) for attr in self.__slots__}\n attrs.update(parts)\n\n return self.__class__(**attrs)\n\n def to_dict(self):\n return dict(\n name = self.name,\n type = self.type,\n mode = self.mode,\n fields = [f.to_dict() for f in self.fields]\n )\n\n\n","repo_name":"wyfunique/DBSim","sub_path":"dbsim/field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"}
+{"seq_id":"4689376871","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nimport os\r\nimport tkinter\r\nfrom PIL import Image, ImageTk\r\n\r\n\r\nroot = Tk()\r\n\r\n#背景\r\ncanvas = tkinter.Canvas(root,\r\n width=800, # 指定Canvas组件的宽度\r\n height=460, # 指定Canvas组件的高度\r\n bg='white') # 指定Canvas组件的背景色\r\nimage = Image.open(\"img.jpg\")\r\nim = ImageTk.PhotoImage(image)\r\n\r\ncanvas.create_image(300, 350, image=im) # 使用create_image将图片添加到Canvas组件中\r\n\r\n\r\n\r\n\r\n# Frame框架\r\nf1 = Frame(root)\r\nf1.pack()\r\nf2 = Frame(root)\r\nf2.pack()\r\nf3 = Frame(root)\r\nf3.pack()\r\nf4 = Frame(root)\r\nf4.pack()\r\nf5 = Frame(root)\r\nf5.pack()\r\nf5_5 = Frame(root)\r\nf5_5.pack()\r\nf6 = Frame(root)\r\nf6.pack()\r\nf7 = Frame(root)\r\nf7.pack()\r\n\r\nlabel_l1 = Label(f1, text='关键字(必选)',background='pink')\r\nlabel_l1.pack(side=LEFT)\r\n# 关键字输入框\r\nkey = StringVar()\r\nentry_e1 = Entry(f1, textvariable=key)\r\nentry_e1.pack(side=RIGHT)\r\n\r\nlabel_l2 = Label(f2, text='***页 数*** ',background='pink')\r\nlabel_l2.pack(side=LEFT)\r\n# 页数输入框\r\npage = StringVar()\r\nentry_e2 = Entry(f2, textvariable=page)\r\nentry_e2.pack(side=RIGHT)\r\n\r\n# 数据库复选框\r\nlabel_c = Label(f3, text='选择数据库',background='pink')\r\nlabel_c.pack()\r\nmysql = IntVar()\r\ncheck_one = Checkbutton(f3, text='MYSQL', variable=mysql)\r\ncheck_one.pack()\r\nmongo = IntVar()\r\ncheck_two = Checkbutton(f3, text='MONGO', variable=mongo)\r\ncheck_two.pack()\r\n\r\n\r\nlabel_u = Label(f4, text='用户名',background='pink')\r\nlabel_u.pack(side=LEFT)\r\n# 用户名输入框\r\nuser = StringVar()\r\nentry_u = Entry(f4, textvariable=user)\r\nentry_u.pack(side=RIGHT)\r\nlabel_p = Label(f5, text='密 码',background='pink')\r\nlabel_p.pack(side=LEFT)\r\n# 密码输入框\r\npassword = StringVar()\r\nentry_p = Entry(f5, textvariable=password)\r\nentry_p['show']='*'\r\nentry_p.pack(side=RIGHT)\r\n\r\n#下拉列表(地区)\r\narea={\r\n '全国': 489,\r\n '北京': 530,\r\n '上海': 538,\r\n '深圳': 765,\r\n '广州': 763,\r\n '天津': 531,\r\n '成都': 801,\r\n '杭州': 653,\r\n '武汉': 736,\r\n '大连': 600,\r\n '南京': 635,\r\n '苏州': 639,\r\n '西安': 854,\r\n}\r\n\r\nlabel_ttk = Label(f5_5, text='地 区',background='pink')\r\nlabel_ttk.pack(side=LEFT)\r\n\r\nttk_v='全国'\r\ndef functtk(event):\r\n global ttk_v\r\n ttk_v=cmb.get()\r\n print(ttk_v)\r\n\r\ncmb=ttk.Combobox(f5_5)\r\ncmb.pack(side=RIGHT)\r\ncmb['value']=('全国','北京','上海','深圳','广州','天津','成都','杭州','武汉','大连','南京','苏州','西安')\r\ncmb.current(0)\r\ncmb.bind(\"<>\",functtk)\r\n\r\n\r\n\r\n#rbutton_one=Radiobutton(f5_5,text=area['全国'],variable=bintvar,vaule=1,command=bo)\r\n\r\n\r\n\r\n\r\ndef run():\r\n if key.get():\r\n now_path = os.path.dirname(__file__)\r\n params = '-a input_keyword={} '.format(\r\n key.get())\r\n if page.get():\r\n params = params+'-a input_page={} '.format(page.get())\r\n else:\r\n params = params+'-a input_page=1 '\r\n if mysql.get() == 1 and user.get() and password.get():\r\n params = params + '-s set_mysql=1 -s set_user={} -s set_password={} '.format(\r\n user.get(), password.get())\r\n else:\r\n params = params + '-s set_mysql=0 -s set_user={} -s set_password={} '.format(\r\n '','')\r\n if mongo.get() == 1:\r\n params = params + \\\r\n '-s set_mongo=1 '\r\n else:\r\n params = params + '-s set_mongo=0 '\r\n\r\n params=params + '-a input_area={} '.format(area[ttk_v])\r\n\r\n os.system('cd {}'.format(now_path)+r'\\recruit'+' && ' +\r\n 'scrapy crawl zhilian {}'.format(params)+'-s COLLECTION={}'.format('求职信息'))\r\n\r\n\r\nbutton = Button(f6, text='run', command=run,background='orange')\r\nbutton.pack()\r\n\r\ntext = 
'''\r\n页数默认为1页,每页有90条数据,最大限制在10页,避免恶意使用\r\n只有选择MYSQL数据库才需要输入用户名和密码,存储在本地\r\n'''\r\nspecification = Label(f7, text=text,background='yellow')\r\nspecification.pack()\r\n\r\nunit=5.5\r\n\r\ncanvas.create_window(400,unit*10,\r\n window=f1)\r\ncanvas.create_window(400,unit*20,\r\n window=f2)\r\ncanvas.create_window(400,unit*30,\r\n window=f3)\r\ncanvas.create_window(400,unit*40,\r\n window=f4)\r\ncanvas.create_window(400,unit*50,\r\n window=f5)\r\ncanvas.create_window(400,unit*60,\r\n window=f5_5)\r\ncanvas.create_window(400,unit*70,\r\n window=f6)\r\ncanvas.create_window(400,unit*80,\r\n window=f7)\r\n\r\n\r\n\r\n\r\ncanvas.pack() # 将Canvas添加到主窗口\r\n\r\nroot.mainloop()\r\n","repo_name":"MonGod0222/zhilian-GUI-crawl","sub_path":"任务1-智联招聘5.0/智联招聘GUI.py","file_name":"智联招聘GUI.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"6507580241","text":"import pickle\n\n\n#http://www.tutorialspoint.com/python/python_files_io.htm\n\nclass pixel(object):\n def __init__(self, px, py, prgb):\n self.x = px\n self.y = py\n self.rgb = prgb\n\n\nfile = open(\".\\\\Content\\\\testData.json\",mode=\"rb+\")\n\ntestdata = [1,2,\"#fffff3\"]\n\npixels = []\n\ntestObj = pixel (1,1,\"#ffFFF1\")\npixels.append(testObj)\n\ntestObj = pixel (1,2,\"#ffFFF2\")\npixels.append(testObj)\n\ntestObj = pixel (1,3,\"#ffFFF3\")\npixels.append(testObj)\nprint (len(pixels))\n#print (\"pixels:\" + )\n\npickle.dump(pixels, file)\n#file.close()\n\nfile = open(\".\\\\Content\\\\testData.json\",mode=\"rb+\")\nreadTestData = pickle.load(file)\n\nprint (len(readTestData))\n\nfor pix in readTestData:\n print (pix.x, pix.y, pix.rgb)\n\n","repo_name":"nrnoble/masterdirectory","sub_path":"GRC1/Library/Examples/FileIO.py","file_name":"FileIO.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"73123571464","text":"\"\"\"\nA simple study space location entry flask app.\n\"\"\"\nimport flask\nfrom flask.views import MethodView\nfrom index import Index\nfrom submit import Submit\nfrom display import Display\n\napp = flask.Flask(__name__) # our Flask app\n\napp.add_url_rule('/',\n view_func=Index.as_view('index'),\n methods=[\"GET\"])\n\napp.add_url_rule('/submit',\n view_func=Submit.as_view('submit'),\n methods=['GET', 'POST'])\n\napp.add_url_rule('/display',\n view_func=Display.as_view('display'),\n methods=[\"GET\"])\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"vishrutss/Web-and-cloud-Homeworks-and-Projects","sub_path":"hw4/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"6459119884","text":"from flask import Flask\nimport json\nimport os\n\napp = Flask(__name__)\n\nsick_traces = []\n\ndef load_sick_traces():\n n = len(os.listdir(\"data\"))\n for i in range(1, n+1):\n with open(\"data/\" + str(i) + \".json\") as f:\n sick_traces.append(json.load(f))\n\n@app.route('/heartbeat/')\ndef heartbeat(offset):\n offset = int(offset)\n if offset >= 0 and offset < len(sick_traces):\n return json.dumps(sick_traces[offset:])\n return json.dumps([])\n\n\nif __name__ == '__main__':\n load_sick_traces()\n app.run()","repo_name":"KamilPiechowiak/covid-contact-tracing","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8771535267","text":"\"\"\"Standalone script to seed data into the Voithos model\"\"\"\n## Imports\n# from this project\nimport web.ephesus.app as ephesus_app\nfrom web.ephesus.extensions import db\nfrom web.ephesus.model.user import User, Project, ProjectAccess\nfrom web.ephesus.model.voithos import (\n FlaggedTokens,\n Vocabulary,\n TokenSuggestions,\n SuggestionType,\n UserDecisionType,\n SuggestionSourceType,\n)\n\nvoithos_seed_data = [\n {\n \"flagged_tokens\": [{\"lang_code\": \"eng\", \"token\": \"confidint\"}],\n \"vocabulary\": [\n {\n \"lang_code\": \"eng\",\n \"entry\": \"confident\",\n },\n {\"lang_code\": \"eng\", \"entry\": \"confidante\"},\n ],\n \"token_suggestions\": [\n {\n \"mapping\": {\"flagged_token_idx\": 0, \"vocabulary_idx\": 0},\n \"association_data\": {\n \"suggestion_type\": SuggestionType.SPELLING,\n \"confidence\": 0.7,\n \"user_decision_type\": UserDecisionType.UNDECIDED,\n \"suggestion_source_type\": SuggestionSourceType.AI,\n },\n },\n {\n \"mapping\": {\"flagged_token_idx\": 0, \"vocabulary_idx\": 1},\n \"association_data\": {\n \"suggestion_type\": SuggestionType.SPELLING,\n \"confidence\": 0.3,\n \"user_decision_type\": UserDecisionType.UNDECIDED,\n \"suggestion_source_type\": SuggestionSourceType.AI,\n },\n },\n ],\n }\n]\n\nusers_seed_data = {\n \"users\": [\n {\n \"email\": \"bob@email.com\",\n \"username\": \"bob\",\n \"password\": \"pbkdf2:sha512:210000$UKQsOQDc7Ngj5g1w$6364090ba739122c31f034c5600f67d6ac837058b2282f4a68ac64e171bc0d052767540da7e900af40b8bdbf13dac35bae89fea863b1afdbdefb3034af6fb982\",\n \"is_email_verified\": 1,\n \"status\": \"ACTIVE\",\n \"roles\": \"[\" \"public\" \"]\",\n },\n {\n \"email\": \"sam@email.com\",\n \"username\": \"sam\",\n \"password\": \"pbkdf2:sha512:210000$jUzijkvcIXcMGJep$303cfec08ac269c91f102c16e580d4dd3798fd0577c23189eb8e729311a7461e483177d0d1c58b8d5b95da8938c48732eb1ec316da88d1f16b7bce3828f09295\",\n \"is_email_verified\": 1,\n \"status\": \"ACTIVE\",\n \"roles\": \"[\" \"public\" \",\" \"admin\" \"]\",\n },\n ],\n \"projects\": [\n {\n \"resource_id\": \"asdf1234\",\n \"name\": \"Hindi NT\",\n \"lang_code\": \"hin\",\n \"status\": \"ACTIVE\",\n },\n {\n \"resource_id\": \"zxcv5678\",\n \"name\": \"Urdu NT\",\n \"lang_code\": \"urd\",\n \"status\": \"ACTIVE\",\n },\n ],\n \"projectAccess\": [\n {\n \"user_id\": 1,\n \"project_id\": 1,\n },\n {\n \"user_id\": 1,\n \"project_id\": 2,\n },\n {\"user_id\": 2, \"project_id\": 2, \"access_type\": \"COLLABORATOR\"},\n ],\n}\n\napp = ephesus_app.create_app()\n\nwith app.app_context():\n # Seed User\n for seed_user in users_seed_data[\"users\"]:\n user = User(**seed_user)\n db.session.add(user)\n\n # Seed Project\n for seed_project in users_seed_data[\"projects\"]:\n project = Project(**seed_project)\n db.session.add(project)\n\n # Seed ProjectAccess\n for seed_project_access in users_seed_data[\"projectAccess\"]:\n project_access = ProjectAccess(**seed_project_access)\n db.session.add(project_access)\n\n # # Seed data for spell checking\n # for item in voithos_seed_data:\n # flagged_tokens = []\n # vocabulary = []\n\n # # Create FlaggedTokens\n # for flagged_token in item[\"flagged_tokens\"]:\n # flagged_tokens.append(FlaggedTokens(**flagged_token))\n\n # # Create Vocabulary\n # for vocabulary_entry in item[\"vocabulary\"]:\n # vocabulary.append(Vocabulary(**vocabulary_entry))\n\n # # Create TokenSuggestions\n # for suggestion in item[\"token_suggestions\"]:\n # token_suggestion_data = {\n # **{\n # \"flagged_token\": flagged_tokens[\n # 
suggestion[\"mapping\"][\"flagged_token_idx\"]\n # ],\n # \"suggestion\": vocabulary[suggestion[\"mapping\"][\"vocabulary_idx\"]],\n # },\n # **suggestion[\"association_data\"],\n # }\n\n # token_suggestion = TokenSuggestions(**token_suggestion_data)\n\n # # Add to DB\n # db.session.add(token_suggestion)\n\n # Commit\n db.session.commit()\n","repo_name":"BibleNLP/greek-room","sub_path":"web/ephesus/model/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
+{"seq_id":"14173224823","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[11]:\n\n\ndef normalization(l):\n maxnum=max(l)\n minnum=min(l)\n for i in range(0,len(l)):\n l[i]=(l[i]-minnum)/(maxnum-minnum)\n sum=0\n for n in l:\n sum+=n\n mean=sum/len(l)\n print(\"Mean after Normalizing:\",mean)\n \nl=[78,65,56,87,91,37,49,77,62,59,95,63,42,55,72,68,81,39,45,49]\nnormalization(l)\n\n\n# In[10]:\n\n\nl=[1,2,3,4,5]\nx=min(l)\nprint(x)\n\n\n# In[35]:\n\n\nfrom sklearn.preprocessing import Normalizer\nl=[[4,1,2,2],[1,3,9,3],[5,7,5,1]]\nprint(Normalizer().fit_transform(l))\nprint(Normalizer().fit_transform(l).mean())\nprint(Normalizer().fit_transform(l).std())\n\n\n# In[34]:\n\n\nfrom sklearn.preprocessing import StandardScaler\ndata = [[0, 0], [0, 0], [1, 1], [1, 1]]\nprint(StandardScaler().fit_transform(data))\nprint(StandardScaler().fit_transform(data).mean(axis=0))\nprint(StandardScaler().fit_transform(data).std())\n\n\n# In[30]:\n\n\nimport statistics as st\ndef standardization(l):\n dev=st.stdev(l)\n mean=st.mean(l)\n for i in range(0,len(l)):\n l[i]=(l[i]-mean)/dev\n print(st.mean(l))\n print(st.stdev(l))\n \nl=[1,5,3,7,6]\nstandardization(l)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"1nt18is038/1NT18IS038_jothsna_A_ML","sub_path":"ML_LAB3.py","file_name":"ML_LAB3.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"2266192598","text":"import s6fits\nimport sys\n\ndef run_s6(filename):\n spec = s6fits.s6dataspec_t()\n spec.filename = filename \n s6fits.get_s6data(spec)\n\nif __name__ == '__main__':\n #filename = \"serendip6_eth2_AO_ALFA_1006_20160127_000604.fits\"\n filename = sys.argv[1]\n run_s6(filename)\n \n","repo_name":"liuweiseu/etfits-api","sub_path":"s6python.py","file_name":"s6python.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"16773459611","text":"#!/usr/bin/python3\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\nfrom pyvirtualdisplay import Display\nimport requests\n\ndisplay = Display(visible=0, size=(1920, 1080))\ndisplay.start()\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--window-size=1920x1080\")\n\nchrome_driver = '/usr/lib/chromium-browser/chromedriver'\n\ndriver = webdriver.Chrome(chrome_options=chrome_options, executable_path=chrome_driver)\n\ndriver.get('https://www.worldpadeltour.com/jugadores/?ranking=todos')\ntime.sleep(20)\n\nfor i in range(1,25):\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n\ntime.sleep(10)\ndriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\ntime.sleep(10)\ndriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\ntime.sleep(20)\nhtml_source = driver.page_source\ndata = html_source.encode('utf-8')\nfile = open(\"/home/pi/Websites/padel-info-world-padel-tour-html.txt\",\"wb\")\nfile.write(data)\nfile.close()\ndriver.quit()\ndisplay.stop()\nfiles = {\n 'file': ('padel-info-world-padel-tour-html.txt', open('/home/pi/Websites/padel-info-world-padel-tour-html.txt', 'rb')),\n}\nresponse = requests.post('http://localhost:38000/api/UpdateWorldPadelTourRankingFromFileUpload', files=files)\nprint (response)","repo_name":"mgimeno/padel-info-alexa-skill","sub_path":"WPT-Ranking-Script/padel-info-world-padel-tour-ranking-script.py","file_name":"padel-info-world-padel-tour-ranking-script.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"25199924869","text":"import random\n\n\nclass Dealer:\n def __init__(self):\n self.initial_deck = self.create_initial_deck()\n\n def create_initial_deck(self) -> list:\n \"\"\"53枚のカードを生成する(13 * 4マーク分のセット + Joker)\n X = Joker\n\n Returns:\n initial_deck: A ~ K + Jokerの文字列を格納しているリスト\n \"\"\"\n initial_deck = []\n\n for n in range(1, 14):\n if n == 1:\n court_Card = \"A\"\n elif n == 11:\n court_Card = \"J\"\n elif n == 12:\n court_Card = \"Q\"\n elif n == 13:\n court_Card = \"k\"\n else:\n court_Card = str(n)\n initial_deck.append(court_Card)\n\n initial_deck = initial_deck * 4\n initial_deck.append(\"X\")\n return initial_deck\n\n def initial_deal(self, *args: tuple()) -> list:\n \"\"\"プレイヤーの数に応して初期手札を配る\n\n Args:\n player (dict): ゲーム参加するユーザー情報\n\n Returns:\n dict: 各プレイヤーに初期手札を分配した結果の情報\n \"\"\"\n random.shuffle(self.initial_deck)\n players = list(args)\n q, mod = divmod(len(self.initial_deck), len(players))\n\n for i in range(len(players)):\n slice_n = q\n # 端数のカードが存在する場合はそれが無くなるまで追加して配る\n if i < mod:\n slice_n += 1\n players[i].deck = self.initial_deck[:slice_n]\n del self.initial_deck[:slice_n]\n return players\n\n def initial_putdown(self, deck: list) -> list:\n \"\"\"初期手札を重複しているカードを捨てる\n\n Args:\n deck (list): 1プレイヤーの手札\n\n Returns:\n list: 重複削除後の手札\n \"\"\"\n while len(set(deck)) != len(deck):\n popped_card = deck.pop(0)\n if popped_card in deck:\n # 同じ数字を持っているペアが存在する場合\n deck.remove(popped_card)\n else:\n # 同じ数字を持っているペアが存在しない場合\n deck.append(popped_card)\n return deck\n","repo_name":"HT0323/old_maid","sub_path":"dealer.py","file_name":"dealer.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"41710163014","text":"# -*- coding: utf-8 -*-\n# noqa: E402\n\"\"\"\nThis module contains the Block class.\n\"\"\"\nimport os\nimport gi\ngi.require_version('Gtk', '3.0')\ngi.require_version('GooCanvas', '2.0')\ngi.require_version('PangoCairo', '1.0')\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GooCanvas\nfrom gi.repository import GdkPixbuf\nfrom gi.repository import Pango\nfrom mosaicode.system import System\nfrom mosaicode.model.blockmodel import BlockModel\nfrom mosaicode.model.port import Port\n\nclass Block(GooCanvas.CanvasGroup, BlockModel):\n \"\"\"\n This class contains methods related the Block class\n \"\"\"\n\n # ----------------------------------------------------------------------\n\n def __init__(self, diagram, block):\n \"\"\"\n This method is the constuctor.\n \"\"\"\n GooCanvas.CanvasGroup.__init__(self)\n BlockModel.__init__(self, block)\n\n self.diagram = diagram\n self.remember_x = 0\n self.remember_y = 0\n\n self.widgets = {}\n self.focus = False\n self.has_flow = False\n self.is_selected = False\n self.is_collapsed = False\n\n self.width = 112\n\n self.connect(\"button-press-event\", self.__on_button_press)\n self.connect(\"motion-notify-event\", self.__on_motion_notify)\n self.connect(\"enter-notify-event\", self.__on_enter_notify)\n self.connect(\"leave-notify-event\", self.__on_leave_notify)\n self.move(int(float(self.x)), int(float(self.y)))\n\n self.height = self.__calculate_height()\n\n self.__draw_rect()\n self.__draw_label()\n self.__draw_ports()\n self.__draw_icon()\n self.update_flow()\n\n # ----------------------------------------------------------------------\n def __on_button_press(self, canvas_item, target_item, event):\n \"\"\"\n This method monitors when the button is pressed.\n\n Parameters:\n canvas_item\n Returns:\n * **Types** (:class:`boolean`)\n Indicates the button is pressed.\n \"\"\"\n # with Shift\n if event.state == Gdk.ModifierType.SHIFT_MASK \\\n | Gdk.ModifierType.MOD2_MASK:\n if self.is_selected:\n self.is_selected = False\n else:\n self.is_selected = True\n\n else:\n if not self.is_selected:\n self.diagram.deselect_all()\n self.is_selected = True\n\n self.diagram.show_block_property(self)\n\n if event.button == 1:\n self.remember_x = event.x\n self.remember_y = event.y\n\n self.diagram.update_flows()\n\n if event.button == 3:\n return False\n\n return True\n\n # ----------------------------------------------------------------------\n def __on_motion_notify(self, canvas_item, target_item, event=None):\n \"\"\"\n This method monitors the motion.\n\n Parameters:\n canvas_item\n target_item\n\n Returns:\n * **Types** (:class:`boolean`)\n\n \"\"\"\n if not event.state & Gdk.ModifierType.BUTTON1_MASK:\n return False\n if self.diagram.curr_connector is not None:\n return False\n # Get the new position and move by the difference\n new_x = event.x - self.remember_x\n new_y = event.y - self.remember_y\n self.diagram.move_selected(new_x, new_y)\n return False\n\n # ----------------------------------------------------------------------\n def __on_enter_notify(self, canvas_item, target_item, event=None):\n \"\"\"\n This method monitors the motion.\n\n Parameters:\n canvas_item\n Returns:\n * **TYpes** (:class:`boolean`)\n \"\"\"\n self.focus = True\n self.__update_state()\n return False\n\n # ----------------------------------------------------------------------\n def __on_leave_notify(self, canvas_item, target_item, event=None):\n \"\"\"\n This method monitors the motion.\n\n Parameters:\n 
canvas_item\n target_item\n\n Returns:\n * **Types** (:class:`boolean`)\n\n \"\"\"\n self.focus = False\n self.__update_state()\n return False\n\n # ----------------------------------------------------------------------\n def __draw_rect(self):\n \"\"\"\n This method draw a rectangle.\n \"\"\"\n rect = GooCanvas.CanvasRect(parent=self,\n x=0,\n y=10,\n width=self.width,\n height=self.height - 15,\n radius_x=10,\n radius_y=10,\n stroke_color=\"black\",\n fill_color_rgba=self.get_color_as_int(),\n tooltip=self.label\n )\n self.widgets[\"Rect\"] = rect\n\n # ----------------------------------------------------------------------\n def __draw_icon(self):\n \"\"\"\n This method draw a icon.\n \"\"\"\n text_label = \" \" + \\\n self.label.title()[0] + \"\"\n\n icon = GooCanvas.CanvasText(parent=self,\n text=text_label,\n fill_color='white',\n anchor=GooCanvas.CanvasAnchorType.CENTER,\n x=(self.width / 2),\n y=(self.height / 2),\n use_markup=True,\n stroke_color='black',\n tooltip=self.label\n )\n\n self.widgets[\"Icon\"] = icon\n\n # ----------------------------------------------------------------------\n def __draw_label(self):\n \"\"\"\n This method draw the label.\n\n \"\"\"\n text_label = \" \" + \\\n self.label + \"\"\n\n label = GooCanvas.CanvasText(parent=self,\n text=text_label,\n fill_color='black',\n anchor=GooCanvas.CanvasAnchorType.CENTER,\n x=(self.width / 2),\n y=0,\n use_markup=True,\n stroke_color='black'\n )\n self.widgets[\"Label\"] = label\n\n # ----------------------------------------------------------------------\n def __create_ports_label(self, port):\n text_name = \"\" + \\\n \"\"\n if self.is_collapsed:\n text_name += \" - \"\n else:\n text_name += \"{\" + port.hint + \"}\"\n text_name += \"\"\n return text_name\n\n # ----------------------------------------------------------------------\n def __draw_ports(self):\n \"\"\" for port in self.ports:\n\n This method draws the ports.\n \"\"\"\n for port in self.ports:\n text_name = self.__create_ports_label(port)\n x,y = self.__get_port_pos(port)\n if port.is_input():\n alignment = Pango.Alignment.LEFT\n anchor=GooCanvas.CanvasAnchorType.WEST\n press_event = self.__on_input_press\n release_event = self.__on_input_release\n else:\n alignment = Pango.Alignment.RIGHT\n anchor = GooCanvas.CanvasAnchorType.EAST\n press_event = self.__on_output_press\n release_event = self.__on_output_release\n\n text = GooCanvas.CanvasText(parent=self,\n text=text_name,\n fill_color='black',\n anchor=anchor,\n alignment=alignment,\n x=x,\n y=y,\n use_markup=True,\n tooltip=port.label\n )\n text.connect(\"button-press-event\", press_event , port)\n text.connect(\"button-release-event\", release_event, port)\n self.widgets[\"port\" + str(port)] = text\n\n # ----------------------------------------------------------------------\n def __on_input_press(self, canvas_item, target_item, event, port):\n \"\"\"\n This method return true if a input was connected.\n\n Parameters:\n * **canvas_item**\n * **target_item**\n * **event**\n Returns:\n * **Types** (:class:`boolean`): Indicates the input as connected.\n \"\"\"\n self.diagram.end_connection(self, port)\n return True\n\n # ----------------------------------------------------------------------\n def __on_input_release(self, canvas_item, target_item, event, args):\n \"\"\"\n This method monitors the input release.\n\n Parameters:\n * **canvas_item**\n * **target_item**\n * **event **\n Return:\n * **Types** (:class:`boolean`)\n \"\"\"\n return True\n\n # 
----------------------------------------------------------------------\n def __on_output_press(self, canvas_item, target_item, event, port):\n \"\"\"\n This method monitors the output state, monitors if output was pressed.\n\n Parameters:\n canvas_item\n target_item\n event\n args\n Returns:\n * **Types** (:class:`boolean`)\n \"\"\"\n self.diagram.start_connection(self, port)\n return True\n\n # ----------------------------------------------------------------------\n def __on_output_release(self, canvas_item, target_item, event, args):\n \"\"\"\n This method monitors the output state, monitors if output was release.\n\n Returns:\n * **Types** (:class:`boolean`)\n \"\"\"\n return True\n\n # ----------------------------------------------------------------------\n def __get_port_pos(self, port):\n\n if self.is_collapsed:\n y = 16 + (port.type_index * 6)\n else:\n y = 26 + (port.type_index * 11)\n\n if port.is_input():\n x = 0\n else:\n x = self.width\n\n if not self.is_collapsed:\n return (x, y)\n\n if port.is_input():\n return (x + 36, y - 8)\n else:\n return (x - 25, y - 8)\n\n # ----------------------------------------------------------------------\n def get_port_pos(self, port):\n \"\"\"\n This method get input position.\n\n Parameters:\n * **input_id**\n Returns:\n * **Types** (:class:`float`)\n \"\"\"\n x, y = self.get_position()\n x2, y2 = self.__get_port_pos(port)\n return x + x2, y + y2 + 1\n\n # ----------------------------------------------------------------------\n def __calculate_height(self):\n if self.is_collapsed:\n return max(((self.maxIO - 1) * 5) + (self.maxIO * 4), 40)\n else:\n return max(((self.maxIO) * 5) + 15 + (self.maxIO * 7), 50)\n\n # ----------------------------------------------------------------------\n def move(self, x, y):\n \"\"\"\n This method move a block.\n\n Parameters:\n * **(x,y)** (:class:`float`)\n Returns:\n * **Types** (:class:`float`)\n \"\"\"\n new_x = x - (x % System.get_preferences().grid)\n new_y = y - (y % System.get_preferences().grid)\n self.translate(new_x, new_y)\n\n # ----------------------------------------------------------------------\n def adjust_position(self):\n position = self.get_position()\n grid = System.get_preferences().grid\n new_x = position[0] - position[0] % grid\n new_y = position[1] - position[1] % grid\n self.translate(new_x - position[0], new_y - position[1])\n\n # ----------------------------------------------------------------------\n def get_position(self):\n \"\"\"\n This method get position the block.\n\n Returns:\n * **Types** (:class:`float`)\n \"\"\"\n isSet, x, y, scale, rotation = self.get_simple_transform()\n return x, y\n\n # ----------------------------------------------------------------------\n def set_properties(self, data):\n \"\"\"\n This method set properties of each block.\n\n Parameters:\n * **data**\n \"\"\"\n BlockModel.set_properties(self, data)\n\n # ----------------------------------------------------------------------\n def get_properties(self):\n \"\"\"\n This method get properties of each block.\n\n Returns:\n * **Types** ()\n \"\"\"\n return BlockModel.get_properties(self)\n\n # ----------------------------------------------------------------------\n def update_flow(self):\n \"\"\"\n This method update flow.\n\n Returns:\n * **Types** (:class:`boolean`)\n \"\"\"\n self.has_flow = True\n distinct_con = []\n for conn in self.diagram.connectors:\n if conn.input != self:\n continue\n if conn.input_port not in distinct_con:\n distinct_con.append(conn.input_port)\n in_count = 0\n for port in 
self.ports:\n if port.is_input():\n in_count += 1\n if len(distinct_con) < in_count:\n self.has_flow = False\n self.__update_state()\n return self.has_flow\n\n # ----------------------------------------------------------------------\n def __update_state(self):\n \"\"\"\n This method update the Line state.\n \"\"\"\n # Not connected: Color = red\n if self.has_flow:\n self.widgets[\"Rect\"].set_property(\"stroke_color\", 'black')\n else:\n self.widgets[\"Rect\"].set_property(\"stroke_color\", 'red')\n\n # in focus: Line width = 3\n if self.focus:\n self.widgets[\"Rect\"].set_property(\"line-width\", 3)\n else:\n self.widgets[\"Rect\"].set_property(\"line-width\", 1)\n\n # selected: Line = dashed\n if self.is_selected:\n self.widgets[\"Rect\"].set_property(\n \"line_dash\", GooCanvas.CanvasLineDash.newv((4.0, 2.0)))\n else:\n self.widgets[\"Rect\"].set_property(\n \"line_dash\", GooCanvas.CanvasLineDash.newv((10.0, 0.0)))\n\n self.height = self.__calculate_height()\n\n if self.is_collapsed:\n self.widgets[\"Label\"].set_property(\"visibility\", GooCanvas.CanvasItemVisibility.INVISIBLE)\n self.widgets[\"Rect\"].set_property(\"width\", self.width - 60)\n self.widgets[\"Rect\"].set_property(\"x\", 35)\n self.widgets[\"Rect\"].set_property(\"y\", 0)\n self.widgets[\"Rect\"].set_property(\"height\", self.height - 10)\n self.widgets[\"Icon\"].set_property(\"y\", (self.height - 10)/2)\n self.widgets[\"Icon\"].set_property(\"x\", (self.width / 2) + 2)\n for port in self.ports:\n x,y = self.__get_port_pos(port)\n if \"port\" + str(port) in self.widgets:\n self.widgets[\"port\" + str(port)].set_property(\"x\", x)\n self.widgets[\"port\" + str(port)].set_property(\"y\", y)\n self.widgets[\"port\" + str(port)].set_property(\"text\", self.__create_ports_label(port))\n return True\n\n if not self.is_collapsed:\n self.widgets[\"Label\"].set_property(\"visibility\", GooCanvas.CanvasItemVisibility.VISIBLE)\n self.widgets[\"Rect\"].set_property(\"width\", self.width)\n self.widgets[\"Rect\"].set_property(\"x\", 0)\n self.widgets[\"Rect\"].set_property(\"y\", 10)\n self.widgets[\"Rect\"].set_property(\"height\", self.height)\n self.widgets[\"Icon\"].set_property(\"y\", (self.height + 20)/2)\n self.widgets[\"Icon\"].set_property(\"x\", (self.width / 2))\n for port in self.ports:\n x,y = self.__get_port_pos(port)\n if \"port\" + str(port) in self.widgets:\n self.widgets[\"port\" + str(port)].set_property(\"x\", x)\n self.widgets[\"port\" + str(port)].set_property(\"y\", y)\n self.widgets[\"port\" + str(port)].set_property(\"text\", self.__create_ports_label(port))\n\n# ----------------------------------------------------------------------\n","repo_name":"Alice-ArtsLab/mosaicode","sub_path":"mosaicode/GUI/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":17247,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"41690768114","text":"\"\"\"Create schema report\n\nRevision ID: 9d445cb42847\nRevises: 7ed350929292\nCreate Date: 2020-09-17 15:21:04.532792\n\n\"\"\"\nfrom alembic import op\nfrom sqlalchemy.sql import text\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9d445cb42847'\ndown_revision = '7ed350929292'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n connection = op.get_bind()\n connection.execute(sa.schema.CreateSchema('report'))\n\n\ndef downgrade():\n connection = op.get_bind()\n connection.execute(text(\"DROP SCHEMA report\"))\n","repo_name":"ALMPartners/ahjo","sub_path":"test/samples/mssql_project/alembic/versions/20200917_15_21_04_9d445cb42847_create_schema_report.py","file_name":"20200917_15_21_04_9d445cb42847_create_schema_report.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"5963073131","text":"import sys\nimport os\nfrom subprocess import *\nfrom threading import Thread\n\nif(sys.hexversion < 0x03000000):\n\timport Queue\nelse:\n\timport queue as Queue\n\n#global parameters\nfold = 5\nc_begin, c_end, c_step = -5, 15, 2\ng_begin, g_end, g_step = 3, -15, -2\nnr_local_worker = 1\n\nclass WorkerStopToken: # used to notify the worker to stop\n pass\n\nclass Worker(Thread):\n def __init__(self,name,job_queue,result_queue, svmtrain_exe, dataset_pathname):\n Thread.__init__(self)\n self.name = name\n self.job_queue = job_queue\n self.result_queue = result_queue\n self.svmtrain_exe = svmtrain_exe\n self.dataset_pathname = dataset_pathname\n \n def run(self):\n while True:\n (cexp,gexp) = self.job_queue.get()\n if cexp is WorkerStopToken:\n self.job_queue.put((cexp,gexp))\n # print('worker {0} stop.'.format(self.name))\n break\n try:\n rate = self.run_one(2.0**cexp,2.0**gexp)\n if rate is None: raise RuntimeError(\"get no rate\")\n except:\n # we failed, let others do that and we just quit\n self.job_queue.put((cexp,gexp))\n print('worker {0} quit.'.format(self.name))\n break\n else:\n self.result_queue.put((self.name,cexp,gexp,rate))\n\nclass LocalWorker(Worker):\n def run_one(self,c,g):\n cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \\\n (self.svmtrain_exe,c,g,fold, ' ' , self.dataset_pathname)\n result = Popen(cmdline,shell=True,stdout=PIPE).stdout\n for line in result.readlines():\n if str(line).find(\"Cross\") != -1:\n return float(line.split()[-1][0:-1])\n\ndef calculate_jobs():\n c_seq = permute_sequence(range_f(c_begin,c_end,c_step))\n g_seq = permute_sequence(range_f(g_begin,g_end,g_step))\n nr_c = float(len(c_seq))\n nr_g = float(len(g_seq))\n i = 0\n j = 0\n jobs = []\n\n while i < nr_c or j < nr_g:\n if i/nr_c < j/nr_g:\n # increase C resolution\n line = []\n for k in range(0,j):\n line.append((c_seq[i],g_seq[k]))\n i = i + 1\n jobs.append(line)\n else:\n # increase g resolution\n line = []\n for k in range(0,i):\n line.append((c_seq[k],g_seq[j]))\n j = j + 1\n jobs.append(line)\n return jobs\n\ndef range_f(begin,end,step):\n # like range, but works on non-integer too\n seq = []\n while True:\n if step > 0 and begin > end: break\n if step < 0 and begin < end: break\n seq.append(begin)\n begin = begin + step\n return seq\n\ndef permute_sequence(seq):\n n = len(seq)\n if n <= 1: return seq\n\n mid = int(n/2)\n left = permute_sequence(seq[:mid])\n right = permute_sequence(seq[mid+1:])\n\n ret = [seq[mid]]\n while left or right:\n if left: ret.append(left.pop(0))\n if right: ret.append(right.pop(0))\n\n return ret\n\nclass SvmTools(object):\n def __init__(self, svm_bin_path):\n # svm, grid, and gnuplot executable files\n is_win32 = (sys.platform == 'win32')\n\n if is_win32:\n self.svmscale_exe = os.path.join(svm_bin_path, \"svm-scale.exe\")\n self.svmtrain_exe = os.path.join(svm_bin_path, \"svm-train.exe\")\n self.svmpredict_exe = os.path.join(svm_bin_path,\"svm-predict.exe\")\n else:\n self.svmscale_exe = os.path.join(svm_bin_path, \"svm-scale\")\n self.svmtrain_exe = os.path.join(svm_bin_path, \"svm-train\")\n self.svmpredict_exe = os.path.join(svm_bin_path,\"svm-predict\")\n\n assert os.path.exists(self.svmscale_exe),\"svm-scale executable not found:%s\" % self.svmscale_exe\n assert os.path.exists(self.svmtrain_exe),\"svm-train executable not found:%s\" % self.svmtrain_exe\n assert os.path.exists(self.svmpredict_exe),\"svm-predict executable not found:%s\" % self.svmpredict_exe\n\n def gen_model(self, train_pathname, model_file, 
range_file, tmp_path, lower_limit = -1):\n assert os.path.exists(train_pathname),\"training file not found\"\n file_name = os.path.split(train_pathname)[1]\n scaled_file = os.path.join(tmp_path, file_name + \".scale\")\n\n cmd = '{0} -l {4} -s \"{1}\" \"{2}\" > \"{3}\"'.format(self.svmscale_exe, range_file, train_pathname, scaled_file, lower_limit)\n print('Scaling training data...')\n Popen(cmd, shell = True, stdout = PIPE).communicate()\t\n\n print('Cross Validation data...')\n c,g,rate = self.cross_validation_model(scaled_file)\n\n print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))\n\n cmd = '{0} -c {1} -g {2} \"{3}\" \"{4}\"'.format(self.svmtrain_exe,c,g,scaled_file,model_file)\n print('Training...')\n Popen(cmd, shell = True, stdout = PIPE).communicate()\n\n print('Output model: {0}'.format(model_file))\n\n def cross_validation_model(self, scale_file):\n # put jobs in queue\n\n jobs = calculate_jobs()\n job_queue = Queue.Queue(0)\n result_queue = Queue.Queue(0)\n\n for line in jobs:\n for (c,g) in line:\n job_queue.put((c,g))\n\n # hack the queue to become a stack --\n # this is important when some thread\n # failed and re-put a job. It we still\n # use FIFO, the job will be put\n # into the end of the queue, and the graph\n # will only be updated in the end\n job_queue._put = job_queue.queue.appendleft\n \n # fire local workers\n for i in range(nr_local_worker):\n LocalWorker('local',job_queue,result_queue, self.svmtrain_exe, scale_file).start()\n\n # gather results\n done_jobs = {}\n\n best_rate = -1\n best_c1,best_g1 = None,None\n\n for line in jobs:\n for (c,g) in line:\n while (c, g) not in done_jobs:\n (worker,c1,g1,rate) = result_queue.get()\n done_jobs[(c1,g1)] = rate\n\n if (rate > best_rate) or (rate==best_rate and g1==best_g1 and c1 T_contra:\n \"\"\"\n :class:`fget_type` callables need to have this signature\n \"\"\"\n\n\nclass fset_type(Protocol[T_co]):\n def __call__(self, value: T_co) -> None:\n \"\"\"\n :class:`fset_type` callables need to have this signature\n \"\"\"\n\n\nclass accessor(typing.Generic[T]):\n \"\"\"\n An accessor provides easy shared access to a resource and\n\n Example:\n\n In the simplest case, an accessor synchronizes reads and writes out of the box ::\n\n >>> import asyncio\n >>> from codestare.async_utils import accessor\n >>> foo = accessor()\n >>> async def wait_for_write(accessor_):\n ... print(await accessor_.get())\n ...\n >>> background = asyncio.create_task(wait_for_write(foo))\n >>> await foo.set(\"Bar\")\n Bar\n\n It's possible to use custom getters / setter e.g. create an :class:`accessor` to the value managed\n by a normal property if one needs shared access as well ::\n\n >>> class Thing:\n ... def __init__(self):\n ... self._value = None\n ... @property\n ... def value(self):\n ... return self._value\n ... @value._setter\n ... def value(self, val):\n ... if not val:\n ... raise ValueError(f\"Illegal value {val}\")\n ... self._value = val\n ...\n >>> thing = Thing()\n >>> thing.value = 3\n >>> thing.value\n 3\n >>> thing.value = 0\n ValueError: Illegal value 0\n >>> class BetterThing(Thing):\n ... def __init__(self):\n ... super().__init__()\n ... self.value_accessor = accessor(funcs=(\n ... type(self).value.fget.__get__(self),\n ... type(self).value.fset.__get__(self)\n ... 
))\n ...\n >>> better_thing = BetterThing()\n >>> background = asyncio.create_task(wait_for_write(better_thing.value_accessor))\n >>> await better_thing.value_accessor.set(3)\n 3\n >>> better_thing.value\n 3\n >>> await better_thing.value_accessor.set(0)\n ValueError: Illegal value 0\n\n See Also:\n :class:`condition_property` -- decorator to create `accessor properties` more easily\n\n \"\"\"\n fget: 'fget_type[T]'\n fset: 'fset_type[T]'\n\n @typing.overload\n def __init__(self: accessor[typing.Any],\n *,\n funcs: None = ...,\n condition: asyncio.Condition | None = None,\n name: str = None\n ):\n ...\n\n @typing.overload\n def __init__(self: accessor[T],\n *,\n condition: asyncio.Condition | None = None,\n name: str = None\n ):\n ...\n\n @typing.overload\n def __init__(self: accessor[T],\n *,\n funcs: typing.Tuple[fget_type[T], fset_type[T]] = ...,\n condition: asyncio.Condition | None = None,\n name: str = None\n ):\n ...\n\n def __init__(self,\n *,\n funcs: typing.Tuple | None = None,\n condition: asyncio.Condition | None = None,\n name: str = None\n ):\n \"\"\"\n Args:\n funcs: getter and setter for some value -- `optional`, if not passed get / set a private field of the\n object\n condition: condition to synchronize access to the value -- `optional`, if not passed a new condition\n is created\n \"\"\"\n if funcs is None:\n def set(instance, value):\n instance._value = value\n\n def get(instance):\n return instance._value\n\n self._value = None\n fget = get.__get__(self, type(self))\n fset = set.__get__(self, type(self))\n else:\n non_callable = [f for f in funcs if not callable(f)]\n if non_callable:\n raise ValueError(f\"parameters {non_callable} passed as ``funcs`` tuple are not callable\")\n fget, fset = funcs\n\n self.fset = fset\n \"\"\"\n Setter, either passed via ``funcs`` argument, or a setter of an internal value if no ``funcs`` where passed\n \"\"\"\n self.fget = fget\n \"\"\"\n Getter, either passed via ``funcs`` argument, or a getter of an internal value if no ``funcs`` where passed\n \"\"\"\n self.condition: asyncio.Condition = condition or asyncio.Condition()\n \"\"\"\n Used to synchronized access, either passed via ``condition`` argument, or a new condition created specifically\n for this accessor\n \"\"\"\n self.name = name\n \"\"\"\n For debug purposes\n \"\"\"\n self.has_waiting_read = helper.awaitable_predicate(predicate=lambda: self._waiter_count > 0)\n \"\"\"\n Use this awaitable if you want to wait for read access\n \"\"\"\n self._waiter_count = 0\n\n @property\n def value(self) -> T | None:\n \"\"\"\n Simple access to the value produced by :attr:`.fget` without async locks i.e. not safe if you did not\n acquire the lock of :attr:`.condition`\n \"\"\"\n return self.fget()\n\n async def set(self, value: T, wait_for_read=False) -> None:\n \"\"\"\n Sets the value (using :attr:`.fset`) and notifies every coroutine waiting on the :attr:`.condition` (e.g.\n :meth:`.get`\n\n Args:\n value: new value passed to :attr:`.fset`\n wait_for_read: if True, will set the value only after futures are waiting by using :attr:`.get`. 
Use this\n to invert the semantics -- a write waiting for a read, instead of a read waiting for a write)\n\n \"\"\"\n if wait_for_read:\n await self.has_waiting_read\n\n async with self.condition:\n self.fset(value)\n self.condition.notify_all()\n\n async def get(self,\n *,\n predicate: typing.Callable[[T], bool] | None = None,\n wait_for_write: bool | None = None) -> T:\n \"\"\"\n Shared access to value produced by :attr:`.fget`\n\n Args:\n predicate: waits for the predicate result to be truthy, then returns the result of :attr:`.fget`.\n The default predicate (used when ``predicate=None``) returns ``[False, True, True, ...]``, so\n :meth:`.get` blocks once, until it is notified from a :meth:`.set` and then does not block again.\n Passing ``predicate=(lambda: True)`` will make :meth:`.get` not block at all.\n wait_for_write: if set to ``True``, and a predicate is passed, the predicate will only be applied\n once the default predicate (see above) also returns ``True`` i.e. you get the next value that matches\n the predicate, even if the current value also matches. You can set this value to False, to ignore\n the default predicate behaviour (which is the same as passing ``predicate=(lambda: True)`` and\n using the default for this value. -- **optional**\n\n Returns:\n value produced by :attr:`.fget`\n\n Raises:\n ValueError: if ``predicate`` is not a callable\n\n See Also:\n :meth:`asyncio.Condition.wait_for` -- used to wait for internal condition\n \"\"\"\n if predicate is None and wait_for_write is None:\n wait_for_write = True\n\n # this predicate returns False, True i.e. it will block once and always return after notify\n wait_predicate = (\n itertools.chain.from_iterable([[False], itertools.repeat(True)]).__next__\n if wait_for_write\n else None\n )\n\n if predicate is not None and not callable(predicate):\n raise ValueError(f\"{predicate} is not callable\")\n\n def combined_predicate(acc: accessor):\n use_value = True if not wait_predicate else wait_predicate()\n matching_value = True if not predicate else predicate(acc.fget())\n return use_value and matching_value\n\n async with self.condition:\n async with self.has_waiting_read.condition:\n self._waiter_count += 1\n self.has_waiting_read.condition.notify_all()\n\n await self.condition.wait_for(combined_predicate.__get__(self, type(self)))\n\n async with self.has_waiting_read.condition:\n self._waiter_count -= 1\n\n return self.fget()\n\n def __repr__(self):\n params = {param: getattr(self, param, None) for param in ['name', 'fget', 'fset']}\n return (f\"<{self.__class__.__name__} object \"\n f\"[{', '.join('{}={!r}'.format(name, value) for name, value in params.items())}]>\")\n\n\nclass condition_property(cached_property, typing.Generic[T]):\n \"\"\"\n This is a decorator to create a cached :class:`accessor` to handle access to some data via a\n :class:`asyncio.Condition`.\n\n You can use it like the normal `@property` decorator, but the result of the lookup (`__get__` of the descriptor)\n will be an :class:`accessor` with coroutine attributes to handle safely setting and getting the value\n (from the objects methods passed via ``setter`` and ``getter``, like in normal properties) by means of a condition.\n\n See Also:\n :class:`accessor` -- how to access the value\n \"\"\"\n\n def __init__(self: condition_property[T],\n fget: typing.Callable[[typing.Any], T] | None = None,\n fset: typing.Callable[[typing.Any, T], None] | None = None,\n fdel: typing.Callable[[typing.Any], None] | None = None,\n doc: str | None = None) -> None:\n 
self.fget = fget\n self.fset = fset\n self.fdel = fdel\n if doc is None and fget is not None:\n doc = fget.__doc__\n self.__doc__ = doc\n\n super().__init__(self._create_accessor)\n\n def _create_accessor(self: 'condition_property[T]', obj: object) -> accessor[T]:\n return accessor(\n funcs=(functools.partial(self._get, obj), functools.partial(self._set, obj),), name=self.attrname\n )\n\n def _set(self, obj: object, value: T):\n if self.fset is None:\n raise AttributeError(f\"can't set attribute {self.attrname}\")\n self.fset(obj, value)\n\n def _get(self, obj: object):\n if self.fget is None:\n raise AttributeError(f'unreadable attribute {self.attrname}')\n\n if self.fset is None:\n raise AttributeError(f\"`get` will block until the next value is set, but no setter is defined.\")\n\n return self.fget(obj)\n\n def __set__(self, obj, value: T):\n raise AttributeError(f\"can't set {self.attrname} directly, use set()\")\n\n @typing.overload\n def __get__(self, instance: None, owner: typing.Type[typing.Any] | None = None) -> condition_property[T]:\n ...\n\n @typing.overload\n def __get__(self, instance: object, owner: typing.Type[typing.Any] | None = None) -> accessor[T]:\n ...\n\n def __get__(self, instance: object | None,\n owner: typing.Type[typing.Any] | None = None) -> accessor[T] | condition_property[T]:\n if instance is None:\n return typing.cast(condition_property[T], super().__get__(instance, owner))\n else:\n return typing.cast(accessor[T], super().__get__(instance, owner))\n\n def getter(self: condition_property[T], fget: typing.Callable[[typing.Any], T]) -> condition_property[T]:\n \"\"\"\n This is a cached property but uses the same interface as a normal :obj:`property`.\n See example of :obj:`property` documentation on how to use.\n \"\"\"\n prop = type(self)(fget, self.fset, self.fdel, self.__doc__)\n prop.attrname = self.attrname\n return prop\n\n def setter(self: condition_property[T], fset: typing.Callable[[typing.Any, T], None]) -> condition_property[T]:\n \"\"\"\n This is a cached property but uses the same interface as a normal :obj:`property`.\n See example of :obj:`property` documentation on how to use.\n \"\"\"\n prop = type(self)(self.fget, fset, self.fdel, self.__doc__)\n prop.attrname = self.attrname\n return prop\n\n def deleter(self: condition_property[T], fdel) -> condition_property[T]:\n \"\"\"\n This is a cached property but uses the same interface as a normal :obj:`property`.\n See example of :obj:`property` documentation on how to use.\n \"\"\"\n prop = type(self)(self.fget, self.fset, fdel, self.__doc__)\n prop.attrname = self.attrname\n return prop\n","repo_name":"saggitar/codestare-async-utils","sub_path":"src/codestare/async_utils/descriptor.py","file_name":"descriptor.py","file_ext":"py","file_size_in_byte":13518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"22165263239","text":"import tkinter\nfrom random import randint\n\ncanvas = tkinter.Canvas(width=400, height = 400)\ncanvas.pack(fill='both', expand=True)\nwidth, height = canvas.winfo_width(), canvas.winfo_height()\npoints = list()\n\ndef priamka(event):\n if len(points)>4:\n return\n\n points.append((event.x, event.y))\n canvas.delete('ciary')\n\n if len(points)//2:\n for i in range(0, 1+len(points)//2, 2):\n canvas.create_line(points[i], points[i+1], tags='ciary')\n canvas.create_oval(points[i][0]+5, points[i][1]+5, points[i][0]-5, points[i][1]-5, tags='ciary')\n canvas.create_oval(points[i+1][0]+5, points[i+1][1]+5, points[i+1][0]-5, points[i+1][1]-5, tags='ciary')\n\ncanvas.bind('<1>', priamka)\n\nwhile True:\n try:\n if width != canvas.winfo_width() or height != canvas.winfo_height():\n width, height = canvas.winfo_width(), canvas.winfo_height()\n canvas.delete('legenda')\n points.clear()\n\n size = ((width*height)>>15)+5\n canvas.create_text(width/2, height-height//4, text='priesecnik - stlac