')\n in_item = False\n while indent > 0:\n output.append('')\n indent -= 2\n\n if line.startswith('/'):\n if not '?' in line:\n line_full = line + '?as_text=1'\n else:\n line_full = line + '&as_text=1'\n output.append('' +\n html.escape(line) + '')\n else:\n output.append(html.escape(line).replace(' ', ' '))\n if not in_item:\n output.append(' ')\n\n if in_item:\n output.append('')\n while indent > 0:\n output.append('')\n indent -= 2\n return '\\n'.join(output)\n\n\nclass HelpResource(HtmlResource):\n def __init__(self, text, pageTitle, parent_node):\n HtmlResource.__init__(self)\n self.text = text\n self.pageTitle = pageTitle\n self.parent_level = parent_node.level\n self.parent_children = parent_node.children.keys()\n\n def content(self, request, cxt):\n cxt['level'] = self.parent_level\n cxt['text'] = ToHtml(self.text)\n cxt['children'] = [ n for n in self.parent_children if n != 'help' ]\n cxt['flags'] = ToHtml(FLAGS)\n cxt['examples'] = ToHtml(EXAMPLES).replace(\n 'href=\"/json',\n 'href=\"%sjson' % (self.level * '../'))\n\n template = request.site.buildbot_service.templates.get_template(\"jsonhelp.html\")\n return template.render(**cxt)\n\nclass BuilderPendingBuildsJsonResource(JsonResource):\n help = \"\"\"Describe pending builds for a builder.\n\"\"\"\n pageTitle = 'Builder'\n\n def __init__(self, status, builder_status):\n JsonResource.__init__(self, status)\n self.builder_status = builder_status\n\n def asDict(self, request):\n # buildbot.status.builder.BuilderStatus\n d = self.builder_status.getPendingBuildRequestStatuses()\n def to_dict(statuses):\n return defer.gatherResults(\n [ b.asDict_async() for b in statuses ])\n d.addCallback(to_dict)\n return d\n\n\nclass BuilderJsonResource(JsonResource):\n help = \"\"\"Describe a single builder.\n\"\"\"\n pageTitle = 'Builder'\n\n def __init__(self, status, builder_status):\n JsonResource.__init__(self, status)\n self.builder_status = builder_status\n self.putChild('builds', BuildsJsonResource(status, 
builder_status))\n self.putChild('slaves', BuilderSlavesJsonResources(status,\n builder_status))\n self.putChild(\n 'pendingBuilds',\n BuilderPendingBuildsJsonResource(status, builder_status))\n\n def asDict(self, request):\n # buildbot.status.builder.BuilderStatus\n return self.builder_status.asDict_async()\n\n\nclass BuildersJsonResource(JsonResource):\n help = \"\"\"List of all the builders defined on a master.\n\"\"\"\n pageTitle = 'Builders'\n\n def __init__(self, status):\n JsonResource.__init__(self, status)\n for builder_name in self.status.getBuilderNames():\n self.putChild(builder_name,\n BuilderJsonResource(status,\n status.getBuilder(builder_name)))\n\n\nclass BuilderSlavesJsonResources(JsonResource):\n help = \"\"\"Describe the slaves attached to a single builder.\n\"\"\"\n pageTitle = 'BuilderSlaves'\n\n def __init__(self, status, builder_status):\n JsonResource.__init__(self, status)\n self.builder_status = builder_status\n for slave_name in self.builder_status.slavenames:\n self.putChild(slave_name,\n SlaveJsonResource(status,\n self.status.getSlave(slave_name)))\n\n\nclass BuildJsonResource(JsonResource):\n help = \"\"\"Describe a single build.\n\"\"\"\n pageTitle = 'Build'\n\n def __init__(self, status, build_status):\n JsonResource.__init__(self, status)\n self.build_status = build_status\n self.putChild('source_stamp',\n SourceStampJsonResource(status,\n build_status.getSourceStamp()))\n self.putChild('steps', BuildStepsJsonResource(status, build_status))\n\n def asDict(self, request):\n return self.build_status.asDict()\n\n\nclass AllBuildsJsonResource(JsonResource):\n help = \"\"\"All the builds that were run on a builder.\n\"\"\"\n pageTitle = 'AllBuilds'\n\n def __init__(self, status, builder_status):\n JsonResource.__init__(self, status)\n self.builder_status = builder_status\n\n def getChild(self, path, request):\n # Dynamic childs.\n if isinstance(path, int) or _IS_INT.match(path):\n build_status = 
self.builder_status.getBuild(int(path))\n if build_status:\n # Don't cache BuildJsonResource; that would defeat the cache-ing\n # mechanism in place for BuildStatus objects (in BuilderStatus).\n return BuildJsonResource(self.status, build_status)\n return JsonResource.getChild(self, path, request)\n\n def asDict(self, request):\n results = {}\n # If max is too big, it'll trash the cache...\n max = int(RequestArg(request, 'max',\n self.builder_status.buildCacheSize/2))\n for i in range(0, max):\n child = self.getChildWithDefault(-i, request)\n if not isinstance(child, BuildJsonResource):\n continue\n results[child.build_status.getNumber()] = child.asDict(request)\n return results\n\n\nclass BuildsJsonResource(AllBuildsJsonResource):\n help = \"\"\"Builds that were run on a builder.\n\"\"\"\n pageTitle = 'Builds'\n\n def __init__(self, status, builder_status):\n AllBuildsJsonResource.__init__(self, status, builder_status)\n self.putChild('_all', AllBuildsJsonResource(status, builder_status))\n\n def getChild(self, path, request):\n # Transparently redirects to _all if path is not ''.\n return self.children['_all'].getChildWithDefault(path, request)\n\n def asDict(self, request):\n # This would load all the pickles and is way too heavy, especially that\n # it would trash the cache:\n # self.children['builds'].asDict(request)\n # TODO(maruel) This list should also need to be cached but how?\n builds = dict([\n (int(file), None)\n for file in os.listdir(self.builder_status.basedir)\n if _IS_INT.match(file)\n ])\n return builds\n\n\nclass BuildStepJsonResource(JsonResource):\n help = \"\"\"A single build step.\n\"\"\"\n pageTitle = 'BuildStep'\n\n def __init__(self, status, build_step_status):\n # buildbot.status.buildstep.BuildStepStatus\n JsonResource.__init__(self, status)\n self.build_step_status = build_step_status\n # TODO self.putChild('logs', LogsJsonResource())\n\n def asDict(self, request):\n return self.build_step_status.asDict()\n\n\nclass 
BuildStepsJsonResource(JsonResource):\n help = \"\"\"A list of build steps that occurred during a build.\n\"\"\"\n pageTitle = 'BuildSteps'\n\n def __init__(self, status, build_status):\n JsonResource.__init__(self, status)\n self.build_status = build_status\n # The build steps are constantly changing until the build is done so\n # keep a reference to build_status instead\n\n def getChild(self, path, request):\n # Dynamic childs.\n build_step_status = None\n if isinstance(path, int) or _IS_INT.match(path):\n build_step_status = self.build_status.getSteps()[int(path)]\n else:\n steps_dict = dict([(step.getName(), step)\n for step in self.build_status.getSteps()])\n build_step_status = steps_dict.get(path)\n if build_step_status:\n # Create it on-demand.\n child = BuildStepJsonResource(self.status, build_step_status)\n # Cache it.\n index = self.build_status.getSteps().index(build_step_status)\n self.putChild(str(index), child)\n self.putChild(build_step_status.getName(), child)\n return child\n return JsonResource.getChild(self, path, request)\n\n def asDict(self, request):\n # Only use the number and not the names!\n results = {}\n index = 0\n for step in self.build_status.getSteps():\n results[index] = step.asDict()\n index += 1\n return results\n\n\nclass ChangeJsonResource(JsonResource):\n help = \"\"\"Describe a single change that originates from a change source.\n\"\"\"\n pageTitle = 'Change'\n\n def __init__(self, status, change):\n # buildbot.changes.changes.Change\n JsonResource.__init__(self, status)\n self.change = change\n\n def asDict(self, request):\n return self.change.asDict()\n\n\nclass ChangesJsonResource(JsonResource):\n help = \"\"\"List of changes.\n\"\"\"\n pageTitle = 'Changes'\n\n def __init__(self, status, changes):\n JsonResource.__init__(self, status)\n for c in changes:\n # c.number can be None or clash another change if the change was\n # generated inside buildbot or if using multiple pollers.\n if c.number is not None and str(c.number) 
not in self.children:\n self.putChild(str(c.number), ChangeJsonResource(status, c))\n else:\n # Temporary hack since it creates information exposure.\n self.putChild(str(id(c)), ChangeJsonResource(status, c))\n\n def asDict(self, request):\n \"\"\"Don't throw an exception when there is no child.\"\"\"\n if not self.children:\n return {}\n return JsonResource.asDict(self, request)\n\n\nclass ChangeSourcesJsonResource(JsonResource):\n help = \"\"\"Describe a change source.\n\"\"\"\n pageTitle = 'ChangeSources'\n\n def asDict(self, request):\n result = {}\n n = 0\n for c in self.status.getChangeSources():\n # buildbot.changes.changes.ChangeMaster\n change = {}\n change['description'] = c.describe()\n result[n] = change\n n += 1\n return result\n\n\nclass ProjectJsonResource(JsonResource):\n help = \"\"\"Project-wide settings.\n\"\"\"\n pageTitle = 'Project'\n\n def asDict(self, request):\n return self.status.asDict()\n\n\nclass SlaveJsonResource(JsonResource):\n help = \"\"\"Describe a slave.\n\"\"\"\n pageTitle = 'Slave'\n\n def __init__(self, status, slave_status):\n JsonResource.__init__(self, status)\n self.slave_status = slave_status\n self.name = self.slave_status.getName()\n self.builders = None\n\n def getBuilders(self):\n if self.builders is None:\n # Figure out all the builders to which it's attached\n self.builders = []\n for builderName in self.status.getBuilderNames():\n if self.name in self.status.getBuilder(builderName).slavenames:\n self.builders.append(builderName)\n return self.builders\n\n def getSlaveBuildMap(self, buildcache, buildercache):\n for builderName in self.getBuilders():\n if builderName not in buildercache:\n buildercache.add(builderName)\n builder_status = self.status.getBuilder(builderName)\n\n buildnums = range(-1, -(builder_status.buildCacheSize - 1), -1)\n builds = builder_status.getBuilds(buildnums)\n\n for build_status in builds:\n if not build_status or not build_status.isFinished():\n # If not finished, it will appear in 
runningBuilds.\n break\n slave = buildcache[build_status.getSlavename()]\n slave.setdefault(builderName, []).append(\n build_status.getNumber())\n return buildcache[self.name]\n\n def asDict(self, request):\n if not hasattr(request, 'custom_data'):\n request.custom_data = {}\n if 'buildcache' not in request.custom_data:\n # buildcache is used to cache build information across multiple\n # invocations of SlaveJsonResource. It should be set to an empty\n # collections.defaultdict(dict).\n request.custom_data['buildcache'] = collections.defaultdict(dict)\n\n # Tracks which builders have been stored in the buildcache.\n request.custom_data['buildercache'] = set()\n\n results = self.slave_status.asDict()\n # Enhance it by adding more information.\n results['builders'] = self.getSlaveBuildMap(\n request.custom_data['buildcache'],\n request.custom_data['buildercache'])\n return results\n\n\nclass SlavesJsonResource(JsonResource):\n help = \"\"\"List the registered slaves.\n\"\"\"\n pageTitle = 'Slaves'\n\n def __init__(self, status):\n JsonResource.__init__(self, status)\n for slave_name in status.getSlaveNames():\n self.putChild(slave_name,\n SlaveJsonResource(status,\n status.getSlave(slave_name)))\n\n\nclass SourceStampJsonResource(JsonResource):\n help = \"\"\"Describe the sources for a SourceStamp.\n\"\"\"\n pageTitle = 'SourceStamp'\n\n def __init__(self, status, source_stamp):\n # buildbot.sourcestamp.SourceStamp\n JsonResource.__init__(self, status)\n self.source_stamp = source_stamp\n self.putChild('changes',\n ChangesJsonResource(status, source_stamp.changes))\n # TODO(maruel): Should redirect to the patch's url instead.\n #if source_stamp.patch:\n # self.putChild('patch', StaticHTML(source_stamp.path))\n\n def asDict(self, request):\n return self.source_stamp.asDict()\n\nclass MetricsJsonResource(JsonResource):\n help = \"\"\"Master metrics.\n\"\"\"\n title = \"Metrics\"\n\n def asDict(self, request):\n metrics = self.status.getMetrics()\n if metrics:\n return 
metrics.asDict()\n else:\n # Metrics are disabled\n return None\n\n\n\nclass JsonStatusResource(JsonResource):\n \"\"\"Retrieves all json data.\"\"\"\n help = \"\"\"JSON status\n\nRoot page to give a fair amount of information in the current buildbot master\nstatus. You may want to use a child instead to reduce the load on the server.\n\nFor help on any sub directory, use url /child/help\n\"\"\"\n pageTitle = 'Buildbot JSON'\n\n def __init__(self, status):\n JsonResource.__init__(self, status)\n self.level = 1\n self.putChild('builders', BuildersJsonResource(status))\n self.putChild('change_sources', ChangeSourcesJsonResource(status))\n self.putChild('project', ProjectJsonResource(status))\n self.putChild('slaves', SlavesJsonResource(status))\n self.putChild('metrics', MetricsJsonResource(status))\n # This needs to be called before the first HelpResource().body call.\n self.hackExamples()\n\n def content(self, request):\n result = JsonResource.content(self, request)\n # This is done to hook the downloaded filename.\n request.path = 'buildbot'\n return result\n\n def hackExamples(self):\n global EXAMPLES\n # Find the first builder with a previous build or select the last one.\n builder = None\n for b in self.status.getBuilderNames():\n builder = self.status.getBuilder(b)\n if builder.getBuild(-1):\n break\n if not builder:\n return\n EXAMPLES = EXAMPLES.replace('', builder.getName())\n build = builder.getBuild(-1)\n if build:\n EXAMPLES = EXAMPLES.replace('', str(build.getNumber()))\n if builder.slavenames:\n EXAMPLES = EXAMPLES.replace('', builder.slavenames[0])\n\n# vim: set ts=4 sts=4 sw=4 et:\n","repo_name":"houseoflifeproperty/bitpop","sub_path":"build/third_party/buildbot_8_4p1/buildbot/status/web/status_json.py","file_name":"status_json.py","file_ext":"py","file_size_in_byte":27016,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
+{"seq_id":"29585326781","text":"import multiprocessing\nimport math\nimport logging\nimport datetime\nfrom multiprocessing.pool import Pool\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(name)s - [%(filename)s:%(lineno)d] - %(levelname)s - %(message)s\",\n datefmt=\"%Y-%m-%d %I:%M:%S %p\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\ndef sqrt_even_numbers(limit: int):\n for n in range(1, limit + 1):\n if n % 2 == 0:\n # math.sqrt(n)\n # print(f\"number: {n}, square root: {math.sqrt(n)}\")\n logger.info(\"number: %s, square root: %s\", n, math.sqrt(n))\n\n\ndef main():\n # sqrt_even_numbers(10)\n print(\"Starting multiprocessing now...\")\n\n t0 = datetime.datetime.now()\n # create pool of processors to use\n pool = Pool(processes=multiprocessing.cpu_count())\n\n # send computation task to different processors in the pool\n pool.apply_async(func=sqrt_even_numbers, args=(100000,))\n pool.apply_async(func=sqrt_even_numbers, args=(200000,))\n pool.apply_async(func=sqrt_even_numbers, args=(300000,))\n pool.apply_async(func=sqrt_even_numbers, args=(400000,))\n\n # close the pool\n pool.close()\n\n # wait for the tasks to complete\n pool.join()\n\n dt = datetime.datetime.now() - t0\n\n print(f\"Total time taken in seconds: {dt.total_seconds():,.2f}\")\n\n\nif __name__ == \"__main__\":\n # sqrt_even_numbers()\n main()\n","repo_name":"andy-ifeanyi/concurrency_pattern_python","sub_path":"multiprocessing/basic_multiprocessing.py","file_name":"basic_multiprocessing.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8735012946","text":"#problem 34 / find first and last position of element in sorted array\nclass Solution(object):\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n leftidx = self.binarysearch(nums,target,True)\n if leftidx == len(nums) or nums[leftidx] != target:\n return [-1,-1]\n return [leftidx,self.binarysearch(nums,target,False)-1]\n \n def binarysearch(self,nums,target,ifleft):\n lo = 0\n hi = len(nums)\n while lo < hi:\n mid = (lo+hi)/2\n if nums[mid] > target or (ifleft and target == nums[mid]):\n hi = mid\n else:\n lo = mid+1\n return lo","repo_name":"digitalladder/leetcode","sub_path":"problem34.py","file_name":"problem34.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"32566416879","text":"import threading\nfrom multiprocessing.pool import ThreadPool\n\nfrom pyhtmlgui import Observable\n\nfrom app.devices.devices import DevicesInstance\nfrom app.files.shots import ShotsInstance\n\n\nclass Task_SyncShots(Observable):\n def __init__(self):\n super().__init__()\n self.name = \"Sync Shots\"\n self.status = \"idle\"\n self.worker = None\n\n def set_status(self, value):\n self.status = value\n self.notify_observers()\n\n def run(self):\n if self.worker is None:\n self.worker = threading.Thread(target=self._run, daemon=True)\n self.worker.start()\n\n def _run(self):\n if self.status != \"idle\":\n return\n self.set_status(\"list\")\n cameras = DevicesInstance().cameras.list()\n\n cameras = [c for c in cameras if c.status == \"online\"]\n with ThreadPool(20) as p:\n p.map(lambda device: device.camera.shots._refresh_list(), cameras)\n\n self.set_status(\"shots\")\n with ThreadPool(5) as p:\n p.map(lambda shot: shot._sync_remote(), ShotsInstance().shots)\n\n self.set_status(\"idle\")\n self.worker = None\n\n \n_taskSyncShotsInstance = None\n\n\ndef TaskSyncShotsInstance():\n global _taskSyncShotsInstance\n if _taskSyncShotsInstance is None:\n _taskSyncShotsInstance = Task_SyncShots()\n return _taskSyncShotsInstance\n","repo_name":"dirk-makerhafen/openpi3dscan","sub_path":"server/app/tasks/task_SyncShots.py","file_name":"task_SyncShots.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34435636101","text":"# uninhm\n# https://atcoder.jp/contests/abc089/tasks/abc089_c\n# combinatorics, semi-brute force\n\nfrom collections import Counter\n\nn = int(input())\ns = [input() for _ in range(n)]\n\nc = Counter()\n\nfor name in s:\n c[name[0]] += 1\n\nmarch = 'MARCH'\n\nans = 0\nfor i in range(len(march)):\n for j in range(i+1, len(march)):\n for k in range(j+1, len(march)):\n ans += c[march[i]] * c[march[j]] * c[march[k]]\n\nprint(ans)\n","repo_name":"Vicfred/kyopro","sub_path":"atcoder/abc089C_march.py","file_name":"abc089C_march.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"72"}
+{"seq_id":"3716121238","text":"'''\r\n\tArima model\r\n\thttps://machinelearningmastery.com/arima-for-time-series-forecasting-with-python/\r\n'''\r\nfrom pandas import read_csv\r\nfrom pandas import datetime\r\nfrom pandas import DataFrame\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\nfrom matplotlib import pyplot\r\nimport numpy as np \r\n\r\ndef parser(x):\r\n#\treturn datetime.strptime(str(x), '%Y%m.0')\r\n\treturn datetime.strptime(str(x), '%m')\r\n\r\nseries = read_csv(\r\n#\t'/home/evanb/output/data-randomforest2.txt', \r\n\t'/home/evanb/datasets/onedist.csv',\r\n\theader=0, \r\n\tparse_dates=[11], \r\n\t#index_col=0, \r\n\tsqueeze=True, \r\n\tdate_parser=parser,\r\n\tlow_memory=False\r\n)\r\nprint(series)\r\n# fit model\r\nmodel = ARIMA(np.asarray(series), order=(3,1,0))\r\nmodel_fit = model.fit(disp=0)\r\nprint(model_fit.summary())\r\n# plot residual errors\r\nresiduals = DataFrame(model_fit.resid)\r\nresiduals.plot()\r\npyplot.show()\r\nresiduals.plot(kind='kde')\r\npyplot.show()\r\nprint(residuals.describe())\r\n\r\n","repo_name":"ebradham/mlcode","sub_path":"arima.py","file_name":"arima.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26334362070","text":"import sys\n\ndef isPrime(number): \n counter = 0\n for i in range(number + 1):\n if i == 0:\n continue\n if number % i == 0:\n counter += 1\n \n if counter == 2:\n print(True)\n else: \n print(False)\n\nif __name__ == \"__main__\":\n isPrime(int(sys.argv[1]))\n","repo_name":"tam2628/linear_regression","sub_path":"checkprime.py","file_name":"checkprime.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"1997020156","text":"from setuptools import setup, find_packages\nfrom MineRL.globals import VERSION\n\n\nextras = {}\ntest_deps = ['pytest']\n\nall_deps = []\nfor group_name in extras:\n all_deps += extras[group_name]\nall_deps = all_deps + test_deps\nextras['all'] = all_deps\n\n\nsetup(\n name='MineRL',\n version=VERSION,\n author='heron',\n author_email='wyatt.lansford@heronsystems.com',\n description='Minecraft Offline Learning Env',\n long_description='',\n long_description_content_type=\"text/markdown\",\n url='https://github.com/wyattlansford/MineRL',\n license='Closed',\n python_requires='>=3.6.0',\n packages=find_packages(),\n install_requires=[\n ],\n test_requires=test_deps,\n extras_require=extras,\n include_package_data=True\n)","repo_name":"wyattlansford/MineRL","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"12668710329","text":"import csv\nimport json\nimport os\nimport re\nfrom glob import glob\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom cmcrameri import cm\nfrom zipfile import BadZipFile\n\nsns.set_style(\"dark\")\nsns.set_context(\"paper\")\n\nX_LABEL = [\n f\"{int(x)}:00 AM\" if x < 12 else f\"{int(x - [12 if x != 12 else 0])}:00 PM\"\n for x in np.arange(8, 22, 2)\n]\n\n\ndef add_bool_arg(parser, name, default=False):\n \"\"\"\n Adds boolean arguments to parser by registering both the positive argument and the \"no\"-argument.\n :param parser:\n :param name: Name of argument.\n :param default:\n :return:\n \"\"\"\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\"--\" + name, dest=name, action=\"store_true\")\n group.add_argument(\"--no-\" + name, dest=name, action=\"store_false\")\n parser.set_defaults(**{name: default})\n\n\ndef occupancy_reward_function(\n colours: List[str], current_state: Dict[str, float], global_mode=False\n):\n \"\"\"\n Rewards occupancy rates between 75% and 90%. 
Punishes deviations exponentially.\n :param current_state: State dictionary.\n :param colours: Colours of different CPZs.\n :param global_mode: Whether or not to use the global occupancies or the one of the individual CPZs.\n :return: reward\n \"\"\"\n reward = 0\n if global_mode:\n cpz_occupancies = [current_state[\"overall_occupancy\"]]\n else:\n cpz_occupancies = [current_state[f\"{c}-lot occupancy\"] for c in colours]\n\n for val in cpz_occupancies:\n if 0.75 < val < 0.9:\n reward += 1\n elif val <= 0.75:\n value = 1 - (abs(val - 0.825) / 0.825) ** -1.2\n min_value = 1 - (abs(0 - 0.825) / 0.825) ** -1.2\n max_value = 1 - (abs(0.75 - 0.825) / 0.825) ** -1.2\n max_distance = max_value - min_value\n actual_distance = value - min_value\n reward += actual_distance / max_distance\n elif val >= 0.9:\n value = 1 - (abs(val - 0.825) / 0.825) ** -1.2\n min_value = 1 - (abs(1 - 0.825) / 0.825) ** -1.2\n max_value = 1 - (abs(0.9 - 0.825) / 0.825) ** -1.2\n max_distance = max_value - min_value\n actual_distance = value - min_value\n reward += actual_distance / max_distance\n return reward / len(cpz_occupancies)\n\n\ndef n_cars_reward_function(colours: List[str], current_state: Dict[str, float]):\n \"\"\"\n Minimizes the number of cars in the simulation.\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return optimize_attr(current_state, \"n_cars\", mode=\"min\")\n\n\ndef social_reward_function(colours: List[str], current_state: Dict[str, float]):\n \"\"\"\n Maximizes the normalized share of poor cars in the model.\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return optimize_attr(current_state, \"normalized_share_low\")\n\n\ndef speed_reward_function(colours: List[str], current_state: Dict[str, float]):\n 
\"\"\"\n Maximizes the average speed of the turtles in the model.\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return optimize_attr(current_state, \"mean_speed\")\n\n\ndef composite_reward_function(colours: List[str], current_state: Dict[str, float]):\n \"\"\"\n Maximizes 1/2 occupancy_reward_function + 1/4 n_cars_reward_function + 1/4 social_reward_function\n :param colours: Colours of different CPZs (only present to be able to use one call in custom_environment.py).\n :param current_state:State dictionary.\n :return: reward\n \"\"\"\n return (\n 0.5 * occupancy_reward_function(colours, current_state, global_mode=True)\n + 0.25 * n_cars_reward_function(colours, current_state)\n + 0.25 * social_reward_function(colours, current_state)\n )\n\n\ndef optimize_attr(current_state: Dict[str, float], attr: str, mode=\"max\"):\n \"\"\"\n Abstract function to optimize attributes.\n :param mode: either \"min\" or \"max\" (default).\n :param current_state: State dictionary.\n :param attr: Attribute in state dictionary to optimize.\n :return: reward-value\n \"\"\"\n if mode == \"min\":\n return abs(current_state[attr] - 1) ** 2\n else:\n return current_state[attr] ** 2\n\n\ndef document_episode(nl, path: Path, reward_sum):\n \"\"\"\n Create directory for current episode and command NetLogo to save model as csv.\n :param nl: NetLogo-Session of environment.\n :param path: Path of current episode.\n :param reward_sum: Sum of accumulated rewards for episode.\n :return:\n \"\"\"\n path.mkdir(parents=True, exist_ok=True)\n # Get all directories to check which episode this is\n dirs = glob(str(path) + \"/E*.pkl\")\n current_episode = 1\n if dirs:\n last_episode = max(\n [int(re.findall(\"E(\\d+)\", dirs[i])[0]) for i in range(len(dirs))]\n )\n current_episode = last_episode + 1\n episode_path = str(path / f\"E{current_episode}_{np.around(reward_sum, 
8)}\").replace(\n \"\\\\\", \"/\"\n )\n\n nl.command(f'export-world \"{episode_path}.csv\"')\n nl.command(f'export-view \"{episode_path}.png\"')\n\n # Save relevant data as pickle to save storage\n df = get_data_from_run(f\"{episode_path}.csv\")\n df.to_pickle(f\"{episode_path}.pkl\", compression=\"zip\")\n\n # Delete csv\n os.remove(f\"{episode_path}.csv\")\n\n\ndef label_episodes(path: Path, df: pd.DataFrame, mode: str):\n \"\"\"\n Identifies worst, median and best episode of run. Renames them and saves plots.\n :param path: Path of current Experiment.\n :param df: DataFrame containing the results.\n :param mode: Usually either \"training\" or \"evaluation\".\n :return:\n \"\"\"\n episode_files = glob(str(path) + \"/E*.pkl\")\n performances = dict()\n performances[\"max\"] = np.around(df.rewards.max(), 8)\n performances[\"min\"] = np.around(df.rewards.min(), 8)\n performances[\"median\"] = np.around(\n df.rewards.sort_values(ignore_index=True)[np.ceil(len(df) / 2) - 1], 8\n )\n\n print(f\"Performances for {mode}:\")\n print(performances)\n\n for metric in performances.keys():\n if performances[metric] == 0.0:\n performances[metric] = 0\n found = False\n for episode in episode_files:\n # Baseline\n if mode not in [\"training\", \"eval\"]:\n if str(performances[metric]) == episode.split(\"_\")[-1].split(\".pkl\")[0]:\n found = True\n elif str(performances[metric]) in episode:\n found = True\n if found:\n new_path = path / mode / metric\n new_path.mkdir(parents=True, exist_ok=True)\n save_plots(new_path, episode)\n os.rename(\n episode,\n str(new_path / f\"{mode}_{metric}_{performances[metric]}.pkl\"),\n )\n os.rename(\n episode.replace(\"pkl\", \"png\"),\n str(new_path / f\"view_{mode}_{metric}_{performances[metric]}.png\"),\n )\n episode_files.remove(episode)\n break\n\n\ndef delete_unused_episodes(path: Path):\n \"\"\"\n Deletes episodes that did not produce either min, median or max performances to save storage.\n :param path: Path of current Experiment\n 
:return:\n \"\"\"\n # Get all episodes not moved due to being min, median or max\n episode_files = glob(str(path) + \"/E*\")\n\n # Remove files of episodes\n for file in episode_files:\n if os.path.isfile(file) and \"eval\" not in file:\n os.remove(file)\n\n print(\"Unused Files deleted!\")\n\n\ndef save_plots(outpath: Path, episode_path: str):\n \"\"\"\n Calls all plot functions for given episode.\n :param outpath: Path to save plots.\n :param episode_path: Path of current episode.\n :return:\n \"\"\"\n try:\n data_df = pd.read_pickle(episode_path, compression=\"zip\")\n except FileNotFoundError:\n data_df = get_data_from_run(episode_path)\n\n for func in [\n plot_fees,\n plot_occup,\n plot_social,\n plot_n_cars,\n plot_speed,\n plot_income_stats,\n plot_share_yellow,\n plot_share_parked,\n plot_share_vanished,\n ]:\n func(data_df, outpath)\n\n\ndef get_data_from_run(episode_path):\n \"\"\"\n Extracts data for plots from episode.csv saved by NetLogo.\n :param episode_path: Path of current episode.\n :return: DataFrame with data of current episode.\n \"\"\"\n # Open JSON file containing the indexing information required to extract the information needed for plotting\n with open(\"df_index.json\", \"r\") as fp:\n INDEX_DICT = json.load(fp=fp)\n\n with open(episode_path, newline=\"\") as csvfile:\n file_reader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n for i, row in enumerate(file_reader):\n for key in INDEX_DICT.keys():\n if INDEX_DICT[key][\"title\"] in row:\n INDEX_DICT[key][\"i\"] = i\n\n data_df = pd.read_csv(\n episode_path, skiprows=INDEX_DICT[\"fee\"][\"i\"] + 11, nrows=21601\n )\n data_df = data_df.rename(\n columns={\n \"y\": \"yellow_lot_fee\",\n \"y.1\": \"teal_lot_fee\",\n \"y.2\": \"green_lot_fee\",\n \"y.3\": \"blue_lot_fee\",\n }\n )\n data_df = data_df[\n [\"x\", \"yellow_lot_fee\", \"green_lot_fee\", \"teal_lot_fee\", \"blue_lot_fee\"]\n ]\n data_df.x = data_df.x / 1800\n del INDEX_DICT[\"fee\"]\n\n i = 0\n # Catch exceptions for 
different versions of NetLogo model run\n while i < len(INDEX_DICT.keys()):\n key = sorted(INDEX_DICT)[i]\n try:\n temp_df = pd.read_csv(\n episode_path,\n skiprows=INDEX_DICT[key][\"i\"] + INDEX_DICT[key][\"offset\"],\n nrows=21601,\n )\n for j, col in enumerate(INDEX_DICT[key][\"cols\"]):\n temp_df = temp_df.rename(columns={f\"y.{j}\" if j > 0 else \"y\": col})\n temp_df = temp_df[INDEX_DICT[key][\"cols\"]]\n data_df = data_df.join(temp_df)\n i += 1\n except KeyError:\n INDEX_DICT[key][\"offset\"] += 1\n\n return data_df\n\n\ndef plot_fees(data_df, outpath):\n \"\"\"\n Plot fees for CPZs over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n color_list = [\n cm.imola_r(0),\n cm.imola_r(1.0 * 1 / 3),\n cm.imola_r(1.0 * 2 / 3),\n cm.imola_r(1.0),\n ]\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n ax.plot(\n data_df.x,\n data_df.yellow_lot_fee,\n linewidth=4,\n color=color_list[0],\n linestyle=\"solid\",\n )\n ax.plot(\n data_df.x,\n data_df.green_lot_fee,\n linewidth=4,\n color=color_list[1],\n linestyle=\"dashed\",\n )\n ax.plot(\n data_df.x,\n data_df.teal_lot_fee,\n linewidth=4,\n color=color_list[2],\n linestyle=\"dashed\",\n )\n ax.plot(\n data_df.x,\n data_df.blue_lot_fee,\n linewidth=4,\n color=color_list[3],\n linestyle=\"dashed\",\n )\n\n ax.set_ylim(bottom=0, top=10.1)\n\n ax.set_ylabel(\"Hourly Fee in €\", fontsize=30)\n ax.set_xlabel(\"\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n\n create_colourbar(fig)\n fig.savefig(str(outpath / \"fees.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_occup(data_df, outpath):\n \"\"\"\n Plot occupation levels of different CPZs over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to 
save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n\n color_list = [\n cm.imola_r(0),\n cm.imola_r(1.0 * 1 / 3),\n cm.imola_r(1.0 * 2 / 3),\n cm.imola_r(1.0),\n ]\n ax.plot(\n data_df.x, data_df.yellow_lot_occup / 100, linewidth=2, color=color_list[0]\n )\n ax.plot(\n data_df.x, data_df.green_lot_occup / 100, linewidth=2, color=color_list[1]\n )\n ax.plot(\n data_df.x, data_df.teal_lot_occup / 100, linewidth=2, color=color_list[2]\n )\n ax.plot(\n data_df.x, data_df.blue_lot_occup / 100, linewidth=2, color=color_list[3]\n )\n ax.plot(\n data_df.x,\n data_df.garages_occup / 100,\n label=\"Garage(s)\",\n linewidth=2,\n color=\"black\",\n )\n ax.plot(\n data_df.x,\n data_df.overall_occup / 100,\n label=\"Kerbside Parking Overall\",\n linewidth=4,\n color=cm.berlin(1.0),\n linestyle=(0, (1, 5)),\n ) if \"composite\" in str(outpath).lower() else None\n ax.plot(\n data_df.x,\n [0.75] * len(data_df.x),\n linewidth=2,\n color=\"red\",\n linestyle=\"dashed\",\n )\n ax.plot(\n data_df.x,\n [0.90] * len(data_df.x),\n linewidth=2,\n color=\"red\",\n linestyle=\"dashed\",\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Utilised Capacity\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n create_colourbar(fig)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"occupancy_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_social(data_df, outpath):\n \"\"\"\n PLot shares of different income classes over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", 
\"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.low_income / 100,\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.middle_income / 100,\n label=\"Middle Income\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.high_income / 100,\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of Cars per Income Class\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"social_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_speed(data_df, outpath):\n \"\"\"\n Plot average speed over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n ax.plot(data_df.x, data_df.average_speed, linewidth=3, color=cm.bamako(0))\n ax.plot(\n data_df.x,\n data_df.average_speed.rolling(50).mean(),\n linewidth=3,\n color=cm.bamako(1.0),\n )\n\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Average Normalised Speed\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n\n fig.savefig(str(outpath / \"speed.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_n_cars(data_df, outpath):\n \"\"\"\n Plot number of cars over run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n 
:return:\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n ax.plot(data_df.x, data_df.cars_overall / 100, linewidth=3, color=cm.bamako(0))\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of Initially Spawned Cars\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n\n fig.savefig(str(outpath / \"n_cars.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_income_stats(data_df, outpath):\n \"\"\"\n Plot mean, median and std. of income distribution of run of episode.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.berlin(0), cm.berlin(1.0 * 1 / 2), cm.berlin(1.0)]\n ax.plot(\n data_df.x, data_df[\"mean\"], label=\"Mean\", linewidth=3, color=color_list[0]\n )\n ax.plot(\n data_df.x,\n data_df[\"median\"],\n label=\"Median\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df[\"std\"],\n label=\"Standard Deviation\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=max(data_df[[\"mean\", \"median\", \"std\"]].max()) + 1)\n\n ax.set_ylabel(\"Income in €\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"income_stats_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_share_yellow(data_df, outpath):\n \"\"\"\n Plot share of different income classes on yellow CPZ.\n :param data_df: DataFrame with data from current 
episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.share_y_low / 100,\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.share_y_middle / 100,\n label=\"Middle Income\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.share_y_high / 100,\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of Cars in Yellow CPZ\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"share_yellow_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_share_parked(data_df, outpath):\n \"\"\"\n Plot share of parked cars per income class.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.share_p_low / 100,\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.share_p_middle / 100,\n label=\"Middle Income\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.share_p_high / 100,\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Share of 
Cars Finding Parking\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"share_parked_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef plot_share_vanished(data_df, outpath):\n \"\"\"\n Plot share of vanished cars per income class.\n :param data_df: DataFrame with data from current episode.\n :param outpath: Path to save plot.\n :return:\n \"\"\"\n # Save plot with three variants of legend location\n for loc in [\"lower right\", \"right\", \"upper right\"]:\n fig, ax = plt.subplots(1, 1, figsize=(20, 8), dpi=300)\n color_list = [cm.bamako(0), cm.bamako(1.0 * 1 / 2), cm.bamako(1.0)]\n ax.plot(\n data_df.x,\n data_df.share_v_low / (data_df.low_income[0] / 100 * 525),\n label=\"Low Income\",\n linewidth=3,\n color=color_list[0],\n )\n ax.plot(\n data_df.x,\n data_df.share_v_middle / (data_df.middle_income[0] / 100 * 525),\n label=\"Middle Income\",\n linewidth=3,\n color=color_list[1],\n )\n ax.plot(\n data_df.x,\n data_df.share_v_high / (data_df.high_income[0] / 100 * 525),\n label=\"High Income\",\n linewidth=3,\n color=color_list[2],\n )\n ax.set_ylim(bottom=0, top=1.01)\n\n ax.set_ylabel(\"Normalised Share of Cars Vanished\", fontsize=30)\n ax.grid(True)\n ax.tick_params(axis=\"both\", labelsize=25)\n ax.set_xlabel(\"Time of Day\", fontsize=30)\n ax.set_xticks(ticks=np.arange(0, max(data_df[\"x\"]) + 1, 2))\n ax.set_xticklabels(labels=X_LABEL)\n ax.legend(fontsize=25, loc=loc)\n\n fig.savefig(str(outpath / f\"share_vanished_{loc}.pdf\"), bbox_inches=\"tight\")\n plt.close(fig)\n\n\ndef create_colourbar(fig):\n \"\"\"\n Draws colourbar with colour of different CPZs on given figure.\n :param fig: Figure to draw colourbar on.\n :return:\n \"\"\"\n cmap = cm.imola\n\n fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, 
right=0.8, wspace=0.01)\n cb_ax = fig.add_axes([0.8, 0.1, 0.015, 0.8])\n\n bounds = [0, 1, 2, 3, 4]\n norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)\n cbar = fig.colorbar(\n matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap),\n cax=cb_ax,\n orientation=\"vertical\",\n )\n\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\n r\"$\\Leftarrow$ Distance of CPZ to City Centre\", fontsize=25, loc=\"top\"\n )\n\n\ndef draw_radar_plot(input_dir):\n \"\"\"\n\n :param input_dir:\n :return:\n \"\"\"\n if glob(input_dir + \"/*.pkl\"):\n median_runs = glob(input_dir + \"/*.pkl\")\n median_labels = [re.findall(\"([a-zA-Z]+).pkl\", run)[0] for run in median_runs]\n else:\n median_runs = glob(input_dir + \"/*.csv\")\n median_labels = [re.findall(\"([a-zA-Z]+).csv\", run)[0] for run in median_runs]\n\n categories = [\n \"Optimize Occupancy\",\n \"Preserve Social Composition\",\n \" Maximize Speed\",\n \"Minimize Cars\",\n ]\n categories = [*categories, categories[0]]\n color_list = sns.color_palette(\"colorblind\")\n occup_scores = []\n social_scores = []\n speed_scores = []\n n_cars_scores = []\n performance_dict = dict()\n for i, run in enumerate(median_runs):\n try:\n df = pd.read_pickle(run, compression=\"zip\")\n except (FileNotFoundError, BadZipFile):\n df = get_data_from_run(run)\n label = median_labels[i]\n performance_dict[label] = dict()\n # Overall time\n occup_score = 0\n for c in [\"yellow\", \"green\", \"teal\", \"blue\"]:\n occup_score += (\n len(df[(df[f\"{c}_lot_occup\"] > 75) & (df[f\"{c}_lot_occup\"] < 90)])\n / len(df)\n ) * 0.25\n occup_scores.append(occup_score)\n n_cars_scores.append(1 - df[\"cars_overall\"].iloc[-1] / 100)\n speed_scores.append(df.average_speed.mean())\n social_scores.append(df.low_income.iloc[-1])\n\n for i, label in enumerate(median_labels):\n scores = []\n for score_list in [occup_scores, social_scores, speed_scores, n_cars_scores]:\n scores.append(score_list[i] / max(score_list))\n scores.append(scores[0])\n 
performance_dict[label][\"scores\"] = scores\n\n plt.rc(\"xtick\", labelsize=30)\n plt.rc(\"ytick\", labelsize=30)\n\n label_loc = np.linspace(start=0, stop=2 * np.pi, num=len(scores))\n\n fig = plt.figure(figsize=(20, 20))\n ax = fig.add_subplot(111, polar=True)\n for run_label, colour_i in zip(median_labels, [7, 0, 2, 1, 4, 8, 9]):\n if \"static\" in run_label or \"dynamic\" in run_label:\n label = r\"$\\mathrm{Baseline_{\" + run_label + r\"}}$\"\n else:\n label = r\"$\\mathrm{r_{\" + run_label + r\"}}$\"\n ax.plot(\n label_loc,\n performance_dict[run_label][\"scores\"],\n label=label,\n linewidth=4,\n color=color_list[colour_i],\n )\n ax.fill(\n label_loc,\n performance_dict[run_label][\"scores\"],\n alpha=0.25,\n color=color_list[colour_i],\n )\n\n ax.set_ylim(0, 1)\n\n ax.set_thetagrids(np.degrees(label_loc), labels=categories)\n for label, category in zip(ax.get_xticklabels(), categories):\n if \"Speed\" in category:\n label.set_horizontalalignment(\"left\")\n elif \"Occup\" in category:\n label.set_horizontalalignment(\"right\")\n ax.set_theta_offset(np.pi)\n ax.legend(fontsize=28, loc=\"upper right\", bbox_to_anchor=(1.35, 1.15))\n # ax.xaxis.set_tick_params(pad=15)\n ax.axes.yaxis.set_ticklabels([])\n #\n plt.tight_layout()\n ax.spines[\"polar\"].set_color(\"#222222\")\n\n fig.savefig(\"radar_plot.pdf\", bbox_inches=\"tight\")\n\n plt.show()\n","repo_name":"JakobKappenberger/ai-priced-parking","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":27542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"40127159523","text":"import os\nimport typing\nimport inspect\nimport pickle\nimport warnings\nimport portalocker\nimport pathlib\n\nExportable = type(typing.Any)(\n \"Exportable\",\n doc=\"\"\"Special type indicating an exportable type for the offshore package.\n This type behaves like Any for all practical purposes.\n \"\"\",\n)\n\n\nclass Offshore:\n def __init__(self, filename=\".offshore\", autosave=False, autoload=False):\n self._stack = inspect.stack()\n self._path = pathlib.Path(os.getcwd()) / filename\n self._autosave = bool(autosave)\n self._autoload = bool(autoload)\n self._store = {}\n\n try:\n self._load()\n except FileNotFoundError:\n self.dump()\n\n def __getattr__(self, item):\n if self._autoload:\n self.load()\n\n try:\n return self._store[item]\n except KeyError as e:\n raise AttributeError(str(e))\n\n def __setattr__(self, key, value):\n if key.startswith(\"_\"):\n self.__dict__[key] = value\n return\n\n self._store[key] = value\n\n if self._autosave:\n self.dump()\n\n def __getitem__(self, item):\n if self._autoload:\n self.load()\n\n return self._store[item]\n\n def __setitem__(self, key, value):\n self._store[key] = value\n\n if self._autosave:\n self.dump()\n\n def __delitem__(self, key):\n del self._store[key]\n\n if self._autosave:\n self.dump()\n\n def __contains__(self, item):\n if self._autoload:\n self.load()\n\n return item in self._store\n\n def __len__(self):\n if self._autoload:\n self.load()\n\n return len(self._store)\n\n def _load(self):\n with portalocker.Lock(str(self._path), \"rb\", timeout=60) as file:\n self._store = pickle.load(file)\n\n @staticmethod\n def _parse_stack(stack):\n frame = stack[1][0]\n global_vars = frame.f_globals\n module = inspect.getmodule(frame)\n\n if module is None:\n return [], global_vars\n\n annotations = [key for key, value in typing.get_type_hints(module).items() if value is Exportable]\n return annotations, global_vars\n\n def clear(self):\n self._store = {}\n self.dump()\n\n def 
snapshot(self):\n annotations, global_vars = self._parse_stack(self._stack)\n\n if not annotations:\n warnings.warn(f\"No exportable variables found\")\n\n for key in annotations:\n self._store[key] = global_vars[key]\n\n self.dump()\n\n def restore(self):\n annotations, global_vars = self._parse_stack(self._stack)\n\n if not annotations:\n warnings.warn(f\"No exportable variables found\")\n\n self.load()\n\n for key in annotations:\n if key not in self._store:\n warnings.warn(f\"Key '{key}' was not found in the state store and has not been restored\")\n continue\n\n global_vars[key] = self._store[key]\n\n def dump(self):\n with portalocker.Lock(str(self._path), \"wb\", timeout=60) as file:\n pickle.dump(self._store, file)\n\n file.flush()\n os.fsync(file.fileno())\n\n def load(self):\n try:\n self._load()\n except FileNotFoundError:\n warnings.warn(f\"State store not found in '{str(self._path)}'\")\n","repo_name":"kpdemetriou/offshore","sub_path":"offshore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"4273916733","text":"import codecs\n\nline_seen=set()#初始化空的无序集合\n\nin_file=codecs.open('I:/codes/track4new.txt','r',encoding='utf-8')\n\nout_file=codecs.open('I:/codes/track4.1.txt','w',encoding='utf-8')\n\nlines=in_file.readlines()\n\nfor line in lines:\n if line not in line_seen:\n out_file.write(line)\n line_seen.add(line)\n\nin_file.close()\nout_file.close()\n","repo_name":"LuicelZhou/MultiView-Tracking-ReID","sub_path":"Detecting_and_Tracking/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"20876661221","text":"'''\n딱지놀이\nhttps://www.acmicpc.net/problem/14696\n백준 브론즈1 14696\n\n두 어린이 A, B가 딱지놀이를 한다. 딱지놀이 규칙은 다음과 같다. 두 어린이는 처음에 여러 장의 딱지를 가지고 있고, 매 라운드마다 각자 자신이 가진 딱지 중 하나를 낸다. 딱지에는 별(★), 동그라미(●), 네모(■), 세모(▲), 네 가지 모양 중 하나 이상의 모양이 표시되어 있다. 두 어린이가 낸 딱지 중 어느 쪽이 더 강력한 것인지는 다음 규칙을 따른다.\n\n만약 두 딱지의 별의 개수가 다르다면, 별이 많은 쪽의 딱지가 이긴다.\n별의 개수가 같고 동그라미의 개수가 다르다면, 동그라미가 많은 쪽의 딱지가 이긴다.\n별, 동그라미의 개수가 각각 같고 네모의 개수가 다르다면, 네모가 많은 쪽의 딱지가 이긴다.\n별, 동그라미, 네모의 개수가 각각 같고 세모의 개수가 다르다면, 세모가 많은 쪽의 딱지가 이긴다.\n별, 동그라미, 네모, 세모의 개수가 각각 모두 같다면 무승부이다.\n\n라운드의 수 N과 두 어린이가 순서대로 내는 딱지의 정보가 주어졌을 때, 각 라운드별로 딱지놀이의 결과를 구하는 프로그램을 작성하시오.\n'''\n\n\nN = int(input()) # 총 라운드\n\nfor _ in range(N):\n A = {4:0, 3:0, 2:0, 1:0} # 보유한 딱지 개수\n B = {4:0, 3:0, 2:0, 1:0}\n _, *arg = map(int, input().split())\n for a in arg:\n A[a] += 1\n _, *arg = map(int, input().split())\n for b in arg:\n B[b] += 1\n\n\n for i in range(4, 0, -1): # 4부터 점검\n if A[i] > B[i]:\n print('A')\n break\n elif B[i] > A[i]:\n print('B')\n break\n else:\n print('D') # 무승부\n\n\n","repo_name":"seoda0000/TIL","sub_path":"AlgorithmProblemSolving/04_백준/Bronze/14696_딱지놀이.py","file_name":"14696_딱지놀이.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"42074071111","text":"from random import randint\nfrom termcolor import colored\nimport colorama\n\ncolorama.init()\n\n\ndef alg(a, i, j):\n if j - i == 1:\n if a[i] < 0 and a[j] > 0:\n return i\n else:\n return \"non c'è\"\n\n m = (i + j) // 2\n if a[m] > 0:\n return alg(a, i, m)\n else:\n return alg(a, m, j)\n\n\nLIM = 15\n\nfor _ in range(LIM // 2):\n array = [randint(LIM * -1, LIM) for _ in (range(randint(2, LIM)))]\n array.sort()\n # to remove duplicates\n array = list(dict.fromkeys(array))\n if array[- 1] < 0:\n array.append(array[- 1] * -1 + 1)\n if array[0] > 0:\n array[0] = array[0] * -1\n _n = len(array)\n\n alg_res = alg(array, 0, _n - 1)\n\n expected_value = \"non c'è\"\n for ind in range(_n - 1):\n if array[ind] < 0 and array[ind + 1] > 0:\n expected_value = ind\n break\n\n print(f'array: {array}\\nn : {_n}\\nexpected result: {expected_value}\\nalg result: {alg_res}\\nsame results: {colored(\"True\", \"green\") if alg_res == expected_value else colored(\"False\", \"red\")}\\n')\n","repo_name":"OB-UNISA/Algorithm-Design","sub_path":"exercises/exercises2_10.py","file_name":"exercises2_10.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"30450677949","text":"from django.db import models\nfrom user_accounts.models import CustomUser\n\n# Create your models here.\nLOG = (\n ('Report','Report'),\n ('Refer','Refer'),\n ('Tip Upload','Tip Uplod'),\n ('Physical','Physical'),\n ('Relationship','Relationship'),\n ('Book Reading','Book Reading'),\n ('Meditation','Meditation'),\n\n)\nclass PointLogs(models.Model):\n log_description = models.CharField(max_length=500)\n user = models.ForeignKey(CustomUser,on_delete=models.CASCADE)\n datetime = models.DateTimeField(auto_now_add = True)\n log_type = models.CharField(max_length=100,choices=LOG)","repo_name":"Prakash617/bookwish","sub_path":"logs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"23077780623","text":"#-*- encoding:UTF-8 -*-\n'''\nCreated on 2019/6/30\n\n@author: xcKev\n'''\n\nimport difflib\nimport alvintools.common_filer as filer\nimport alvintools.common_logger as log\nimport sys\ncurrent_log=log.get_log('comparer', log.LOG_DIR, 'comparer')\n\ndef compare_file(file_path1,file_path2,diff_path):\n if file_path1 == \"\" or file_path2 ==\"\":\n current_log.info(F\"path can't be blank: first path:{file_path1},second path:{file_path2}\")\n sys.exit()\n else:\n current_log.info(F\"comparing file between {file_path1} and {file_path2}\")\n text1_lines = filer.get_file_details(file_path1)\n text2_lines = filer.get_file_details(file_path2)\n diff = difflib.HtmlDiff()\n result = diff.make_file(text1_lines, text2_lines)\n try:\n result_h = open(diff_path,'w')\n result_h.write(result)\n current_log.info(\"Compared successfully finished\\n\")\n except IOError as error:\n current_log.error(F\"Failed to write html diff file: {error}\")","repo_name":"Kevin-san/ToolLesson","sub_path":"core/alvintools/common_comparer.py","file_name":"common_comparer.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"7885179866","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nfrom Text_Traffic_Analysis.Packet_Segment import pkt_seg_by_delimiters\nfrom Text_Traffic_Analysis.Select_Words import top_words_set, select_key_words\nfrom Text_Traffic_Analysis.Format_Extract import infer_protocol_format\nfrom Text_Traffic_Analysis.Protocol_Feature import get_traffic_feature\n\ndef Text_Re(DATA_PATH, MODE, NAME):\n\n print(os.path.join(DATA_PATH, \"text_tcp/0\"))\n if os.path.exists(os.path.join(DATA_PATH, \"text_tcp/0\")):\n DATA_PATH += \"/text_tcp/0/\"\n else:\n print(\"[info] No text data set.\")\n return\n if len(os.listdir(DATA_PATH)) < 50:\n print(\"[info] No text data.\")\n return \n\n SEG_OUT_PATH = './run_file/seg_' + NAME \n WORDS_PATH = './run_file/words_' + NAME \n P_OUT_PATH = './run_file/pattern_' + NAME \n\n print(\"Data directory path: \", DATA_PATH)\n\n # 协议逆向开始\n first_words = pkt_seg_by_delimiters(DATA_PATH, SEG_OUT_PATH)\n\n tagged_weighted_word = top_words_set(SEG_OUT_PATH, WORDS_PATH)\n\n f_words, b_words, f_data, b_data = select_key_words(tagged_weighted_word, DATA_PATH, P_OUT_PATH, MODE)\n\n f_formats, b_formats = infer_protocol_format(P_OUT_PATH)\n\n # 基于逆向的报文格式推断协议特征字符串\n get_traffic_feature(NAME, f_formats, b_formats, f_data, b_data, DATA_PATH, MODE)\n","repo_name":"737898487/2018","sub_path":"Text_Traffic_Analysis/Text_Entrance.py","file_name":"Text_Entrance.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"43088349066","text":"import numpy as np\n\n\nALL_DIRECTIONS = np.array(\n [[-1, -1], [-1, 0], [-1, 1],\n [0, -1], [0, 0], [0, 1],\n [1, -1], [1, 0], [1, 1]]\n)\nNW, N, NE, W, NOOP, E, SW, S, SE = ALL_DIRECTIONS\n\n\ndef get_movement_vectors(num_directions: int):\n mapping = {9: ALL_DIRECTIONS,\n 8: np.stack([NW, N, NE, W, E, SW, S, SE]),\n 5: np.stack([W, N, E, S, NOOP]),\n 4: np.stack([W, N, E, S])}\n if num_directions not in mapping:\n raise ValueError(\"Can only handle 9, 8, 5 or 4 directions!\")\n return mapping[num_directions]\n\n\nclass MovementTranslator:\n\n @staticmethod\n def translate(opencv_keypress):\n return {\n -1: NOOP, 83: E, 81: W, 82: N, 84: S, 27: None,\n 119: N, 115: S, 97: W, 100: E\n }[opencv_keypress]\n","repo_name":"csxeba/grund","sub_path":"grund/util/movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"35608957389","text":"'''\nCreated on 12 Nov 2013\n\n@author: fafey\n'''\n\nfrom z3 import *\n\n\nx = Int('x')\ny = Int('x')\nfun = Function('fun', IntSort(), IntSort())\n\n\n\nsolve(x > 5, x<7, ForAll(x, fun(x) == 10), fun(y) == 1)\n","repo_name":"songhui/cspadapt","sub_path":"vmplacement/z4/src/DatatypeTest.py","file_name":"DatatypeTest.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"71345239253","text":"# Charles Buyas cjb8qf\n\n\nimport random\nprint(\"Input a -1 to play with a random number\")\nstart = int(input(\"What should the answer be?: \"))\ncount = 0\nif start == -1:\n start = random.randint(1, 100)\nelse:\n num = start\n\nwhile count < 4:\n guess = int(input(\"Guess a number: \"))\n if int(guess) == int(start):\n print(\"You win!\")\n exit(0)\n elif int(guess) > int(start):\n count += 1\n print(\"The number is lower than that.\")\n else:\n count += 1\n print(\"The number is higher than that.\")\n\nif count == 4:\n guess = int(input(\"Guess a number: \"))\n if int(guess) == int(start):\n print(\"You win!\")\n exit(0)\n else:\n print(\"You lose; the number was \" + str(start) + \".\")\n","repo_name":"AliveSphere/Introductory_PyCharm_Files","sub_path":"POTD/higher_lower.py","file_name":"higher_lower.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"39676764635","text":"# 브루트포스 풀이\n'''\n주어진 테트로미노를 회전, 대칭하여 만들어지는 가지수는 총 19종류이다.\n문제에서 주어지는 종이를 2중 for문으로 탐색하면서, 주어진 테트로미노 형태의 숫자 합계를 비교한다.\n각 테트로미노당 정사각형 4개로 이루어져 있으므로 i, j가 이동하는 delta를 3개씩 19종류, 총 57개의 delta를 만들어둔다.\n본 풀이에서 delta의 순서는 문제에 주어진 도형을 회전/대칭 후 회전 하는 순서대로 설정하였다.\n'''\n# [A] 테트로미노의 경우의 수에 따른 delta 설정\ndi = [0, 0, 0, 1, 2, 3, 0, 1, 1, 1, 2, 2, 0, 0, 1, 0, 1, 2, 0, 0, -1, 0, -1, -2, 1, 1, 1, 0, 1, 2, 0, 0, 1, 1, 1, 2, 0, -1, -1, 1, 1, 2, 0, 1, 1, 0, 0, 1, -1, 0, 1, 1, 1, 1, 1, 2, 1]\ndj = [1, 2, 3, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 2, 0, 1, 1, 1, 1, 2, 2, 1, 1, 1, 0, 1, 2, 1, 0, 0, 1, 2, 2, 0, 1, 1, 1, 1, 2, 0, -1, -1, 1, 1, 2, 1, 2, 1, 1, 1, 1, -1, 0, 1, 0, 0, 1]\ndirection = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54]\n\n# [1] 입력값 설정\nfrom sys import stdin\nN, M = map(int, stdin.readline().split())\npaper = [[] for _ in '_'*N]\nfor i in range(N):\n paper[i] = list(map(int, stdin.readline().split()))\n\n# [2] 종이에 적혀진 수 탐색 시작\nresult = 0\nfor i in range(N):\n for j in range(M):\n for dr in direction:\n sumV = paper[i][j] # dr이 갱신될 때마다 sumV도 초기화된다.\n\n ni, nj = i+di[dr], j+dj[dr] # 첫 번째 ni, nj\n if 0 <= ni < N and 0 <= nj < M:\n sumV += paper[ni][nj]\n\n ni, nj = i+di[dr+1], j+dj[dr+1] # 두 번째 ni, nj\n if 0 <= ni < N and 0 <= nj < M:\n sumV += paper[ni][nj]\n\n ni, nj = i+di[dr+2], j+dj[dr+2] # 마지막 ni, nj\n if 0 <= ni < N and 0 <= nj < M:\n sumV += paper[ni][nj]\n\n # 총 4개의 수를 더했다면 result값과 비교\n if result < sumV:\n result = sumV\n\nprint(result)\n\nㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ\n# DFS 풀이\n# [A] delta 방향 설정\ndi = [-1, 1, 0, 0]\ndj = [0, 0, -1, 1] # 상하좌우\n\n# [B] dfs 함수 설정\ndef dfs(i, j, n, sumV):\n global result\n # [B-1] 가지치기 조건. 아무리 max를 더해도 result보다 작을 때\n if result > sumV + maxV * (4 - n):\n return\n # [B-2] 종료 조건. 
4개의 정사각형을 더했다면 result값과 비교\n if n == 4:\n if result < sumV:\n result = sumV\n return\n\n # [B-3] 상하좌우 방향대로 dfs 탐색\n for dr in range(4):\n ni, nj = i+di[dr], j+dj[dr]\n if 0 <= ni < N and 0 <= nj < M and not visited[ni][nj]:\n if n == 2: # [B-4] 이 조건을 넣지 않으면 'ㅏ' 블럭이 만들어지지 않는다.\n visited[ni][nj] = 1\n dfs(i, j, n+1, sumV + paper[ni][nj])\n visited[ni][nj] = 0\n\n visited[ni][nj] = 1\n dfs(ni, nj, n+1, sumV + paper[ni][nj])\n visited[ni][nj] = 0\n\n# [1] 입력값 설정\nfrom sys import stdin\nN, M = map(int, stdin.readline().split())\npaper = [[] for _ in '_'*N]\nfor i in range(N):\n paper[i] = list(map(int, stdin.readline().split()))\n\n# [2] dfs 탐색\nresult = 0\nmaxV = max(map(max, paper))\nvisited = [[0]*M for _ in '_'*N]\nfor i in range(N):\n for j in range(M):\n visited[i][j] = 1\n dfs(i, j, 1, paper[i][j])\n visited[i][j] = 0\n\nprint(result)","repo_name":"Seori15/algorithm","sub_path":"python/BOJ/BOJ_14500_테트로미노.py","file_name":"BOJ_14500_테트로미노.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"39533115944","text":"\"\"\"StudentiUniMi URL Configuration\n\nThe `urlpatterns` list routes URLs to views.\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.http import HttpResponse, HttpRequest\nfrom django.urls import path, include\nfrom django.views.generic import RedirectView\nfrom sentry_sdk import configure_scope\n\nimport telegrambot.urls\nimport university.urls\n\nadmin.site.site_header = \"Network StudentiUniMi - administration\"\n\n\ndef healthcheck(_: HttpRequest) -> HttpResponse:\n with configure_scope() as scope:\n if scope.transaction:\n scope.transaction.sampled = False\n return HttpResponse(\"hello, you hacker!\\n\", content_type=\"text/plain\")\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(university.urls)),\n path('telegrambot/', include(telegrambot.urls)),\n url(r'^robots.txt$', lambda r: HttpResponse(\n \"User-Agent: *\\nDisallow: /\",\n content_type=\"text/plain\",\n ), name=\"robots_txt\"),\n url(r\"^healthcheck$\", healthcheck, name=\"healthcheck\"),\n]\n\nif not settings.DEBUG:\n urlpatterns.append(\n path('', RedirectView.as_view(url=\"https://api.studentiunimi.it/admin/\"),\n name='root-redirect'),\n )\n","repo_name":"StudentiUniMi/backend","sub_path":"StudentiUniMi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"}
+{"seq_id":"25164658409","text":"import numpy as np\nfrom gurobipy import quicksum\n\nfrom CounterfactualAnalysis.TreeEnsembleSolver import CESolver_TreeEnsemble\n\nclass CESolver_RandomForest(CESolver_TreeEnsemble):\n def __init__(self, estimator, lambda0, lambda1, lambda2, eps, timelimit):\n super().__init__(estimator, lambda0, lambda1, lambda2, eps, timelimit)\n self.T = self.estimator.n_estimators\n self.M1 = 1\n self.M2 = 1\n\n def build(self, x0, yCE):\n super().build(x0, yCE)\n self.class_assignment = self.model.addConstrs((quicksum(self.getWeight(t,l,yCE)*self.z[t,l] for t in range(self.T) for l in self.getLeaves(t)) >= 1.e-4 + quicksum(self.getWeight(t,l,k)*self.z[t,l] for t in range(self.T) for l in self.getLeaves(t)) for k in self.K if k!=yCE))\n self.reset.append(self.class_assignment)\n\n def getWeight(self, t, l, k):\n value = self.getTree(t).value[l,0,:]\n return (1/self.T*(value[np.where(self.K==k)[0]]/np.sum(value)))[0]\n","repo_name":"ceciliasalvatore/sFCCA","sub_path":"CounterfactualAnalysis/RandomForestSolver.py","file_name":"RandomForestSolver.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"15341155148","text":"import base64\r\nimport mysql.connector\r\nfrom datetime import date\r\nimport time\r\nimport numpy as np\r\nfrom keras.models import load_model\r\nfrom keras.utils import load_img, img_to_array\r\nfrom io import BytesIO\r\n\r\ndef predict_value(bytes_data):\r\n model = load_model(\"E:/APASD/predimg.h5\")\r\n # Create a binary buffer from the bytes object\r\n buffer = BytesIO(bytes_data)\r\n\r\n image = load_img(buffer, target_size=(150,150))\r\n image = img_to_array(image)\r\n image = np.expand_dims(image,axis=0)\r\n val = model.predict(image)\r\n if val>0.5:\r\n return \"Signature\"\r\n else:\r\n return \"Photograph\"\r\n\r\ndef insertIMAGEDATA(image):\r\n try:\r\n connection = mysql.connector.connect(\r\n host=\"127.0.0.1\",\r\n user=\"root\",\r\n database=\"mydatabase\",\r\n password=\"P@ssword1\"\r\n )\r\n \r\n cursor = connection.cursor()\r\n query = \"\"\" INSERT INTO IMAGEDATA\r\n (UPLOADED_DATE, DATE, UPLOADED_TIME, TIME, IMAGE, PREDICTED_VALUE) \r\n VALUES (%s,%s,%s,%s,%s,%s)\"\"\"\r\n \r\n uploaded_date = date.today()\r\n r_date = uploaded_date.strftime(\"%d %B, %Y\")\r\n uploaded_time = time.strftime(\"%I:%M:%S\",time.localtime())\r\n r_time = time.strftime(\"%I:%M %p\",time.localtime())\r\n predicted_value = predict_value(image)\r\n insert_tuple = (uploaded_date, r_date, uploaded_time, r_time, image, predicted_value)\r\n cursor.execute(query, insert_tuple)\r\n connection.commit()\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed inserting data into MySQL table {}\".format(error))\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n finally:\r\n if connection.is_connected():\r\n cursor.close()\r\n connection.close()\r\n\r\ndef retrieveIMAGEDATA():\r\n final_array = []\r\n try:\r\n connection = mysql.connector.connect(\r\n host=\"127.0.0.1\",\r\n user=\"root\",\r\n database=\"mydatabase\",\r\n password=\"Deepak@973\"\r\n )\r\n\r\n cursor = connection.cursor()\r\n query = \"\"\"SELECT DATE, TIME, IMAGE, 
PREDICTED_VALUE FROM IMAGEDATA ORDER BY UPLOADED_DATE DESC,UPLOADED_TIME DESC LIMIT 5\"\"\"\r\n cursor.execute(query)\r\n record = cursor.fetchall()\r\n for row in record:\r\n sub_dict = dict()\r\n sub_dict[\"uploaded_date\"] = str(row[0])\r\n sub_dict[\"uploaded_time\"] = str(row[1])\r\n sub_dict[\"image\"] = base64.b64encode(row[2]).decode('utf-8')\r\n sub_dict[\"predicted_value\"] = str(row[3])\r\n final_array.append(sub_dict)\r\n connection.commit()\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed inserting data into MySQL table {}\".format(error))\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n finally:\r\n if connection.is_connected():\r\n cursor.close()\r\n connection.close()\r\n return final_array\r\n","repo_name":"Deepak973-create/Photo-Sign-Detection","sub_path":"my_project/my_library.py","file_name":"my_library.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"70023404375","text":"\"\"\" Compute average energy in input bins.\n\"\"\"\nimport sys\nfrom copy import copy\n\nfrom pyrate.core.Algorithm import Algorithm\n\n\nclass AverageBinEnergy(Algorithm):\n __slots__ = ()\n\n def __init__(self, name, store, logger):\n super().__init__(name, store, logger)\n\n def execute(self, config):\n\n e = self.store.get(\"EVENT:nT:edepScint\")\n\n self.store.put(config[\"name\"], e)\n\n\n# EOF\n","repo_name":"fscutti/pyrate","sub_path":"pyrate/algorithms/muondet/AverageBinEnergy.py","file_name":"AverageBinEnergy.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"29439243256","text":"# rosalind_lgis.txt\nimport sys\nimport numpy as np \n\nf = sys.argv[1]\n\nnum_list = []\nwith open(f,'r') as handle:\n for line in handle:\n line = line.replace('\\n','')\n num_list.append(line.split(' ')) \nx = []\nfor i in num_list:\n for j in i:\n x.append(int(j))\n\nx1 = x[:30]\n'''\ni_count = {}\nd_count = {}\nfor i in range(len(x1)):\n if i == 0:\n i_count[x1[i]] = 1\n d_count[x1[i]] = 1\n elif max(list(i_count.keys())) < x1[i]:\n i_count[x1[i]] = max(i_count.keys())+1\n elif max(i_count.keys()) > x1[i]:\n i_count[x1[i]] = max(i_count.keys()< x1[i]) +1\n\nprint(i_count)\n'''\nd = {1:2,2:3,4:5,6:8,3:2}\nprint(max(list(d.keys())) < 5)\n","repo_name":"kjh918/rosalind","sub_path":"Bioinformatics_Stronghold/rosalind_LIS2.py","file_name":"rosalind_LIS2.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"40511390855","text":"from typing import Literal, Tuple\n\nimport torch\nimport torch.nn as nn\n\nfrom src.layers.initialization import weights_init\nfrom src.training.loss import ReconstructionLoss\nfrom .base import BaseModel, TrainingInit\n\n\nclass VariationalAutoEncoder(BaseModel):\n def __init__(self,\n encoder: nn.Sequential,\n latent: nn.Module,\n decoder: nn.Sequential,\n recons_loss: ReconstructionLoss,\n latent_loss: nn.Module,\n training: TrainingInit,\n ):\n super().__init__(training)\n self.encoder = encoder\n self.decoder = decoder\n self.latent = latent\n self.recons_loss = recons_loss\n self.latent_loss = latent_loss\n\n self.reset_parameters()\n\n def reset_parameters(self):\n weights_init(self.encoder)\n weights_init(self.decoder)\n\n def forward(self, inputs):\n h = self.encoder(inputs)\n z, params = self.latent(h)\n recons = self.decoder(z)\n return recons, z, params\n\n def embed(self, inputs):\n return self.latent(self.encoder(inputs))[0]\n\n def decode(self, z):\n return self.decoder(z)\n\n def posterior(self, inputs):\n return self.latent(self.encoder(inputs))[1]\n\n def _step(\n self,\n batch: Tuple[torch.Tensor, torch.Tensor],\n batch_idx: int,\n phase: Literal[\"train\", \"val\", \"test\"]\n ):\n is_train = phase == \"train\"\n if is_train and hasattr(self.latent_loss, \"update_parameters\"):\n self.latent_loss.update_parameters(self.global_step)\n\n inputs, targets = batch\n recons, z, params = self.forward(inputs)\n\n recons_loss = self.recons_loss(recons, targets)\n latent_loss = self.latent_loss(z, params)\n\n loss = recons_loss + latent_loss\n\n self.log_dict(\n {\n f\"{phase}/loss\": loss,\n f\"{phase}/latent_term\": latent_loss,\n f\"{phase}/reconstruction_loss\": recons_loss\n },\n on_epoch=not is_train,\n on_step=is_train,\n prog_bar=is_train,\n sync_dist=not is_train,\n rank_zero_only=True\n )\n\n return 
loss\n","repo_name":"miltonllera/ocdm","sub_path":"src/model/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11665353399","text":"num = input()\n\ndial = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']\ntime = 0\n\nfor n in num:\n for d in dial:\n if n in d:\n time += dial.index(d)+3\n\nprint(time)","repo_name":"SsoYeon-kim/CodingTest-Python","sub_path":"BJ/step_06/5622.py","file_name":"5622.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"33727350735","text":"from git import Repo\nimport argparse\n\nimport chain_runner\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='index_generator_chain_runner.py --project-dir '\n '--project-id --branch ')\n parser.add_argument('-pd', '--project-dir', help='Project directory for indexing ', required=True)\n parser.add_argument('-pi', '--project-id', help='Project Id', required=True)\n parser.add_argument('-b', '--branch', help='Branch', required=False)\n args = parser.parse_args()\n\n return args.project_dir, args.project_id, args.branch\n\n\nrepo_directory, project_id, branch = parse_arguments()\n\nprint('project_dir = ' + repo_directory)\nprint('project_id = ' + project_id)\n\nrepo = Repo(repo_directory)\n\nrepo.git.checkout(\"develop\")\nrepo.remotes.origin.fetch()\ncommits_behind = repo.iter_commits('develop..develop@{u}')\ncommits = list(commits_behind)\n\nif len(commits) == 0:\n print(\"Current branch is {} behind. Pulling new code\".format(len(commits)))\n repo_is_dirty = repo.is_dirty()\n if repo_is_dirty:\n print(\"Dirty\")\n print(\"Stashing...\")\n repo.git.stash('save')\n\n repo.remotes.origin.pull()\n chain_runner.generate_index_and_send(repo_directory, project_id)\n\n if repo_is_dirty:\n print(\"unstashing\")\n repo.git.stash('pop')\nelse:\n print(\"No updates nothing to do here\")\n","repo_name":"damintsew/idea-shared-index-standalone-runner","sub_path":"remote_git_checker.py","file_name":"remote_git_checker.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"13733780478","text":"import discord\nimport random\nimport asyncio\nfrom discord.ext import commands\nfrom discord.ext.commands import has_permissions, CheckFailure\n\n\nclass Interaction(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=[\"dankr8\"])\n async def dankrater(self, ctx):\n v = [\"100%\",\n \"99%\",\n \"98%\",\n \"95%\",\n \"87%\",\n \"85%\",\n \"82%\",\n \"80%\",\n \"76%\",\n \"74%\",\n \"70%\",\n \"69%\",\n \"65%\",\n \"54%\",\n \"57%\",\n \"50%\",\n \"49%\"\n \"45%\",\n \"47%\",\n \"42%\",\n \"40%\",\n \"36%\",\n \"35%\",\n \"25%\",\n \"23%\",\n \"18%\",\n \"15%\",\n \"13%\",\n \"10%\",\n \"7%\",\n \"5%\",\n \"3%\",\n \"2%\",\n \"1%\"]\n embed=discord.Embed(color=0x0338b5)\n embed.add_field(name=\"Dank r8 machine\", value=f\"Il tuo potenziale memetico è: **{random.choice(v)}**\", inline=False)\n await ctx.send(embed=embed)\n\n @commands.command()\n async def say(self, ctx, *, arg):\n if (arg) == \"Sono stupido\":\n await ctx.send(\"Lo sappiamo.\")\n elif (arg) == \"sono stupido\":\n await ctx.send(\"Lo sappiamo.\")\n elif (arg) == \"Sono uno stupido\":\n await ctx.send(\"Lo sappiamo.\")\n elif (arg) == \"sono uno stupido\":\n await ctx.send(\"Lo sappiamo.\")\n else:\n await ctx.send(f\"{arg} \\n\\n\\n- **{ctx.author}**\")\n\n # Command of questions. The bot send a random response for any type of question\n @commands.command(aliases=[\"8ball\", \"oracolo\", \"predizione\", \"domanda\"])\n async def erdubbio(self, ctx, *, question):\n responses = [\"E' certo.\",\n \"E' stato deciso così.\",\n \"Senza dubbio.\",\n \"Già....indubbiamente.\",\n \"Contaci.\",\n \"Per come la vedo io, si.\",\n \"Preferibilmente.\",\n \"Vedila così.\",\n \"Sì.\",\n \"Un punto in più per il 'Sì'.\",\n \"uhm... 
Sono un po' confuso, potresti ripetere la domanda?\",\n \"Chiedimelo più tardi..\",\n \"E' meglio non dirtelo ora.\",\n \"Mi risulta difficile predirlo ora.\",\n \"Concentrati e chiedimelo di nuovo.\",\n \"Non ci contare.\",\n \"La mia risposta è no.\",\n \"Le mie fonti dicono di no.\",\n \"Pessima prospettiva.\",\n \"Dubito.\"]\n embed = discord.Embed(color=0xa7c7fb)\n embed.add_field(name=\"(?) ErDubbio (?)\", value=\"*Poni le tue più strambe domande a questo magnifico oracolo dotato di tanta saggezza e righe di codice\", inline=False)\n embed.add_field(name=\":question: Domandona:\", value=f\"{question}\", inline=True)\n embed.add_field(name=\":speech_left: Risposta epica:\", value=f\"{random.choice(responses)}\", inline=True)\n embed.set_footer(text=\"Leonardus Project\")\n await ctx.send(embed=embed)\n\n @commands.command(aliases=[\"hack\"])\n async def akeraggio(self, ctx, member: commands.MemberConverter):\n message = await ctx.send(f\"Sto hackerando con paint {member}...\")\n await asyncio.sleep(2)\n await message.edit(content='Sono penetrato nel sistema!')\n await asyncio.sleep(3)\n await message.delete()\n await asyncio.sleep(0.2)\n message = await ctx.send(\"[▙] Eseguo un leak dell'email discord...(2fa Bypass)\")\n await asyncio.sleep(3)\n await message.edit(content=\"[▛] **Gotcha!**\")\n email = await ctx.send(f\"**EMAIL:** `{member}@email.net` \\n**PASSWORD:** `PASSW0RD`\")\n await asyncio.sleep(4)\n await message.delete()\n await email.delete()\n await asyncio.sleep(0.1)\n message = await ctx.send(\"[▟] Spio i messaggi recenti...\")\n await asyncio.sleep(2)\n dms = [\"send nudes\",\n \"Ammetto che adoro i canditi\",\n \"Napoli merda\",\n \"Tifo Juve\",\n \"Ieri ho rubato 2 orologi\",\n \"Cyca mala criminale\",\n \"mlmlml, che belli i bimbi neri\"]\n await message.edit(content=f\"**Leak degli ultimi dms**: '`{random.choice(dms)}`'\")\n await asyncio.sleep(3)\n await message.edit(content=f\"[▙] Rubo le credenziali di steam...\")\n await 
asyncio.sleep(3)\n await message.edit(content=f\"[▛] Credenziali di steam rubate :)\")\n await asyncio.sleep(2)\n await message.edit(content=f\"[▜] Traccio l'IP...\")\n await asyncio.sleep(3)\n await message.edit(content=f\"[▟] **IP TROVATO:** `127.0.0.1`\")\n await asyncio.sleep(2)\n await message.edit(content=f\"[▙] Scopro la cronologia...\")\n await asyncio.sleep(3)\n await message.edit(content=f\"**CRONOLOGIA TROVATA** \\n*Lista:* \\n`How to buil a bomb`\\n`How to kidnapp`\\n`Come dichiarare le variabili in HTML`\")\n await asyncio.sleep(5)\n await message.edit(content=f\"*Rivendo i dati al governo...*\")\n await asyncio.sleep(3)\n await message.edit(content=f\"*Rendo {member} ricercato in 5 paesi differenti...*\")\n await asyncio.sleep(4)\n await message.edit(content=f\"*Infetto il computer di {member} con diversi virus...*\")\n await asyncio.sleep(2)\n await message.edit(content=f\"Fine. *{member}* è stato hackerato!\")\n await ctx.send(\"Processo di hackeraggio **100%** *reale* e *pericoloso* terminato.\")\n\n\n\n\n\n\n\ndef setup(client):\n client.add_cog(Interaction(client))","repo_name":"sl04zy/Leonardus-Project","sub_path":"src/cogs/Interaction.py","file_name":"Interaction.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"it","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"72758450134","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on: Friday December 17th of January 2020\n\nAuthor: Daniel Cortild (https://github.com/DanielCortild)\n\nTicTacToe Judger Module\nThe Judger Monitors the Game\n\"\"\"\n\nfrom modules.state import State\n\nclass Judger:\n\n def __init__ ( self, p1, p2, learning = True ):\n\n self.p1 = p1\n self.p1Symbol = 1\n self.p1.setSymbol( self.p1Symbol )\n\n self.p2 = p2\n self.p2Symbol = -1\n self.p2.setSymbol( self.p2Symbol )\n\n self.currentPlayer = None\n\n self.learning = learning\n\n self.currentState = State()\n\n def giveReward ( self ):\n\n if self.currentState.winner == self.p1Symbol:\n\n self.p1.feedReward(1)\n self.p2.feedReward(0)\n\n elif self.currentState.winner == self.p2Symbol:\n\n self.p1.feedReward(0)\n self.p2.feedReward(1)\n\n else:\n\n self.p1.feedReward(0.5)\n self.p2.feedReward(0.5)\n\n def feedCurrentState ( self ):\n\n self.p1.feedState( self.currentState )\n self.p2.feedState( self.currentState )\n\n def reset ( self ):\n\n self.p1.reset()\n self.p2.reset()\n\n self.currentState = State()\n self.currentPlayer = None\n\n def play ( self, show = False ):\n\n self.reset()\n self.feedCurrentState()\n\n if show:\n self.currentState.show()\n\n while True:\n\n if self.currentPlayer == self.p1:\n self.currentPlayer = self.p2\n else:\n self.currentPlayer = self.p1\n\n [i, j, symbol] = self.currentPlayer.takeAction()\n\n self.currentState = self.currentState.nextState( i, j, symbol )\n hashValue = self.currentState.getHash()\n\n self.feedCurrentState()\n\n if show:\n self.currentState.show()\n\n if self.currentState.isEnd():\n\n if self.learning:\n self.giveReward()\n\n return self.currentState.winner\n","repo_name":"DanielCortild/TicTacToe","sub_path":"Python/modules/judger.py","file_name":"judger.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11780679149","text":"def encrypt(s,key):\n n=len(s)\n matrix=[[0 for _ in range(n)] for _ in range(key)]\n bool=False\n i=0\n j=0\n for k in range(n):\n matrix[i][j]=s[k]\n k+=1\n if i==0 or i==key-1:\n bool=not bool\n \n if bool:\n i+=1\n else:\n i-=1\n j+=1\n\n ans=\"\"\n for i in range(key):\n for j in range(n):\n if matrix[i][j]!=0:\n ans+=matrix[i][j]\n return ans\n\n\ns=input(\"Enter the string to be encrypted : \")\nk=int(input(\"Enter the key value of rail fence : \"))\nnew=encrypt(s,k)\nprint(\"The encrypted String is : \" , new)","repo_name":"Iamayushgupta/Blockchain","sub_path":"cryptography/rail_fence.py","file_name":"rail_fence.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"31142745079","text":"# Ask the user for a number and determine whether the number is prime or not.\n# (For those who have forgotten, a prime number is a number that has no divisors.).\n# You can (and should!) use your answer in Exercise 4 to help you.\n# Take this opportunity to practice using functions, described below\n\n\ndef is_divisor(num, divisor):\n return num % divisor == 0\n\n\ndef is_prime(num):\n count = 0\n for i in range(2, num):\n if is_divisor(num, i):\n count += 1\n if count == 0:\n return True\n else:\n return False\n\n\nprime_number = int(input(\"Please enter a number: \"))\nprint(is_prime(prime_number))\n\n","repo_name":"nitzanpap/practiceAndExercises","sub_path":"pythonExercises/exercise11.py","file_name":"exercise11.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"33057914787","text":"\nimport tensorflow as tf\nfrom model.common import conv2d_bn_mish\n\ndef spp(x):\n pool_sizes = [5, 9, 13]\n pooling_results = [tf.keras.layers.MaxPooling2D((pool_size,pool_size), strides=(1, 1),padding='same')(x) for pool_size in pool_sizes]\n spp_result = tf.keras.layers.Concatenate()(pooling_results+[x])\n spp_result = conv2d_bn_mish(spp_result, x.shape[3], (1, 1))\n return spp_result\n\n","repo_name":"wangermeng2021/Scaled-YOLOv4-tensorflow2","sub_path":"model/spp.py","file_name":"spp.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"67"}
+{"seq_id":"71577354132","text":"import sys\nfrom obj.BibliotekaObj import Biblioteka\nimport utils.constants as c\nimport utils.db as db\nimport utils.utils as u\n\nTEST_MODE = False\n\ndef sprawdzCzyTrybTestowy():\n if len(sys.argv) > 1:\n if sys.argv[1] == 'test':\n TEST_MODE = True\n print(\"Uruchomiono w trybie testowym...\")\n\nif __name__ == \"__main__\":\n sprawdzCzyTrybTestowy()\n db.inicjujDane()\n biblioteka = Biblioteka(TEST_MODE)\n biblioteka.ladujBiblioteke()\n while True:\n menuWybor = u.czyscWejscie(input(c.ASCII_MENU), trybTestowania=TEST_MODE)\n if (menuWybor == '1'):\n biblioteka.dodajKsiazke()\n elif (menuWybor == '2'):\n biblioteka.wypozyczKsiazke()\n elif (menuWybor == '3'):\n biblioteka.oddajKsiazke()\n elif (menuWybor == '4'):\n biblioteka.podejrzyjHistorieKsiazki()\n elif (menuWybor == '5'):\n biblioteka.dodajCzytacza()\n elif (menuWybor == '6'):\n print(f\"Zamykanie programu...\")\n break\n else:\n print(\"Wybrano nie istniejącą opcję w menu...\")\n SystemExit(0)\n","repo_name":"gkk-dev-ops/py-beginner-exercises","sub_path":"Projekt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"10208812690","text":"# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nimport numpy\nfrom ._op import OpRun\n\n\nclass ZipMapDictionary(dict):\n \"\"\"\n Custom dictionary class much faster for this runtime,\n it implements a subset of the same methods.\n \"\"\"\n __slots__ = ['_rev_keys', '_values', '_mat']\n\n @staticmethod\n def build_rev_keys(keys):\n res = {}\n for i, k in enumerate(keys):\n res[k] = i\n return res\n\n def __init__(self, rev_keys, values, mat=None):\n \"\"\"\n @param rev_keys returns by @see me build_rev_keys,\n *{keys: column index}*\n @param values values\n @param mat matrix if values is a row index,\n one or two dimensions\n \"\"\"\n if mat is not None:\n if not isinstance(mat, numpy.ndarray):\n raise TypeError( # pragma: no cover\n f'matrix is expected, got {type(mat)}.')\n if len(mat.shape) not in (2, 3):\n raise ValueError( # pragma: no cover\n f\"matrix must have two or three dimensions but got {mat.shape}.\")\n dict.__init__(self)\n self._rev_keys = rev_keys\n self._values = values\n self._mat = mat\n\n def __getstate__(self):\n \"\"\"\n For pickle.\n \"\"\"\n return dict(_rev_keys=self._rev_keys,\n _values=self._values,\n _mat=self._mat)\n\n def __setstate__(self, state):\n \"\"\"\n For pickle.\n \"\"\"\n if isinstance(state, tuple):\n state = state[1]\n self._rev_keys = state['_rev_keys']\n self._values = state['_values']\n self._mat = state['_mat']\n\n def __getitem__(self, key):\n \"\"\"\n Returns the item mapped to keys.\n \"\"\"\n if self._mat is None:\n return self._values[self._rev_keys[key]]\n return self._mat[self._values, self._rev_keys[key]]\n\n def __setitem__(self, pos, value):\n \"unused but used by pickle\"\n pass\n\n def __len__(self):\n \"\"\"\n Returns the number of items.\n \"\"\"\n return len(self._values) if self._mat is None else self._mat.shape[1]\n\n def __iter__(self):\n for k in self._rev_keys:\n yield k\n\n def __contains__(self, 
key):\n return key in self._rev_keys\n\n def items(self):\n if self._mat is None:\n for k, v in self._rev_keys.items():\n yield k, self._values[v]\n else:\n for k, v in self._rev_keys.items():\n yield k, self._mat[self._values, v]\n\n def keys(self):\n for k in self._rev_keys.keys():\n yield k\n\n def values(self):\n if self._mat is None:\n for v in self._values:\n yield v\n else:\n for v in self._mat[self._values]:\n yield v\n\n def asdict(self):\n res = {}\n for k, v in self.items():\n res[k] = v\n return res\n\n def __str__(self):\n return f\"ZipMap({str(self.asdict())!r})\"\n\n\nclass ArrayZipMapDictionary(list):\n \"\"\"\n Mocks an array without changing the data it receives.\n Notebooks :ref:`onnxnodetimerst` illustrates the weaknesses\n and the strengths of this class compare to a list\n of dictionaries.\n\n .. index:: ZipMap\n \"\"\"\n\n def __init__(self, rev_keys, mat):\n \"\"\"\n @param rev_keys dictionary *{keys: column index}*\n @param mat matrix if values is a row index,\n one or two dimensions\n \"\"\"\n if mat is not None:\n if not isinstance(mat, numpy.ndarray):\n raise TypeError( # pragma: no cover\n f'matrix is expected, got {type(mat)}.')\n if len(mat.shape) not in (2, 3):\n raise ValueError( # pragma: no cover\n f\"matrix must have two or three dimensions but got {mat.shape}.\")\n list.__init__(self)\n self._rev_keys = rev_keys\n self._mat = mat\n\n @property\n def dtype(self):\n return self._mat.dtype\n\n def __len__(self):\n return self._mat.shape[0]\n\n def __iter__(self):\n for i in range(len(self)):\n yield self[i]\n\n def __getitem__(self, i):\n return ZipMapDictionary(self._rev_keys, i, self._mat)\n\n def __setitem__(self, pos, value):\n raise RuntimeError(\n f\"Changing an element is not supported (pos=[{pos}]).\")\n\n @property\n def values(self):\n \"\"\"\n Equivalent to ``DataFrame(self).values``.\n \"\"\"\n if len(self._mat.shape) == 3:\n return self._mat.reshape((self._mat.shape[1], -1))\n return self._mat\n\n @property\n def 
columns(self):\n \"\"\"\n Equivalent to ``DataFrame(self).columns``.\n \"\"\"\n res = [(v, k) for k, v in self._rev_keys.items()]\n if len(res) == 0:\n if len(self._mat.shape) == 2:\n res = [(i, 'c%d' % i) for i in range(self._mat.shape[1])]\n elif len(self._mat.shape) == 3:\n # multiclass\n res = [(i, 'c%d' % i)\n for i in range(self._mat.shape[0] * self._mat.shape[2])]\n else:\n raise RuntimeError( # pragma: no cover\n \"Unable to guess the right number of columns for \"\n \"shapes: {}\".format(self._mat.shape))\n else:\n res.sort()\n return [_[1] for _ in res]\n\n @property\n def is_zip_map(self):\n return True\n\n def __str__(self):\n return f\"ZipMaps[{', '.join(map(str, self))}]\"\n\n\nclass ZipMap(OpRun):\n \"\"\"\n The class does not output a dictionary as\n specified in :epkg:`ONNX` specifications\n but a @see cl ArrayZipMapDictionary which\n is wrapper on the input so that it does not\n get copied.\n \"\"\"\n\n atts = {'classlabels_int64s': [], 'classlabels_strings': []}\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRun.__init__(self, onnx_node, desc=desc,\n expected_attributes=ZipMap.atts,\n **options)\n if hasattr(self, 'classlabels_int64s') and len(self.classlabels_int64s) > 0:\n self.rev_keys_ = ZipMapDictionary.build_rev_keys(\n self.classlabels_int64s)\n elif hasattr(self, 'classlabels_strings') and len(self.classlabels_strings) > 0:\n self.rev_keys_ = ZipMapDictionary.build_rev_keys(\n self.classlabels_strings)\n else:\n self.rev_keys_ = {}\n\n def _run(self, x, attributes=None, verbose=0, fLOG=None): # pylint: disable=W0221\n res = ArrayZipMapDictionary(self.rev_keys_, x)\n return (res, )\n","repo_name":"sdpython/mlprodict","sub_path":"mlprodict/onnxrt/ops_cpu/op_zipmap.py","file_name":"op_zipmap.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"67"}
+{"seq_id":"1119918561","text":"from cartoframes.auth import set_default_credentials\nfrom cartoframes import read_carto, to_carto\nimport geopandas as gpd\nimport pandas as pd\nimport os\nfrom shapely.validation import make_valid\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nimport glob\nimport json\n\nfrom dotenv import load_dotenv\nload_dotenv('/home/chemmerly/cred/.env')\n\ndata_dir = \"data\"\n\n#download list of EBSA data urls from https://chm.cbd.int/database\nraw_data_file = 'Aichi-Targets-data.csv'\n\n# read in the csv with the urls for the EBSA jsons\nurl_df = pd.read_csv(raw_data_file,encoding='latin-1')\nurl_list = url_df['CHM Url']\n\n# regex pattern for the finding a geojson \nmatch_st = re.compile(r'geojson') \nfor url in url_list:\n # scrape the page for the geojson\n r = requests.get(url) \n c = r.content \n soup = BeautifulSoup(c)\n for link in soup.findAll('a', attrs={'href': re.compile(\"geojson$\")}):\n href = link.get('href')\n url = 'https://chm.cbd.int' + href\n # download raw data\n r = requests.get(url)\n j = json.loads(r.content)\n #store data as geojson files\n raw_data_file = os.path.join(data_dir, os.path.basename(url))\n with open(raw_data_file, \"w\") as file:\n json.dump(j, file)\n\n#create list of geojson data for each ebsa from stored geojson file\nebsa_files = glob.glob(os.path.join(data_dir, '*geojson'))\ngdf_list = []\nfor file in ebsa_files:\n try:\n gdf = gpd.read_file(file)\n gdf_list.append(gdf)\n except Exception:\n print(\"Could not read \" + file)\n\n#create geopandas dataframe of EBSA data from list\ngdf_ebsa = gpd.GeoDataFrame(pd.concat(gdf_list))\n\n#store EBSA data locally as shapefiles\ngdf_ebsa.to_file('merged_ebsa.shp',driver='ESRI Shapefile')\n\n#upload EBSA data to Carto\ngdf_ebsa.columns = [x.lower().replace(' ', '_') for x in gdf_ebsa.columns]\ndataset_name = \"Ecologically and Biologically Significant Areas\"\nCARTO_USER = os.getenv('CARTO_WRI_RW_USER')\nCARTO_KEY = 
os.getenv('CARTO_WRI_RW_KEY')\nset_default_credentials(username=CARTO_USER, base_url=\"https://{user}.carto.com/\".format(user=CARTO_USER),api_key=CARTO_KEY)\nto_carto(gdf_ebsa, dataset_name + '_edit', if_exists='replace')","repo_name":"clairehemmerly/ocean_watch","sub_path":"EBSA_files_fetch.py","file_name":"EBSA_files_fetch.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"26321872271","text":"import pygame\npygame.init()\nwidth, height = 1280, 720\nwindow = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"My Awesome Game\")\nfps = 30\nclock = pygame.time.Clock()\nstart = True\nwhile start:\n # Get Events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n start = False\n pygame.quit()\n window.fill((240, 143, 250))\n yellow,pink,green = (253,235,8),(253,8,200),(58,223,117)\n pygame.draw.polygon(window,yellow,((491, 100), (788, 100), (937, 357),(788, 614), (491, 614), (342, 357)))\n pygame.draw.circle(window, green, (640, 360), 200)\n pygame.draw.line(window, pink, (468, 392), (812, 392), 10)\n pygame.draw.rect(window, pink, (468, 307, 345, 70), border_radius=5)\n\n\n # Update Display\n pygame.display.update()\n # Set FPS\n clock.tick(fps)","repo_name":"kirankuyate2157/python_programs","sub_path":"download_proggrams/Game_development_opencv-main/game_dev/draw_shapes.py","file_name":"draw_shapes.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"2787667447","text":"import numpy as np\nimport tensorflow as tf\n\nfrom surreal.spaces.primitive_spaces import PrimitiveSpace, Bool, Int, Float, Text\nfrom surreal.spaces.container_spaces import ContainerSpace, Dict, Tuple\nfrom surreal.utils.errors import SurrealError, SurrealSpaceError\nfrom surreal.utils.util import convert_dtype, LARGE_INTEGER, force_tuple\n\n\n# TODO: replace completely by `Component.get_variable` (python-backend)\ndef get_list_registry(from_space, capacity=None, initializer=0, flatten=True, add_batch_rank=False):\n \"\"\"\n Creates a list storage for a space by providing an ordered dict mapping space names\n to empty lists.\n\n Args:\n from_space: Space to create registry from.\n capacity (Optional[int]): Optional capacity to initialize list.\n initializer (Optional(any)): Optional initializer for list if capacity is not None.\n flatten (bool): Whether to produce a FlattenedDataOp with auto-keys.\n\n add_batch_rank (Optional[bool,int]): If from_space is given and is True, will add a 0th rank (None) to\n the created variable. If it is an int, will add that int instead of None.\n Default: False.\n\n Returns:\n dict: Container dict mapping core to empty lists.\n \"\"\"\n if flatten:\n if capacity is not None:\n var = from_space.flatten(\n custom_scope_separator=\"-\", scope_separator_at_start=False,\n mapping=lambda k, primitive: [initializer for _ in range(capacity)]\n )\n else:\n var = from_space.flatten(\n custom_scope_separator=\"-\", scope_separator_at_start=False,\n mapping=lambda k, primitive: []\n )\n else:\n if capacity is not None:\n var = [initializer for _ in range(capacity)]\n else:\n var = []\n return var\n\n\ndef get_space_from_data(data, num_categories=None, main_axes=None):\n \"\"\"\n Tries to re-create a Space object given some DataOp (e.g. 
a tf op).\n This is useful for shape inference on returned ops after having run through a graph_fn.\n\n Args:\n data (any): The data to create a corresponding Space for.\n\n num_categories (Optional[int]): An optional indicator, what the `num_categories` property for\n an Int should be.\n\n Returns:\n Space: The inferred Space object.\n \"\"\"\n # Dict.\n if isinstance(data, dict):\n spec = {}\n for key, value in data.items():\n\n # OBSOLETE THIS! Special case for Ints:\n # If another key exists, with the name: `_num_[key]` -> take num_categories from that key's value.\n #if key[:5] == \"_num_\":\n # continue\n #num_categories = data.get(\"_num_{}\".format(key))\n\n num_categories = num_categories.get(key, None) if isinstance(num_categories, dict) else num_categories\n spec[key] = get_space_from_data(value, num_categories=num_categories, main_axes=main_axes)\n # Return\n if spec[key] == 0:\n return 0\n return Dict(spec, main_axes=main_axes)\n # Tuple.\n elif isinstance(data, tuple):\n spec = []\n for i in data:\n space = get_space_from_data(i, main_axes=main_axes)\n if space == 0:\n return 0\n spec.append(space)\n return Tuple(spec, main_axes=main_axes)\n # Primitive Space -> Infer from data dtype and shape.\n else:\n # `data` itself is a single value, simple python type.\n if isinstance(data, int):\n int_high = {\"high\": num_categories} if num_categories is not None else {}\n return PrimitiveSpace.make(spec=type(data), shape=(), **int_high)\n elif isinstance(data, (bool, float)):\n return PrimitiveSpace.make(spec=type(data), shape=())\n elif isinstance(data, str):\n raise SurrealError(\"Cannot derive Space from str data ({})!\".format(data))\n # A single numpy array.\n elif isinstance(data, (np.ndarray, tf.Tensor)):\n dtype = convert_dtype(data.dtype, \"np\")\n int_high = {\"high\": num_categories} if num_categories is not None and \\\n dtype in [np.uint8, np.int16, np.int32, np.int64] else {}\n # Must subtract main_axes from beginning of data.shape.\n shape = 
tuple(data.shape[len(main_axes or []):])\n return PrimitiveSpace.make(\n spec=dtype, shape=shape, main_axes=main_axes, **int_high\n )\n # Try inferring the Space from a python list.\n elif isinstance(data, list):\n return try_space_inference_from_list(data)\n # No Space: e.g. the tf.no_op, a distribution (anything that's not a tensor).\n # PyTorch Tensors do not have get_shape so must check backend.\n elif hasattr(data, \"dtype\") is False or not hasattr(data, \"get_shape\"):\n return 0\n\n raise SurrealError(\"ERROR: Cannot derive Space from data '{}' (unknown type?)!\".format(data))\n\n\ndef sanity_check_space(\n space, allowed_types=None, allowed_sub_types=None, non_allowed_types=None, non_allowed_sub_types=None,\n must_have_batch_rank=None, must_have_time_rank=None, must_have_batch_or_time_rank=False,\n must_have_categories=None, num_categories=None,\n must_have_lower_limit=None, must_have_upper_limit=None,\n rank=None, shape=None\n):\n \"\"\"\n Sanity checks a given Space for certain criteria and raises exceptions if they are not met.\n\n Args:\n space (Space): The Space object to check.\n allowed_types (Optional[List[type]]): A list of types that this Space must be an instance of.\n\n allowed_sub_types (Optional[List[type]]): For container core, a list of sub-types that all\n flattened sub-Spaces must be an instance of.\n\n non_allowed_types (Optional[List[type]]): A list of type that this Space must not be an instance of.\n\n non_allowed_sub_types (Optional[List[type]]): For container core, a list of sub-types that all\n flattened sub-Spaces must not be an instance of.\n\n must_have_batch_rank (Optional[bool]): Whether the Space must (True) or must not (False) have the\n `has_batch_rank` property set to True. None, if it doesn't matter.\n\n must_have_time_rank (Optional[bool]): Whether the Space must (True) or must not (False) have the\n `has_time_rank` property set to True. 
None, if it doesn't matter.\n\n must_have_batch_or_time_rank (Optional[bool]): Whether the Space must (True) or must not (False) have either\n the `has_batch_rank` or the `has_time_rank` property set to True.\n\n must_have_categories (Optional[bool]): For IntBoxes, whether the Space must (True) or must not (False) have\n global bounds with `num_categories` > 0. None, if it doesn't matter.\n\n num_categories (Optional[int,tuple]): An int or a tuple (min,max) range within which the Space's\n `num_categories` rank must lie. Only valid for IntBoxes.\n None if it doesn't matter.\n\n must_have_lower_limit (Optional[bool]): If not None, whether this Space must have a lower limit.\n must_have_upper_limit (Optional[bool]): If not None, whether this Space must have an upper limit.\n\n rank (Optional[int,tuple]): An int or a tuple (min,max) range within which the Space's rank must lie.\n None if it doesn't matter.\n\n shape (Optional[tuple[int]]): A tuple of ints specifying the required shape. None if it doesn't matter.\n\n Raises:\n RLGraphSpaceError: If any of the conditions is not met.\n \"\"\"\n flattened_space = space.flatten()\n\n # Check the types.\n if allowed_types is not None:\n if not isinstance(space, force_tuple(allowed_types)):\n raise SurrealSpaceError(\n space, \"ERROR: Space ({}) is not an instance of {}!\".format(space, allowed_types)\n )\n\n if allowed_sub_types is not None:\n for flat_key, sub_space in flattened_space.items():\n if not isinstance(sub_space, force_tuple(allowed_sub_types)):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: sub-Space '{}' ({}) is not an instance of {}!\".\n format(flat_key, sub_space, allowed_sub_types)\n )\n\n if non_allowed_types is not None:\n if isinstance(space, force_tuple(non_allowed_types)):\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) must not be an instance of {}!\".format(space, non_allowed_types)\n )\n\n if non_allowed_sub_types is not None:\n for flat_key, sub_space in flattened_space.items():\n if 
isinstance(sub_space, force_tuple(non_allowed_sub_types)):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: sub-Space '{}' ({}) must not be an instance of {}!\".\n format(flat_key, sub_space, non_allowed_sub_types)\n )\n\n if must_have_batch_or_time_rank is True:\n if space.has_batch_rank is False and space.has_time_rank is False:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) does not have a batch- or a time-rank, but must have either one of \"\n \"these!\".format(space)\n )\n\n if must_have_batch_rank is not None:\n if (space.has_batch_rank is False and must_have_batch_rank is True) or \\\n (space.has_batch_rank is not False and must_have_batch_rank is False):\n # Last chance: Check for rank >= 2, that would be ok as well.\n if must_have_batch_rank is True and len(space.get_shape(main_axes=\"B\")) >= 2:\n pass\n # Something is wrong.\n elif space.has_batch_rank is not False:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) has a batch rank, but is not allowed to!\".format(space)\n )\n else:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) does not have a batch rank, but must have one!\".format(space)\n )\n\n if must_have_time_rank is not None:\n if (space.has_time_rank is False and must_have_time_rank is True) or \\\n (space.has_time_rank is not False and must_have_time_rank is False):\n # Last chance: Check for rank >= 3, that would be ok as well.\n if must_have_time_rank is True and len(space.get_shape(main_axes=[\"B\", \"T\"])) >= 2:\n pass\n # Something is wrong.\n elif space.has_time_rank is not False:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) has a time rank, but is not allowed to!\".format(space)\n )\n else:\n raise SurrealSpaceError(\n space,\n \"ERROR: Space ({}) does not have a time rank, but must have one!\".format(space)\n )\n\n if must_have_categories is not None:\n for flat_key, sub_space in flattened_space.items():\n if not isinstance(sub_space, Int):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: 
Space {}({}) is not an Int. Only Int Spaces can have categories!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n elif sub_space.global_bounds is False:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must have categories (globally valid value bounds)!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n\n if must_have_lower_limit is not None:\n for flat_key, sub_space in flattened_space.items():\n low = sub_space.low\n if must_have_lower_limit is True and (low == -LARGE_INTEGER or low == float(\"-inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must have a lower limit, but has none!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n elif must_have_lower_limit is False and (low != -LARGE_INTEGER and low != float(\"-inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must not have a lower limit, but has one ({})!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space, low)\n )\n\n if must_have_upper_limit is not None:\n for flat_key, sub_space in flattened_space.items():\n high = sub_space.high\n if must_have_upper_limit is True and (high != LARGE_INTEGER and high != float(\"inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must have an upper limit, but has none!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space)\n )\n elif must_have_upper_limit is False and (high == LARGE_INTEGER or high == float(\"inf\")):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: Space {}({}) must not have a upper limit, but has one ({})!\".\n format(\"\" if flat_key == \"\" else \"'{}' \".format(flat_key), space, high)\n )\n\n if rank is not None:\n if isinstance(rank, int):\n for flat_key, sub_space in flattened_space.items():\n if sub_space.rank != rank:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has rank {}, but must have 
rank \"\n \"{}!\".format(flat_key, space, sub_space.rank, rank)\n )\n else:\n for flat_key, sub_space in flattened_space.items():\n if not ((rank[0] or 0) <= sub_space.rank <= (rank[1] or float(\"inf\"))):\n raise SurrealSpaceError(\n\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has rank {}, but its rank must be between {} and \"\n \"{}!\".format(flat_key, space, sub_space.rank, rank[0], rank[1])\n )\n\n if shape is not None:\n for flat_key, sub_space in flattened_space.items():\n if sub_space.shape != shape:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has shape {}, but its shape must be \"\n \"{}!\".format(flat_key, space, sub_space.get_shape(), shape)\n )\n\n if num_categories is not None:\n for flat_key, sub_space in flattened_space.items():\n if not isinstance(sub_space, Int):\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' is not an Int. Only Int Spaces can have \"\n \"categories!\".format(flat_key, space)\n )\n elif isinstance(num_categories, int):\n if sub_space.num_categories != num_categories:\n raise SurrealSpaceError(\n sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has `num_categories` {}, but must have {}!\".\n format(flat_key, space, sub_space.num_categories, num_categories)\n )\n elif not ((num_categories[0] or 0) <= sub_space.num_categories <= (num_categories[1] or float(\"inf\"))):\n raise SurrealSpaceError(sub_space,\n \"ERROR: A Space (flat-key={}) of '{}' has `num_categories` {}, but this value must be between \"\n \"{} and {}!\".format(flat_key, space, sub_space.num_categories, num_categories[0], num_categories[1])\n )\n\n\ndef check_space_equivalence(space1, space2):\n \"\"\"\n Compares the two input Spaces for equivalence and returns the more generic Space of the two.\n The more generic Space is the one that has the properties has_batch_rank and/or has _time_rank set (instead of\n hard values in these ranks).\n E.g.: Float((64,)) is equivalent with Float((), 
+batch-rank). The latter will be returned.\n\n NOTE: Float((2,)) and Float((3,)) are NOT equivalent.\n\n Args:\n space1 (Space): The 1st Space to compare.\n space2 (Space): The 2nd Space to compare.\n\n Returns:\n Union[Space,False]: False is the two core are not equivalent. The more generic Space of the two if they are\n equivalent.\n \"\"\"\n # Spaces are the same: Return one of them.\n if space1 == space2:\n return space1\n # One has batch-rank, the other doesn't, but has one more rank.\n elif space1.has_batch_rank and not space2.has_batch_rank and \\\n (np.asarray(space1.rank) == np.asarray(space2.rank) - 1).all():\n return space1\n elif space2.has_batch_rank and not space1.has_batch_rank and \\\n (np.asarray(space2.rank) == np.asarray(space1.rank) - 1).all():\n return space2\n # TODO: time rank?\n\n return False\n\n\ndef try_space_inference_from_list(list_op):\n \"\"\"\n Attempts to infer shape space from a list op. A list op may be the result of fetching state from a Python\n memory.\n\n Args:\n list_op (list): List with arbitrary sub-structure.\n\n Returns:\n Space: Inferred Space object represented by list.\n \"\"\"\n shape = len(list_op)\n if shape > 0:\n # Try to infer more things by looking inside list.\n elem = list_op[0]\n if isinstance(elem, tf.Tensor):\n list_type = elem.dtype\n inner_shape = elem.shape\n return PrimitiveSpace.make(spec=convert_dtype(list_type, \"np\"), shape=(shape,) + inner_shape,\n add_batch_rank=True)\n elif isinstance(elem, list):\n inner_shape = len(elem)\n return PrimitiveSpace.make(spec=convert_dtype(float, \"np\"), shape=(shape, inner_shape),\n add_batch_rank=True)\n elif isinstance(elem, int):\n # In case of missing comma values, check all other items in list for float.\n # If one float in there -> Float, otherwise -> Int.\n has_floats = any(isinstance(el, float) for el in list_op)\n if has_floats is False:\n return Int.make(shape=(shape,), add_batch_rank=True)\n else:\n return Float.make(shape=(shape,), 
add_batch_rank=True)\n elif isinstance(elem, float):\n return Float.make(shape=(shape,), add_batch_rank=True)\n else:\n # Most general guess is a Float box.\n return Float(shape=(shape,))\n\n\ndef get_default_distribution_from_space(\n space, *, num_mixture_experts=0, bounded_distribution_type=\"beta\",\n discrete_distribution_type=\"categorical\", gumbel_softmax_temperature=1.0\n):\n \"\"\"\n Args:\n space (Space): The primitive Space for which to derive a default distribution spec.\n\n num_mixture_experts (int): If > 0, use a mixture distribution over the determined \"base\"-distribution using n\n experts. TODO: So far, this only works for continuous distributions.\n\n bounded_distribution_type (str): The lookup class string for a bounded Float distribution.\n Default: \"beta\".\n\n discrete_distribution_type(str): The class of distributions to use for discrete action core. For options\n check the components.distributions package. Default: categorical. Agents requiring reparameterization\n may require a GumbelSoftmax distribution instead.\n\n gumbel_softmax_temperature (float): Temperature parameter for the Gumbel-Softmax distribution used\n for discrete actions.\n\n Returns:\n Dict: A Spec dict, from which a valid default distribution object can be created.\n \"\"\"\n # Int: Categorical.\n if isinstance(space, Int):\n assert discrete_distribution_type in [\"gumbel-softmax\", \"categorical\"]\n if discrete_distribution_type == \"gumbel-softmax\":\n return dict(type=\"gumbel-softmax\", temperature=gumbel_softmax_temperature)\n else:\n return dict(type=discrete_distribution_type)\n\n # Bool: Bernoulli.\n elif isinstance(space, Bool):\n return dict(type=\"bernoulli\")\n\n # Continuous action space: Normal/Beta/etc. 
distribution.\n elif isinstance(space, Float):\n # Unbounded -> Normal distribution.\n if not is_bounded_space(space):\n single = dict(type=\"normal\")\n # Bounded -> according to the bounded_distribution parameter.\n else:\n assert bounded_distribution_type in [\"beta\", \"squashed-normal\"]\n single = dict(type=bounded_distribution_type, low=space.low, high=space.high)\n\n # Use a mixture distribution?\n if num_mixture_experts > 0:\n return dict(type=\"mixture\", _args=single, num_experts=num_mixture_experts)\n else:\n return single\n\n # Container Space.\n elif isinstance(space, ContainerSpace):\n return dict(\n type=\"joint-cumulative\",\n distributions=tf.nest.pack_sequence_as(space.structure, tf.nest.map_structure(lambda s: get_default_distribution_from_space(s), tf.nest.flatten(space)))\n )\n else:\n raise SurrealError(\"No distribution defined for space {}!\".format(space))\n\n\ndef is_bounded_space(box_space):\n if not isinstance(box_space, Float):\n return False\n # Unbounded.\n if box_space.low == float(\"-inf\") and box_space.high == float(\"inf\"):\n return False\n # Bounded.\n elif box_space.low != float(\"-inf\") and box_space.high != float(\"inf\"):\n return True\n # TODO: Semi-bounded -> Exponential distribution.\n else:\n raise SurrealError(\n \"Semi-bounded core for distribution-generation are not supported yet! You passed in low={} high={}.\".\n format(box_space.low, box_space.high)\n )\n","repo_name":"ducandu/surreal","sub_path":"surreal/spaces/space_utils.py","file_name":"space_utils.py","file_ext":"py","file_size_in_byte":22267,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"}
+{"seq_id":"15140857611","text":"import re\n\nwhile True:\n input_text = input()\n if len(input_text) != 0:\n match_pattern = re.compile(r'\\d+')\n matches = match_pattern.finditer(input_text)\n for match in matches:\n print(match.group(0), end=' ')\n else:\n break","repo_name":"radoslav-petkov/SoftUni---Fundamentals---Python---2022","sub_path":"25.Regular Expressions - Exercise/01. Capture the Numbers.py","file_name":"01. Capture the Numbers.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"44363882427","text":"import numpy as np\n\nfrom CNN.layers import convolutional_layer, maxpool_layer, loss_function, softmax, backpropagation_maxpool, backpropagation_convolutionalLayer\n\n\ndef build_net(image, label, parameters, stride, pooling_filter, pooling_stride):\n [weight1, weight2, weight3, weight4, bias1, bias2, bias3, bias4] = parameters\n\n #forward propagation\n first_convolution = convolutional_layer(image, weight1, bias1, stride)\n first_convolution[first_convolution <= 0] = 0\n\n second_convolution = convolutional_layer(first_convolution, weight2, bias2, stride)\n second_convolution[second_convolution <= 0] = 0\n\n pooling_layer = maxpool_layer(second_convolution, pooling_filter, pooling_stride)\n\n (num_filters, height_width, same) = pooling_layer.shape\n\n flatten = pooling_layer.reshape((num_filters * height_width * height_width, 1))\n\n fully_connected1 = weight3.dot(flatten) + bias3\n fully_connected1[fully_connected1 <= 0] = 0\n\n fully_connected2 = weight4.dot(fully_connected1) + bias4\n\n prediction = softmax(fully_connected2)\n\n loss = loss_function(prediction, label)\n\n #backpropagation\n derivative_second_fully_conn = prediction - label\n gradient_weight4 = derivative_second_fully_conn.dot(fully_connected1.T)\n gradient_bias4 = np.sum(derivative_second_fully_conn, axis=1).reshape(bias4.shape)\n\n derivative_first_fully_conn = weight4.T.dot(derivative_second_fully_conn)\n derivative_first_fully_conn[fully_connected1 <= 0] = 0\n gradient_weight3 = derivative_first_fully_conn.dot(flatten.T)\n gradient_bias3 = np.sum(derivative_first_fully_conn, axis=1).reshape(bias3.shape)\n\n der_second_fc = weight3.T.dot(derivative_first_fully_conn)\n der_maxpool = der_second_fc.reshape(pooling_layer.shape)\n\n der_conv2 = backpropagation_maxpool(der_maxpool, second_convolution, pooling_filter, pooling_stride)\n der_conv2[second_convolution <= 0] = 0\n\n der_conv1, gradient_weight2, der_bias2 = backpropagation_convolutionalLayer(der_conv2, 
first_convolution, weight2, stride)\n der_conv1[first_convolution <= 0] = 0\n\n image_der, gradient_weight1, der_bias1 = backpropagation_convolutionalLayer(der_conv1, image, weight1, stride)\n\n gradients = [gradient_weight1, gradient_weight2, gradient_weight3, gradient_weight4, bias1, bias2, bias3, bias4]\n return gradients, loss\n\n\n# Adams optimizer\n\ndef adam_optimizer(batch, num_classes, alpha, dim, n_c, beta1, beta2, parameters, cost_array, E=1e-7):\n\n [weight1, weight2, weight3, weight4, bias1, bias2, bias3, bias4] = parameters\n\n batch_size = len(batch)\n\n images = batch[:, 0:-1]\n images = images.reshape((batch_size, n_c, dim, dim))\n\n labels = batch[:, -1]\n\n cost = 0\n\n # initialize gradients with zeros\n grad_w1 = np.zeros(weight1.shape)\n grad_w2 = np.zeros(weight2.shape)\n grad_w3 = np.zeros(weight3.shape)\n grad_w4 = np.zeros(weight4.shape)\n grad_b1 = np.zeros(bias1.shape)\n grad_b2 = np.zeros(bias2.shape)\n grad_b3 = np.zeros(bias3.shape)\n grad_b4 = np.zeros(bias4.shape)\n\n # initialize momentum parameters with zeros\n moment_param_w1 = np.zeros(weight1.shape)\n moment_param_w2 = np.zeros(weight2.shape)\n moment_param_w3 = np.zeros(weight3.shape)\n moment_param_w4 = np.zeros(weight4.shape)\n moment_param_b1 = np.zeros(bias1.shape)\n moment_param_b2 = np.zeros(bias2.shape)\n moment_param_b3 = np.zeros(bias3.shape)\n moment_param_b4 = np.zeros(bias4.shape)\n\n # initialize RMS-prop parameters with zeros\n rmsprop_w1 = np.zeros(weight1.shape)\n rmsprop_w2 = np.zeros(weight2.shape)\n rmsprop_w3 = np.zeros(weight3.shape)\n rmsprop_w4 = np.zeros(weight4.shape)\n rmsprop_b1 = np.zeros(bias1.shape)\n rmsprop_b2 = np.zeros(bias2.shape)\n rmsprop_b3 = np.zeros(bias3.shape)\n rmsprop_b4 = np.zeros(bias4.shape)\n\n\n for i in range(batch_size):\n image = images[i]\n label = np.eye(num_classes)[int(labels[i])].reshape((num_classes, 1))\n\n gradients, loss = build_net(image, label, parameters, 1, 2, 2)\n\n [gradient_weight1, gradient_weight2, 
gradient_weight3, gradient_weight4, bias1, bias2, bias3, bias4] = gradients\n\n grad_w1 += gradient_weight1\n grad_w2 += gradient_weight2\n grad_w3 += gradient_weight3\n grad_w4 += gradient_weight4\n grad_b1 += bias1\n grad_b2 += bias2\n grad_b3 += bias3\n grad_b4 += bias4\n\n cost += loss\n\n # update momentum and RMS-prop parameters\n moment_param_w1 = beta1 * moment_param_w1 + (1 - beta1) * grad_w1 / batch_size\n rmsprop_w1 = beta2 * rmsprop_w1 + (1 - beta2) * (grad_w1 / batch_size) ** 2\n weight1 -= alpha * moment_param_w1 / np.sqrt(rmsprop_w1 + E)\n\n moment_param_w2 = beta1 * moment_param_w2 + (1 - beta1) * grad_w2 / batch_size\n rmsprop_w2 = beta2 * rmsprop_w2 + (1 - beta2) * (grad_w2 / batch_size) ** 2\n weight2 -= alpha * moment_param_w2 / np.sqrt(rmsprop_w2 + E)\n\n moment_param_w3 = beta1 * moment_param_w3 + (1 - beta1) * grad_w3 / batch_size\n rmsprop_w3 = beta2 * rmsprop_w3 + (1 - beta2) * (grad_w3 / batch_size) ** 2\n weight3 -= alpha * moment_param_w3 / np.sqrt(rmsprop_w3 + E)\n\n moment_param_w4 = beta1 * moment_param_w4 + (1 - beta1) * grad_w4 / batch_size\n rmsprop_w4 = beta2 * rmsprop_w4 + (1 - beta2) * (grad_w4 / batch_size) ** 2\n weight4 -= alpha * moment_param_w4 / np.sqrt(rmsprop_w4 + E)\n\n moment_param_b1 = beta1 * moment_param_b1 + (1 - beta1) * grad_b1 / batch_size\n rmsprop_b1 = beta2 * rmsprop_b1 + (1 - beta2) * (grad_b1 / batch_size) ** 2\n bias1 -= alpha * moment_param_b1 / np.sqrt(rmsprop_b1 + E)\n\n moment_param_b2 = beta1 * moment_param_b2 + (1 - beta1) * grad_b2 / batch_size\n rmsprop_b2 = beta2 * rmsprop_b2 + (1 - beta2) * (grad_b2 / batch_size) ** 2\n bias2 -= alpha * moment_param_b2 / np.sqrt(rmsprop_b2 + E)\n\n moment_param_b3 = beta1 * moment_param_b3 + (1 - beta1) * grad_b3 / batch_size\n rmsprop_b3 = beta2 * rmsprop_b3 + (1 - beta2) * (grad_b3 / batch_size) ** 2\n bias3 -= alpha * moment_param_b3 / np.sqrt(rmsprop_b3 + E)\n\n moment_param_b4 = beta1 * moment_param_b4 + (1 - beta1) * grad_b4 / batch_size\n rmsprop_b4 = 
beta2 * rmsprop_b4 + (1 - beta2) * (grad_b4 / batch_size) ** 2\n bias4 -= alpha * moment_param_b4 / np.sqrt(rmsprop_b4 + E)\n\n cost = cost / batch_size\n cost_array.append(cost)\n\n parameters = [weight1, weight2, weight3, weight4, bias1, bias2, bias3, bias4]\n\n return parameters, cost_array","repo_name":"DayanaPankova/Shut-Down-My-PC-When-I-Fall-Asleep-","sub_path":"CNN/build_net.py","file_name":"build_net.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"22256773793","text":"import glob\nfrom prody import *\nimport numpy as np\nimport os\nimport time\nimport subprocess\nimport argparse\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.cluster.hierarchy import dendrogram, linkage, cophenet, fcluster \nfrom scipy.spatial.distance import pdist\nimport pickle\n\nschrodinger_path = '/data/general_software/schrodinger2019-1'\n\ndef parserfunc():\n parser = argparse.ArgumentParser(\n description ='Given a directory with mae files, redactar blablabla')\n\n parser.add_argument('-d', dest=\"maedir\", help = \"MAEs directory\", required=True)\n parser.add_argument('-o', dest=\"outdir\", help = \"Output directory\", required=True)\n \n args = parser.parse_args()\n return args\n\ndef siteMap(maes,asl,delimiter=None,outfmt='mae',max_processes=4):\n \"\"\"\n Run a SiteMap calculation for a list of MAEs (can't be pdbs).\n maes: 'list'. MAE list of elements\n asl: 'str'. ASL (atom specification Language)\n delimiter: 'str'. Delimiter to obtain an identifier from each MAE name\n outfmt: 'str'. Outfile format. 
Either .mae or .pdb\n max_processes: Number of processors used to paralalize the different executions\n \"\"\"\n MAEnames = [os.path.basename(mae) for mae in maes]\n if delimiter != None:\n IDs = [ maename.replace(\".mae\",\"\").split(delimiter)[0] for maename in MAEnames]\n else:\n IDs = [ maename.replace(\".mae\",\"\") for maename in MAEnames]\n\n cmd_SiteMap = ['%s/sitemap -j %s -prot %s -sitebox 12 -resolution standard -reportsize 20 -writestructs no -writevis yes -maxsites 1 -siteasl \"%s\" -WAIT'%(schrodinger_path,IDs[i],mae,asl) for i,mae in enumerate(maes)]\n cmd_SiteMap = [cmd.replace(\"//\",\"/\") for cmd in cmd_SiteMap]\n\n processes = set()\n\n for cmd in cmd_SiteMap:\n print(cmd)\n processes.add(subprocess.Popen(cmd,shell=True))\n for p in processes:\n p.wait()\n if p.wait() != 0:\n print(\"There was an error\")\n\ndef _clean_siteMap(outdir,outfmt='maegz'):\n \"\"\"\n Move the SiteMap output to an specified directory\n outdir: 'str'. Output directory\n outfmt: 'str'. Outfile format of the PrepWizard. 
Either .mae or .pdb\n \"\"\"\n\n if outfmt != 'maegz':\n raise ValueError('outfmt must be maegz')\n os.system('mv *.%s %s'%(outfmt,outdir))\n logdir = '%s/logs'%outdir\n logdir = logdir.replace('//','/')\n if not os.path.isdir(logdir):\n os.system('mkdir %s'%logdir)\n os.system('mv *.vis %s'%logdir)\n os.system('mv *.smap %s'%logdir)\n os.system('mv *.log %s'%logdir)\n\ndef _group_siteMap(sites,out,outdir):\n \"\"\"\n Group all volume sites from SiteMap into a single mae file\n sites:\n out:\n outdir:\n \"\"\"\n conc_sites = ''\n for site in sites:\n conc_sites = conc_sites + ' ' + site\n\n cmd = '%s/utilities/structcat -imae %s -omae %s'%(schrodinger_path,conc_sites,out)\n cmd = cmd.replace('//','/')\n print(cmd)\n os.system(cmd)\n try:\n aux = 'mv %s %s'%(out,outdir)\n os.system(aux)\n except:\n print('wrong outdir')\n\ndef _uncompress_maegz(inp):\n \"\"\"\n inp:\n \"\"\"\n out = inp.replace('.maegz','.mae')\n cmd = '%s/utilities/structcat -imae %s -omae %s'%(schrodinger_path,inp,out)\n cmd = cmd.replace('//','/')\n print(cmd)\n os.system(cmd)\n\ndef get_volumeOverlapMatrix(sites,out,max_processes=4):\n \"\"\"\n Generate pairwise volume overlap matrix\n sites: 'str'. single file containing multiple SitMap files\n \"\"\"\n cmd = '%s/run volume_cluster.py -j %s -HOST localhost:%d -sc -r 2 %s'%(schrodinger_path,out,max_processes,sites)\n cmd = cmd.replace('//','/')\n print(cmd)\n p = subprocess.Popen(cmd, shell=True)\n p.wait()\n if p.wait() != 0:\n print(\"There was an error\")\n \n\ndef _clean_volumeMatrix(out,coutdir):\n \"\"\"\n Move the VolumeMatrix output to an specified directory\n out: 'str'\n outdir: 'str'. 
Output directory\n \"\"\"\n if not os.path.isdir(coutdir):\n os.system('mkdir %s'%coutdir)\n\n os.system('mv *.csv %s'%(coutdir))\n logdir = '%s/logs/'%coutdir\n logdir = logdir.replace('//','/')\n if not os.path.isdir(logdir):\n os.system('mkdir %s'%logdir)\n os.system('mv *.mae %s'%logdir)\n os.system('mv *.log %s'%logdir)\n\n\n\nif __name__ == '__main__':\n arg = parserfunc()\n inpdir = arg.maedir\n outdir = arg.outdir\n\n # create output directory\n if not os.path.isdir('%s/siteMap'%outdir):\n os.system('mkdir %s/siteMap'%outdir)\n\n # Compute the volume of each target specific binding site\n print(\"\\n-------------RUNNING SITEMAP----------------\\n\")\n TARGs = glob.glob('%s/*_prep.mae'%(inpdir))\n siteMap(maes=TARGs,asl = \"(res.num 145) AND ((atom.ptype \\' HB2 \\'))\",delimiter='_prep',outfmt='mae',max_processes=30)\n _clean_siteMap(outdir='%s/siteMap'%(outdir))\n sites = glob.glob('%s/siteMap/*_out.maegz'%outdir)\n _group_siteMap(sites=sites,out='Mpro_sites.maegz',outdir='%s/siteMap/'%outdir)\n _uncompress_maegz(inp='%s/siteMap/Mpro_sites.maegz'%outdir)\n\n # Find targets without binding site arround the specified atom\n print(\"\\n--------------CHECK STEP--------------------\\n\")\n print(\"These targets do not have the binding site around the specified atom. 
Please, remove them for further analysis:\")\n TARGs_IDs = [os.path.basename(TARG).split('_')[1] for TARG in TARGs]\n sites_IDs = [os.path.basename(site).split('_')[1] for site in sites]\n print(set(TARGs_IDs)-set(sites_IDs))\n\n # Get the volume overlapping matrix of the target sites\n print(\"\\n-----------VOLUME OVERLAPPING MATRIX----------\\n\")\n get_volumeOverlapMatrix(sites='%s/siteMap/Mpro_sites.maegz'%(outdir),out='Mpro_volumeMatrix',max_processes=4)\n _clean_volumeMatrix(out='Mpro_volumeMatrix',coutdir='%s/volumeMatrix/'%(outdir))\n\n\n","repo_name":"juliavilmor/Mpro","sub_path":"scripts/targetanalysis/volume_sitemap.py","file_name":"volume_sitemap.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"5300891552","text":"\"\"\"\nGiven an integer array nums, return true if any value appears at least twice in the array, and return false if every element is distinct.\n\n\n\nExample 1:\n\nInput: nums = [1,2,3,1]\nOutput: true\nExample 2:\n\nInput: nums = [1,2,3,4]\nOutput: false\nExample 3:\n\nInput: nums = [1,1,1,3,3,4,3,2,4,2]\nOutput: true\n\n\nConstraints:\n\n1 <= nums.length <= 105\n-109 <= nums[i] <= 109\n\"\"\"\nfrom typing import List\n\n\"\"\"\nSolution:\n\nfrom the concept of set in python\n\nset have unique elements\n\nso convert list to set and check both length\n\nif the length is same then no duplicate elements\n\n\n\"\"\"\n\n\nclass Solution:\n def containsDuplicate(self, nums: List[int]) -> bool:\n return len(nums) != len(set(nums))\n\n\nif __name__ == '__main__':\n n = [1, 2, 3, 1]\n n2 = [1, 2, 3, 4]\n n3 = [1, 1, 1, 3, 3, 4, 3, 2, 4, 2]\n s = Solution()\n print(s.containsDuplicate(n))\n print(s.containsDuplicate(n2))\n print(s.containsDuplicate(n3))\n","repo_name":"mihirh19/Python","sub_path":"LeetCodeSolution/0217.Contains_Duplicate.py","file_name":"0217.Contains_Duplicate.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"29368810985","text":"import praw\nimport os\n\nfrom enum import Enum\nfrom constants import *\n\nclass Reply(Enum):\n BASE_REPLY = 1\n\nclass AnkiHelpBot:\n responses: dict[Reply, str]\n reddit: praw.Reddit\n\n def __init__(self) -> None:\n self.responses = self.loadReplies()\n self.reddit = self.connect()\n\n\n def main(self):\n subreddit = self.reddit.subreddit(CONST_SUBREDDIT)\n for submission in subreddit.stream.submissions(skip_existing=True):\n self.process_submission(submission)\n\n def process_submission(self, submission):\n print(self.responses[Reply.BASE_REPLY])\n if submission.link_flair_text == \"Question\":\n submission.reply(body=self.responses[Reply.BASE_REPLY])\n\n\n def loadReplies(self) -> dict[Reply, str]:\n response: dict[Reply, str] = {}\n\n with open('replies/baseReply.md', 'r') as baseReply:\n baseReplyText = baseReply.read()\n response[Reply.BASE_REPLY] = baseReplyText\n \n return response\n\n def connect(self) -> praw.Reddit:\n return praw.Reddit(\n client_id=os.getenv(CONST_CLIENT_ID_ENV),\n client_secret=os.getenv(CONST_CLIENT_SECRET_ENV),\n redirect_uri=\"https://github.com/LanguageLatte/public\",\n password=os.getenv(CONST_PASSWORD_ENV),\n user_agent=CONST_USER_AGENT,\n username=CONST_USERNAME,\n )\n\nif __name__ == \"__main__\":\n ankiHelpBot = AnkiHelpBot()\n ankiHelpBot.main()","repo_name":"LanguageLatte/public","sub_path":"AnkiHelpBot/AnkiHelpBot.py","file_name":"AnkiHelpBot.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"}
+{"seq_id":"70639444375","text":"'''\r\n1로 만들기\r\nhttps://www.acmicpc.net/problem/1463\r\n'''\r\nN = int(input())\r\n\r\ndp = [0] * (N+4) # 숫자 N일때의 답을 해당 인덱스에 삽입\r\ndp[2] = 1\r\ndp[3] = 1\r\ndp[4] = 2\r\nfor n in range(5, N+1): # 5부터 bottom-up으로 dp 계산\r\n op_a = 0 # 연산 1 플래그\r\n if n % 3 == 0:\r\n op_a = 1\r\n\r\n op_b = 0 # 연산 2 플래그\r\n if n % 2 == 0:\r\n op_b = 1\r\n\r\n op_flag = (op_a, op_b, 1) # 연산 1, 2, 3 플래그\r\n if op_flag == (1, 1, 1):\r\n dp[n] = min(dp[n//3] + 1, dp[n//2] + 1, dp[n-1] + 1)\r\n elif op_flag == (1, 0, 1):\r\n dp[n] = min(dp[n//3] + 1, dp[n-1] + 1)\r\n elif op_flag == (0, 1, 1):\r\n dp[n] = min(dp[n//2] + 1, dp[n-1] + 1)\r\n elif op_flag == (0, 0, 1):\r\n dp[n] = dp[n-1] + 1\r\n\r\nprint(dp[N])\r\n\r\n\r\n\r\n###\r\n# 테스트케이스 참고(https://www.acmicpc.net/board/view/49959)\r\n\r\n''' DFS 연습\r\ndef dfs(n):\r\n global answer, cnt\r\n\r\n if n < 1:\r\n return\r\n elif n == 1:\r\n answer = min(cnt, answer)\r\n\r\n return\r\n else:\r\n for o in op:\r\n if o == 'a' and n % 3 == 0:\r\n cnt += 1\r\n dfs(n // 3)\r\n cnt -= 1\r\n elif o == 'b' and n % 2 == 0:\r\n cnt += 1\r\n dfs(n // 2)\r\n cnt -= 1\r\n else:\r\n cnt += 1\r\n dfs(n - 1)\r\n cnt -= 1\r\n\r\n\r\nN = int(input())\r\n\r\ncnt = 0\r\nanswer = sys.maxsize\r\nop = ['a', 'b', 'c']\r\ndfs(N)\r\n\r\nprint(answer)\r\n'''\r\n","repo_name":"buyeolim/ps_prac","sub_path":"BOJ/python3/1463.py","file_name":"1463.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20271338661","text":"# See 'Leetcode 206. Reverse Linked List' for practice\n\nfrom manim import *\n\nfrom src.list_utilities.LinkedList import LinkedList\nfrom src.list_utilities.Node import Node, NoneNode\n\n\nclass ReverseList(Scene):\n def construct(self):\n title = MarkupText(\"Reverse linked list\", font_size=45).shift(UP * 3)\n self.add(title)\n\n self.show_task(title)\n self.show_reverse_list_code(title)\n\n orig_nodes = [Node(str(j)) for j in range(1, 4)]\n orig_list = LinkedList(nodes=orig_nodes)\n self.play(FadeIn(orig_list.visual_list))\n self.wait(1)\n\n none_start = NoneNode()\n none_start.set_next(orig_list.head)\n none_end = NoneNode()\n orig_list.tail.set_next(none_end)\n self.show_none_start_end(orig_list, none_start, none_end)\n\n self.reverse_list(none_start, orig_list.head)\n self.play(FadeOut(none_start.vn_arrows.vnode.group),\n FadeOut(orig_list.head.vn_arrows.left_arrow))\n self.wait(2)\n\n def show_task(self, title: MarkupText) -> None:\n orig_nodes = [Node(str(j)) for j in range(1, 4)]\n orig_list = LinkedList(nodes=orig_nodes)\n\n rev_nodes = [Node(str(j)) for j in range(3, 0, -1)]\n rev_list = LinkedList(nodes=rev_nodes)\n\n down_arrow = MathTex(r\"\\Downarrow\", color=WHITE)\n result = VGroup(orig_list.visual_list, down_arrow, rev_list.visual_list)\\\n .arrange(DOWN, buff=0.5).next_to(title, DOWN * 5)\n self.play(FadeIn(result, run_time=3))\n self.wait(2)\n self.play(FadeOut(result))\n\n def show_reverse_list_code(self, title: MarkupText) -> None:\n code = '''\n prev, cur = None, head\n while cur:\n next_node = cur.next\n cur.next = prev\n prev, cur = cur, next_node\n return prev\n '''\n rendered_code = Code(code=code, tab_width=4, insert_line_no=False,\n language=\"Python\", font=\"Monospace\", font_size=14)\\\n .next_to(title, RIGHT * 2)\n self.play(Create(rendered_code))\n\n def show_none_start_end(self, list_: LinkedList, start: Node, end: Node) -> None:\n end.vn_arrows.group.next_to(list_.visual_list, RIGHT)\n 
start.vn_arrows.group.next_to(list_.visual_list, LEFT)\n self.play(FadeIn(end.vn_arrows.group, run_time=2))\n\n start.vn_arrows.remove_arrow()\n self.play(FadeIn(start.vn_arrows.group, run_time=2))\n self.wait(1)\n\n def reverse_list(self, none_node: Node, head: Node) -> Node:\n prev, cur = none_node, head\n tp = TextPointers(prev, cur, self)\n self.wait(0.8)\n\n while not cur.is_none:\n next_node = cur.next\n move_arrow(prev, cur, self)\n cur.next = prev\n tp.move_prev_cur()\n prev, cur = cur, next_node\n tp.update_next(cur)\n self.wait(0.8)\n\n self.play(FadeOut(tp.cur, cur.vn_arrows.group), tp.get_prev_to_cur_transform())\n self.wait(0.5)\n return prev\n\n\ndef move_arrow(prev: Node, cur: Node, scene: Scene) -> None:\n scene.wait(0.8)\n prev.vn_arrows.flip_arrow()\n scene.play(CounterclockwiseTransform(cur.vn_arrows.right_arrow, prev.vn_arrows.right_arrow))\n cur.vn_arrows.set_right_to_left_arrow()\n cur.vn_arrows.set_right_arrow()\n\n\nclass TextPointers:\n def __init__(self, prev: Node, cur: Node, scene: Scene) -> None:\n self.__font_size = 18\n self.__prev = Text(\"prev\", font_size=self.__font_size)\n self.__cur = Text(\"cur\", font_size=self.__font_size)\n self.__next = Text(\"next\", font_size=self.__font_size)\n self.scene = scene\n\n self.__prev.next_to(prev.vn_arrows.vnode.group, DOWN)\n self.__cur.next_to(cur.vn_arrows.vnode.group, DOWN)\n if cur.next:\n self.__next.next_to(cur.next.vn_arrows.vnode.group, DOWN)\n self.scene.play(FadeIn(self.__prev, self.__cur))\n\n @property\n def cur(self) -> Text:\n return self.__cur\n\n def move_prev_cur(self) -> None:\n self.__prev.generate_target()\n self.__prev.target.move_to(self.__cur)\n\n self.__cur.generate_target()\n self.__cur.target.move_to(self.__next)\n\n self.scene.play(MoveToTarget(self.__prev), MoveToTarget(self.__cur))\n self.scene.wait(1)\n\n def update_next(self, cur: Node) -> None:\n if cur.next:\n self.__next.next_to(cur.next.vn_arrows.vnode.group, DOWN)\n\n def get_prev_to_cur_transform(self) -> 
Transform:\n head_text = Text(\"head\", font_size=self.__font_size).shift(self.__prev.get_center())\n return Transform(self.__prev, head_text)\n","repo_name":"andreysmykov/algorithms_animation","sub_path":"src/reverse_linked_list/reverse_list.py","file_name":"reverse_list.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"25144537282","text":"import app\nimport logging\n\nfrom flask import jsonify\nfrom flask_restplus import Resource\nfrom .models import Search\n\nLOG = logging.getLogger(__name__)\napi = Search.api \n\n@api.route('//')\nclass Search(Resource):\n @api.doc(\"search\")\n def get(self,c_type, tag):\n result = []\n if c_type == \"company\":\n search_resp = app.App.mongodb.db.company.find( { '$text': { '$search': tag } } ).sort('_id')\n \n elif c_type == \"customer\":\n search_resp = app.App.mongodb.db.customer.find( { '$text': { '$search': tag } } ).sort('_id')\n \n for doc in search_resp:\n result.append(doc)\n\n return jsonify(result)","repo_name":"venkaiaha/Sree-app-flask","sub_path":"app/api/search/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"3329038158","text":"#Input de dados\nd = float(input(\"Digite a distância percorrida pelo usuário(km):\"))\nt = int(input(\"Digite o número de dias do aluguel:\"))\n#Dados armazenados\nP = 60\np = 0.15\n#Cálculo do pagamento\nV = (P*t) + (p*d)\n#Output\nprint(\"Valor a pagar: R$ %5.2f\" % V)","repo_name":"axelife2021/Python","sub_path":"Capítulo 3/ex3.14.py","file_name":"ex3.14.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"13264873269","text":"import pandas as pd\nfrom sklearn.feature_selection import RFE\nimport copy\nimport numpy as np\nimport warnings\nimport numpy as np\nfrom sklearn.impute import SimpleImputer\nimport matplotlib.pyplot as plt\nimport yaml\n\n\ndef add_binary_where_the_nan_was(table, column):\n \"\"\"takes the table and next to each columnputs information whether there was a nan\"\"\"\n table[f'bool_nan_{column}'] = table[column].isna()\n return table\n\n\ndef get_rid_of_outliers(table, column):\n up, low = np.percentile(table[column], [1, 99])\n y = np.clip(table[column], up, low)\n # pd.Series(y).hist(bins=30)\n table = table.drop(columns=[column])\n table[f'no_outliers_{column}'] = pd.Series(y)\n return table\n\n\ndef WoE_for_categorical_values(table, column, ret_woe=False):\n \"\"\"\n takes a table, with a categorical value in columns and returns WOE and IV for that variable\n \"\"\"\n\n different_values = table[column].unique().shape[0]\n list_of_bads = [0] * different_values # in each element of a list it contains woe for a corresponding interval\n list_of_goods = [0] * different_values\n for i in range(len(table[str(column)])): # iterate over every sample\n # print(i)\n for j in range(different_values): # how many separate values are there to deal with\n if table[column][i] == table[column].unique()[j] and table['target'][i] == 1: # default is an event here\n list_of_bads[j] += 1\n elif table[column][i] == table[column].unique()[j] and table['target'][i] == 0:\n list_of_goods[j] += 1\n\n total_bads = table.target.sum()\n total_goods = len(table.target) - total_bads\n distr_goods = []\n distr_bads = []\n WoE = []\n\n for i in range(len(list_of_goods)):\n distr_goods.append(list_of_goods[i] / total_goods)\n distr_bads.append(list_of_bads[i] / total_bads)\n\n # check whether there are no groups with 0 counts for good or bad - if there are drop the columns with that variable\n # all together\n flag = False\n if 0 in distr_goods or 0 in 
distr_bads:\n print(\"In at least one of the bins there is either no goods or bads distribution. Dropping that variable\")\n flag = True\n\n for i in range(len(list_of_goods)):\n WoE.append(np.log(distr_goods[i] / distr_bads[i]) * 100)\n\n # Information Value of the whole characteristic\n distr_bads_nans = table['target'][table[column].isna()].sum()/total_bads\n # how many is nan and is not default\n distr_goods_nans = (table['target'][table[column].isna()].shape[0] - \\\n table['target'][table[column].isna()].sum())/total_goods\n WoE_nan = np.log(distr_goods_nans / distr_bads_nans) * 100\n WoE = WoE.insert(0, WoE_nan) # inserting the value correspinding to NaNs in the first place\n\n # Information Value of the whole characteristic\n differences = [distr_goods[i] - distr_bads[i] for i in range(len(distr_goods))]\n differences.insert(0, distr_goods_nans-distr_bads_nans)\n IV = np.dot(differences, np.transpose(WoE))\n\n if ret_woe and not flag:\n return WoE, IV\n elif not ret_woe and not flag:\n return IV\n elif flag:\n return table.drop(columns=[column])\n# consider correlation for all continuous data\n\n\ndef drop_columns_with_many_nans(table, threshold=0.2):\n \"\"\"drops columns that contain over 20% of nan values\"\"\"\n for col in table.columns:\n if table[col].isna().sum() >= threshold * table[col].shape[0]:\n table = table.drop(columns=[col])\n return table\n\n\ndef woe_and_iv_continuous_data(table, column, number_of_bins, ret_woe=False):\n \"\"\"assumes that target is provided in column 'target'\n 1 - event, ie default\n 0 - no default\n returns bins, woe - tuples, iv - scalar\n \"\"\"\n\n bins = pd.qcut(table[str(column)], number_of_bins, retbins=True)[1]\n # bins = pd.cut(table[str(column)], number_of_bins, retbins=True)[1]\n bins[-1] += 1 # to include all points\n list_of_bads = [0] * number_of_bins # in each element of a list it contains woe for a corresponding interval\n list_of_goods = [0] * number_of_bins\n for i in range(len(table[str(column)])):\n 
for j in range(number_of_bins):\n if bins[j] <= table[column][i] < bins[j+1] and table['target'][i] == 1: # default is an event here\n list_of_bads[j] += 1\n elif bins[j] <= table[column][i] < bins[j+1] and table['target'][i] == 0:\n list_of_goods[j] += 1\n\n\n # WoE = ln(distr_goods / distr_bads) * 100\n\n total_bads = table.target.sum() # bad = default\n total_goods = len(table.target) - total_bads\n distr_goods = []\n distr_bads = []\n WoE = []\n\n for i in range(len(list_of_goods)):\n distr_goods.append(list_of_goods[i] / total_goods)\n distr_bads.append(list_of_bads[i] / total_bads)\n\n # check whether there are no groups with 0 counts for good or bad\n\n if 0 in distr_goods or 0 in distr_bads:\n warnings.warn(\"In at least one of the bins there is either no goods or bads distribution. Check the binning\")\n exit()\n\n for i in range(len(list_of_goods)):\n WoE.append(np.log(distr_goods[i] / distr_bads[i]) * 100)\n\n # group also nans\n # how many is nan and is default\n distr_bads_nans = table['target'][table[column].isna()].sum()/total_bads\n # how many is nan and is not default\n distr_goods_nans = (table['target'][table[column].isna()].shape[0] - \\\n table['target'][table[column].isna()].sum())/total_goods\n # WoE_nan = np.log(distr_goods_nans / distr_bads_nans) * 100\n # WoE = WoE.insert(0, WoE_nan) # inserting the value correspinding to NaNs in the first place\n\n # Information Value of the whole characteristic\n differences = [distr_goods[i] - distr_bads[i] for i in range(len(distr_goods))]\n # differences.insert(0, distr_goods_nans-distr_bads_nans)\n IV = np.dot(differences, np.transpose(WoE))\n\n if ret_woe:\n return bins, WoE, IV\n else:\n return IV/100\n\n\ndef input_missing_values(table, column, median=True, mode=False):\n if median:\n imp_mean = SimpleImputer(missing_values=np.nan, strategy='median')\n elif mode:\n imp_mean = SimpleImputer(missing_values=np.nan, strategy='most_frequent')\n\n imp_mean.fit(table[column].values.reshape(-1, 1))\n 
table[column] = imp_mean.transform(table[column].values.reshape(-1, 1))\n return table\n\n\ndef correlation(dataset, threshold=0.6):\n # deals only with numeric data, float64 and int64, so here the values\n\n col_corr = set() # Set of all the names of deleted columns\n corr_matrix = dataset.corr()\n for i in range(len(corr_matrix.columns)):\n for j in range(i):\n if (corr_matrix.iloc[i, j] >= threshold) and (corr_matrix.columns[j] not in col_corr):\n colname = corr_matrix.columns[i] # getting the name of column\n col_corr.add(colname)\n if colname in dataset.columns:\n del dataset[colname] # deleting the column from the dataset\n print(col_corr)\n return dataset\n\n\ndef exclude_data_few_unique_values(dataset):\n \"\"\"drops columns that have either 0 distinct values (NANS) or only 1 distinct value\"\"\"\n col_to_drop = []\n\n for column in dataset.columns:\n if dataset[column].nunique() == 1 or dataset[column].nunique() == 0:\n col_to_drop.append(column)\n\n for column in col_to_drop:\n dataset = dataset.drop(columns=column)\n\n return dataset\n\n\ndef split_dataset(table):\n \"\"\"categorical features object,\n numerical - float64\n ordinal - int64, the distinction between categorical and ordinal is to belooked into\"\"\"\n table_categorical = table.select_dtypes('object')\n table_numerical = table.select_dtypes('float64')\n table_ordinal = table.select_dtypes('int64')\n return table_numerical, table_categorical, table_ordinal\n\n\ndef bin_dataset(table, column):\n \"\"\" bins numerical value\"\"\"\n if column is not 'target':\n return pd.qcut(table[column], 4)\n\n\ndef drop_duplicated_ones_and_values_leaking_data_from_the_future(table):\n list_to_drop = ['id', 'member_id', 'url', 'emp_title', 'issue_d', 'funded_amnt', 'funded_amnt_inv',\n 'sub_grade', 'int_rate', 'addr_state', 'out_prncp', 'out_prncp_inv', 'total_pymnt',\n 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries',\n 'collection_recovery_fee', 'last_pymnt_d', 
'last_pymnt_amnt', 'zip_code',\n 'earliest_cr_line', 'next_pymnt_d', 'last_credit_pull_d', 'disbursement_method', 'delinq_amnt',\n 'open_rv_24m']\n\n for column in list_to_drop:\n try: # as some of these columns might already have been dropped\n table = table.drop(columns=column)\n except:\n KeyError\n return table\n\n\ndef look_at_value_distribution(table, columns):\n \"\"\"for later stage of analysis, to manually exclude data that has very few examples of some values\"\"\"\n print(table[columns].value_counts())\n\n\ndef fill_nans(table, column): # only after values with too many nans were excluded\n return table[column].fillna(\"MISSING\")\n\n\ndef calculate_woe_iv(dataset, feature):\n lst = []\n for i in range(dataset[feature].nunique()):\n val = list(dataset[feature].unique())[i]\n lst.append({\n 'Value': val,\n 'All': dataset[dataset[feature] == val].count()[feature],\n 'Good': dataset[(dataset[feature] == val) & (dataset['target'] == 0)].count()[feature],\n 'Bad': dataset[(dataset[feature] == val) & (dataset['target'] == 1)].count()[feature]\n })\n\n dset = pd.DataFrame(lst)\n dset['Distr_Good'] = dset['Good'] / dset['Good'].sum()\n dset['Distr_Bad'] = dset['Bad'] / dset['Bad'].sum()\n dset['WoE'] = np.log(dset['Distr_Good'] / dset['Distr_Bad'])\n dset = dset.replace({'WoE': {np.inf: 0, -np.inf: 0}})\n dset['IV'] = (dset['Distr_Good'] - dset['Distr_Bad']) * dset['WoE']\n iv = dset['IV'].sum()\n\n dset = dset.sort_values(by='WoE')\n\n return dset, iv\n\n\ndef conc_tables(t1, t2, t3): # concatenates tables\n return pd.concat([t1, t2, t3])\n\n\n\ndef std_test_as_train(X_train, X_test):\n from sklearn.preprocessing import StandardScaler\n\n for col in X_train.columns:\n scaler = StandardScaler()\n scaler.fit(X_train[col].to_numpy().reshape(-1, 1))\n X_train[col] = scaler.transform(X_train[col].to_numpy().reshape(-1, 1))\n X_test[col] = scaler.transform(X_test[col].to_numpy().reshape(-1, 1))\n return X_train, X_test\n\n\ndef stack(X_train, y_train, X_test, 
y_test):\n from sklearn.linear_model import LogisticRegression\n from vecstack import stacking\n from sklearn import svm\n import xgboost as xgb\n from sklearn.metrics import roc_auc_score, classification_report, f1_score\n\n models = [LogisticRegression(), svm.SVC(), xgb.XGBClassifier(n_jobs=-1)]\n\n S_train, S_test = stacking(models, X_train, y_train, X_test, regression=False, verbose=2)\n\n model = xgb.XGBClassifier(seed=0, n_jobs=-1, learning_rate=0.1,n_estimators=100, max_depth=3)\n\n # Fit 2-nd level model\n model = model.fit(S_train, y_train)\n\n # Predict\n y_pred = model.predict(S_test)\n\n # Final prediction score\n print('Final prediction score: [%.8f]' % f1_score(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n print(roc_auc_score(y_test, y_pred))\n\n\ndef stack_regression(X_train, y_train, X_test, y_test):\n from sklearn.linear_model import LinearRegression\n from vecstack import stacking\n from sklearn import linear_model\n import xgboost as xgb\n from sklearn.metrics import mean_squared_error, mean_absolute_error\n\n models = [linear_model.SGDRegressor(max_iter=1000, tol=1e-3), LinearRegression(), xgb.XGBRegressor(n_jobs=-1)]\n\n S_train, S_test = stacking(models, X_train, y_train, X_test, regression=False, verbose=2)\n\n model = xgb.XGBRegressor(seed=0, n_jobs=-1, learning_rate=0.1,n_estimators=100, max_depth=3)\n\n # Fit 2-nd level model\n model = model.fit(S_train, y_train)\n\n # Predict\n y_pred = model.predict(S_test)\n\n\n print(mean_squared_error(y_test, y_pred))\n\n\ndef upsample_data(X_train, y_train):\n\n from imblearn.over_sampling import SMOTE\n sm = SMOTE(random_state=42)\n X_res, y_res = sm.fit_resample(X_train, y_train)\n return X_res, y_res\n\n\n\"\"\"for later\"\"\"\ndef bayes_optim(X_train, X_test, y_train, y_test):\n\n from bayes_opt import BayesianOptimization\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.model_selection import cross_val_score\n\n def bayesian_optimization(X_train, X_test, 
y_train, y_test, function, parameters):\n n_iterations = 10\n gp_params = {\"alpha\": 1e-4}\n BO = BayesianOptimization(function, parameters)\n BO.maximize(n_iter=n_iterations, **gp_params)\n\n return BO.ma\n\n\n def rfc_optimization(cv_splits):\n def function(n_estimators, max_depth, min_samples_split):\n return cross_val_score(\n RandomForestClassifier(\n n_estimators=int(max(n_estimators, 0)),\n max_depth=int(max(max_depth, 1)),\n min_samples_split=int(max(min_samples_split, 2)),\n n_jobs=-1,\n random_state=42,\n class_weight=\"balanced\"),\n X=X_train,\n y=y_train,\n cv=cv_splits,\n scoring=\"f1_macro\",\n n_jobs=-1).mean()\n\n parameters = {\"n_estimators\": (10, 1000),\n \"max_depth\": (1, 150),\n \"min_samples_split\": (2, 10)}\n\n return function, parameters\n\n # Train model\n def train(X_train, y_train, X_test, y_test, function, parameters):\n cv_splits = 4\n\n best_solution = bayesian_optimization(X_train, y_train, X_test, y_test, function, parameters)\n params = best_solution[\"params\"]\n\n model = RandomForestClassifier(\n n_estimators=int(max(params[\"n_estimators\"], 0)),\n max_depth=int(max(params[\"max_depth\"], 1)),\n min_samples_split=int(max(params[\"min_samples_split\"], 2)),\n n_jobs=-1,\n random_state=42,\n class_weight=\"balanced\")\n\n model.fit(X_train, y_train)\n\n return model\n\n function, parameters = rfc_optimization(10)\n print(train(X_train, y_train, X_test, y_test, function, parameters))\n\n\ndef encode_ordinal_category_encoders(table):\n import category_encoders\n for col in table.columns:\n if str(col)[:7] != 'encoded':\n encode = category_encoders.ordinal.OrdinalEncoder()\n encode.fit(table[col])\n table[f'encoded_ordinal{col}'] = encode.transform(table[col])\n return table\n\n\ndef encode_ordinal_sklearn(table, col):\n from sklearn.preprocessing import OrdinalEncoder\n enc = OrdinalEncoder()\n enc.fit(table[col].values.reshape(-1, 1))\n table[f'ordinal_encoded_{col}'] = enc.transform(table[col].values.reshape(-1, 1))\n return 
table\n\n\ndef encode_categorical_TargetEncoder(table, y):\n import category_encoders\n for col in table.columns:\n if str(col)[:7] != 'encoded':\n encode = category_encoders.target_encoder.TargetEncoder()\n encode.fit(table[col], y)\n table[f'encoded_target{col}'] = encode.transform(table[col])\n return table\n\n\ndef drop_cat_not_encoded(table):\n \"\"\"drops non encoded categorical columns\"\"\"\n for col in table.columns:\n if str(col)[:7] != 'encoded':\n table = table.drop(columns=[col])\n return table\n\n\ndef fit_xgboost(X, y, upsample=True):\n from sklearn.metrics import roc_auc_score, classification_report, f1_score\n import xgboost as xgb\n\n model = xgb.XGBClassifier(seed=0, n_jobs=-1, learning_rate=0.1,\n n_estimators=10, max_depth=5)\n\n # Fit 2-nd level model\n if upsample:\n X_res, y_res = upsample_data(X, y)\n X_train, X_test, y_train, y_test = train_test_split(X_res, y_res)\n model = model.fit(X_train, y_train)\n else:\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n model = model.fit(X_train, y_train)\n\n\n # Predict\n y_pred = model.predict(X_test)\n print('f1:\\t\\n', f1_score(y_test, y_pred))\n print('roc_auc_score:\\t\\n', roc_auc_score(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n return model\n\n\ndef date_feature(date):\n \"\"\"number of month, day of week, vacation, assuming US vacations\"\"\"\n from dateutil import parser\n import holidays\n us_holidays = holidays.UnitedStates()\n datetime_obj = parser.parse(date)\n weekday = datetime_obj.weekday() # 0 is monday\n month = datetime_obj.month\n year = datetime_obj.year\n is_holiday = datetime_obj in us_holidays or weekday==6 or weekday==5 # includes weekends as holidays\n return weekday, month, year, int(is_holiday)\n\n\ndef mean_encoding(table, column, target, drop=False):\n \"\"\"assumes no nans\"\"\"\n \"\"\"assumes target is a column in table\"\"\"\n table[f'mean_encoded_{column}'] = 
table[column].map(table.iloc[table.index].groupby(column)[target].mean())\n if drop:\n table = table.drop(columns=[column])\n return table\n\n\ndef frequency_encode(table, column, drop=False):\n \"\"\"assumes no nans\"\"\"\n encoding = table.groupby(column).size()\n encoding = encoding / table.shape[0]\n table[f'freq_enc_{column}'] = table[column].map(encoding)\n if drop:\n table = table.drop(columns=[column])\n return table\n\n\ndef kfold_mean_encoding(table, column, target, nfolds=5):\n\n from sklearn.model_selection import KFold\n\n skf = KFold(nfolds, shuffle=False)\n\n for tr_ind, val_ind in skf.split(table[column]):\n\n X_tr, X_val = table[[column, target]].iloc[tr_ind], table[[column, target]].iloc[val_ind]\n\n table[column].iloc[val_ind] = table[column].iloc[val_ind].map(X_tr.groupby(column)[target].mean())\n\n table[column] = table[column].fillna(table[target].mean())\n\n return table\n\n\ndef train_test_split(X, y, test_size=0.2):\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n return X_train, X_test, y_train, y_test\n\n\ndef log_transform(table, column, drop=True):\n table[f'log_{column}'] = pd.Series(np.log(1 + table[column]))\n if drop:\n table = table.drop(columns=[column])\n return table\n\n\ndef remove_duplicates(table):\n return table.drop_duplicates()\n\n\ndef compare_data_distributions(table1, table2):\n \"\"\"This is a two-sided test for the null hypothesis that 2 independent samples are drawn from the same\n continuous distribution. \"\"\"\n from scipy import stats\n pvalue = stats.ks_2samp(table1, table2)[1]\n if pvalue > .10:\n return True\n else:\n return False\n\n\n","repo_name":"MateuszLewandowski1/mateusz.h.lewandowski-gmail.com","sub_path":"utils_ds.py","file_name":"utils_ds.py","file_ext":"py","file_size_in_byte":18925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"1110640887","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n if len(lists) == 0:\n return None\n\n if len(lists) > 1:\n temp = None\n for i in range(1,len(lists)):\n list1 = lists[0]\n list2 = lists[i]\n\n lists[0] = self.mergeList(list1, list2)\n \n return lists[0] \n\n def mergeList(self, list1: [ListNode], list2: [ListNode]) -> Optional[ListNode]:\n node = ListNode()\n tail = node\n\n while list1 and list2:\n if list1 and list2:\n if list1.val < list2.val:\n tail.next = list1\n list1 = list1.next\n\n else:\n tail.next = list2\n list2 = list2.next\n\n tail = tail.next\n \n if list1 or list2:\n tail.next = list1 if list1 else list2\n \n return node.next\n\n\n# class Solution:\n# def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n# if not lists:\n# return None\n# if len(lists) == 1:\n# return lists[0]\n# mid = len(lists) // 2\n# l, r = self.mergeKLists(lists[:mid]), self.mergeKLists(lists[mid:])\n# return self.merge(l, r)\n \n# def merge(self, l, r):\n# dummy = p = ListNode()\n# while l and r:\n# if l.val < r.val:\n# p.next = l\n# l = l.next\n# else:\n# p.next = r\n# r = r.next\n# p = p.next\n# p.next = l or r\n# return dummy.next\n \n","repo_name":"tayyab-tariq/Leetcode","sub_path":"Merge k Sorted Lists.py","file_name":"Merge k Sorted Lists.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"34352783806","text":"\"\"\"\n Predicting on predictions (NN's can be stacked)\n\"\"\"\n\n\ndef w_sum(a, b):\n assert len(a) == len(b)\n output = 0\n for i in range(len(a)):\n output += (a[i] * b[i])\n return output\n\n# VECTOR MATRIX MULTIPLICATION\ndef vect_mat_mul(vect, matrix):\n # For each output we are performing a weighted sum of inputs\n # this function iterates through each row of weigths and makes\n # a prediction using w_sum\n assert len(vect) == len(matrix)\n\n output = [0, 0, 0]\n\n for i in range(len(vect)):\n output[i] = w_sum(vect, matrix[i])\n\n return output\n\n # toes wins fans\nih_wgt = [\n [0.1, 0.2, -0.1], # hid[0]\n [-0.1, 0.1, 0.9], # hid[1]\n [0.1, 1.4, 0.1] # hid[2]\n]\n\n # hid[0] hid[1] hid[2]\nhp_wgt = [\n [0.3, 1.1, -0.3], # hurt?\n [0.1, 0.2, 0.0], # win?\n [0.0, 1.3, 0.1] # sad?\n]\n\n\ndef neural_network(input, weights):\n hid = vect_mat_mul(input, weights[0])\n pred = vect_mat_mul(hid, weights[1])\n return pred\n\n\nweights = [ih_wgt, hp_wgt]\n\ntoes = [8.5, 9.5, 9.9, 9.0]\nwlrec = [0.64, 0.8, 0.8, 0.9]\nnfans = [1.2, 1.3, 0.5, 1.0]\n\ninput = [toes[0], wlrec[0], nfans[0]]\npredictions = neural_network(input, weights)\nprint(predictions)\n","repo_name":"charliecharlieO-o/grokking-nn-study","sub_path":"chapter3/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"45336172237","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nimport pandas as pd\nimport plotly.graph_objs as go\n\nfrom dash.dependencies import Input, Output\n\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.layout = html.Div(children=[\n html.H1(children='Hello Dash'),\n\n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n\n dcc.Graph(\n id='example-graph' \n )\n])\n\ndef prepare_chart_bar_data():\n # In real world, below prob done something like...\n # df2018= pd.read_sql('mySQL',db)\n data2018 = [['January', 6], ['February', 7], ['March', 2]] \n data2019 = [['January', 7], ['February', 12], ['March', 5]] \n df2018 = pd.DataFrame(data2018, columns = ['Month', 'SnowDays']) \n df2019 = pd.DataFrame(data2019, columns = ['Month', 'SnowDays']) \n\n # Assemble our list data for graphic, we'll use the plotly graph object \"go\"\n \n bars_data_set_a = {'data':\n [\n {'x': df2018['Month'].tolist(),\n 'y': df2018['SnowDays'].tolist(),\n 'type':'bar',\n 'name':'2018'}\n ]\n }\n\n bars_data_set_b = {'data':\n [\n {'x': df2019['Month'].tolist(),\n 'y': df2019['SnowDays'].tolist(),\n 'type':'bar',\n 'name':'2019'}\n ],\n 'layout': {'title':'visualization'}}\n\n #my_figure_dict = dict(bars_data_set_a, **bars_data_set_b)\n bars_data_set_a.update(bars_data_set_b)\n\n my_figure_dict = bars_data_set_a\n\n return my_figure_dict\n\n\n@app.callback(\n Output('example-graph', 'figure'),\n [Input('example-graph', '')])\ndef update_figure(figure):\n return prepare_chart_bar_data()\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n \n # data': [\n # {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},\n # {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},\n # ],\n # 'layout': {\n # 'title': 'Dash Data Visualization'\n\n \n \n # 
return {\n # 'data': [\n # {'x': df2018['Month'], 'y': df2018['SnowDays'], 'type': 'bar', 'name': '2018'},\n # {'x': df2019['Month'], 'y': df2019['SnowDays'], 'type': 'bar', 'name': '2019'},\n # ],\n # 'layout': {\n # 'title': 'Dash Data Visualization'\n # }\n # }\n\n \n\n\n \n\n","repo_name":"maxrottersman/MaxDashProject","sub_path":"Archived_Experiments/alt_dash_example_01_function_first_3_addmoredataabsraction.py","file_name":"alt_dash_example_01_function_first_3_addmoredataabsraction.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"70635768533","text":"import os\nimport numpy as np\nfrom rich.progress import track\nimport pandas as pd\nfrom visualizers.DataVisualizers import visualize_line\n\ndef run(name, points, runners, generator, tests_per_point=10, ret_data=lambda x, y, z: x,\n data_names=[], x_name=\"X\", output=None, **kwargs):\n res = []\n\n for point in track(np.arange(*points)):\n for i in range(tests_per_point):\n test = generator(point)\n for name, runner in runners.items():\n result = runner(test, **kwargs)\n fix_res = point, *ret_data(result, test, name), name\n\n res.append(fix_res)\n\n\n\n np_arr = np.array(res)\n dataset = pd.DataFrame(np_arr, columns=[x_name, *data_names, \"Runner\"])\n\n for i in data_names: \n visualize_line(f\"{name}: {i}\", x_name, i, dataset, output=output, hue=\"Runner\")\n\n","repo_name":"atrin-hojjat/Uni-AI-Course-Reports","sub_path":"Report 03/code/tests/TestRunner.py","file_name":"TestRunner.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"11611158114","text":"# 1. opencv >>>> PIL\n#\n# IO cost\n# 1. hdf5, tfrecord (Tensorflow) several small image (text) to a larger file\n# 2. preloader\n# 3. cache (lmdb or redis) key, value cahe\n#\n# pin_memory = True\n\n# tfrecord\nimport tensorflow as tf\nimport os\nfrom PIL import Image\nimport numpy as np\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[tf.io.encode_jpeg(value).numpy()]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef image_example(image_path, label):\n image_raw = Image.open(image_path)\n image_raw = np.array(image_raw)\n\n feature = {\n 'image': _bytes_feature(image_raw.tobytes()),\n 'label': _int64_feature(label),\n }\n\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\ndef create_tfrecord(output_filename, image_folder):\n writer = tf.io.TFRecordWriter(output_filename)\n\n # Your dataset: list of tuples (image_path, label)\n dataset = [\n (\"path/to/image1.jpg\", 0),\n (\"path/to/image2.jpg\", 1),\n # Add more images and labels as needed\n ]\n\n for image_path, label in dataset:\n tf_example = image_example(image_path, label)\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n\n# Specify the output TFRecord file and the folder containing images\ntfrecord_filename = 'output.tfrecord'\nimage_folder_path = 'path/to/your/image/folder'\n\n# Create TFRecord file\ncreate_tfrecord(tfrecord_filename, image_folder_path)\n\n# several small images to hdf5 file\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimport h5py\nfrom PIL import Image\nimport os\nfrom multiprocessing import Pool\n\nclass CustomDataset(Dataset):\n def __init__(self, root_dir):\n self.root_dir = root_dir\n self.image_paths = [os.path.join(root_dir, 
file) for file in os.listdir(root_dir) if file.endswith(\".jpg\")]\n self.transform = transforms.Compose([transforms.ToTensor()])\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n image_path = self.image_paths[idx]\n image = Image.open(image_path).convert('RGB')\n image = self.transform(image)\n return image\n\ndef process_images(args):\n idx, image_path = args\n image = Image.open(image_path).convert('RGB')\n image = transforms.ToTensor()(image)\n return idx, image.numpy()\n\ndef convert_to_hdf5_multiprocess(dataset, output_filename='output.h5', num_processes=4):\n with h5py.File(output_filename, 'w') as hdf5_file:\n images_group = hdf5_file.create_group('images')\n\n # Use multiprocessing to parallelize image processing\n with Pool(num_processes) as pool:\n results = pool.map(process_images, enumerate(dataset.image_paths))\n\n for idx, image_data in results:\n images_group.create_dataset(f'image_{idx}', data=image_data)\n\n# Specify the folder containing images\nimage_folder_path = 'path/to/your/image/folder'\n\n# Create a dataset\ndataset = CustomDataset(image_folder_path)\n\n# Convert to HDF5 using multiple processes\nhdf5_filename = 'output.h5'\nconvert_to_hdf5_multiprocess(dataset, hdf5_filename, num_processes=4)\n\n# redis \nimport redis\nfrom PIL import Image\nfrom io import BytesIO\nimport base64\n\n# Connect to the Redis server\nredis_client = redis.StrictRedis(host='localhost', port=6379, decode_responses=True)\n\ndef encode_image(image_path):\n with open(image_path, \"rb\") as image_file:\n encoded_image = base64.b64encode(image_file.read()).decode('utf-8')\n return encoded_image\n\ndef decode_image(encoded_image):\n decoded_image = base64.b64decode(encoded_image)\n return Image.open(BytesIO(decoded_image))\n\ndef store_images_in_redis(image_folder):\n # Assuming each image has a unique filename\n image_files = [f for f in os.listdir(image_folder) if f.endswith('.jpg')]\n\n for image_file in image_files:\n 
image_path = os.path.join(image_folder, image_file)\n encoded_image = encode_image(image_path)\n key = f\"image:{image_file}\"\n redis_client.set(key, encoded_image)\n\ndef retrieve_image_from_redis(image_key, output_path):\n encoded_image = redis_client.get(image_key)\n if encoded_image:\n decoded_image = decode_image(encoded_image)\n decoded_image.save(output_path)\n\n# Specify the folder containing images\nimage_folder_path = 'path/to/your/image/folder'\n\n# Store images in Redis\nstore_images_in_redis(image_folder_path)\n\n# Retrieve an image from Redis (replace 'your_image_key' with an actual key)\nimage_key = 'image:your_image.jpg'\noutput_image_path = 'output_image.jpg'\nretrieve_image_from_redis(image_key, output_image_path)\n\n\n","repo_name":"ChenXie-sci/model_optimization","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"17229798699","text":"import pathlib\nfrom .gen_utils import *\nfrom dk2cxx import *\n\n\ndef format_functions_cpp(globals: list[dk2map.Global], blocks: UserBlocks = None):\n def format_cpp_head():\n yield format_middle(f\"warning: file is managed by {pathlib.Path(__file__).name}\")\n yield format_middle(f\"you can edit code only in *_user_code blocks\")\n yield f\"#include \"\n yield empty_line\n yield f\"using namespace dk2;\"\n yield empty_line\n yield f\"#define relink_stub(name) printf(\\\"[fatal]: stub \\\"#name\\\" call\\\\n\\\"); ::abort();\"\n yield empty_line\n yield from map(format_autogen_line, format_cpp_head())\n yield format_block_line(\"head_user_code\")\n if blocks is not None:\n yield from blocks.head\n else:\n yield f\"\"\n yield f\"// user code\"\n yield f\"\"\n yield format_end_of_block_line()\n\n def format_cpp_body():\n yield f\"#pragma optimize( \\\"\\\", off )\"\n for glob in filter(filter_function_var, globals):\n fun_t = glob.type # type: dk2map.FunctionType\n suffix = \" // assembly\" if fun_t.declspec is dk2map.Declspec.Assembly else ''\n # ret = ''\n # if fun_t.ret.kind is dk2map.TypeKind.Ptr:\n # ret = ' return NULL; '\n # elif fun_t.ret.kind is dk2map.TypeKind.Bool:\n # ret = ' return false; '\n # elif fun_t.ret.kind in [\n # dk2map.TypeKind.Int, dk2map.TypeKind.Float,\n # dk2map.TypeKind.Char, dk2map.TypeKind.Winapi\n # ]:\n # ret = ' return 0; '\n name = f\"dk2::{format_function_name(glob.name)}\"\n ret = f\" relink_stub({name}); \"\n yield f\"/*{glob.va:08X}*/ {format_function(fun_t, name)} {{{ret}}}{suffix}\"\n yield empty_line\n yield f\"#pragma optimize( \\\"\\\", on )\"\n yield from map(format_autogen_line, format_cpp_body())\n yield format_block_line(\"tail_user_code\")\n if blocks is not None:\n yield from blocks.tail\n else:\n yield f\"\"\n yield f\"// user code\"\n yield f\"\"\n yield 
format_end_of_block_line()\n\n","repo_name":"DiaLight/Ember","sub_path":"dk2/gen/gen_functions_cpp.py","file_name":"gen_functions_cpp.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"}
+{"seq_id":"25164537359","text":"from flask import Flask, request, abort\nfrom flask_restful import Resource, Api\nfrom marshmallow import Schema, fields\n\n\nclass BarQuerySchema(Schema):\n key1 = fields.Str(required=True)\n key2 = fields.Str(required=True)\n kucundayu = fields.Int()\n\n\napp = Flask(__name__)\napi = Api(app)\nschema = BarQuerySchema()\n\n\nclass BarAPI(Resource):\n def get(self):\n print(request.args)\n errors = schema.validate(request.args)\n if errors:\n abort(400, str(errors))\n msg = \"OK\"\n #print(request.args['key1'])\n msg += request.args['key1'] + \", \"\n #print(request.args['key1'])\n msg += request.args['key2'] + \", \"\n\n if 'kucundayu' in request.args:\n print(request.args['kucundayu'])\n msg += request.args['kucundayu'] + \", \"\n return msg\n\napi.add_resource(BarAPI, '/bar', endpoint='bar')\n\n# omit of you intend to use `flask run` command\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"CeciliaRuiSun/Asset-Management-Web-App","sub_path":"guide/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"2179910509","text":"'''Written by Yinshi Liu'''\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n#define constants\nL = 1*10**(-8)\nm = 9.109*10**(-31)\nsigma = L/25\nkappa = 500/L\nx_0 = L/5\nP = 1024\ndt = 1*10**(-18)\nN = 3000\nh_bar = 1.05*10**(-34)\n\np = np.arange(1, P)\na = L/P\nx = p*a - L/2\npsi = np.zeros(P-1, dtype = \"complex_\")\n\n#normalization constant\npsi_0 = 1/((2*np.pi*sigma**2)**(1/4))\n#initial psi \nfor i in range(1, P-1):\n psi[i] = psi_0 * np.exp(-(x[i]-x_0)**2/(4*sigma**2) + 1j*kappa*x[i])\nprint(psi)\nplt.figure(figsize = (6,4))\nplt.plot(x, psi)\nplt.xlabel('x(m)')\nplt.ylabel('real ψ')\nplt.title('wavefunction ψ at T = 0')\nplt.tight_layout()\nplt.savefig('Fig 1.1.png')\n#define the Hamiltonian\nA = -h_bar**2/(2*m*a**2)\n#V(x) = 0 in potential well\nB = -2*A\nvec_diag = B*np.ones(P-1)\nD = np.diag(vec_diag, k=0)\nsup = A*np.eye(P-1, k = 1)\nsub = A*np.eye(P-1, k = -1)\nH = D + sup + sub\n\n#define time independent matrix L, R\nLeft = np.eye(P-1) + (dt/(2*h_bar))*1j*H\nRight = np.eye(P-1) - (dt/(2*h_bar))*1j*H\n\n#define position and time arrays\ntime = []\nx_exp = []\nprob = []\npsi_total = []\nprob = []\npos = []\nenergy = []\n#begin time steps\ni = 0\nwhile i < 3000:\n #calculate psi(n+1)\n v = np.matmul(Right, psi)\n psi_new = np.linalg.solve(Left, v)\n psi = np.copy(psi_new)\n psi_total.append(psi_new)\n if i == 750:\n plt.figure(figsize = (6,4))\n plt.plot(x, psi)\n plt.xlabel('x(m)')\n plt.ylabel('real ψ')\n plt.title('wavefunction ψ at T = T/4')\n plt.tight_layout()\n plt.savefig('Fig 1.2.png')\n if i == 1500:\n plt.figure(figsize = (6,4))\n plt.plot(x, psi)\n plt.xlabel('x(m)')\n plt.ylabel('real ψ')\n plt.title('wavefunction ψ at T = T/2')\n plt.tight_layout()\n plt.savefig('Fig 1.3.png')\n if i == 2250:\n plt.figure(figsize = (6,4))\n plt.plot(x, psi)\n plt.xlabel('x(m)')\n plt.ylabel('real ψ')\n plt.title('wavefunction ψ at T = 3T/4')\n plt.tight_layout()\n plt.savefig('Fig 1.4.png')\n 
time.append(i*dt)\n i += 1\nplt.figure(figsize = (6,4))\nplt.plot(x, psi)\nplt.xlabel('x(m)')\nplt.ylabel('real ψ')\nplt.title('wavefunction ψ at T = T')\nplt.tight_layout()\nplt.savefig('Fig 1.5.png')\n\n#verify normalization\n#integrate psi*conj(psi) using trap. rule\nfor i in range(len(psi_total)):\n psi_t = psi_total[i]\n psi_magnitude = psi_t*np.conj(psi_t)\n prob.append(np.sum(psi_magnitude)*a)\n#calculate expected value of x\nfor i in range(len(psi_total)):\n psi_t = psi_total[i]\n position = x*psi_t*np.conj(psi_t)\n pos.append(np.sum(position)*a)\n#calculate total energy\nfor i in range(len(psi_total)):\n psi_t = psi_total[i]\n psi_magnitude = psi_t*np.conj(psi_t)\n E = np.matmul(H, psi_magnitude)\n energy.append(np.sum(E*a))\nplt.figure()\nplt.plot(time, prob)\nplt.ylim(0.9, 1.1)\nplt.xlabel('time(s)')\nplt.ylabel('Total Probability')\nplt.title('Normalization of ψ over time')\nplt.savefig('Fig 1.7.png')\n\nplt.figure()\nplt.plot(time, pos)\nplt.xlabel('time(s)')\nplt.ylabel('Expected position')\nplt.title('Expected position over time')\nplt.savefig('Fig 1.6.png')\n\nplt.figure()\nplt.plot(time, energy)\nplt.xlabel('time(s)')\nplt.ylabel('Energy (J)')\nplt.title('Energy conservation over time')\nplt.savefig('Fig 1.8.png')\n","repo_name":"Lenventor/Computation-physics-PHY407","sub_path":"Lab09_TIme-dependent Schrodinger equation (Crank-Nicolson Method).py","file_name":"Lab09_TIme-dependent Schrodinger equation (Crank-Nicolson Method).py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"13000620983","text":"import click\nimport botocore, botocore.session\nfrom botocore.exceptions import ClientError\nimport time\nimport cv2\nimport sys\nimport errno\nfrom os import listdir\nfrom os.path import isfile, join\n\nFILE_NAME = 'selfie.png'\n\ndef capture_frame():\n ### Capture image from cam\n camera = cv2.VideoCapture(0)\n time.sleep(0.2) # If you don't wait, the image will be dark\n\n if camera.isOpened(): # try to get the first frame\n rval, frame = camera.read()\n else:\n rval = False\n exit -1\n\n cv2.imwrite(FILE_NAME, frame)\n del(camera) # so that others can use the camera as soon as possible\n\ndef add_face(ctx, file, collection, name):\n session = botocore.session.Session(profile=ctx.obj['PROFILE'])\n\n ### Send to Rekognition to add it to faces collection\n rekognition = session.create_client('rekognition')\n dynamodb = session.create_client('dynamodb')\n\n ### Read image from file system\n with open(file, 'rb') as image:\n response = rekognition.index_faces(\n CollectionId=collection,\n Image={\n 'Bytes': image.read()\n }\n )\n\n ### If successful, add info in DDB\n if len(response['FaceRecords']) > 0:\n ddb_response = dynamodb.put_item(\n TableName=collection,\n Item={\n 'face-id': {\n 'S': response['FaceRecords'][0]['Face']['FaceId'],\n },\n 'name': {\n 'S': name,\n }\n }\n )\n click.secho(\"All done. {} has been successfully added.\".format(name), fg='blue')\n else:\n click.secho(\"Sorry, something went wrong while adding {}. Try again or see an admin for help.\".format(name), fg='yellow')\n\n@click.group()\n@click.option('--profile', metavar='AWS_PROFILE', default='default', envvar='AWS_DEFAULT_PROFILE',\n help='The name of the AWS profile to use. 
You can configure a profile with the AWS CLI command: aws configure --profile .')\n@click.pass_context\ndef cli(ctx, profile):\n \"\"\"TBD\"\"\"\n ctx.obj = {}\n ctx.obj['PROFILE'] = profile\n\n@cli.command()\n@click.option('--collection', prompt='Please enter the collection name', help='Name of the collection to add the faces to')\n@click.option('--path', prompt='Please enter the path to the images', help='Path to a directory containing the faces images')\n@click.pass_context\ndef setup(ctx, collection, path):\n \"\"\"Sets up a collection with faces (pictures) from the local filesystem.\"\"\"\n session = botocore.session.Session(profile=ctx.obj['PROFILE'])\n\n rekognition = session.create_client('rekognition')\n dynamodb = session.create_client('dynamodb')\n\n ### Creates DDB table\n try:\n response = dynamodb.create_table(\n AttributeDefinitions=[\n {\n 'AttributeName': 'face-id',\n 'AttributeType': 'S',\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'face-id',\n 'KeyType': 'HASH',\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 5,\n 'WriteCapacityUnits': 5,\n },\n TableName=collection\n )\n\n click.secho(\"DynamoDB table {} created.\".format(collection), fg='blue')\n except ClientError as e:\n click.secho(\"Sorry, something went wrong: {}. Try again or see an admin for help.\".format(e), fg='yellow')\n\n ### Creates Rekognition collection\n response = rekognition.create_collection(\n CollectionId=collection\n )\n\n if response['StatusCode'] == 200:\n click.secho(\"Collection {} created.\".format(collection), fg='blue')\n else:\n click.secho(\"Sorry, something went wrong. 
Try again or see an admin for help.\", fg='yellow')\n\n ### Adds faces to collection and info to DDB\n faces = [f for f in listdir(path) if isfile(join(path, f))]\n for face in faces:\n click.secho(\"Working on {}\".format(face), fg='blue')\n try:\n add_face(ctx, join(path, face), collection, face.split('.')[0])\n except IOError as exc:\n if exc.errno != errno.EISDIR: # Do not fail if a directory is found, just ignore it.\n raise # Propagate other kinds of IOError.\n\n@cli.command()\n@click.option('--name', nargs=2, prompt='Please enter the full name', help='Full name of the person being added')\n@click.option('--collection', prompt='Please enter the collection name', help='Name of the collection to add the face to')\n@click.pass_context\ndef add(ctx, name, collection):\n \"\"\"Captures an image from the camera and adds it to the collection.\"\"\"\n ### Capture image from cam\n capture_frame()\n\n ### Send to Rekognition to add it to faces collection\n add_face(ctx, FILE_NAME, collection, name)\n\n\n@cli.command()\n@click.option('--collection', prompt='Please enter the collection name', help='Name of the collection to compare the face to')\n@click.pass_context\ndef capture(ctx, collection):\n \"\"\"Captures an image from the camera and compares it to the faces in the collection.\"\"\"\n session = botocore.session.Session(profile=ctx.obj['PROFILE'])\n ### Capture image from cam\n capture_frame()\n\n ### Send to Rekognition to compare it to faces in collection\n rekognition = session.create_client('rekognition')\n dynamodb = session.create_client('dynamodb')\n\n with open(\"selfie.png\", 'rb') as image:\n response = rekognition.search_faces_by_image(\n CollectionId=collection,\n Image={\n 'Bytes': image.read()\n },\n MaxFaces=1,\n FaceMatchThreshold=80\n )\n\n ### If a match is found, get info from DDB\n if len(response['FaceMatches']) == 1:\n ddb_response = dynamodb.get_item(\n TableName=collection,\n Key={\n 'face-id': {\n 'S': 
response['FaceMatches'][0]['Face']['FaceId'],\n }\n }\n )\n click.secho(\"Welcome {}! You can now proceed.\".format(ddb_response['Item']['name']['S']), fg='green')\n else:\n click.secho(\"Sorry, we couldn't recognize you. Try again or see an admin for help.\", fg='yellow')\n","repo_name":"dstroppa/facer","sub_path":"facer.py","file_name":"facer.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"35803075785","text":"# standard library imports\nfrom typing import TYPE_CHECKING, Optional\nimport time\nimport random\nimport contextlib\n\n# third party imports\nimport numpy as np\nimport emcee\nimport arviz as az\nfrom loguru import logger\nfrom tabulate import tabulate\n\n# local imports\nfrom probeye.subroutines import pretty_time_delta\nfrom probeye.subroutines import check_for_uninformative_priors\nfrom probeye.inference.scipy.solver import ScipySolver\nfrom probeye.subroutines import stream_to_logger\nfrom probeye.subroutines import print_dict_in_rows\nfrom probeye.subroutines import extract_true_values\n\n# imports only needed for type hints\nif TYPE_CHECKING: # pragma: no cover\n from probeye.definition.inverse_problem import InverseProblem\n\n\nclass EmceeSolver(ScipySolver):\n \"\"\"\n Provides emcee-sampler which is a pure-Python implementation of Goodman & Weare’s\n Affine Invariant Markov chain Monte Carlo (MCMC) Ensemble sampler. For more\n information, check out https://emcee.readthedocs.io/en/stable/.\n\n Parameters\n ----------\n problem\n Describes the inverse problem including e.g. parameters and data.\n seed\n Random state used for random number generation.\n show_progress\n When True, the progress of a solver routine will be shown (for example as a\n progress-bar) if such a feature is available. 
Otherwise, the progress will\n not shown.\n \"\"\"\n\n def __init__(\n self,\n problem: \"InverseProblem\",\n seed: Optional[int] = None,\n show_progress: bool = True,\n ):\n logger.debug(f\"Initializing {self.__class__.__name__}\")\n # check that the problem does not contain a uninformative prior\n check_for_uninformative_priors(problem)\n # initialize the scipy-based solver (ScipySolver)\n super().__init__(problem, seed=seed, show_progress=show_progress)\n\n def emcee_summary(\n self, posterior_samples: np.ndarray, true_values: Optional[dict] = None\n ) -> dict:\n \"\"\"\n Computes and prints a summary of the posterior samples containing mean, median,\n standard deviation, 5th percentile and 95th percentile. Note, that this method\n was based on code from the taralli package: https://gitlab.com/tno-bim/taralli.\n\n Parameters\n ----------\n posterior_samples\n The generated samples in an array with as many columns as there are latent\n parameters, and n rows, where n = n_chains * n_steps.\n true_values\n True parameter values, if known.\n\n Returns\n -------\n Keys are the different statistics 'mean', 'median', 'sd' (standard\n deviation), 'q05' and 'q95' (0.05- and 0.95-quantile). 
The values are\n dictionaries with the parameter names as keys and the respective statistics\n as values.\n \"\"\"\n\n # used for the names in the first column\n var_names = self.problem.get_theta_names(tex=False, components=True)\n\n # compute some stats for each column (i.e., each parameter)\n mean = np.mean(posterior_samples, axis=0)\n quantiles = np.quantile(posterior_samples, [0.50, 0.05, 0.95], axis=0)\n median = quantiles[0, :]\n quantile_05 = quantiles[1, :]\n quantile_95 = quantiles[2, :]\n\n # compute the sample standard deviations for each parameter\n cov_matrix = np.atleast_2d(np.cov(posterior_samples.T))\n sd = np.sqrt(np.diag(cov_matrix))\n\n # assemble the summary array\n if true_values:\n col_names = [\"\", \"true\", \"mean\", \"median\", \"sd\", \"5%\", \"95%\"]\n true = extract_true_values(true_values, var_names)\n row_names = np.array(var_names)\n tab = np.hstack(\n (\n row_names.reshape(-1, 1),\n true.reshape(-1, 1),\n mean.reshape(-1, 1),\n median.reshape(-1, 1),\n sd.reshape(-1, 1),\n quantile_05.reshape(-1, 1),\n quantile_95.reshape(-1, 1),\n )\n )\n\n # print the generated table, and return a summary dict for later use\n print(tabulate(tab, headers=col_names, floatfmt=\".2f\"))\n return {\n \"true\": {name: val for name, val in zip(row_names, true)},\n \"mean\": {name: val for name, val in zip(row_names, mean)},\n \"median\": {name: val for name, val in zip(row_names, median)},\n \"sd\": {name: val for name, val in zip(row_names, sd)},\n \"q05\": {name: val for name, val in zip(row_names, quantile_05)},\n \"q95\": {name: val for name, val in zip(row_names, quantile_95)},\n }\n else:\n col_names = [\"\", \"mean\", \"median\", \"sd\", \"5%\", \"95%\"]\n row_names = np.array(var_names)\n tab = np.hstack(\n (\n row_names.reshape(-1, 1),\n mean.reshape(-1, 1),\n median.reshape(-1, 1),\n sd.reshape(-1, 1),\n quantile_05.reshape(-1, 1),\n quantile_95.reshape(-1, 1),\n )\n )\n\n # print the generated table, and return a summary dict for later use\n 
print(tabulate(tab, headers=col_names, floatfmt=\".2f\"))\n return {\n \"mean\": {name: val for name, val in zip(row_names, mean)},\n \"median\": {name: val for name, val in zip(row_names, median)},\n \"sd\": {name: val for name, val in zip(row_names, sd)},\n \"q05\": {name: val for name, val in zip(row_names, quantile_05)},\n \"q95\": {name: val for name, val in zip(row_names, quantile_95)},\n }\n\n def run(\n self,\n n_walkers: int = 20,\n n_steps: int = 1000,\n n_initial_steps: int = 100,\n true_values: Optional[dict] = None,\n **kwargs,\n ) -> az.data.inference_data.InferenceData:\n \"\"\"\n Runs the emcee-sampler for the InverseProblem the EmceeSolver was initialized\n with and returns the results as an arviz InferenceData obj.\n\n Parameters\n ----------\n n_walkers\n Number of walkers used by the estimator.\n n_steps\n Number of steps to run.\n n_initial_steps\n Number of steps for initial (burn-in) sampling.\n true_values\n True parameter values, if known.\n kwargs\n Additional key-word arguments channeled to emcee.EnsembleSampler.\n\n Returns\n -------\n inference_data\n Contains the results of the sampling procedure.\n \"\"\"\n\n # log which solver is used\n logger.info(\n f\"Solving problem using emcee sampler with {n_initial_steps} + {n_steps} \"\n f\"samples and {n_walkers} walkers\"\n )\n if kwargs:\n logger.info(\"Additional options:\")\n print_dict_in_rows(kwargs, printer=logger.info)\n else:\n logger.info(\"No additional options specified\")\n\n # draw initial samples from the parameter's priors\n logger.debug(\"Drawing initial samples\")\n if self.seed is not None:\n np.random.seed(self.seed)\n sampling_initial_positions = np.zeros(\n (n_walkers, self.problem.n_latent_prms_dim)\n )\n theta_names = self.problem.get_theta_names(tex=False, components=False)\n for parameter_name in theta_names:\n idx = self.problem.parameters[parameter_name].index\n idx_end = self.problem.parameters[parameter_name].index_end\n samples = 
self.sample_from_prior(parameter_name, n_walkers)\n if (idx_end - idx) == 1:\n sampling_initial_positions[:, idx] = samples\n else:\n sampling_initial_positions[:, idx:idx_end] = samples\n\n # The following code is based on taralli and merely adjusted to the variables\n # in the probeye setup; see https://gitlab.com/tno-bim/taralli\n\n # ............................................................................ #\n # Pre-process #\n # ............................................................................ #\n\n def logprob(x):\n # Skip loglikelihood evaluation if logprior is equal\n # to negative infinity\n logprior = self.logprior(x)\n if logprior == -np.inf:\n return logprior\n\n # Otherwise return logprior + loglikelihood\n return logprior + self.loglike(x)\n\n logger.debug(\"Setting up EnsembleSampler\")\n sampler = emcee.EnsembleSampler(\n nwalkers=n_walkers,\n ndim=self.problem.n_latent_prms_dim,\n log_prob_fn=logprob,\n **kwargs,\n )\n\n if self.seed is not None:\n random.seed(self.seed)\n sampler.random_state = np.random.mtrand.RandomState(self.seed)\n\n # ............................................................................ #\n # Initial sampling, burn-in: used to avoid a poor starting point #\n # ............................................................................ #\n\n logger.debug(\"Starting sampling (initial + main)\")\n start = time.time()\n state = sampler.run_mcmc(\n initial_state=sampling_initial_positions,\n nsteps=n_initial_steps,\n progress=self.show_progress,\n )\n sampler.reset()\n\n # ............................................................................ #\n # Sampling of the posterior #\n # ............................................................................ 
#\n sampler.run_mcmc(\n initial_state=state, nsteps=n_steps, progress=self.show_progress\n )\n end = time.time()\n runtime_str = pretty_time_delta(end - start)\n logger.info(\n f\"Sampling of the posterior distribution completed: {n_steps} steps and \"\n f\"{n_walkers} walkers.\"\n )\n logger.info(f\"Total run-time (including initial sampling): {runtime_str}.\")\n logger.info(\"\")\n logger.info(\"Summary of sampling results (emcee)\")\n posterior_samples = sampler.get_chain(flat=True)\n with contextlib.redirect_stdout(stream_to_logger(\"INFO\")): # type: ignore\n self.summary = self.emcee_summary(\n posterior_samples, true_values=true_values\n )\n logger.info(\"\") # empty line for visual buffer\n self.raw_results = sampler\n\n # translate the results to a common data structure and return it\n var_names = self.problem.get_theta_names(tex=True, components=True)\n inference_data = az.from_emcee(sampler, var_names=var_names)\n return inference_data\n","repo_name":"BAMresearch/probeye","sub_path":"probeye/inference/emcee/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":10995,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"}
+{"seq_id":"20750269076","text":"n = int(input())\narr = [list(map(int, input().split())) for _ in range(n)]\narr.sort(key = lambda x : x[0])\n\ndp = [0] * len(arr)\ndp[0] = 1\nfor i in range(1, len(dp)):\n max_num = 0\n for j in range(0, i):\n if arr[j][1] < arr[i][1]:\n max_num = max(max_num, dp[j])\n dp[i] = max_num + 1\n\nprint(len(dp)-max(dp))","repo_name":"jongbin26/coding_test","sub_path":"python/2565.py","file_name":"2565.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10325728306","text":"from django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.models import Permission\nfrom django.db.models import Q, Exists, OuterRef\nfrom ..models.archive import Archive\n\nclass ArchiveBackend(ModelBackend):\n def get_group_permissions(self, user_obj, obj=None):\n perm_cache = super().get_group_permissions(user_obj, obj)\n group_perms = Permission.objects.filter(\n archivegrouppermission__group__user__id=user_obj.id,\n content_type__app_label='archive',\n )\n group_perms = group_perms.values_list(\n 'content_type__app_label',\n 'archivegrouppermission__archive__slug',\n 'codename',\n ).order_by()\n perm_cache = perm_cache.union({\n \"{label}.any.{codename}\".format(label=label, codename=codename)\n for label, slug, codename in group_perms\n })\n perm_cache = perm_cache.union({\n \"{label}.archive.{slug}.{codename}\".format(label=label, slug=slug, codename=codename)\n for label, slug, codename in group_perms\n })\n perms = Permission.objects.filter(\n group__user=user_obj,\n content_type__app_label='archive',\n )\n perms = perms.values_list('content_type__app_label', 'codename').order_by()\n perm_cache = perm_cache.union(self.implied_per_archive_permissions(perms))\n user_obj._group_perm_cache = perm_cache\n return perm_cache\n\n def implied_per_archive_permissions(self, values):\n return {\n \"{label}.archive.{slug}.{codename}\".format(label=label, codename=codename, slug=archive.slug)\n for label, codename in values\n for archive in Archive.objects.all()\n } | {\n \"{label}.any.{codename}\".format(label=label, codename=codename)\n for label, codename in values\n }\n\n def get_user_permissions(self, user_obj, obj=None):\n perm_cache = super().get_user_permissions(user_obj, obj)\n perms = Permission.objects.filter(\n archiveuserpermission__user=user_obj,\n content_type__app_label='archive',\n )\n perms = perms.values_list('content_type__app_label', 'archiveuserpermission__archive__slug', 
'codename').order_by()\n perm_cache = perm_cache.union({\n \"{label}.any.{codename}\".format(label=label, codename=codename)\n for label, slug, codename in perms\n })\n perm_cache = perm_cache.union({\n \"{label}.archive.{slug}.{codename}\".format(label=label, slug=slug, codename=codename)\n for label, slug, codename in perms\n })\n if user_obj.is_superuser:\n perms = Permission.objects.filter(\n content_type__app_label='archive',\n )\n else:\n perms = Permission.objects.filter(\n user=user_obj,\n content_type__app_label='archive',\n )\n perms = perms.values_list('content_type__app_label', 'codename').order_by()\n perm_cache = perm_cache.union(self.implied_per_archive_permissions(perms))\n\n user_obj._user_perm_cache = perm_cache\n return perm_cache\n\n def has_perm(self, user_obj, perm, obj=None):\n return super().has_perm(user_obj, perm, obj)\n","repo_name":"workjonathan/kronofoto","sub_path":"kronofoto/archive/auth/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33381489748","text":"from __future__ import print_function\n\nimport argparse\nfrom collections import Counter\nimport os\nimport re\nimport subprocess\n\nfile_re = re.compile(r'diff --git a/(\\S+)')\ndiff_region_re = re.compile(r'@@\\s[-]\\S+\\s[+](\\S+)\\s@@')\nsource_line_re = re.compile(r'
Coverage for [^:]+:\\s+(\\d+%)<\\/title>')\nsummary_end_re = re.compile(r'\\s+
')\n\n__version__ = \"0.7\"\n\n\nclass DiffCollectionFailed(Exception):\n pass\n\n\nclass SourceLine(object):\n\n def __init__(self, line_number, is_context=True, code=''):\n self.line_number = line_number\n self.is_context = is_context\n self.code = code\n self.status = '???'\n\n def __eq__(self, other):\n return (self.line_number == other.line_number and\n self.is_context == other.is_context)\n\n def __repr__(self):\n return \"SourceLine(line_number=%d, is_context=%s)\" % (self.line_number,\n self.is_context)\n\n\nclass SourceModule(object):\n\n def __init__(self, filename, lines):\n self.filename = filename\n self.lines = lines\n self.line_num_map = {l.line_number: l for l in lines}\n self.cover_file = (filename.replace('/', '_').replace('.', '_') +\n \".html\")\n self.have_report = False\n self.coverage = '??%'\n\n def update_line_status(self, line_number, status):\n if line_number in self.line_num_map:\n line = self.line_num_map[line_number]\n if status.startswith('pln'):\n line.status = ' '\n else:\n line.status = status[4:7]\n\n def report(self):\n output = self.filename\n if not self.have_report:\n return \"%s (No coverage data)\\n\" % output\n if not self.lines or all(l.is_context for l in self.lines):\n return \"%s (No added/changed lines)\\n\" % output\n stats = Counter([l.status for l in self.lines if not l.is_context])\n output += \" (run={}, mis={}, par={}, ign={}) {}\\n\".format(\n stats['run'], stats['mis'], stats['par'], stats[' '],\n self.coverage)\n last_line = None\n for line in self.lines:\n if last_line and line.line_number != (last_line + 1):\n output += \"\\n\"\n output += \"{:5d} {} {} {}\\n\".format(\n line.line_number, line.status,\n ' ' if line.is_context else '+', line.code)\n last_line = line.line_number\n return output\n\n\ndef check_coverage_status(coverage_info, module):\n for coverage_line in coverage_info:\n m = title_re.match(coverage_line)\n if m:\n module.coverage = m.group(1)\n continue\n if 
summary_end_re.match(coverage_line):\n return\n m = source_line_re.match(coverage_line)\n if m:\n line_num = int(m.group(1))\n status = m.group(2)\n module.update_line_status(line_num, status)\n\n\ndef check_coverage_file(root, module):\n \"\"\"Check the lines in coverage file and report coverage status.\"\"\"\n report_file = os.path.join(root, 'cover', module.cover_file)\n if not os.path.isfile(report_file):\n return # No coverage data for file\n with open(report_file) as coverage_info:\n coverage_lines = coverage_info.readlines()\n check_coverage_status(coverage_lines, module)\n module.have_report = True\n\n\ndef collect_diff_lines(diff_region, start, last):\n \"\"\"Find added and context lines in a diff region.\n\n Note: If the diff region is at the start or end of the file, there\n may not be context lines.\n \"\"\"\n lines = []\n line_num = start\n while line_num <= last:\n line = next(diff_region)\n if line.startswith('-'):\n continue\n lines.append(SourceLine(line_num, is_context=line.startswith(' '),\n code=line[1:]))\n line_num += 1\n return lines\n\n\ndef parse_diffs(diff_output):\n \"\"\"Collect the file and ranges of diffs added, if any.\"\"\"\n added_lines = []\n source_file = ''\n diff_lines = iter(diff_output.splitlines())\n for line in diff_lines:\n m = file_re.match(line)\n if m:\n source_file = m.group(1)\n continue\n m = diff_region_re.match(line)\n if m:\n start, comma, num = m.group(1).partition(',')\n start = int(start)\n if num:\n last = start + int(num) - 1\n else:\n last = start\n added_lines += collect_diff_lines(diff_lines, start, last)\n return (source_file, added_lines)\n\n\ndef collect_diffs_for_files(root, versions, source_files, context_lines):\n \"\"\"Generator to obtain the diffs for files.\"\"\"\n os.chdir(root)\n for filename in source_files:\n command = ['git', 'diff', '-U%d' % context_lines,\n '-w', versions, '--', filename]\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n diff_lines, err = 
p.communicate()\n if err:\n raise DiffCollectionFailed(\n \"Unable to collect diffs for file %s/%s: %s\" %\n (root, filename, err))\n yield diff_lines\n\n\ndef collect_diff_files(root, versions):\n \"\"\"Generator to obtain all the diff files.\"\"\"\n command = ['git', 'diff', '--name-only', versions]\n os.chdir(root)\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if err:\n raise DiffCollectionFailed(\"Unable to find diff files to examine \"\n \"in %s: %s\" % (root, err))\n for filename in out.splitlines():\n if not os.path.basename(filename).startswith('.'):\n yield filename\n\n\ndef validate(parser, provided_args=None):\n args = parser.parse_args(provided_args)\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n parser.error(\"The repo-dir must be a directory pointing to the top \"\n \"of the Git repo\")\n if not os.path.isdir(os.path.join(args.root, 'cover')):\n parser.error(\"Missing cover directory for project\")\n if args.commits == 'working':\n args.commits = 'HEAD'\n elif args.commits == 'committed':\n args.commits = 'HEAD^..HEAD'\n return args\n\n\ndef main():\n args = validate(setup_parser())\n files = collect_diff_files(args.root, args.commits)\n diff_files = collect_diffs_for_files(args.root, args.commits, files,\n args.context)\n for diffs in diff_files:\n source_file, lines = parse_diffs(diffs)\n module = SourceModule(source_file, lines)\n check_coverage_file(args.root, module)\n print(module.report())\n\n\ndef setup_parser():\n parser = argparse.ArgumentParser(\n description='Reports coverage for a commit in repo.')\n parser.add_argument(\n '-c', '--context', action='store', type=int, default=3,\n help='Number of context lines around diff regions. Default=3.')\n parser.add_argument(\n '-w', '--which', action='store', default=\"working\", dest='commits',\n help=\"Which commit(s) to compare. Use 'working', 'commit', or \"\n \"custom commit specification. 
Latest should be same as cover run. \"\n \"Default='working'.\")\n parser.add_argument(dest='root', metavar='repo-dir',\n help=\"Root of Git repo\")\n return parser\n","repo_name":"pmichali/commit-coverage","sub_path":"commit_coverage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"21931316268","text":"\"\"\" get rates module \"\"\"\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import date\nimport threading\n\nimport requests\n\nfrom rates_demo.business_days import business_days\nimport rates_demo.rates_orchestrator as ro\n\ndef get_rates(base_url: str) -> list[str]:\n \"\"\" get rates \"\"\"\n\n start_date = date(2021, 1, 1)\n end_date = date(2021, 1, 31)\n rates: list[str] = []\n\n for business_day in business_days(start_date, end_date):\n rates_url = \"\".join([base_url, \"/api/\",\n business_day.strftime(\"%Y-%m-%d\"),\n \"?base=USD&symbols=EUR\"])\n\n response = requests.request(\"GET\", rates_url)\n rates.append(response.text)\n\n return rates\n\n\ndef get_rate_task(base_url: str, business_day: date) -> None:\n \"\"\" get rate for a single day from the rest api \"\"\"\n\n rates_url = \"\".join([base_url, \"/api/\",\n business_day.strftime(\"%Y-%m-%d\"),\n \"?base=USD&symbols=EUR\"])\n\n ro.process_rates_queue.put(requests.request(\"GET\", rates_url).text)\n\n\ndef get_rates_threaded(base_url: str) -> None:\n \"\"\" get rates using multiple threads \"\"\"\n\n start_date = date(2021, 1, 1)\n end_date = date(2021, 1, 31)\n threads: list[threading.Thread] = []\n\n for business_day in business_days(start_date, end_date):\n a_thread = threading.Thread(\n target=get_rate_task, args=(base_url, business_day))\n a_thread.start()\n threads.append(a_thread)\n\n for a_thread in threads:\n a_thread.join()\n\n ro.get_rates_done.set()\n\n\n# def get_rates_threadpool(base_url: str) -> list[str]:\n# \"\"\" get rates using multiple threads \"\"\"\n\n# start_date = date(2021, 1, 1)\n# end_date = date(2021, 1, 31)\n\n# with ThreadPoolExecutor() as executor:\n\n# return list(executor.map(\n# lambda params: get_rate_task(*params),\n# [ (base_url, business_day) for business_day\n# in business_days(start_date, end_date)]\n# 
))\n\n\n","repo_name":"t4d-classes/advanced-python_05102021","sub_path":"python_demos/rates_demo/get_rates.py","file_name":"get_rates.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73826630951","text":"import ipaddress\n\nclass Validator:\n\n ALLOWED_STATES = [\n 'NY',\n 'CA',\n 'AZ',\n 'TX',\n 'OH'\n ]\n\n ALLOWED_ESTIMATION_TYPES = [\n 'normal',\n 'premium'\n ]\n\n MANDATORY_PARAMETERS = [\n 'km',\n 'type',\n 'base_amount',\n 'state',\n ]\n\n NUMBER_PARAMETERS = [\n 'km',\n 'base_amount'\n ]\n\n def validate(self, params, ip):\n self.validate_ip(ip)\n self.validate_params_integrity(params)\n self.validate_state(params['state'])\n self.validate_type(params['type'])\n self.validate_numbers(params)\n\n def validate_params_integrity(self, params):\n params_sent = list(params.keys())\n for param in self.MANDATORY_PARAMETERS:\n if param not in params_sent:\n raise Exception(\"param \"+param+\" is a mandatory\")\n if params[param] == '':\n raise Exception(\"param \"+param+\" shouldn't be empty\")\n\n def validate_state(self, state:str):\n if state.upper() not in self.ALLOWED_STATES:\n raise Exception('unsupported state')\n\n def validate_type(self, type:str):\n if type.lower() not in self.ALLOWED_ESTIMATION_TYPES:\n raise Exception('unsupported type')\n\n def validate_ip(self, ip):\n try:\n ipaddress.ip_address(ip)\n except ValueError:\n raise Exception(\"ip is not valid\")\n\n def validate_numbers(self, params):\n for param in self.NUMBER_PARAMETERS:\n if not params[param].isnumeric():\n try:\n float(params[param])\n except ValueError:\n raise Exception(\"param \"+param+\" must be numeric\")\n\n","repo_name":"dsmunoz/total-calculator","sub_path":"services/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73830448874","text":"from django.urls import path, include\n\nfrom wind_simulation_model.views import (\n wind_station_simulation_main, wind_station_simulation_now, wind_station_simulation_forecast,\n calculate_solar_radiation, get_forecast_data, turbines, turbine_in_detail, add_turbine,\n edit_turbine, edit_turbine_field, delete_turbine_table, add_turbine_field, get_all_forecast_data\n)\n\n\nurlpatterns = [\n path('solar_radiation', calculate_solar_radiation, name='calculate_solar_radiation'),\n\n path('wind_simulation', wind_station_simulation_main, name='wind_station_simulation_main'),\n path('wind_simulation/now', wind_station_simulation_now, name='wind_station_simulation_now'),\n path('wind_simulation/forecast', wind_station_simulation_forecast, name='wind_station_simulation_forecast'),\n\n path('wind_simulation/list_of_turbines', turbines, name='list_of_turbines'),\n path('wind_simulation/list_of_turbines/', turbine_in_detail, name='turbine_ib_detail'),\n\n path('wind_simulation/add_turbine', add_turbine, name='add_turbine'),\n path('wind_simulation/list_of_turbines//add', add_turbine_field, name='add_turbine_field'),\n\n path('wind_simulation/list_of_turbines//edit', edit_turbine, name='edit_turbine'),\n path('wind_simulation/list_of_turbines//edit/', edit_turbine_field, name='edit_turbine_field'),\n\n path('wind_simulation/list_of_turbines//delete', delete_turbine_table, name='delete_turbine_table'),\n\n path('get_forecast_data', get_forecast_data, name='get_forecast_data'),\n path('get_all_forecast_data', get_all_forecast_data, name='get_forecast_data'),\n]\n","repo_name":"Oleg-tech/solar-system-monitoring-service","sub_path":"house_monitoring_system/wind_simulation_model/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12732656918","text":"import re\n\n\nclass Keyword:\n def __init__(self, value: str):\n self.value = value\n\n def __eq__(self, other):\n if not isinstance(other, Keyword):\n return False\n\n return self.value == other.value\n\n def __repr__(self):\n return f'Keyword(value=\"{self.value}\")'\n\n\nclass Literal:\n def __init__(self, value: str):\n self.value = value\n\n def __eq__(self, other):\n if not isinstance(other, Literal):\n return False\n\n return self.value == other.value\n\n def __repr__(self):\n return f'Literal(value=\"{self.value}\")'\n\n\nclass Select:\n def __init__(\n self,\n select_list: list[str],\n select_from: list[str],\n join: list[tuple[str, str, tuple]],\n where: tuple\n ):\n self.select_list = select_list\n self.select_from = select_from\n self.join = join\n self.where = where\n\n def __eq__(self, other):\n if not isinstance(other, Select):\n return False\n\n return (\n self.select_list == other.select_list\n and self.select_from == other.select_from\n and self.join == other.join\n and self.where == other.where\n )\n\n def __repr__(self):\n return f'Select(select_list={self.select_list}, select_from={self.select_from}, join={self.join}, where={self.where})'\n\n\nclass InvalidTokenError(Exception):\n pass\n\n\nTokens = list[Keyword | Literal | Select]\n\n\ndef parse_sql(s: str) -> Tokens:\n position = skip_white_spaces(s, 0)\n select, position = read_query(s, position)\n return [select]\n\n\ndef skip_white_spaces(s: str, position: int) -> int:\n while position < len(s) and s[position] in (' ', '\\t', '\\n', '\\r'):\n position += 1\n return position\n\n\ndef read_query(s: str, position: int) -> tuple[Select, int]:\n _, position = read_keyword('select', s, position)\n select_list, position = read_list(s, position)\n _, position = read_keyword('from', s, position)\n select_from, position = read_list(s, position)\n join, position = read_joins(s, position)\n where, position = read_where(s, position)\n return 
Select(select_list=select_list, select_from=select_from, join=join, where=where), position\n\n\ndef read_list(s: str, position: int) -> tuple[list, int]:\n items = []\n while position < len(s):\n value, position = read_until_separator(s, position)\n items.append(value)\n position = skip_white_spaces(s, position)\n if position >= len(s) or s[position] != ',':\n break\n position += 1\n position = skip_white_spaces(s, position)\n\n position = skip_white_spaces(s, position)\n return items, position\n\n\ndef read_until(values: list[str], s: str, position: int) -> tuple[str, int]:\n start = position\n while position < len(s):\n found_value = any(s[position:].lower().startswith(value) for value in values)\n if found_value:\n break\n\n position += 1\n\n return s[start:position], position\n\n\ndef read_until_white_space(s: str, position: int) -> tuple[str, int]:\n value, position = read_until([' ', '\\t', '\\n', '\\r'], s, position)\n position = skip_white_spaces(s, position)\n return value, position\n\n\ndef read_until_white_space_or_bracket(s: str, position: int) -> tuple[str, int]:\n value, position = read_until([' ', '\\t', '\\n', '\\r', ')', '('], s, position)\n position = skip_white_spaces(s, position)\n return value, position\n\n\ndef read_until_separator(s: str, position: int) -> tuple[str, int]:\n return read_until([',', ' ', '\\t', '\\n', '\\r'], s, position)\n\n\ndef read_keyword(keyword: str, s: str, position: int) -> tuple[Keyword, int]:\n t = s[position:].lower()\n if t.startswith(keyword):\n position = skip_white_spaces(s, position + len(keyword))\n return Keyword(keyword), position\n\n raise InvalidTokenError(f'Invalid token at position {position}: expected keyword \"{keyword}\"')\n\n\ndef read_keywords(keywords: list[str], s: str, position: int) -> tuple[Keyword, int]:\n t = s[position:].lower()\n\n for keyword in keywords:\n if t.startswith(keyword):\n position = skip_white_spaces(s, position + len(keyword))\n return Keyword(keyword), position\n\n raise 
InvalidTokenError(f'Invalid token at position {position}: expected keywords \"{keywords}\"')\n\n\ndef read_literal(s: str, position: int) -> tuple[Literal, int]:\n start = position\n while position < len(s) and s[position] not in (' ', '\\t', '\\n', '\\r'):\n position += 1\n\n return Literal(s[start:position]), position\n\n\ndef read_joins(s: str, position: int) -> tuple[list, int]:\n joins = []\n while position < len(s):\n position = skip_white_spaces(s, position)\n if re.match(r'^left\\s+outer\\s+join\\s+', s[position:], re.IGNORECASE) is not None:\n join, position = read_left_outer_join(s, position)\n elif re.match(r'^inner\\s+join\\s+', s[position:], re.IGNORECASE) is not None:\n join, position = read_inner_join(s, position)\n elif re.match(r'^join\\s+', s[position:], re.IGNORECASE) is not None:\n join, position = read_join(s, position)\n else:\n break\n\n joins.extend(join)\n\n position = skip_white_spaces(s, position)\n return joins, position\n\n\ndef read_left_outer_join(s: str, position: int) -> tuple[list, int]:\n _, position = read_keyword('left', s, position)\n _, position = read_keyword('outer', s, position)\n _, position = read_keyword('join', s, position)\n table, position = read_until_white_space(s, position)\n _, position = read_keyword('on', s, position)\n conditions, position = read_conditions(s, position)\n return [('left outer join', table, conditions)], position\n\n\ndef read_inner_join(s: str, position: int) -> tuple[list, int]:\n _, position = read_keyword('inner', s, position)\n _, position = read_keyword('join', s, position)\n value, position = read_until_white_space(s, position)\n _, position = read_keyword('on', s, position)\n conditions, position = read_conditions(s, position)\n return [('inner join', value, conditions)], position\n\n\ndef read_join(s: str, position: int) -> tuple[list, int]:\n _, position = read_keyword('join', s, position)\n value, position = read_until_white_space(s, position)\n _, position = read_keyword('on', s, 
position)\n conditions, position = read_conditions(s, position)\n return [('join', value, conditions)], position\n\n\ndef read_where(s: str, position: int) -> tuple[tuple | None, int]:\n if not s[position:].lower().startswith('where'):\n return None, position\n\n _, position = read_keyword('where', s, position)\n conditions, position = read_conditions(s, position)\n\n return conditions, position\n\n\ndef read_conditions(s: str, position: int) -> tuple[tuple, int]:\n operands = []\n operators = []\n while position < len(s):\n if s[position] == '(':\n operators.append('(')\n position = skip_white_spaces(s, position + 1)\n\n left, position = read_until_white_space(s, position)\n operator, position = read_until_white_space(s, position)\n right, position = read_until_white_space_or_bracket(s, position)\n\n condition = (left, operator, right)\n operands.append(condition)\n\n if position < len(s) and s[position] == ')':\n position = skip_white_spaces(s, position + 1)\n while len(operators) > 0 and operators[-1] != '(':\n boolean_operator = operators.pop()\n right_condition = operands.pop()\n left_condition = operands.pop()\n operands.append((boolean_operator, left_condition, right_condition))\n operators.pop()\n\n if re.match(r'^(and|or)', s[position:], re.IGNORECASE) is None:\n break\n\n keyword, position = read_keywords(['and', 'or'], s, position)\n boolean_operator = keyword.value\n\n while len(operators) > 0 and operators[-1] == 'and' and boolean_operator == 'or':\n last_boolean_operator = operators.pop()\n\n if last_boolean_operator == '(':\n break\n\n right_condition = operands.pop()\n left_condition = operands.pop()\n operands.append((last_boolean_operator, left_condition, right_condition))\n\n operators.append(boolean_operator)\n\n while len(operators) > 0:\n boolean_operator = operators.pop()\n right_condition = operands.pop()\n left_condition = operands.pop()\n operands.append((boolean_operator, left_condition, right_condition))\n\n conditions = 
operands.pop()\n\n position = skip_white_spaces(s, position)\n return conditions, position\n","repo_name":"inikolaev/toy-database-engine","sub_path":"sql_parser.py","file_name":"sql_parser.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"35607249368","text":"from saida_gym.starcraft.vultureVsZealot import VultureVsZealot\r\n## gym 환경 import VultureVsZealot\r\n\r\nfrom collections import deque\r\nimport numpy as np\r\nimport random\r\nimport os\r\nimport math\r\nimport pickle\r\nimport time\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torch.distributions import Categorical ## 분포 관련\r\nfrom tensorboardX import SummaryWriter\r\n\r\nclass Actor(nn.Module):\r\n def __init__(self, state_size, action_size):\r\n super(Actor, self).__init__()\r\n self.fc1 = nn.Linear(state_size,128) ## input state\r\n self.fc2 = nn.Linear(128,512)\r\n self.fc3 = nn.Linear(512,128)\r\n self.fc4 = nn.Linear(128,action_size) ## output each action\r\n\r\n def forward(self, x, soft_dim):\r\n x = torch.tanh(self.fc1(x))\r\n x = torch.tanh(self.fc2(x))\r\n x = torch.tanh(self.fc3(x))\r\n \r\n prob_each_actions = F.softmax(self.fc4(x),dim=soft_dim) ## NN에서 각 action에 대한 확률을 추정한다.\r\n\r\n return prob_each_actions\r\n\r\ndef scale_velocity(v):\r\n return v / 6.4\r\n\r\ndef scale_coordinate(pos):\r\n if pos > 0:\r\n return 1 if pos > 320 else int(pos / 16) / 20\r\n else:\r\n return -1 if pos < -320 else int(pos / 16) / 20\r\n\r\ndef scale_angle(angle):\r\n return (angle - math.pi) / math.pi\r\n\r\ndef scale_cooldown(cooldown):\r\n return (cooldown + 1) / 15\r\n\r\ndef scale_vul_hp(hp):\r\n return hp / 80\r\n\r\ndef scale_zeal_hp(hp):\r\n return hp / 160\r\n\r\ndef scale_bool(boolean):\r\n return 1 if boolean else 0\r\n\r\ndef rearrange_State(observation, state_size, env):\r\n state_arr = deque(maxlen=state_size)\r\n\r\n my_x = 0\r\n my_y = 0\r\n if observation.my_unit:\r\n for idx, me in enumerate(observation.my_unit): ## 9\r\n my_x = me.pos_x\r\n my_y = me.pos_y\r\n state_arr.append(math.atan2(me.velocity_y, me.velocity_x) / math.pi)\r\n state_arr.append(scale_velocity(math.sqrt((me.velocity_x) ** 2 + (me.velocity_y) ** 2)))\r\n 
state_arr.append(scale_cooldown(me.cooldown))\r\n state_arr.append(scale_vul_hp(me.hp))\r\n state_arr.append(scale_angle(me.angle))\r\n state_arr.append(scale_bool(me.accelerating))\r\n state_arr.append(scale_bool(me.braking))\r\n state_arr.append(scale_bool(me.attacking))\r\n state_arr.append(scale_bool(me.is_attack_frame))\r\n for i, terrain in enumerate(me.pos_info): ##12\r\n state_arr.append(terrain.nearest_obstacle_dist / 320)\r\n else:\r\n for _ in range(state_size - 11):\r\n state_arr.append(0)\r\n\r\n if observation.en_unit:\r\n for idx, enemy in enumerate(observation.en_unit): ## 11\r\n state_arr.append(math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x) / math.pi)\r\n state_arr.append(scale_coordinate(math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2)))\r\n state_arr.append(math.atan2(enemy.velocity_y, enemy.velocity_x) / math.pi)\r\n state_arr.append(scale_velocity(math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2)))\r\n state_arr.append(scale_cooldown(enemy.cooldown))\r\n state_arr.append(scale_zeal_hp(enemy.hp + enemy.shield))\r\n state_arr.append(scale_angle(enemy.angle))\r\n state_arr.append(scale_bool(enemy.accelerating))\r\n state_arr.append(scale_bool(enemy.braking))\r\n state_arr.append(scale_bool(enemy.attacking))\r\n state_arr.append(scale_bool(enemy.is_attack_frame))\r\n else:\r\n for _ in range(11):\r\n state_arr.append(0)\r\n \r\n\r\n return state_arr\r\n\r\ndef reward_reshape(state, next_state, reward, done):\r\n\r\n KILL_REWARD = 10\r\n DEAD_REWARD = -10\r\n DAMAGED_REWARD = -4\r\n HIT_REWARD = 2\r\n\r\n if done:\r\n if reward > 0: ## env에서 반환된 reward가 1 이면, 질럿을 잡음.\r\n reward = KILL_REWARD\r\n if next_state[3] == 1.0 and next_state[-6] == 0: ## perfect clear했다면 추가 bonus reward\r\n reward+=5\r\n \r\n return reward\r\n # 잡은 경우\r\n else: ## 게임이 종료되고 -1 값을 받게 된다면, \r\n reward = DEAD_REWARD\r\n return reward\r\n else: ## 게임이 종료되지 않았다면,\r\n my_pre_hp = state[3]\r\n my_cur_hp = next_state[3]\r\n \r\n en_pre_hp = 
state[-6]\r\n en_cur_hp = next_state[-6]\r\n \r\n if my_pre_hp - my_cur_hp > 0: ## 벌쳐가 맞아 버렸네 ㅠㅠ\r\n reward += DAMAGED_REWARD\r\n if en_pre_hp - en_cur_hp > 0: ## 질럿을 때려 버렸네 ㅠㅠ\r\n reward += HIT_REWARD\r\n \r\n ## 벌쳐가 맞고, 질럿도 때리는 2가지 동시 case가 있을 거 같아. reward를 +=을 했고 각각 if문으로 처리했습니다.\r\n \r\n return reward\r\n\r\ndef main():\r\n \r\n load = True\r\n episode = 0 ## 21710:91.1% 21610: 91% 21600: 90% 여러 모델 테스트 결과 중 3 모델이 90% 이상을 보였음.\r\n ## 환경의 초기, 진행되는 episode의 조건에 따라 86% ~ 91% 까지 perfect score를 보임. \r\n \r\n env = VultureVsZealot(version=0, frames_per_step=12, action_type=0, move_angle=20, move_dist=3, verbose=0, no_gui=False\r\n ,auto_kill=False)\r\n print_interval = 10\r\n \r\n learning_rate=0.00003\r\n torch.manual_seed(500)\r\n \r\n state_size = 38\r\n \r\n action_size= 19\r\n \r\n\r\n actor = Actor(state_size, action_size)\r\n \r\n \r\n if load: ## 경로를 model 파일 경로+ model 파일 이름으로 변경해주세요. 저는 원래 episode 번호로 구분했습니다.\r\n actor.load_state_dict(torch.load(os.path.join('C:/SAIDA_RL/python/saida_agent_example/vultureZealot/save_ppo3_clear/','clear_ppo_actor_'+str(episode)+'.pkl')))\r\n \r\n actor_optimizer = optim.Adam(actor.parameters(), lr=learning_rate)\r\n \r\n episode = 0\r\n clear_cnt=0\r\n for n_iter in range(1000):\r\n step = 0\r\n \r\n state = env.reset()\r\n \r\n state = rearrange_State(state, state_size, env)\r\n episode+=1\r\n temp_score = 0.0\r\n while True:\r\n \r\n prob_each_actions = actor(torch.Tensor(state), soft_dim=0)\r\n \r\n distribution = Categorical(prob_each_actions)\r\n \r\n action = distribution.sample().item() \r\n \r\n next_state, reward, done, info = env.step([action])\r\n next_state = rearrange_State(next_state, state_size, env)\r\n \r\n reward = reward_reshape(state, next_state, reward, done) \r\n \r\n mask = 0 if done else 1 \r\n \r\n state = next_state\r\n \r\n temp_score += reward \r\n \r\n if next_state[3] == 1.0 and next_state[-6] == 0:\r\n clear_cnt+=1\r\n print(\"clear: \",next_state[3],next_state[-6],\"clear_score: \",temp_score, 
\"clear_cnt: \", clear_cnt,\" / \", n_iter+1)\r\n \r\n if done: \r\n print(\"step: \", step, \"per_episode_score: \",temp_score)\r\n \r\n break\r\n\r\n print(\"clear count: \",clear_cnt,\" percent: \",(clear_cnt/n_iter)) \r\n env.close()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"betastarcraft/Problem_1","sub_path":"01_Vulture_vs_Zealot/Perfect_Performance_test/vz_PPO_test.py","file_name":"vz_PPO_test.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"26276723435","text":"total = 0\n\ndef get_mul(num):\n sum = 1\n while num > 1:\n sum *= num\n num -= 1\n return sum\ndef get_num(s, num, d, sum):\n if d <= 0:\n return\n if sum <= 0:\n return\n if num * num > sum:\n get_num(s, num - 1, d, sum)\n if num * num == sum:\n global total\n total += 2**(27-d+1) * get_mul(len(set(s+str(num))))\n # print(24-d+1, len(set(s+str(num))) - 1)\n print(s + str(num) + '0' * (d - 1) + ' ' + str(2**(24-d+1) * get_mul(len(set(s+str(num))) - 1)))\n while num > 0:\n if num * num < sum:\n # print(s + str(num), num - 1, d - 1, sum - num * num)\n if (num)**2 <= sum - num * num:\n get_num(s + str(num), num, d - 1, sum - num * num)\n else:\n get_num(s + str(num), num - 1, d - 1, sum - num * num)\n num -= 1\nget_num(' ', 8, 24, 79)\nprint(total)","repo_name":"xiaoxiongfeng/lattice","sub_path":"zn.py","file_name":"zn.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14774213674","text":"from collections import OrderedDict\n\nd = OrderedDict()\nd['foo'] = 1\nd['bar'] = 2\nd['span'] = 4\nprint(d)\nfor key in d:\n print(key, d[key])\n\nimport json\n\nj = json.dumps(d)\nprint(j)\n\nj = json.loads(j)\nprint(j)\n","repo_name":"ayumi64/Python_Cookbook_Learn","sub_path":"Section01 数据结构和算法/s1.7_字典排序.py","file_name":"s1.7_字典排序.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17497666515","text":"from __future__ import nested_scopes\n\nfrom twisted.web import resource as resourcelib\nfrom twisted.web import client, microdom, domhelpers, server\n\nurlTemplate = 'http://www.livejournal.com/users/%s/rss'\n\nclass LJSyndicatingResource(resourcelib.Resource):\n\n def render_GET(self, request):\n url = urlTemplate % request.args['user'][0]\n client.getPage(url).addCallback(\n microdom.parseString).addCallback(\n lambda t: domhelpers.findNodesNamed(t, 'item')).addCallback(\n lambda itms: zip([domhelpers.findNodesNamed(x, 'title')[0]\n for x in itms],\n [domhelpers.findNodesNamed(x, 'link')[0]\n for x in itms]\n )).addCallback(\n lambda itms: '
'\n output += ''\n flash(\"you are now logged in as %s\" % login_session['username'])\n logging.info(\"done!\")\n return output\n\n\n# User Helper Functions\n\n# DISCONNECT - Revoke a current user's token and reset their login_session\n\n\n@app.route('/gdisconnect')\ndef gdisconnect():\n \"\"\"Disconnect from google.\"\"\"\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] == '200':\n response = make_response(json.dumps(\n 'Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(json.dumps(\n 'Failed to revoke token for given user.'), 400)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\n# Disconnect based on provider\n@app.route('/disconnect')\ndef disconnect():\n \"\"\"Delete stored session.\"\"\"\n del login_session['access_token']\n del login_session['username']\n del login_session['userid']\n del login_session['picture']\n del login_session['email']\n del login_session['gplus_id']\n del login_session['state']\n\n flash(\"You have successfully been logged out.\")\n return redirect(url_for('show_brands'))\n\n\n# User Helper Functions\ndef create_user(login_session):\n \"\"\"create new user and return user id.\"\"\"\n session = DBSession()\n new_user = User(name=login_session['username'], email=login_session[\n 'email'], picture=login_session['picture'])\n session.add(new_user)\n session.commit()\n user = session.query(User).filter_by(\n email=login_session['email']).one_or_none()\n return user.id\n\n\ndef get_user_info(user_id):\n \"\"\"Return user information\"\"\"\n\n session 
= DBSession()\n user = session.query(User).filter_by(id=user_id).one_or_none()\n return user\n\n\ndef get_user_id(email):\n \"\"\"Return user id.\"\"\"\n\n session = DBSession()\n user = session.query(User).filter_by(email=email).one_or_none()\n if user is None:\n return None\n return user.id\n\n\n# JSON APIs to view Brand Information\n@app.route('/brand//model/JSON')\ndef brand_json(brand_id):\n \"\"\"Returns Laptop Models in a JSON Format\"\"\"\n session = DBSession()\n # brand = session.query(Brand).filter_by(id = brand_id).one()\n models = session.query(Model).filter_by(brand_id=brand_id).all()\n return jsonify(Models=[i.serialize for i in models])\n\n\n@app.route('/brand//model//JSON')\ndef model_json(brand_id, model_id):\n \"\"\"Returns Laptop Model in a JSON Format\"\"\"\n\n session = DBSession()\n model = session.query(Model).filter_by(id=model_id).one()\n return jsonify(Model=model.serialize)\n\n\n@app.route('/brand/JSON')\ndef brands_json():\n session = DBSession()\n brands = session.query(Brand).all()\n return jsonify(brands=[r.serialize for r in brands])\n\n\n# Show all brands\n@app.route('/')\n@app.route('/brand/')\ndef show_brands():\n \"\"\"Render brands.Html with permission (edit, delete).\"\"\"\n session = DBSession()\n brands = session.query(Brand).order_by(asc(Brand.name))\n\n if 'username' not in login_session:\n return render_template('brands.html', brands=brands,\n is_admin=False)\n else:\n return render_template('brands.html', brands=brands,\n is_admin=True)\n\n\n@app.route('/static/')\ndef send_file(filename):\n \"\"\"Return photo path.\"\"\"\n return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n\n# Create a new brand\n@app.route('/brand/new/', methods=['GET', 'POST'])\ndef new_brand():\n \"\"\"Add new Laptop brand.\"\"\"\n try:\n session = DBSession()\n if request.method == 'POST':\n name = request.form['name']\n if name != '':\n file = request.files['photo']\n # if user does not select file, browser also\n # submit a empty 
part without filename\n # if file.filename == '':\n # flash('No selected file')\n # return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(name +\n file.filename).replace(\" \", \"\")\n photo = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(photo)\n else:\n flash('File Extention is not allowed')\n newbrand = Brand(name=request.form['name'], photo=filename,\n user_id=login_session['userid'])\n session.add(newbrand)\n session.commit()\n flash('New Brand %s Successfully Created' % newbrand.name)\n return redirect(url_for('show_brands'))\n else:\n return render_template('brandNew.html')\n except Exception as ex:\n return logging.error(str(ex))\n\n\n# Edit brand\n@app.route('/brand//edit/', methods=['GET', 'POST'])\ndef edit_brand(brand_id):\n \"\"\"Edit brand if the created user is the same as the editing user.\"\"\"\n session = DBSession()\n edited_brand = session.query(Brand).filter_by(id=brand_id).one()\n if request.method == 'POST':\n created_user = edited_brand.user_id\n logged_user = login_session['userid']\n if created_user != logged_user:\n flash('You are not authorized to edit')\n return redirect(url_for('show_brands'))\n\n edit_btn = request.form.get('edit')\n if edit_btn is not None:\n if request.form['name']:\n edited_brand.name = request.form['name']\n edited_brand.user_id = login_session['userid']\n session.commit()\n flash('Brand Successfully Edited %s' % edited_brand.name)\n return redirect(url_for('show_brands'))\n else:\n return render_template('brandEdit.html', brand=edited_brand)\n\n\n# Delete brand and its models\n@app.route('/brand//delete/', methods=['GET', 'POST'])\ndef delete_brand(brand_id):\n \"\"\"Delete brand if the created user is the same as the deleting user.\"\"\"\n session = DBSession()\n deleted_brand = session.query(Brand).filter_by(id=brand_id).one()\n if request.method == 'POST':\n delete_btn = request.form.get('delete')\n creadted_user = deleted_brand.user_id\n 
logged_user = login_session['userid']\n if creadted_user != logged_user:\n flash('You are not authorized to delete')\n return redirect(url_for('show_brands', brand_id=brand_id))\n if delete_btn is not None:\n delete_btn = request.form.get('delete')\n if delete_btn is not None:\n session.delete(deleted_brand)\n flash('%s Successfully Deleted' % deleted_brand.name)\n session.commit()\n return redirect(url_for('show_brands', brand_id=brand_id))\n\n else:\n return render_template('brandDelete.html', brand=deleted_brand)\n\n\n# Show a brand model\n@app.route('/brand//')\n@app.route('/brand//model/')\ndef show_model(brand_id):\n \"\"\"Render model.Html with user permission (edit, delete).\"\"\"\n session = DBSession()\n brand = session.query(Brand).filter_by(id=brand_id).one()\n models = session.query(Model).filter_by(brand_id=brand_id).all()\n\n user = get_user_info(brand.user_id)\n\n if 'userid' in login_session:\n userid_session = login_session['userid']\n if not user or 'username' not in login_session or \\\n brand.user_id != userid_session:\n return render_template('model.html', models=models, brand=brand,\n creator=user, is_admin=False)\n else:\n return render_template('model.html', models=models, brand=brand,\n creator=user, is_admin=True)\n else:\n return render_template('model.html', models=models, brand=brand,\n creator=user, is_admin=False)\n\n\n# Create a new brand model\n@app.route('/brand//model/new/', methods=['GET', 'POST'])\ndef new_model(brand_id):\n \"\"\"Add new laptop model.\"\"\"\n session = DBSession()\n # brand = session.query(Brand).filter_by(id = brand_id).one()\n if request.method == 'POST':\n file = request.files['photo']\n # if user does not select file, browser also\n # submit a empty part without filename\n # if file.filename == '':\n # flash('No selected file')\n # return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(request.form['name'] +\n file.filename).replace(\" \", \"\")\n photo = 
os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(photo)\n\n newmodel = Model(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n photo=filename, brand_id=brand_id)\n session.add(newmodel)\n session.commit()\n flash('New Labtop Model %s Item Successfully Created' %\n newmodel.name)\n return redirect(url_for('show_model', brand_id=brand_id))\n else:\n flash('Brand must have a logo')\n return render_template('modelNew.html', brand_id=brand_id)\n else:\n return render_template('modelNew.html', brand_id=brand_id)\n\n\n# helper for allowed photos ext.\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n# Edit a brand model\n@app.route('/brand//model//edit',\n methods=['GET', 'POST'])\ndef edit_model(brand_id, model_id):\n \"\"\"Modify laptop model only\n\n if the created user is the same as the editing user.\n \"\"\"\n session = DBSession()\n edited_model = session.query(Model).filter_by(id=model_id).one()\n if request.method == 'POST':\n edit_btn = request.form.get('edit')\n creadted_user = edited_model.user_id\n logged_user = login_session['userid']\n if creadted_user != logged_user:\n flash('You are not authorized to edit')\n return redirect(url_for('show_model', brand_id=brand_id))\n if edit_btn is not None:\n if request.form['name']:\n edited_model.name = request.form['name']\n if request.form['description']:\n edited_model.description = request.form['description']\n if request.form['price']:\n edited_model.price = request.form['price']\n edit_model.user_id = login_session['userid']\n # if 'photo' not in request.files:\n # flash('No file part')\n # return redirect(request.url)\n file = request.files['photo']\n # if user does not select file, browser also\n # submit a empty part without filename\n # if file.filename == '':\n # flash('No selected file')\n # return redirect(request.url)\n if file and allowed_file(file.filename):\n 
filename = secure_filename(request.form['name'] +\n file.filename).replace(\" \", \"\")\n photo = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(photo)\n edited_model.photo = filename\n session.add(edited_model)\n session.commit()\n flash('Menu Item Successfully Edited')\n return redirect(url_for('show_model', brand_id=brand_id))\n else:\n return render_template('modelEdit.html', brand_id=brand_id,\n model_id=model_id, model=edited_model)\n\n\n# Delete brand Model\n@app.route('/brand//model//delete',\n methods=['GET', 'POST'])\ndef delete_model(brand_id, model_id):\n \"\"\"Remove model\"\"\"\n session = DBSession()\n deleted_model = session.query(Model).filter_by(id=model_id).one()\n\n if request.method == 'POST':\n delete_btn = request.form.get('delete')\n edit_btn = request.form.get('edit')\n creadted_user = deleted_model.user_id\n logged_user = login_session['userid']\n if creadted_user != logged_user:\n flash('You are not authorized to delete')\n return redirect(url_for('show_model', brand_id=brand_id))\n if delete_btn is not None:\n session.delete(deleted_model)\n session.commit()\n flash('Menu Item Successfully Deleted')\n return redirect(url_for('show_model', brand_id=brand_id))\n else:\n return redirect(url_for('show_model', brand_id=brand_id))\n else:\n return render_template('modelDelete.html', model=deleted_model)\n\n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"SamehPierre/item_catalog","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":18548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"1108695881","text":"import matplotlib.pyplot as plt\nfrom scipy.signal import correlate\nimport numpy as np\ndef PlotAutoCorr(samples,n,L):\n\t# normalise samples\n\tnormed_samples = (samples - np.mean(samples,axis=0))/np.std(samples,axis=0)\n\t# calculate autocorrelation\n\tcorr = correlate(normed_samples,normed_samples,mode='same',method='fft')/(n*L*3)\n\tprint(corr.shape)\n\treturn plt.plot(range(-n//2,n//2),corr)\n\n\nenergy = np.genfromtxt('data/fake/sk_energies.txt')\n#seqs = np.genfromtxt('data/fake/sk_encode.txt',dtype=int)\n\nPlotAutoCorr(energy,100000,1)\n#PlotAutoCorr(seqs[:,0],100000,1)\nplt.savefig('data/fake/sk_autocorr.png')","repo_name":"andrewcboardman/apta_ml","sub_path":"data_stats/check_gen_data.py","file_name":"check_gen_data.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"26884571777","text":"import pexpect\nimport unittest\nimport subprocess\nimport sys\nimport os\nfrom . import PexpectTestCase\n\nunicode_type = str if pexpect.PY3 else unicode\n\n\ndef timeout_callback(values):\n if values[\"event_count\"] > 3:\n return 1\n return 0\n\n\ndef function_events_callback(values):\n try:\n previous_echoed = (values[\"child_result_list\"][-1]\n .decode().split(\"\\n\")[-2].strip())\n if previous_echoed.endswith(\"stage-1\"):\n return \"echo stage-2\\n\"\n elif previous_echoed.endswith(\"stage-2\"):\n return \"echo stage-3\\n\"\n elif previous_echoed.endswith(\"stage-3\"):\n return \"exit\\n\"\n else:\n raise Exception(\"Unexpected output {0}\".format(previous_echoed))\n except IndexError:\n return \"echo stage-1\\n\"\n\n\nclass RunFuncTestCase(PexpectTestCase.PexpectTestCase):\n if sys.platform != 'win32':\n runfunc = staticmethod(pexpect.run)\n cr = b'\\r'\n empty = b''\n prep_subprocess_out = staticmethod(lambda x: x)\n\n def setUp(self):\n self.runenv = os.environ.copy()\n self.runenv['PS1'] = 'GO:'\n super(RunFuncTestCase, self).setUp()\n\n def test_run_exit(self):\n (data, exitstatus) = self.runfunc(sys.executable + ' exit1.py', withexitstatus=1)\n assert exitstatus == 1, \"Exit status of 'python exit1.py' should be 1.\"\n\n def test_run(self):\n the_old_way = subprocess.Popen(\n args=['uname', '-m', '-n'],\n stdout=subprocess.PIPE\n ).communicate()[0].rstrip()\n\n (the_new_way, exitstatus) = self.runfunc(\n 'uname -m -n', withexitstatus=1)\n the_new_way = the_new_way.replace(self.cr, self.empty).rstrip()\n\n self.assertEqual(self.prep_subprocess_out(the_old_way), the_new_way)\n self.assertEqual(exitstatus, 0)\n\n def test_run_callback(self):\n # TODO it seems like this test could block forever if run fails...\n events = {pexpect.TIMEOUT: timeout_callback}\n self.runfunc(\"cat\", timeout=1, events=events)\n\n def test_run_bad_exitstatus(self):\n (the_new_way, exitstatus) = self.runfunc(\n 'ls -l /najoeufhdnzkxjd', 
withexitstatus=1)\n assert exitstatus != 0\n\n def test_run_event_as_string(self):\n events = [\n # second match on 'abc', echo 'def'\n ('abc\\r\\n.*GO:', 'echo \"def\"\\n'),\n # final match on 'def': exit\n ('def\\r\\n.*GO:', 'exit\\n'),\n # first match on 'GO:' prompt, echo 'abc'\n ('GO:', 'echo \"abc\"\\n')\n ]\n\n (data, exitstatus) = pexpect.run(\n 'bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n assert exitstatus == 0\n\n def test_run_event_as_function(self):\n events = [\n ('GO:', function_events_callback)\n ]\n\n (data, exitstatus) = pexpect.run(\n 'bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n assert exitstatus == 0\n\n def test_run_event_as_method(self):\n events = [\n ('GO:', self._method_events_callback)\n ]\n\n (data, exitstatus) = pexpect.run(\n 'bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n assert exitstatus == 0\n\n def test_run_event_typeerror(self):\n events = [('GO:', -1)]\n with self.assertRaises(TypeError):\n pexpect.run('bash --norc',\n withexitstatus=True,\n events=events,\n env=self.runenv,\n timeout=10)\n\n def _method_events_callback(self, values):\n try:\n previous_echoed = (values[\"child_result_list\"][-1].decode()\n .split(\"\\n\")[-2].strip())\n if previous_echoed.endswith(\"foo1\"):\n return \"echo foo2\\n\"\n elif previous_echoed.endswith(\"foo2\"):\n return \"echo foo3\\n\"\n elif previous_echoed.endswith(\"foo3\"):\n return \"exit\\n\"\n else:\n raise Exception(\"Unexpected output {0!r}\"\n .format(previous_echoed))\n except IndexError:\n return \"echo foo1\\n\"\n\n\nclass RunUnicodeFuncTestCase(RunFuncTestCase):\n if sys.platform != 'win32':\n runfunc = staticmethod(pexpect.runu)\n cr = b'\\r'.decode('ascii')\n empty = b''.decode('ascii')\n prep_subprocess_out = staticmethod(lambda x: x.decode('utf-8', 'replace'))\n\n def test_run_unicode(self):\n if pexpect.PY3:\n char = chr(254) # þ\n pattern = ''\n 
else:\n char = unichr(254) # analysis:ignore\n pattern = ''.decode('ascii')\n\n def callback(values):\n if values['event_count'] == 0:\n return char + '\\n'\n else:\n return True # Stop the child process\n\n output = pexpect.runu(self.PYTHONBIN + ' echo_w_prompt.py',\n env={'PYTHONIOENCODING': 'utf-8'},\n events={pattern: callback})\n assert isinstance(output, unicode_type), type(output)\n assert ('' + char) in output, output\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pexpect/pexpect","sub_path":"tests/test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","stars":2458,"dataset":"github-code","pt":"67"}
+{"seq_id":"70740480853","text":"#!/usr/bin/python3\n\nimport os\nimport dynein.run as run\nimport glob\nimport numpy as np\nimport argparse\nimport subprocess\n\ndef latex_format(x):\n if isinstance(x, float) or isinstance(x, int):\n x = '{:e}'.format(x)\n if 'e+0' in x:\n m,e = x.split('e+0')\n if m == '1':\n return r'10^{'+e+'}'\n return m + r'\\times 10^{' + e+ '}'\n if 'e+' in x:\n m,e = x.split('e+')\n if m == '1':\n return r'10^{'+e+'}'\n return m + r'\\times 10^{' + e+ '}'\n if 'e-0' in x:\n m,e = x.split('e-0')\n if m == '1':\n return r'10^{-'+e+'}'\n return m + r'\\times 10^{-' + e+ '}'\n if 'e' in x:\n m,e = x.split('e')\n if m == '1':\n return r'10^{'+e+'}'\n return m + r'\\times 10^{' + e+ '}'\n # if isinstance(x, str):\n # x = x.replace('-', '_')\n return x\n\nparser = argparse.ArgumentParser(description=\"script to generate unbinding probabilities of bb dynein\")\n\nparser.add_argument('-k_b', '--binding', dest='k_b', action='store', type=float,\n default=1e8, help=\"pre-exponential binding constant\", metavar='')\nparser.add_argument('-k_ub', '--unbinding', dest='k_ub', action='store', type=float,\n default=100, help=\"pre-exponential unbinding constant\", metavar='')\nparser.add_argument('-t', '--runtime', dest='runtime', action='store', type=float,\n default=1.0, help='total runtime for simulation in seconds', metavar='')\nparser.add_argument('-exp', '--exp-unbinding-constant', dest='exp_unbinding_constant',\n action='store', type=float, default=0.0, help=\"exponential unbinding constant\", metavar='')\n\nparser.add_argument('-s', '--seed', dest ='seed', action='store', type=float, default=1.0, help =\"random number seed\", metavar='')\nparser.add_argument('-cb', '--cb', dest ='cb', action='store', type=float, default=0.1, help =\"cb\", metavar='')\nparser.add_argument('-cm', '--cm', dest ='cm', action='store', type=float, default=0.4, help =\"cm\", metavar='')\nparser.add_argument('-ct', '--ct', dest ='ct', action='store', type=float, 
default=0.2, help =\"ct\", metavar='')\n\nparser.add_argument('-l', '--label', dest='label', action='store', type=str, default='default', help=\"label for run\", metavar='')\n\nparser.add_argument('-w', '--writerate', dest='write_rate', action='store', type=str, default=1e6, help=\"writes per second\", metavar='')\n\nargs = parser.parse_args()\n\nif os.path.exists('run-unbinding-rate-simulations.py'):\n os.chdir('../')\nos.system(\"make simulate_unbinding_rates\")\n\nif not os.path.exists('data/unbinding_probability/'):\n os.makedirs('data/unbinding_probability/')\n\nfor L in [1, 5, 10, 15, 20, 25, 30, 35, 40]:\n basename = \"%s__L-%s,s-%s\" % (args.label, str(L), args.seed)\n\n cmd = [\"./simulate_unbinding_rates\",\n \"--label\", \"%s\" % str(args.label),\n \"--k_b\", \"%g\" % float(args.k_b),\n \"--k_ub\", \"%g\" % float(args.k_ub),\n \"--c\", \"%g\" % float(args.exp_unbinding_constant),\n \"--cb\", \"%g\" % float(args.cb),\n \"--cm\", \"%g\" % float(args.cm),\n \"--ct\", \"%g\" % float(args.ct),\n \"--ls\", \"10.49\",\n \"--lt\", \"23.8\",\n \"--eqb\", \"120\",\n \"--eqmpre\", \"200\",\n \"--eqmpost\", \"224\",\n \"--eqt\", \"0\",\n \"--write_rate\", \"%g\" % float(args.write_rate),\n \"--runtime\", \"%g\" % float(args.runtime),\n \"--seed\", \"%g\" % float(args.seed),\n \"--dt\", \"1e-10\",\n \"--L\", \"%g\" % float(L)]\n\n if not os.path.exists('runlogs'):\n os.makedirs('runlogs')\n out = open('runlogs/' + basename + '.out', 'w')\n\n print(\"Running: \", \" \".join(cmd), out)\n out.flush()\n process_object = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)\n err = process_object.communicate()[1]\n if (err != b''):\n print(\"\\n##################################\",\n \"\\nSimulation exited in error: \\n\\n\",\n err.decode(\"utf-8\"),\n \"\\n##################################\\n\\n\")\n\nwith open(\"data/unbinding_probability/%s.tex\" % args.label, \"w\") as f:\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"runlabel\").replace(\"_\",\"\"), 
latex_format(args.label)) + '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"kb\").replace(\"_\",\"\"), latex_format(args.k_b)) + '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"kub\").replace(\"_\",\"\"), latex_format(args.k_ub)) + '\\n')\n f.write(r'\\newcommand\\%s{%s}' %(latex_format(\"cexp\").replace(\"_\",\"\"), latex_format(args.exp_unbinding_constant)) + '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"cb\").replace(\"_\",\"\"), latex_format(args.cb))+ '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"cm\").replace(\"_\",\"\"), latex_format(args.cm))+ '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"ct\").replace(\"_\",\"\"), latex_format(args.ct))+ '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"ls\").replace(\"_\",\"\"), latex_format(10.49))+ '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"lt\").replace(\"_\",\"\"), latex_format(23.8))+ '\\n')\n f.write(r'\\newcommand\\%s{%s}' % (latex_format(\"w_rate\").replace(\"_\",\"\"), latex_format(args.write_rate))+ '\\n')\n","repo_name":"elliotc12/dynein_walk","sub_path":"scripts/run-unbinding-rate-simulations.py","file_name":"run-unbinding-rate-simulations.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"9344001837","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\nmin_max_scaler = MinMaxScaler()\nss = StandardScaler()\nnp.random.seed(1) # 使每次随机产生的数都相同\n\n\n# 导入数据\n\nInputIndex = [\n 'Rn', 'PAR', 'fdif', 'PARdif', 'PARdir',\n 'Ta', 'Ts', 'Vpd', 'RH', 'Ustar', 'O3',\n 'VWC5', 'VWC25', 'VWC50', 'VWC100', 'VWC150', 'VWC200']\nOutputIndex = ['NEE']\ndata = pd.read_csv('data/example.csv')\n\n# 分割训练集合验证集,test_size=0.4代表从总的数据集合train中随机选取40%作为验证集,随机种子为0\ntrain = data[InputIndex]\ntarget = data[OutputIndex]\n\ntrX, teX, trY, teY = train_test_split(train, target, test_size=0.2, random_state=0)\nX = min_max_scaler.fit_transform(trX)\nY = min_max_scaler.fit_transform(trY)\ntest_X = min_max_scaler.transform(teX)\ntest_y = min_max_scaler.transform(teY)\n\n\nshape_X = X.shape # X, 行17列\nshape_Y = Y.shape # Y, 1列\nm = X.shape[1] # 样本数\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1 / (1 + np.exp(-x))\n return s\n\n# 定义神经网络结构\ndef layer_sizes(X, Y):\n \"\"\"\n Arguments:\n X -- input dataset of shape (input size, number of examples)\n Y -- labels of shape (output size, number of examples)\n Returns:\n n_x -- the size of the input layer\n n_h -- the size of the hidden layer\n n_y -- the size of the output layer\n \"\"\"\n n_x = X.shape[0] # 输入层神经元个数\n n_h = 20 # 隐藏层神经元个数\n n_y = Y.shape[0] # 输出神经元个数\n\n return (n_x, n_h, n_y)\n\n\n# 初始化模型参数\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n Returns:\n params -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, 
n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n np.random.seed(2)\n W1 = np.random.randn(n_h, n_x) * 0.01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h) * 0.01\n b2 = np.zeros((n_y, 1))\n assert (W1.shape == (n_h, n_x))\n assert (b1.shape == (n_h, 1))\n assert (W2.shape == (n_y, n_h))\n assert (b2.shape == (n_y, 1))\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n\n return parameters\n\n\n# 前向传播\ndef forward_propagation(X, parameters):\n \"\"\"\n Argument:\n X -- input data of size (n_x, m)\n parameters -- python dictionary containing your parameters (output of initialization function)\n Returns:\n A2 -- The sigmoid output of the second activation\n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\"\n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n # Implement Forward Propagation to calculate A2 (probabilities)\n # np.dot 代表矩阵相乘\n Z1 = np.dot(W1, X) + b1\n A1 = sigmoid(Z1)\n\n Z2 = np.dot(W2, A1) + b2\n A2 = Z2\n\n assert (A2.shape == (1, X.shape[1]))\n\n cache = {\"Z1\": Z1,\n \"A1\": A1,\n \"Z2\": Z2,\n \"A2\": A2}\n\n return A2, cache\n\n\n# 计算cost\ndef compute_cost(A2, Y, parameters):\n \"\"\"\n Computes the cross-entropy cost given in equation (13)\n Arguments:\n A2 -- The sigmoid output of the second activation, of shape (1, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n parameters -- python dictionary containing your parameters W1, b1, W2 and b2\n Returns:\n \"\"\"\n\n cost = np.sqrt(((A2 - Y) ** 2).mean())\n assert (isinstance(cost, float))\n\n return cost\n\n\n# 反向传播\ndef backward_propagation(parameters, cache, X, Y):\n \"\"\"\n Implement the backward propagation using the instructions above.\n Arguments:\n parameters -- python dictionary containing our 
parameters\n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\n X -- input data of shape (2, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n Returns:\n grads -- python dictionary containing your gradients with respect to different parameters\n \"\"\"\n m = X.shape[1] # 样本数目\n\n # First, retrieve W1 and W2 from the dictionary \"parameters\".\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n\n # Retrieve also A1 and A2 from dictionary \"cache\".\n # A1,A2是每一层的输出结果\n A1 = cache[\"A1\"]\n A2 = cache[\"A2\"]\n\n # Backward propagation: calculate dW1, db1, dW2, db2.\n # 输出层误差\n dZ2 = A2 - Y\n # 隐藏层到输出层权重求导数,最后一层是线性值\n dW2 = np.dot(dZ2, A1.T) / m\n db2 = np.sum(dZ2, axis=1, keepdims=True) / m\n dZ1 = np.multiply(np.dot(W2.T, dZ2), (1 - np.power(A1, 2)))\n # dZ1 = np.multiply(np.dot(W2.T, dZ2), (A1*(1-A1)))\n dW1 = np.dot(dZ1, X.T) / m\n db1 = np.sum(dZ1, axis=1, keepdims=True) / m\n\n grads = {\"dW1\": dW1,\n \"db1\": db1,\n \"dW2\": dW2,\n \"db2\": db2,\n \"dZ1\": dZ1}\n\n return grads\n\n\n# 更新参数\ndef update_parameters(parameters, grads, learning_rate=1.2):\n \"\"\"\n Updates parameters using the gradient descent update rule given above\n Arguments:\n parameters -- python dictionary containing your parameters\n grads -- python dictionary containing your gradients\n Returns:\n parameters -- python dictionary containing your updated parameters\n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n\n # Retrieve each gradient from the dictionary \"grads\"\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n\n # Update rule for each parameter\n W1 = W1 - learning_rate * dW1\n b1 = b1 - learning_rate * db1\n W2 = W2 - learning_rate * dW2\n b2 = b2 - learning_rate * db2\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n\n return 
parameters\n\n\n# 打包模型\n# num_iterations: 训练次数,如果nn_model中未指定,默认为10000\ndef nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):\n \"\"\"\n Arguments:\n X -- dataset of shape (2, number of examples)\n Y -- labels of shape (1, number of examples)\n n_h -- size of the hidden layer\n num_iterations -- Number of iterations in gradient descent loop\n print_cost -- if True, print the cost every 1000 iterations\n Returns:\n parameters -- parameters learnt by the model. They can then be used to predict.\n \"\"\"\n np.random.seed(3)\n\n n_x = layer_sizes(X, Y)[0]\n n_y = layer_sizes(X, Y)[2]\n\n # Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: \"n_x, n_h, n_y\". Outputs = \"W1, b1, W2, b2, parameters\".\n parameters = initialize_parameters(n_x, n_h, n_y)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n\n # Loop (gradient descent)\n for i in range(0, num_iterations):\n # Forward propagation. Inputs: \"X, parameters\". Outputs: \"A2, cache\".\n A2, cache = forward_propagation(X, parameters)\n # Cost function. Inputs: \"A2, Y, parameters\". Outputs: \"cost\".\n cost = compute_cost(A2, Y, parameters)\n # Backpropagation. Inputs: \"parameters, cache, X, Y\". Outputs: \"grads\".\n grads = backward_propagation(parameters, cache, X, Y)\n # Gradient descent parameter update. Inputs: \"parameters, grads\". 
Outputs: \"parameters\".\n parameters = update_parameters(parameters, grads)\n\n # Print the cost every 1000 iterations\n if print_cost and i % 1000 == 0:\n print(\"Cost after iteration %i: %f\" % (i, cost))\n\n return parameters\n\n\n# 预测函数\ndef predict(parameters, X):\n \"\"\"\n Using the learned parameters, predicts a class for each example in X\n Arguments:\n parameters -- python dictionary containing your parameters\n X -- input data of size (n_x, m)\n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n\n # Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.\n A2, cache = forward_propagation(X, parameters)\n # predictions = (A2 > 0.5)\n\n return A2, cache\n\n\n# 训练\nparameters = nn_model(X.T, Y.T, n_h=20, num_iterations=3000, print_cost=True)\n# 预测\npredictions, cache = predict(parameters, test_X.T)\nprint('RMSE: ', (np.sqrt(((predictions - test_y.T) ** 2).mean())))\n\n# print(parameters)\n\ngrads = backward_propagation(parameters, cache, test_X.T, test_y.T)\n\n# 开始计算偏导数\nw2 = np.sum(parameters['W2'], axis=1)/parameters['W2'].shape[0]\na = np.dot(parameters['W1'], test_X.T)\ndI = sigmoid(a) * (1-sigmoid(a))\n\n\nd = np.dot(parameters['W1'].T, dI) * w2\n# print(d)\n# 对计算结果进行归一化操作\n\ndraw_y = ss.fit_transform(d.T)\nres = min_max_scaler.inverse_transform(test_X)\n\n# 绘图\ncloumns = train.columns.tolist()\nfor index in range(len(cloumns)):\n plt.figure(index) # 创建图表1\n y = draw_y[:, index]\n x = res[:, index]\n plt.xlabel(cloumns[index])\n plt.ylabel(\"NEE-\"+cloumns[index])\n plt.scatter(x, y)\n plt.savefig(\"res/\"+cloumns[index]+\".png\")","repo_name":"CuriousLei/DerivationOfANN","sub_path":"tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":9772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31423508311","text":"# input은 숫자와 * / - +\n# input()\n# Calculator 클래스 생성\n# 계산하는 함수\n# 결과값을 출력하는 함수\n\nclass calc:\n\n\n def __init__(self, input):\n self.input = input\n self.string = []\n\n def parse(self):\n self.string = self.input.split(' ')\n \n def calculate(self):\n i = 0\n a = self.string\n# 1 + 2 - 4 * 3\n while i < len(a):\n if a[i] == \"*\":\n result = int(a[i-1]) * int(a[i+1])\n a[i] = result\n a.pop(i+1)\n a.pop(i-1)\n elif a[i] == \"/\":\n result = int(a[i-1]) / int(a[i+1])\n a[i] = result\n a.pop(i+1)\n a.pop(i-1)\n i+=1\n i=0\n while i < len(a):\n if a[i] == \"-\":\n result = int(a[i-1]) - int(a[i+1])\n a[i] = result\n a.pop(i+1)\n a.pop(i-1)\n i = i-1\n elif a[i] == \"+\":\n result = int(a[i-1]) + int(a[i+1])\n a[i] = result\n a.pop(i+1)\n a.pop(i-1)\n i = i-1\n i+=1\n print(a[0])\n\n\n\ndef get_input():\n str = input()\n return str\n\n# str = get_input()\n\ncal = calc(\"1 + 2 - 4 * 3\")\ncal.parse()\ncal.calculate()","repo_name":"RoseBLINK/2022Python-study","sub_path":"pythonGrammar/practice1.py","file_name":"practice1.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"3414609750","text":"import os\nimport json\n\nfrom appwrite.client import Client\nfrom appwrite.services.database import Database\n\ndef init_client():\n # Initialize the Appwrite client\n client = Client()\n client.set_endpoint(os.getenv(\"APPWRITE_ENDPOINT\"))\n client.set_project(os.getenv(\"APPWRITE_PROJECT_ID\"))\n client.set_key(os.getenv(\"APPWRITE_API_KEY\"))\n\n return client\n\ndef main():\n payload = json.loads(os.getenv(\"APPWRITE_FUNCTION_EVENT_DATA\"))\n user_collection_id = os.getenv(\"APPWRITE_USER_COLLECTION_ID\")\n\n userId = payload[\"$id\"]\n userName = payload[\"name\"]\n email = payload[\"email\"]\n\n\n client = init_client()\n database = Database(client)\n\n database.create_document(user_collection_id, {'user_id': userId, 'user_name': userName, 'email': email}, read=['*'])\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"appwrite/demos-for-functions","sub_path":"python/create_user_profile/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"67"}
+{"seq_id":"23675751548","text":"def factorial(val):\n if val == 0:\n return 1\n return factorial(val-1) * val \nval = factorial(int(input()))\ncnt = 0\nwhile True:\n if val % 10**(cnt+1) == 0:\n cnt +=1 \n else:\n break # for consecutive search \nprint(cnt)","repo_name":"amo33/study_projects","sub_path":"Study/python_start/1676.py","file_name":"1676.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16328293552","text":"from flask import request, session, abort\nfrom flask_restx import Namespace, Resource\n\nfrom delivery.calc import calculate_path\nfrom delivery.models import db, Package, Token, Node, User\nfrom delivery.schemas import PackageSchema\nfrom delivery.utils import authed, notify_user, verify_keys\n\npackages = Namespace('packages')\n\n\n@packages.route('')\nclass Packages(Resource):\n @authed\n def get(self):\n if 'filter' not in request.args.keys():\n abort(403, 'insufficient arguments')\n f = request.args['filter']\n ret = {}\n for k, v in {'sending': 'sender_id',\n 'receiving': 'receiver_id',\n 'delivering': 'courier_id',\n 'manage': 'manager_id'}.items():\n pkgs = Package.query.filter_by(**{v: session['user_id']}).all() \\\n if f == 'all' or f == k else []\n ret[k] = [PackageSchema(view=k).dump(item) for item in pkgs]\n return ret\n\n @authed\n @verify_keys({'token': str, 'user_id': int})\n def head(self):\n req = request.json\n package = Package.query.filter_by(token=req['token']).first()\n if package is None:\n abort(404, '快件不存在')\n node = package.current_node\n if node.manager_id != session['user_id']:\n abort(403, '只有快件所在站点的管理员能够调用')\n user = User.query.filter_by(id=req['id']).first()\n if user is None:\n abort(404, '用户不存在')\n package.courier_id = req['id']\n db.session.commit()\n return {'msg': '成功指派送货员'}\n\n @authed\n @verify_keys({'token': str, 'node_uuid': str})\n def post(self):\n token = Token.query.filter_by(token=request.json['token']).first()\n first_node = Node.query.filter_by(\n uuid=request.json['node_uuid']).first()\n if not token:\n abort(404, '收货节点不存在')\n if not first_node:\n abort(404, '发货节点不存在')\n try:\n path = calculate_path(first_node.id, token.address.id)\n except ValueError:\n abort(404, '收发货节点无法联通')\n\n package = Package(\n sender_id=session['user_id'],\n receiver_id=token.user_id,\n next_node_id=first_node.id,\n path=path,\n )\n db.session.add(package)\n db.session.commit()\n return {'uuid': 
package.token}\n\n @authed\n @verify_keys({'uuid': str})\n def put(self):\n package = Package.query.filter_by(token=request.json['uuid']).first()\n\n if not package:\n abort(404, '快件不存在')\n if package.receiver_id == session['user_id']:\n package.progress = len(package.path) - 1\n db.session.commit()\n return {'msg': '快件成功送达'}\n if package.next_node.manager_id != session['user_id']:\n abort(403, '只有节点管理员能够调用')\n\n package.progress = package.progress + 1\n package.manager_id = package.current_node.manager_id\n if package.progress == len(package.path) - 1:\n notify_user(\n package.receiver.open_id,\n 'Inifv86VCZGFaBhIh2ECIini4tJPgBxpC9Gni68zsSM', {\n 'node': package.current_node.token,\n 'phone': package.current_node.manager.phone,\n 'username': package.current_node.manager.username, \n 'code': package.token[:4]\n })\n db.session.commit()\n return {'msg': f'快件成功抵达{package.current_node.id}号节点'}\n","repo_name":"FrankArchive/Delivery","sub_path":"backend/delivery/api/packages.py","file_name":"packages.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"13081807872","text":"\"\"\"\nThis module runs a series of test cases on modules a and b of the Ormuco\nquestions.\n\nUsage: python2 test.py or python3 test.py\ntests for a.py output if lines are overlaping\ntests for b.py output -1, 0 or 1 according to if the first of two version\nnumbers is smaller than, equal to or larger than the second one.\n\nFor details of modules a and b refer to their respective files.\n\"\"\"\n\nimport time\nimport a\nimport b\nimport c\n\n\ndef _test_module(f, test_cases):\n \"\"\"Runs test_cases with function f on origin module\n\n arguments\n f: module with function to run test_cases on\n test_cases: list with test values to run into module function f\n\n \"\"\"\n print('## Testing %s' % f.__name__)\n for t in test_cases:\n print('Testing %s with values: %s' % (f.__name__, str(t)))\n print(f(t[0], t[1]))\n print()\n\n\ntest_cases_a = [\n ([0, 10],[10, 20]), \n ([-10, -9],[-1, -2]), \n ([-10, -5],[-7, 2]), \n ([-5, -10],[2, -7]), \n ([-10, 10],[1, 2]),\n ]\n\ntest_cases_b = [\n ('1.1.1', '1.1.1'),\n ('1.1.1', '1.1.1', '.'),\n ('1_1_1', '1_1_2', '_'),\n ('1_1_2', '1_1_1', '_'),\n ('1_2_1', '1_1_2', '_'),\n ('1_10_1', '1_5_20', '_'),\n ('0.1.1', '1.1.1'),\n ('1.1az.1', '1.1b.1'),\n ('1a.1.1', '1.2d.1'),\n ('1abc.1def.1abc', '1abc.2def.1abc')\n ]\n\ntest_cases_c = [\n ('t1', 1),\n ('t2', 2),\n ('t3', 3),\n ('t4', 4),\n ('t5', 5),\n ('t6', 6),\n ('t7', 7)\n ]\n\n_test_module(a.are_lines_overlaping, test_cases_a)\n_test_module(b.compare_versions, test_cases_b)\n\n# test c.py\nmax_cached_items = 5\nstale_delay = 2 # in seconds\n\nprint('## Testing lru (c.py)')\nprint('Setting up lru with max 5 items and expiry time of 2 seconds')\nlru = c.Lru(max_cached_items, stale_delay)\nfor k, v in test_cases_c:\n print('Adding element in lru')\n lru.set_value(k, v)\n print('Current values in lru %s' % lru.get_values())\nprint('wainting for 1 second')\ntime.sleep(1)\nlru.set_value('t6', 6)\nprint('waiting for initial values to 
expire')\ntime.sleep(1)\nprint('Current values in lru %s' % lru.get_values())\n\n","repo_name":"classmathieuloyer/ormuco","sub_path":"mathieu_loyer_test.py","file_name":"mathieu_loyer_test.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"25723341895","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"../../\")\nimport os\nimport logging\nimport argparse\nimport ConfigParser\nimport common.datetime_wrapper as datetime\nimport common.hadoop_shell_wrapper as hadoop_shell\nimport common.spark_submit_wrapper as spark\n\n\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nlogging.basicConfig(\n level=logging.INFO,\n format='[%(asctime)s - %(filename)s - %(levelname)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n\ndef process(args):\n config = ConfigParser.SafeConfigParser()\n config.read(args.conf)\n\n embed_module_option = \"{}_embedding\".format(args.module)\n\n input_path_bert_embedding = config.get(\"common\", \"hdfs_output\", 0, {\n \"date\": args.date,\n \"dir\": config.get(\"embedding_history\", \"dir\")\n })\n input_path_video_ctr_stat = config.get(\"inputs\", \"video_ctr_stat\", 0, {\"date\": args.date})\n output_path = config.get(\"common\", \"hdfs_output\", 0, {\n \"date\": args.date,\n \"dir\": config.get(embed_module_option, \"dir\")\n })\n\n if hadoop_shell.exists_all(output_path, flag=True, print_missing=True):\n logging.info(\"output already exists, skip\")\n return 0\n\n if not hadoop_shell.exists_all_with_retry(\n input_path_video_ctr_stat,\n flag=True,\n print_info=True,\n retry=config.getint(\"common\", \"upstream_retry\"),\n interval=config.getint(\"common\", \"upstream_interval\")):\n logging.error(\"finally, input not ready, exit!\")\n return 1\n\n if not hadoop_shell.rmr(output_path):\n logging.error(\"fail to clear output folder\")\n return 1\n\n ss = spark.SparkSubmitWrapper()\n\n ss.set_master(\"yarn\")\\\n .set_deploy_mode(\"cluster\")\\\n .set_driver_memory(\"1G\")\\\n .set_executor_memory(\"1G\") \\\n .add_conf(\"spark.executor.memoryOverhead\", 2048) \\\n .set_executor_cores(2)\\\n .set_num_executors(100)\\\n .add_conf(\"spark.network.timeout\", 600)\\\n .set_name(config.get(\"common\", \"job_name\", 0, {'date': 
args.date, 'module': \"FilterEmbedding-{}\".format(args.module)}))\\\n .set_queue(config.get(\"common\", \"job_queue\"))\\\n .set_class(\"com.td.ml.x2vec.bert.FilterEmbedding\")\\\n .set_app_jar(FILE_DIR + \"/../../lib/\" + config.get(\"common\", \"jar\"))\\\n .add_app_argument(\"normalize\", config.getboolean(\"bert\", \"normalize\")) \\\n .add_app_argument(\"ctr_bound\", config.getfloat(embed_module_option, \"ctr_bound\")) \\\n .add_app_argument(\"display_bound\", config.getint(embed_module_option, \"display_bound\")) \\\n .add_app_argument(\"click_bound\", config.getint(embed_module_option, \"click_bound\")) \\\n .add_app_argument(\"input_path_bert_embedding\", input_path_bert_embedding)\\\n .add_app_argument(\"input_path_video_ctr_stat\", input_path_video_ctr_stat)\\\n .add_app_argument(\"output_path\", output_path)\n\n return 0 if ss.run(print_cmd=True, print_info=True) else 1\n\n\ndef del_expire(args):\n config = ConfigParser.SafeConfigParser()\n config.read(args.conf)\n\n embed_module_option = \"{}_embedding\".format(args.module)\n\n if not args.expire or config.getint(\"common\", \"expire\") <= 0:\n return 0\n\n lifetime = config.getint(embed_module_option, \"lifetime\")\n\n if lifetime > 0:\n dt_expire = datetime.DateTime(args.date).apply_offset_by_day(-lifetime)\n expire_path = config.get(\"common\", \"hdfs_output\", 0, {\n \"date\": dt_expire,\n \"dir\": config.get(embed_module_option, \"dir\")\n })\n if not hadoop_shell.rmr(expire_path):\n logging.error(\"fail to del expired path: %s\", expire_path)\n return 1\n return 0\n\n\ndef run(args):\n if process(args) == 0 and del_expire(args) == 0:\n return 0\n return 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='build user inference samples')\n parser.add_argument('--date', dest='date', required=True, help=\"which date(%%Y-%%m-%%d)\")\n parser.add_argument('--conf', dest='conf', required=True, help=\"conf file\")\n parser.add_argument('--module', dest='module', 
required=True, choices=[\"recommendable\", \"queryable\"])\n parser.add_argument('--expire', dest='expire', action=\"store_true\", help=\"whether to del expired path\")\n\n arguments = parser.parse_args()\n\n try:\n if not datetime.DateTime(arguments.date).is_perfect_date():\n raise RuntimeError(\"passed arg [date={}] format error\".format(arguments.date))\n sys.exit(run(arguments))\n except Exception as ex:\n logging.exception(\"exception occur in %s, %s\", __file__, ex)\n sys.exit(1)\n","repo_name":"Jayyyyyyyyyyyy/x2vec","sub_path":"src/scheduling/bert/modules/run_filter_embedding.py","file_name":"run_filter_embedding.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"74937177173","text":"\"\"\"\nbased on Python Syntax highlighting from:\nhttp://diotavelli.net/PyQtWiki/Python%20syntax%20highlighting\n\"\"\"\nimport sys\n\nfrom PyQt4.QtCore import QRegExp\nfrom PyQt4.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter, QPen\n\nfrom tools import loader\n\ndef format(color, style=''):\n \"\"\"Return a QTextCharFormat with the given attributes.\n \"\"\"\n _color = QColor()\n _color.setNamedColor(color)\n\n _format = QTextCharFormat()\n _format.setFontFamily('monospace')\n _format.setForeground(_color)\n if 'bold' in style:\n _format.setFontWeight(QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n\n return _format\n\n\n# Syntax styles that can be shared by all languages\nSTYLES = {\n 'keyword': format('darkMagenta', 'bold'),\n 'operator': format('darkRed'),\n 'brace': format('#858585'),\n 'definition': format('black', 'bold'),\n 'string': format('green'),\n 'string2': format('darkGreen'),\n 'comment': format('gray', 'italic'),\n 'properObject': format('darkBlue', 'italic'),\n 'numbers': format('brown'),\n 'spaces': format('#BFBFBF'),\n}\n\n\nclass Highlighter (QSyntaxHighlighter):\n keywords = []\n\n # operators\n operators = []\n\n # braces\n braces = []\n def __init__(self, document, lang):\n QSyntaxHighlighter.__init__(self, document)\n langSyntax = loader.syntax[lang]\n Highlighter.keywords = langSyntax.get('keywords', [])\n Highlighter.braces = langSyntax.get('brace', [])\n Highlighter.operators = langSyntax.get('operators', [])\n\n rules = []\n\n # Keyword, operator, and brace rules\n rules += [(r'\\b%s\\b' % w, 0, STYLES['keyword'])\n for w in Highlighter.keywords]\n rules += [(r'%s' % o, 0, STYLES['operator'])\n for o in Highlighter.operators]\n rules += [(r'%s' % b, 0, STYLES['brace'])\n for b in Highlighter.braces]\n\n # All other rules\n proper = langSyntax.get('properObject', None)\n if proper is not None:\n proper = '\\\\b' + str(proper[0]) + '\\\\b'\n rules += [\n # 'self'\n 
(proper, 0, STYLES['properObject'])]\n\n rules.append((r'__\\w+__', 0, STYLES['properObject']))\n \n definition = langSyntax.get('definition', [])\n for de in definition:\n expr = '\\\\b' + de + '\\\\b\\\\s*(\\\\w+)'\n rules.append((expr, 1, STYLES['definition']))\n\n rules += [\n # Numeric literals\n (r'\\b[+-]?[0-9]+[lL]?\\b', 0, STYLES['numbers']),\n (r'\\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\\b', 0, STYLES['numbers']),\n (r'\\b[+-]?[0-9]+(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\\b', 0, STYLES['numbers']),\n ]\n\n stringChar = langSyntax.get('string', [])\n for sc in stringChar:\n expr = r'\"[^\"\\\\]*(\\\\.[^\"\\\\]*)*\"' if sc == '\"' else r\"'[^'\\\\]*(\\\\.[^'\\\\]*)*'\"\n rules.append((expr, 0, STYLES['string']))\n\n # Multi-line strings (expression, flag, style)\n # FIXME: The triple-quotes in these two lines will mess up the\n # syntax highlighting from this point onward\n self.tri_single = (QRegExp(\"'''\"), 1, STYLES['string2']) #'''\n self.tri_double = (QRegExp('\"\"\"'), 2, STYLES['string2']) #\"\"\"\n\n comments = langSyntax.get('comment', [])\n for co in comments:\n expr = co + '[^\\\\n]*'\n rules.append((expr, 0, STYLES['comment']))\n\n rules.append(('\\s+', 0, STYLES['spaces']))\n\n # Build a QRegExp for each pattern\n self.rules = [(QRegExp(pat), index, fmt)\n for (pat, index, fmt) in rules]\n\n\n def highlightBlock(self, text):\n \"\"\"Apply syntax highlighting to the given block of text.\n \"\"\"\n # Do other syntax formatting\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = expression.cap(nth).length()\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)\n\n # Do multi-line strings\n in_multiline = self.match_multiline(text, *self.tri_single)\n if not in_multiline:\n in_multiline = self.match_multiline(text, *self.tri_double)\n\n\n def 
match_multiline(self, text, delimiter, in_state, style):\n \"\"\"Do highlighting of multi-line strings. ``delimiter`` should be a\n ``QRegExp`` for triple-single-quotes or triple-double-quotes, and\n ``in_state`` should be a unique integer to represent the corresponding\n state changes when inside those strings. Returns True if we're still\n inside a multi-line string when this function is finished.\n \"\"\"\n # If inside triple-single quotes, start at 0\n if self.previousBlockState() == in_state:\n start = 0\n add = 0\n # Otherwise, look for the delimiter on this line\n else:\n start = delimiter.indexIn(text)\n # Move past this match\n add = delimiter.matchedLength()\n\n # As long as there's a delimiter match on this line...\n while start >= 0:\n # Look for the ending delimiter\n end = delimiter.indexIn(text, start + add)\n # Ending delimiter on this line?\n if end >= add:\n length = end - start + add + delimiter.matchedLength()\n self.setCurrentBlockState(0)\n # No; multi-line string\n else:\n self.setCurrentBlockState(in_state)\n length = text.length() - start + add\n # Apply formatting\n self.setFormat(start, length, style)\n # Look for the next match\n start = delimiter.indexIn(text, start + length)\n\n # Return True if still inside a multi-line string, False otherwise\n if self.currentBlockState() == in_state:\n return True\n else:\n return False\n","repo_name":"calpe20/PYTHONIZANDO","sub_path":"TKINTER/ninja-ide/gui/qt/main_panel/editor/highlighter.py","file_name":"highlighter.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"42694070377","text":"'''\nCreated on May 21, 2021\nTranslated May 23, 2021\n\n@author: bleem\n'''\nfrom src.common import Constants\nfrom src.common import Converter\nfrom src.common.BehaviorsInfo import BehaviorsInfo\nfrom src.common.RescorlaWagnerParameters import RescorlaWagnerParameters\nfrom src.etbd.MutationInfo import MutationInfo\nfrom src.etbd.RecombinationInfo import RecombinationInfo\nfrom src.etbd.SelectionInfo import SelectionInfo\n\n\n# This class is an abstract container for the specific organisms\nclass AnOrganism:\n\t''' \n\tclassdocs\n\t'''\n\n\tdef __init__(self, json_data):\n\t\tself.m_currentSDColor = Constants.SD_COLOR_NULL\n\t\tself.stuBehaviorsInfo = self.load_behaviors_info(json_data)\n\n\tdef get_behaviors_info(self):\n\t\treturn self.stuBehaviorsInfo\n\n\tdef load_behaviors_info(self, json_data):\n\t\tstuBehaviorsInfo = BehaviorsInfo()\n\t\t# Dim objPopulation As Behaviors\n\t\t# Dim objStringBuilder As New System.Text.StringBuilder\n\t\t#Read form--------------------------------------------------\n\t\t# Get discriminative stimulus\n\t\tstuBehaviorsInfo.set_sdid(json_data.get_sdid())\n\t\tif stuBehaviorsInfo.get_sdid() == -1:\n\t\t\treturn # This SD as already been used.\n\n\t\t# Gray codes\n\t\tif json_data.use_gray_codes():\n\t\t\tstuBehaviorsInfo.set_use_gray_codes(True)\n\t\telse:\n\t\t\tstuBehaviorsInfo.set_use_gray_codes(False)\n\n\t\t# Properties\n\t\tstuBehaviorsInfo.set_decay_of_transfer(json_data.get_decay_of_transfer())\n\t\tstuBehaviorsInfo.set_fomo_a(json_data.get_fomo_a())\n\n\t\t#-----Viscosity\n\t\tif json_data.add_viscosity():\n\t\t\tstuBehaviorsInfo.set_viscosity_ticks(json_data.get_viscosity_ticks())\n\t\t\tif json_data.get_viscosity_selected_index() == 0:\n\t\t\t\t# \"original\"\n\t\t\t\tstuBehaviorsInfo.set_create_from_synthetic(False)\n\t\t\telse:\n\t\t\t\t# \"amalgamated\"\n\t\t\t\tstuBehaviorsInfo.set_create_from_synthetic(True)\n\n\t\telse:\n\t\t\t# if populations are to have no viscosity, then 
ViscosityTicks = 0\n\t\t\t# Note that when ViscosityTicks = 1 there is also no viscosity.\n\t\t\t# When ViscosityTicks = 0, the standard method of emitting a behavior\n\t\t\t# (random selection among phenotypes) is used; when ViscosityTicks = 1\n\t\t\t# the method based on relative frequencies is used.\n\t\t\t# Both methods should give the same results.\n\t\t\tstuBehaviorsInfo.set_viscosity_ticks(0)\n\n\t\tstuBehaviorsInfo.set_num_behaviors(json_data.get_num_behaviors())\n\t\tstuBehaviorsInfo.set_low_phenotype(json_data.get_low_phenotype())\n\t\tstuBehaviorsInfo.set_high_phenotype(json_data.get_high_phenotype())\n\t\tstuBehaviorsInfo.set_percent_to_replace(json_data.get_percent_to_replace())\n\t\tstuBehaviorsInfo.set_percent_to_replace_2(json_data.get_percent_to_replace_2())\n\t\tstuBehaviorsInfo.set_fitness_method(json_data.get_fitness_method())\n\t\tstuBehaviorsInfo.set_fitness_landscape(json_data.get_fitness_landscape())\n\t\tstuBehaviorsInfo.set_punishment_method(json_data.get_punishment_method())\n\t\t# Data structures\n\t\tstuBehaviorsInfo.set_RW_info(self.load_RW_info(json_data))\n\t\tstuBehaviorsInfo.set_selection_info(self.load_selection_info(json_data))\n\t\tstuBehaviorsInfo.set_recombination_info(self.load_recombination_info(json_data))\n\t\tstuBehaviorsInfo.set_mutation_info(self.load_mutation_info(json_data))\n\t\t# Non-ETBD 
parameters\n\t\tstuBehaviorsInfo.set_num_hidden_nodes(json_data.get_num_hidden_nodes())\n\t\tstuBehaviorsInfo.set_num_output_nodes(json_data.get_num_output_nodes())\n\t\tstuBehaviorsInfo.set_num_firing_hidden_nodes(json_data.get_num_firing_hidden_nodes())\n\n\t\tstuBehaviorsInfo.set_net_one_magnitude_slope(json_data.get_net_one_magnitude_slope())\n\t\tstuBehaviorsInfo.set_net_one_magnitude_intercept(json_data.get_net_one_magnitude_intercept())\n\t\tstuBehaviorsInfo.set_net_one_neutral_magnitude(json_data.get_net_one_neutral_magnitude())\n\n\t\tstuBehaviorsInfo.set_net_two_neutral_magnitude(json_data.get_net_two_neutral_magnitude())\n\t\tstuBehaviorsInfo.set_net_two_selection_strength_exponent(json_data.get_net_two_selection_strength_exponent())\n\t\tstuBehaviorsInfo.set_net_two_selection_strength_multiplier(json_data.get_net_two_selection_strength_multiplier())\n\t\tstuBehaviorsInfo.set_net_two_num_hidden_nodes(json_data.get_net_two_num_hidden_nodes())\n\n\t\tstuBehaviorsInfo.set_ml_learning_rate(json_data.get_ml_learning_rate())\n\t\tstuBehaviorsInfo.set_ml_num_slots(json_data.get_ml_num_slots())\n\t\tstuBehaviorsInfo.set_ml_reward_multiplier(json_data.get_ml_reward_multiplier())\n\t\tstuBehaviorsInfo.set_ml_reward_exponent(json_data.get_ml_reward_exponent())\n\t\tstuBehaviorsInfo.set_ml_pessimism(json_data.get_ml_pessimism())\n\t\tstuBehaviorsInfo.set_ml_extinction(json_data.get_ml_extinction())\n\t\tstuBehaviorsInfo.set_ml_epsilon(json_data.get_ml_epsilon())\n\t\tstuBehaviorsInfo.set_ml_discount_rate(json_data.get_ml_discount_rate())\n\n\t\treturn stuBehaviorsInfo\n\n\tdef reset_state(self):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef is_ready_to_emit(self):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef emit_behavior(self):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef set_selection(self, 
selectionParameter, value):\n\t\traise NotImplementedError\n\t\t# TODO - implement this for the other organism types\n\n\tdef load_selection_info(self, json_data):\n\n\t\tstuSelectionInfo = SelectionInfo()\n\t\tstuSelectionInfo.set_selection_method(json_data.get_selection_method())\n\t\tstuSelectionInfo.set_continuous_function_form(json_data.get_continuous_function_form())\n\n\t\t# High Phenotype------------------------------------------------------(added to implement punishment)\n\t\tstuSelectionInfo.set_high_phenotype(json_data.get_high_phenotype())\n\n\t\t# Fitness Landscare---------------------------------------------------(added to implement punishment)\n\t\tstuSelectionInfo.set_fitness_landscape(json_data.get_fitness_landscape())\n\t\tstuSelectionInfo.set_matchmaking_method(json_data.get_matchmaking_method())\n\n\t\treturn stuSelectionInfo\n\n\tdef load_recombination_info(self, json_data):\n\n\t\tstuRecombinationInfo = RecombinationInfo()\n\t\tstuRecombinationInfo.set_method(json_data.get_recombination_method())\n\n\t\tif json_data.get_recombination_method() == Constants.RECOMBINATION_METHOD_CROSSOVER:\n\t\t\tstuRecombinationInfo.set_points(json_data.get_crossover_points())\n\n\t\treturn stuRecombinationInfo\n\n\tdef load_mutation_info(self, json_data):\n\n\t\tstuMutationInfo = MutationInfo()\n\t\tstuMutationInfo.set_method(json_data.get_mutation_method())\n\n\t\tif stuMutationInfo.get_method() == Constants.MUTATION_METHOD_GAUSSIAN:\n\t\t\tstuMutationInfo.set_sd(json_data.get_gaussian_mutation_sd())\n\t\t\tstuMutationInfo.set_boundary(json_data.get_mutation_boundary())\n\n\t\tstuMutationInfo.set_rate(json_data.get_mutation_rate())\n\n\t\t# Redundant info needed by the Mutator object\n\t\tif 
json_data.use_gray_codes():\n\t\t\tstuMutationInfo.set_use_gray_codes(True)\n\t\telse:\n\t\t\tstuMutationInfo.set_use_gray_codes(False)\n\t\tstuMutationInfo.set_high_phenotype(json_data.get_high_phenotype())\n\t\tstuMutationInfo.set_low_phenotype(json_data.get_low_phenotype())\n\n\t\treturn stuMutationInfo\n\n\tdef get_sdcolor(self):\n\t\treturn self.m_currentSDColor\n\n\tdef get_sdcolor_str(self):\n\t\treturn Converter.convert_sd_color_to_string(self.m_currentSDColor)\n\n\tdef set_sdcolor(self, value):\n\t\traise NotImplementedError\n\n\tdef load_RW_info(self, json_data):\n\n\t\tstuRWInfo = RescorlaWagnerParameters()\n\n\t\tstuRWInfo.set_alpha(json_data.get_alpha())\n\t\tstuRWInfo.set_beta_0(json_data.get_beta_0())\n\t\tstuRWInfo.set_beta_1(json_data.get_beta_1())\n\t\tstuRWInfo.set_berg_a(1) # Hard coded to 1 for now.\n\t\tstuRWInfo.set_lambda(1) # Hard coded to 1 for now.\n\n\t\treturn stuRWInfo\n","repo_name":"misterriley/PyETBD","sub_path":"src/orgs/AnOrganism.py","file_name":"AnOrganism.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"25751003092","text":"from django.db import models\n\n# Create your models here.\nDUES_TYPE = [\n\t('NEWQ1', 'New member - Jan to Dec'),\n\t('NEWQ2', 'New member - Apr to Dec'),\n\t('NEWQ3', 'New member - Jul to Dec'),\n\t('NEWQ4', 'New member - Oct to Dec'),\n\t('RENEW', 'Renewal - Jan to Dec'),\n]","repo_name":"pineapplejuice/earc2-members","sub_path":"helpers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"28026990567","text":"data = \"\"\"Given a string containing just the characters '(' and ')', return the length of the longest valid (well-formed) parentheses substring\nExample 1:\n\nInput: s = \"(()\"\nOutput: 2\nExplanation: The longest valid parentheses substring is \"()\".\nExample 2:\n\nInput: s = \")()())\"\nOutput: 4\nExplanation: The longest valid parentheses substring is \"()()\".\n\n\"\"\"\n\n\nclass Solution:\n\n def longestValidParentheses(self, s: str) -> int:\n stack = []\n max_1 = 0\n count = 0\n for c in s:\n if c == '(':\n stack.append('(')\n elif c == ')' and stack:\n if stack[-1] == '(':\n count += 2\n stack.pop()\n else:\n count = 0\n if max_1 < count:\n max_1 = count\n max_2 = 0\n count = 0\n if max_1 != len(s):\n stack = []\n for c in s[::-1]:\n if c == ')':\n stack.append(')')\n elif c == '(' and stack:\n if stack[-1] == ')':\n count += 2\n stack.pop()\n else:\n count = 0\n if max_2 < count:\n max_2 = count\n print(max_1, max_2)\n return max_1 if max_1 <= max_2 and max_2 != 0 else max_2\n\n\nprint(Solution().longestValidParentheses(\")()())\"))\n","repo_name":"laxman590249/Data-Structures","sub_path":"DataStructures/DP/maximum_valid_para.py","file_name":"maximum_valid_para.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"21445685063","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport numpy as np\nimport math\nimport molecule_predictionv2_0 as mp\n\n\n# In[5]:\n\n\nimport tensorflow as tf\n\n\n# In[6]:\n\n\ndat = mp.molecule_prediction_data_wrapper()\n\n\n# In[21]:\n\n\natom_vector_size = 1024\nnum_timesteps = 5\nhidden_unit_size = 1024\nbatchSize = 32\nnum_epochs = 200\nbatch_gen = dat.batch_gen\ndataset = tf.data.Dataset. from_generator(batch_gen.generate, (tf.float32,tf.float32),\n output_shapes= (tf.TensorShape([30,1054]), \n tf.TensorShape([3]))) \ndataset = dataset.shuffle(buffer_size = batchSize*10) \ndataset = dataset.repeat(num_epochs).batch(batchSize)\ndataset = dataset.prefetch(buffer_size = 2)\ndata_source = dataset.make_one_shot_iterator()\nbatch_in, batch_y = data_source.get_next()\n\n\n# In[8]:\n\n\nwith tf.variable_scope('message_transform_network'):\n hidden1 = tf.keras.layers.Dense(2048,activation='relu')\n hidden1.build((None,atom_vector_size))\n hidden2 = tf.keras.layers.Dense(2048,activation='tanh')\n hidden2.build((None,2048))\n hidden3 = tf.keras.layers.Dense(1024,activation='relu')\n hidden3.build((None,2048))\n out_message = tf.keras.layers.Dense(atom_vector_size)\n out_message.build((None, 1024))\ndef apply_edge_neural_network_transform(input):\n return out_message.apply(hidden3.apply(hidden2.apply(hidden1.apply(input))))\n\n\n# In[9]:\n\n\ndef get_messages(ordered_atoms_vector, adjacency_matrix):\n transformed = apply_edge_neural_network_transform(ordered_atoms_vector)\n return tf.matmul(adjacency_matrix,transformed)\n\n\n# In[10]:\n\n\nwith tf.variable_scope(\"RecusiveUnitLTSM\"):\n shared_lstm_cell = tf.keras.layers.LSTMCell(hidden_unit_size);\ndef iterate_time_step(states_vectors_t, messages):\n (outputs_t, states_vectors_t_1) = shared_lstm_cell(messages, states_vectors_t)\n return outputs_t, states_vectors_t_1\n\n\n# In[11]:\n\n\ninitial_state1 = tf.Variable(np.random.normal(size=(1,hidden_unit_size)),\n 
trainable=True,dtype=tf.float32);\ninitial_state2 = tf.Variable(np.random.normal(size=(1,hidden_unit_size)),\n trainable=True,dtype=tf.float32);\ninitial_states1 = tf.reshape(tf.tile(initial_state1, (1,tf.shape(batch_in)[1])),\n [tf.shape(batch_in)[1], hidden_unit_size])\ninitial_states2 = tf.reshape(tf.tile(initial_state2, (1,tf.shape(batch_in)[1])),\n [tf.shape(batch_in)[1], hidden_unit_size])\ninitial_states = (initial_states1,initial_states2)\n\n\n# In[12]:\n\n\ndef extract_atom_vectors_ad_matrix(input_mat):\n num_atoms = tf.shape(input_mat)[0]\n return tf.slice(input_mat,[0,0],[num_atoms,atom_vector_size]), tf.slice(input_mat,[0,atom_vector_size],[num_atoms,num_atoms])\n\n\n# In[13]:\n\n\ndef graph_neural_network(concatenated_input_mat):\n global initial_states\n initial_output,ad_matrix = extract_atom_vectors_ad_matrix(concatenated_input_mat)\n outputs_x = initial_output\n states_vectors_x = initial_states\n for i in range(num_timesteps):\n messages = get_messages(outputs_x, ad_matrix)\n outputs_x, states_vectors_x = iterate_time_step(states_vectors_x,messages) \n final_outputs = outputs_x\n out = tf.reduce_sum(final_outputs, axis=0)\n return out \n\n\n# In[25]:\n\n\nfinal_outputs=tf.map_fn(graph_neural_network,batch_in)\nprediction = tf.keras.layers.Dense(3)(final_outputs)\nwith tf.name_scope(\"loss\"):\n loss = tf.losses.mean_squared_error(batch_y, prediction)\n\n\n# In[15]:\n\n\nlearning_rate = 0.001\nwith tf.name_scope(\"train\"):\n global training_op\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_op = optimizer.minimize(loss)\n\n\n# In[27]:\n\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n init.run()\n for epoch in range(num_epochs):\n for iteration in range(batch_gen.samples// batchSize):\n _,loss_value = sess.run([training_op,loss])\n if iteration % 500 == 0:\n print(\"Epoch \" + str(epoch) + \" Step \" + str(iteration) + \" loss \" + str(loss_value))\n if epoch % 20 == 
0:\n save_path = saver.save(sess, \"../models/graph_model_\" + str(epoch) + \".ckpt\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"neilhazra/KaggleMoleculePrediction","sub_path":"MoleculePrediction/Preliminary/GraphNeuralNetworkTest.py","file_name":"GraphNeuralNetworkTest.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"37521617327","text":"from __future__ import print_function, division\nfrom functools import partial\nimport os\nimport sys\nimport re\nimport copy\nimport time\nimport shutil\nimport codecs\nimport tarfile\nimport tempfile\nif sys.version_info[0] < 3:\n\tfrom cStringIO import StringIO\n\tstring_types = basestring\nelse:\n\tfrom io import StringIO\n\tstring_types = str\nfrom textwrap import TextWrapper\nfrom unicodedata import normalize\nfrom collections import defaultdict\nfrom xml.sax.saxutils import escape\nif (sys.version_info[0] < 3):\n\timport urlparse, urllib\nelse:\n\timport urllib, urllib.parse as urlparse\nimport zipfile\n\nfrom operator import itemgetter\nfrom decimal import Decimal, getcontext\ngetcontext().prec = 8\n\n\n#------------------------------------------------\n# Set up logging\n#------------------------------------------------\nimport logging\nlog = logging.getLogger(\".DynamicWeb\")\n# The log initialization shall be performed after Gramps start-up (i.e. 
not here)\n\n#------------------------------------------------\n# Gramps module\n#------------------------------------------------\n\n\nfrom gramps.gen.const import IMAGE_DIR, GRAMPS_LOCALE as glocale\ntry:\n\t_trans = glocale.get_addon_translator(__file__)\nexcept ValueError:\n\t_trans = glocale.translation\n_ = _trans.sgettext\n\nfrom gramps.version import VERSION, VERSION_TUPLE\nDWR_VERSION_410 = (VERSION_TUPLE[0] >= 4) and (VERSION_TUPLE[1] >= 1)\nDWR_VERSION_412 = (VERSION_TUPLE[0] >= 4) and (VERSION_TUPLE[1] >= 1) and (VERSION_TUPLE[2] >= 2)\nDWR_VERSION_420 = (VERSION_TUPLE[0] >= 4) and (VERSION_TUPLE[1] >= 2)\nfrom gramps.gen.lib import (ChildRefType, Date, EventType, FamilyRelType, Name,\n\t\t\t\t\t\t\tNameType, Person, UrlType, NoteType,\n\t\t\t\t\t\t\tEventRoleType, Family, Event, Place, Source,\n\t\t\t\t\t\t\tCitation, MediaObject, Repository, Note, Tag,\n\t\t\t\t\t\t\tMediaRef, Location)\nif (DWR_VERSION_410):\n\tfrom gramps.gen.lib import PlaceType\nfrom gramps.gen.lib.date import Today\nfrom gramps.gen.const import PROGRAM_NAME, URL_HOMEPAGE\nfrom gramps.gen.plug.menu import (PersonOption, NumberOption, StringOption,\n\tBooleanOption, EnumeratedListOption, FilterOption,\n\tNoteOption, MediaOption, DestinationOption, ColorOption)\nfrom gramps.gen.plug.report import (Report, Bibliography)\nfrom gramps.gen.plug.report import utils as report_utils\nfrom gramps.gen.plug.report import MenuReportOptions\n\nfrom gramps.gen.utils.config import get_researcher\nfrom gramps.gen.utils.string import conf_strings\nfrom gramps.gen.utils.file import media_path_full\nfrom gramps.gen.utils.alive import probably_alive\nfrom gramps.gen.utils.db import get_source_and_citation_referents, get_birth_or_fallback, get_death_or_fallback, get_marriage_or_fallback\nfrom gramps.gen.constfunc import win, conv_to_unicode, get_curr_dir\nif (sys.version_info[0] < 3):\n\tfrom gramps.gen.constfunc import UNITYPE\nelse:\n\tUNITYPE = str\nfrom gramps.gen.config import config\nfrom 
gramps.gui.thumbnails import get_thumbnail_path, run_thumbnailer\nfrom gramps.gen.utils.image import image_size, resize_to_jpeg_buffer\nfrom gramps.gen.mime import get_description\nfrom gramps.gen.display.name import displayer as _nd\nif (DWR_VERSION_412):\n\tfrom gramps.gen.display.place import displayer as _pd\nfrom gramps.gen.datehandler import get_date_formats, displayer as _dd\nfrom gramps.gen.proxy import PrivateProxyDb, LivingProxyDb\nfrom gramps.plugins.lib.libhtmlconst import _CHARACTER_SETS, _CC, _COPY_OPTIONS\n\n# import HTML Class from src/plugins/lib/libhtml.py\nfrom gramps.plugins.lib.libhtml import Html, xml_lang\n\n# import styled notes from src/plugins/lib/libhtmlbackend.py\nfrom gramps.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces\n\nfrom gramps.plugins.lib.libgedcom import make_gedcom_date, DATE_QUALITY\n\nfrom gramps.plugins.webreport.narrativeweb import first_letter\n\nfrom gramps.gen.utils.place import conv_lat_lon\nfrom gramps.gui.pluginmanager import GuiPluginManager\n\nfrom gramps.gen.relationship import get_relationship_calculator\nif (DWR_VERSION_410):\n\tfrom gramps.gen.utils.location import get_main_location\n\nfrom gramps.gui.widgets.fanchart import (\n\tGENCOLOR,\n\tGRADIENTSCALE,\n\tBACKGROUND_SCHEME1,\n\tBACKGROUND_SCHEME2,\n\tBACKGROUND_GENDER,\n\tBACKGROUND_WHITE,\n\tBACKGROUND_GRAD_GEN,\n\tBACKGROUND_GRAD_AGE,\n\tBACKGROUND_SINGLE_COLOR,\n\tBACKGROUND_GRAD_PERIOD,\n)\nfrom gramps.gui.utils import hex_to_rgb\n\nSORT_KEY = glocale.sort_key\n\n#------------------------------------------------\n# constants\n#------------------------------------------------\n\n#: Maximum number of pages containing custom text\nNB_CUSTOM_PAGES = 5\n#: Maximum number of pages =\nNB_TOTAL_PAGES_MAX = 15\n#: Liste of the pages (description, title, file name)\nPAGES_NAMES = [\n\t(_(\"Person page\"), _(\"Person\"), \"person.html\"),\n\t(_(\"Surnames index page\"), _(\"Surnames\"), \"surnames.html\"),\n\t(_(\"Individuals index page\"), 
_(\"Individuals\"), \"persons.html\"),\n\t(_(\"Families index page\"), _(\"Families\"), \"families.html\"),\n\t(_(\"Sources index page\"), _(\"Sources\"), \"sources.html\"),\n\t(_(\"Media index page\"), _(\"Media\"), \"medias.html\"),\n\t(_(\"Places index page\"), _(\"Places\"), \"places.html\"),\n\t(_(\"Addresses page\"), _(\"Addresses\"), \"address.html\"),\n\t(_(\"Repositories index page\"), _(\"Repositories\"), \"repositories.html\"),\n\t(_(\"SVG graphical tree\"), _(\"Tree\"), \"tree_svg.html\"),\n] + [\n\t(_(\"Custom page %(index)i\") % {\"index\": i + 1}, _(\"Custom\"), \"custom_%i.html\" % (i + 1))\n\tfor i in range(NB_CUSTOM_PAGES)\n]\n\n# Constants used as indexes in L{PAGES_NAMES}\n(PAGE_PERSON,\nPAGE_SURNAMES,\nPAGE_PERSON_INDEX,\nPAGE_FAMILY_INDEX,\nPAGE_SOURCE_INDEX,\nPAGE_MEDIA_INDEX,\nPAGE_PLACE_INDEX,\nPAGE_ADDRESS_INDEX,\nPAGE_REPOSITORY_INDEX,\nPAGE_SVG_TREE,\nPAGE_CUSTOM) = range(11)\n\n# List of the descriptions of the tree graphs types\nSVG_TREE_TYPES = [\n\t_(\"Ascending tree\"),\n\t_(\"Descending tree\"),\n\t_(\"Descending tree with spouses\"),\n\t_(\"Ascending and descending tree\"),\n\t_(\"Ascending and descending tree with spouses\"),\n]\n(SVG_TREE_TYPE_ASCENDING,\nSVG_TREE_TYPE_DESCENDING,\nSVG_TREE_TYPE_DESCENDING_SPOUSES,\nSVG_TREE_TYPE_ASCDESC,\nSVG_TREE_TYPE_ASCDESC_SPOUSES) = range(len(SVG_TREE_TYPES))\nDEFAULT_SVG_TREE_TYPE = SVG_TREE_TYPE_ASCDESC\n\nSVG_TREE_SHAPES = [\n\t_(\"Vertical (↓)\"),\n\t_(\"Vertical (↑)\"),\n\t_(\"Horizontal (→)\"),\n\t_(\"Horizontal (←)\"),\n\t_(\"Full Circle\"),\n\t_(\"Half Circle\"),\n\t_(\"Quadrant\"),\n]\n(SVG_TREE_SHAPE_VERTICAL_TOP_BOTTOM,\nSVG_TREE_SHAPE_VERTICAL_BOTTOM_TOP,\nSVG_TREE_SHAPE_HORIZONTAL_LEFT_RIGHT,\nSVG_TREE_SHAPE_HORIZONTAL_RIGHT_LEFT,\nSVG_TREE_SHAPE_CIRCLE,\nSVG_TREE_SHAPE_HALF_CIRCLE,\nSVG_TREE_SHAPE_QUADRANT) = range(len(SVG_TREE_SHAPES))\nDEFAULT_SVG_TREE_SHAPE = SVG_TREE_SHAPE_HORIZONTAL_LEFT_RIGHT\n\nSVG_TREE_DISTRIB_ASC = [\n\t_('Size proportional to number of 
ancestors'),\n\t_('Homogeneous parents distribution'),\n]\nSVG_TREE_DISTRIB_DSC = [\n\t_('Size proportional to number of descendants'),\n\t_('Homogeneous children distribution'),\n]\n(SVG_TREE_DISTRIB_PROPORTIONAL,\nSVG_TREE_DISTRIB_HOMOGENEOUS) = range(len(SVG_TREE_DISTRIB_ASC))\nDEFAULT_SVG_TREE_DISTRIB = SVG_TREE_DISTRIB_PROPORTIONAL\n\nSVG_TREE_BACKGROUNDS = [\n\t_('Gender colors'),\n\t_('Generation based gradient'),\n\t_('Age based gradient'),\n\t_('Single main (filter) color'),\n\t_('Time period based gradient'),\n\t_('White'),\n\t_('Color scheme classic report'),\n\t_('Color scheme classic view'),\n]\n(SVG_TREE_BACKGROUND_GENDER,\nSVG_TREE_BACKGROUND_GENERATION,\nSVG_TREE_BACKGROUND_AGE,\nSVG_TREE_BACKGROUND_SINGLE,\nSVG_TREE_BACKGROUND_PERIOD,\nSVG_TREE_BACKGROUND_WHITE,\nSVG_TREE_BACKGROUND_SCHEME1,\nSVG_TREE_BACKGROUND_SCHEME2) = range(len(SVG_TREE_BACKGROUNDS))\nDEFAULT_SVG_TREE_BACKGROUND = SVG_TREE_BACKGROUND_GENERATION\n\n#: Templates for the website, in the form: [directory, name]\n# First template is the default one:\n# The files in the default template are used when they are not present in another template\n# Only the files that are different from the default template are present in the other templates directories\nWEB_TEMPLATE_LIST = (\n\t(\"dwr_default\", _(\"Default\")),\n\t(\"dwr_mainz\", _(\"Mainz\")),\n)\n\n\nINCLUDE_LIVING_VALUE = 99 #: Arbitrary number\n\n# Indexes in the L{DynamicWebReport.obj_dict} and L{DynamicWebReport.bkref_dict} elements\nOBJDICT_NAME = 0\nOBJDICT_GID = 1\nOBJDICT_INDEX = 2\nBKREF_CLASS = 0\nBKREF_HANDLE = 1\nBKREF_REFOBJ = 2\n\n\n_html_dbl_quotes = re.compile(r'([^\"]*) \" ([^\"]*) \" (.*)', re.VERBOSE)\n_html_sng_quotes = re.compile(r\"([^']*) ' ([^']*) ' (.*)\", re.VERBOSE)\n\ndef html_escape(text):\n\t\"\"\"Convert the text and replace some characters with a variant.\"\"\"\n\t# First single characters, no quotes\n\ttext = escape(text)\n\t# Deal with double quotes.\n\tm = _html_dbl_quotes.match(text)\n\twhile 
m:\n\t\ttext = \"%s\" \"“\" \"%s\" \"”\" \"%s\" % m.groups()\n\t\tm = _html_dbl_quotes.match(text)\n\t# Replace remaining double quotes.\n\ttext = text.replace('\"', '"')\n\t# Deal with single quotes.\n\ttext = text.replace(\"'s \", '’s ')\n\tm = _html_sng_quotes.match(text)\n\twhile m:\n\t\ttext = \"%s\" \"‘\" \"%s\" \"’\" \"%s\" % m.groups()\n\t\tm = _html_sng_quotes.match(text)\n\t# Replace remaining single quotes.\n\ttext = text.replace(\"'\", ''')\n\n\treturn text\n\n\ndef script_escape(text):\n\t\"\"\"Convert the text and escape quotes, backslashes and end-of-lines\n\t\"\"\"\n\treturn(text.\n\t\treplace(\"\\\\\", \"\\\\\\\\\").\n\t\treplace(\"'\", \"\\\\'\").\n\t\treplace(\"\\\"\", \"\\\\\\\"\").\n\t\treplace(\"\\n\", \"\\\\n\")\n\t)\n\n\ndef html_text(html):\n\t\"\"\"Get the string corresponding to an L{Html} object\"\"\"\n\tif (isinstance(html, string_types)): return(html.strip())\n\tsw = StringIO()\n\thtml.write(partial(print, file = sw), indent = \"\", tabs = \"\")\n\treturn(sw.getvalue().strip())\n\n\ndef format_date(date, gedcom = False, iso = False):\n\t\"\"\"Give the date as a string\n\t@param iso: If True, the date should be given in ISO format: YYYY-MM-DD\n\t@type iso: Boolean\n\t\"\"\"\n\tif (not date): return(\"\")\n\t\n\tval = \"\"\n\t\n\tif (iso):\n\t\t# TODO: export ISO dates\n\t\t# if (iso): val = DateDisplay.display(date) or \"\"\n\t\t# else: val = _dd.display(date) or \"\"\n\t\tpass\n\t\t\n\telif (gedcom):\n\t\tstart = date.get_start_date()\n\t\tif start != Date.EMPTY:\n\t\t\tcal = date.get_calendar()\n\t\t\tmod = date.get_modifier()\n\t\t\tquality = date.get_quality()\n\t\t\tif quality in DATE_QUALITY:\n\t\t\t\tqual_text = DATE_QUALITY[quality] + \" \"\n\t\t\telse:\n\t\t\t\tqual_text = \"\"\n\t\t\tif mod == Date.MOD_SPAN:\n\t\t\t\tval = \"%sFROM %s TO %s\" % (\n\t\t\t\t\tqual_text,\n\t\t\t\t\tmake_gedcom_date(start, cal, mod, None), \n\t\t\t\t\tmake_gedcom_date(date.get_stop_date(), cal, mod, None))\n\t\t\telif mod == 
def rmtree_fix(dirname):
	"""Windows fix: Python shutil.rmtree does not work properly on Windows.
	Unfortunately this fix is not completely working. Don't know why.
	The strategy is to rename the directory first, in order to let Windows delete it in differed time.
	"""
	#TODO: Fix shutil.rmtree on Windows
	tmp = dirname + "_removetree_tmp"
	os.rename(dirname, tmp)
	shutil.rmtree(tmp)
	# Wait for rmtree to complete
	# NOTE(review): shutil.rmtree is synchronous on most platforms; this
	# poll (up to 10 s) guards against Windows removing the renamed
	# directory lazily — confirm it is still needed.
	for i in range(100):
		if (not os.path.exists(tmp)): break
		time.sleep(0.1)



class DynamicWebReport(Report):
	"""
	Class DynamicWebReport
	
	Extracts information from the database and exports the data into Javascript and HTML files
	
	The database extraction is performed by the method L{_build_obj_dict}. 
It recursively calls the methods "_add_***".
	
	The database extraction builds:
	 - indexes of the objects selected for the report as dictionaries,
	 - for each object (of the report), references to the objects calling this object.
	 
	The indexes of the objects selected are stored as dictionaries "obj_dict[class][handle]",
	indexed by the object class,
	indexed by the database handle,
	containing for each report object the following information:
	 - object file name, if any,
	 - object name,
	 - gramps id,
	 - object index, starting from 0,
	   only counting the objects selected,
	   each object type is counted separately.
	
	The references to objects are stored as dictionaries "bkref_dict[class][handle]",
	indexed by the object class,
	indexed by the database handle,
	containing for each report object the following information:
	 - class of the object referencing it,
	 - handle of the object referencing it,
	 - reference object (MediaRef, EventRef) if any.
	 
	The report is generated by L{write_report}
	"""

	def __init__(self, database, options, user):
		"""
		Create WebReport object that produces the report.

		The arguments are:

		database - the Gramps database instance
		options - instance of the Options class for this report
		user - instance of a gen.user.User()
		"""

		Report.__init__(self, database, options, user)
		self.user = user
		menu = options.menu
		self.link_prefix_up = True
		self.options = {}

		# Take a snapshot of every menu option value into self.options
		for optname in menu.get_all_option_names():
			menuopt = menu.get_option_by_name(optname)
			self.options[optname] = menuopt.get_value()

		# Wrap the database in a proxy hiding private records, unless they
		# are explicitly included
		if not self.options['incpriv']:
			self.database = PrivateProxyDb(database)
		else:
			self.database = database

		livinginfo = self.options['living']
		yearsafterdeath = self.options['yearsafterdeath']

		# Wrap the database in a proxy restricting living people data,
		# unless the option asks to include them fully
		if livinginfo != INCLUDE_LIVING_VALUE:
			self.database = LivingProxyDb(self.database, livinginfo, None, yearsafterdeath)

		filters_option = menu.get_option_by_name('filter')
		self.filter = filters_option.get_filter()

		self.target_path = self.options['target'] #: Destination directory
		self.ext = ".html" #: HTML files extension
		self.title = self.options['title'] #: Web site title. Web pages title are in the form "title of the page - title of the site"

		self.author = get_researcher().get_name() #: Database author name. Used in copyright text.
		if self.author:
			self.author = self.author.replace(',,,', '')

		# The following data are local copies of the options. Refer to the L{DynamicWebOptions} class for more details.
		self.inc_events = self.options['inc_events']
		self.inc_places = self.options['inc_places']
		self.inc_families = self.options['inc_families']
		self.inc_gallery = self.options['inc_gallery']
		self.copy_media = self.options['copy_media']
		self.inc_notes = self.options['inc_notes']
		self.print_notes_type = self.options['print_notes_type']
		self.inc_sources = self.options['inc_sources']
		self.inc_repositories = self.options['inc_repositories']
		# Repositories are not exported unless sources are exported
		self.inc_repositories = self.inc_repositories and self.inc_sources
		self.inc_addresses = self.options['inc_addresses']
		self.name_format = self.options['name_format']
		self.short_name_format = self.options['short_name_format']
		self.encoding = self.options['encoding']
		self.copyright = self.options['copyright']
		self.inc_gendex = self.options['inc_gendex']
		self.template = self.options['template']
		self.pages_number = self.options['pages_number']
		self.page_content = [
			self.options['page_content_%i' %i]
			for i in range(self.pages_number)
		]
		self.page_name = [
			self.options['page_name_%i' %i]
			for i in range(len(PAGES_NAMES))
		]
		self.custom_menu = [
			self.options['custom_menu_%i' %i]
			for i in range(NB_CUSTOM_PAGES)
		]
		self.custom_note = [
			self.options['custom_note_%i' %i]
			for i in range(NB_CUSTOM_PAGES)
		]
		# Filter pages that cannot be exported due to other options
		self.page_content = [pc for pc in self.page_content if (not(
			(pc == PAGE_FAMILY_INDEX and not self.inc_families) or
			(pc == PAGE_MEDIA_INDEX and not self.inc_gallery) or
			(pc == PAGE_SOURCE_INDEX and not self.inc_sources) or
			(pc == PAGE_REPOSITORY_INDEX and not self.inc_repositories) or
			(pc == PAGE_PLACE_INDEX and not self.inc_places)
		))]
		# pages_number is recomputed after the filtering above
		self.pages_number = len(self.page_content)

		self._backend = HtmlBackend()
		self._backend.build_link = self.build_link
= [\n\t\t\tself.options['custom_menu_%i' %i]\n\t\t\tfor i in range(NB_CUSTOM_PAGES)\n\t\t]\n\t\tself.custom_note = [\n\t\t\tself.options['custom_note_%i' %i]\n\t\t\tfor i in range(NB_CUSTOM_PAGES)\n\t\t]\n\t\t# Filter pages that cannot be exported due to other options\n\t\tself.page_content = [pc for pc in self.page_content if (not(\n\t\t\t(pc == PAGE_FAMILY_INDEX and not self.inc_families) or\n\t\t\t(pc == PAGE_MEDIA_INDEX and not self.inc_gallery) or\n\t\t\t(pc == PAGE_SOURCE_INDEX and not self.inc_sources) or\n\t\t\t(pc == PAGE_REPOSITORY_INDEX and not self.inc_repositories) or\n\t\t\t(pc == PAGE_PLACE_INDEX and not self.inc_places)\n\t\t))]\n\t\tself.pages_number = len(self.page_content)\n\n\t\tself._backend = HtmlBackend()\n\t\tself._backend.build_link = self.build_link\n\n\n\tdef write_report(self):\n\t\t\"\"\"\n\t\tReport generation\n\t\t\"\"\"\n\t\t\n\t\t# Initialize the logger\n\t\t# This initialization shall be performed after Gramps has start-up\n\t\t# import importlib\n\t\t# logging = importlib.reload(logging)\n\t\t# global log\n\t\t# log = logging.getLogger(\".DynamicWeb\")\n\t\t\n\t\t# Create directory\n\t\tdir_name = self.target_path\n\t\tif dir_name is None:\n\t\t\tdir_name = get_curr_dir()\n\t\telif not os.path.isdir(dir_name):\n\t\t\tparent_dir = os.path.dirname(dir_name)\n\t\t\tif not os.path.isdir(parent_dir):\n\t\t\t\tmsg = _(\"Neither %(current)s nor %(parent)s are directories\") % \\\n\t\t\t\t\t {'current': dir_name, 'parent': parent_dir}\n\t\t\t\tself.user.notify_error(msg)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tos.mkdir(dir_name)\n\t\t\t\texcept IOError as value:\n\t\t\t\t\tmsg = _(\"Could not create the directory: %(path)s\") % {\n\t\t\t\t\t\t \"path\": dir_name + \"\\n\" + value[1]}\n\t\t\t\t\tself.user.notify_error(msg)\n\t\t\t\t\treturn\n\t\t\t\texcept:\n\t\t\t\t\tmsg = _(\"Could not create the directory: %(path)s\") % {\"path\": 
	def _export_individuals(self):
		"""
		Export individuals data in Javascript file
		The individuals data is stored in the Javascript Array "I"
		"""
		sw = StringIO()
		# The leading comment block below is written into the generated
		# Javascript file and documents the layout of each "I" entry.
		sw.write(
			"// This file is generated\n\n"
			"// 'I' is sorted by person name\n"
			"// 'I' gives for individual:\n"
			"// - Gramps ID\n"
			"// - The complete name\n"
			"// - The short name\n"
			"// - The names as a list of:\n"
			"// [full name, type, title, nick, call, given, suffix, list of surnames, family nickname,\n"
			"// notes, list of the name source citations index (in table 'C')]\n"
			"// - The gender\n"
			"// - The birth year in the form '1700', '?' (date unknown)\n"
			"// - The birth place\n"
			"// - The death year in the form '1700', '?' (date unknown), '' (not dead)\n"
			"// - The death place\n"
			"// - The death age\n"
			"// - A list of events, with for each event:\n"
			"// - The event name\n"
			"// - The event date\n"
			"// - The event date in ISO format (sortable)\n"
			"// - The event place index (in table 'P'), -1 if none\n"
			"// - The event description\n"
			"// - The event text and notes (including event reference notes)\n"
			"// - A list of the event media index, in the form:\n"
			"// - media index (in table 'M')\n"
			"// - media thumbnail path\n"
			"// - [x1, y1, x2, y2] of the media reference\n"
			"// - notes of the media reference\n"
			"// - list of the media reference source citations index (in table 'C')\n"
			"// - A list of the event source citations index (in table 'C')\n"
			"// - A list of addresses, with for each address:\n"
			"// - The address date\n"
			"// - The address date in ISO format (sortable)\n"
			"// - The address place in the form:\n"
			"// [street, locality, parish, city, state, county, zip, country]\n"
			"// - The address notes\n"
			"// - A list of the address source citations index (in table 'C')\n"
			"// - The person notes\n"
			"// - A list of the person media references, in the form:\n"
			"// - media index (in table 'M')\n"
			"// - media thumbnail path\n"
			"// - [x1, y1, x2, y2] of the media reference\n"
			"// - notes of the media reference\n"
			"// - list of the media reference source citations index (in table 'C')\n"
			"// - A list of the person source citations index (in table 'C')\n"
			"// - The list of the person attributes in the form:\n"
			"// [attribute, value, note, list of citations]\n"
			"// - The list of the person URL in the form:\n"
			"// [type, url, description]\n"
			"// - A list of partners families index (in table 'F')\n"
			"// - A list of parents families in the form:\n"
			"// [index (in table 'F'), relation to father, relation to mother, notes, list of citations]\n"
			"// - A list of associations in the form:\n"
			"// [person index (in table 'I'), relationship, notes, list of citations (in table 'C')]\n"
			"I = [")
		sep = "\n"
		# Output order is the OBJDICT_INDEX order computed by _sort_obj_dict
		person_list = list(self.obj_dict[Person].keys())
		person_list.sort(key = lambda x: self.obj_dict[Person][x][OBJDICT_INDEX])
		for person_handle in person_list:
			person = self.database.get_person_from_handle(person_handle)
			sw.write(sep)
			sw.write("[\"" + self.obj_dict[Person][person_handle][OBJDICT_GID] + "\",")
			# Names
			name = self.get_name(person) or ""
			sw.write("\"" + script_escape(name) + "\",")
			name = self.get_short_name(person) or ""
			sw.write("\"" + script_escape(name) + "\",\n")
			sw.write(self.get_name_data(person) + ",\n")
			# Gender, encoded as "M" / "F" / "U" / "" (no recognized gender)
			gender = ""
			if (person.get_gender() == Person.MALE): gender = "M"
			if (person.get_gender() == Person.FEMALE): gender = "F"
			if (person.get_gender() == Person.UNKNOWN): gender = "U"
			sw.write("\"" + gender + "\",")
			# Years
			sw.write("\"" + self.get_birth_year(person) + "\",\n")
			sw.write("\"" + self.get_birth_place(person) + "\",\n")
			sw.write("\"" + self.get_death_year(person) + "\",\n")
			sw.write("\"" + self.get_death_place(person) + "\",\n")
			# Age at death
			sw.write("\"" + script_escape(self.get_death_age(person)) + "\",\n")
			# Events
			sw.write("[\n" + self._data_events(person) + "\n],\n")
			# Addresses
			sw.write("[\n" + self._data_addresses(person) + "\n],\n")
			# Get individual notes
			sw.write("\"" + script_escape(self.get_notes_text(person)) + "\",\n")
			# Get individual media
			sw.write(self._data_media_reference_index(person))
			sw.write(",\n")
			# Get individual sources
			sw.write(self._data_source_citation_index(person))
			sw.write(",\n")
			# Get individual attributes
			sw.write(self._data_attributes(person))
			sw.write(",\n")
			# Get individual URL
			sw.write(self._data_url_list(person))
			sw.write(",\n")
			# Families (partners)
			sw.write(self._data_families_index(person))
			sw.write(",\n")
			# Families (parents)
			sw.write(self._data_parents_families_index(person))
			sw.write(",\n")
			# Associations
			sw.write(self._data_associations(person))
			sw.write("\n]")
			#
			sep = ",\n"
		sw.write("\n];\n")
		self.update_file("dwr_db_indi.js", sw.getvalue())
\"U\"\n\t\t\tsw.write(\"\\\"\" + gender + \"\\\",\")\n\t\t\t# Years\n\t\t\tsw.write(\"\\\"\" + self.get_birth_year(person) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + self.get_birth_place(person) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + self.get_death_year(person) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + self.get_death_place(person) + \"\\\",\\n\")\n\t\t\t# Age at death\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_death_age(person)) + \"\\\",\\n\")\n\t\t\t# Events\n\t\t\tsw.write(\"[\\n\" + self._data_events(person) + \"\\n],\\n\")\n\t\t\t# Addresses\n\t\t\tsw.write(\"[\\n\" + self._data_addresses(person) + \"\\n],\\n\")\n\t\t\t# Get individual notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(person)) + \"\\\",\\n\")\n\t\t\t# Get individual media\n\t\t\tsw.write(self._data_media_reference_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get individual sources\n\t\t\tsw.write(self._data_source_citation_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get individual attributes\n\t\t\tsw.write(self._data_attributes(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get individual URL\n\t\t\tsw.write(self._data_url_list(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Families (partners)\n\t\t\tsw.write(self._data_families_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Families (parents)\n\t\t\tsw.write(self._data_parents_families_index(person))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Associations\n\t\t\tsw.write(self._data_associations(person))\n\t\t\tsw.write(\"\\n]\")\n\t\t\t#\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_indi.js\", sw.getvalue())\n\n\n\tdef get_name_data(self, person):\n\t\tprimary_name = person.get_primary_name()\n\t\tall_names = [primary_name] + person.get_alternate_names()\n\t\tfirst_name = primary_name.get_first_name()\n\t\ttext = \"[\"\n\t\tsep = \"\"\n\t\tfor name in all_names:\n\t\t\ttext += sep + \"[\"\n\t\t\tname.set_display_as(self.name_format)\n\t\t\tpname = 
_nd.display_name(name)\n\t\t\ttext += \"\\\"\" + script_escape(pname) + \"\\\",\"\n\t\t\t# Type\n\t\t\ttext += \"\\\"\" + script_escape(str(name.get_type())) + \"\\\",\"\n\t\t\t# Title\n\t\t\ttitle = name.get_title() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(title)) + \"\\\",\"\n\t\t\t# Nickname\n\t\t\tnick_name = name.get_nick_name()\n\t\t\tif (nick_name == first_name or not nick_name): nick_name = \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(nick_name)) + \"\\\",\"\n\t\t\t# Callname\n\t\t\tcall_name = name.get_call_name()\n\t\t\tif (call_name == first_name or not call_name): call_name = \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(call_name)) + \"\\\",\"\n\t\t\t# Given\n\t\t\tgiven = name.get_first_name() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(given)) + \"\\\",\"\n\t\t\t# Suffix\n\t\t\tsuffix = name.get_suffix() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(suffix)) + \"\\\",\"\n\t\t\t# Surnames\n\t\t\tsurnames = name.get_surname_list()\n\t\t\ttext += \"[\" + \",\".join([\n\t\t\t\t\"\\\"\" + script_escape(str(surname.get_surname() or \"\")) + \"\\\"\"\n\t\t\t\tfor surname in surnames]) + \"],\"\n\t\t\t# Family nickname\n\t\t\tfnick = name.get_family_nick_name() or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(str(fnick)) + \"\\\",\"\n\t\t\t# Get name date\n\t\t\tdatetext = format_date(name.date) or \"\"\n\t\t\ttext += \"\\\"\" + script_escape(datetext) + \"\\\",\"\n\t\t\t# Get name notes\n\t\t\ttext += \"\\\"\" + script_escape(self.get_notes_text(name)) + \"\\\",\"\n\t\t\t# Get name sources\n\t\t\ttext += self._data_source_citation_index(name) + \"]\"\n\t\t\tsep = \",\"\n\t\ttext += \"]\"\n\t\treturn(text)\n\n\n\tdef get_name_object(self, person, maiden_name = None):\n\t\t\"\"\"\n\t\tReturn person's name, unless maiden_name given, unless married_name\n\t\tlisted.\n\t\t@param: person -- person object from database\n\t\t@param: maiden_name -- Female's family surname\n\t\t\"\"\"\n\t\t# Get all of a person's names\n\t\tprimary_name 
= person.get_primary_name()\n\t\tmarried_name = None\n\t\tnames = [primary_name] + person.get_alternate_names()\n\t\tfor name in names:\n\t\t\tif int(name.get_type()) == NameType.MARRIED:\n\t\t\t\tmarried_name = name\n\t\t\t\tbreak # use first\n\t\t# Now, decide which to use:\n\t\tif maiden_name is not None:\n\t\t\tif married_name is not None:\n\t\t\t\tname = Name(married_name)\n\t\t\telse:\n\t\t\t\tname = Name(primary_name)\n\t\t\t\tsurname_obj = name.get_primary_surname()\n\t\t\t\tsurname_obj.set_surname(maiden_name)\n\t\telse:\n\t\t\tname = Name(primary_name)\n\t\treturn(name)\n\n\n\tdef get_name(self, person, maiden_name = None):\n\t\tname = self.get_name_object(person, maiden_name)\n\t\tname.set_display_as(self.name_format)\n\t\treturn _nd.display_name(name)\n\n\n\tdef get_short_name(self, person, maiden_name = None):\n\t\tname = self.get_name_object(person, maiden_name)\n\t\tname.set_display_as(self.short_name_format)\n\t\treturn _nd.display_name(name)\n\n\n\tdef _export_families(self):\n\t\t\"\"\"\n\t\tExport families data in Javascript file\n\t\tThe families data is stored in the Javascript Array \"F\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'F' is sorted by family full name\\n\"\n\t\t\t\"// 'F' gives for each family:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The family full name\\n\"\n\t\t\t\"// - The family union type\\n\"\n\t\t\t\"// - The marriage year in the form '1700', '?' 
(unknown), or '' (not married)\\n\"\n\t\t\t\"// - The marriage place\"\n\t\t\t\"// - A list of events, with for each event:\\n\"\n\t\t\t\"// - The event name\\n\"\n\t\t\t\"// - The event date\\n\"\n\t\t\t\"// - The event date in ISO format (sortable)\\n\"\n\t\t\t\"// - The event place index (in table 'P'), -1 if none\\n\"\n\t\t\t\"// - The event description\\n\"\n\t\t\t\"// - The event text and notes (including event reference notes)\\n\"\n\t\t\t\"// - A list of the event media references, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the event source citations index (in table 'C')\\n\"\n\t\t\t\"// - The family notes\\n\"\n\t\t\t\"// - A list of the family media references, in the form:\\n\"\n\t\t\t\"// - media index (in table 'M')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the family source citations index (in table 'C')\\n\"\n\t\t\t\"// - The list of the family attributes in the form:\\n\"\n\t\t\t\"// [attribute, value, note, list of citations]\\n\"\n\t\t\t\"// - A list of spouses index (in table 'I')\\n\"\n\t\t\t\"// - A list of child in the form:\\n\"\n\t\t\t\"// [index (in table 'I'), relation to father, relation to mother, notes, list of citations]\\n\"\n\t\t\t\"F = [\")\n\t\tsep = \"\\n\"\n\t\tfamily_list = list(self.obj_dict[Family].keys())\n\t\tfamily_list.sort(key = lambda x: self.obj_dict[Family][x][OBJDICT_INDEX])\n\t\tfor family_handle in family_list:\n\t\t\tfamily = self.database.get_family_from_handle(family_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + 
	def _data_events(self, object):
		"""
		Build events data related to L{object} in a string representing a Javascript Array
		L{object} could be: a person or a family
		@return: events as a string representing a Javascript Array
		"""
		# Builds an event list that gives for each event:
		# - Gramps ID
		# - The event name
		# - The event date
		# - The event date in ISO format (sortable)
		# - The event place index (in table 'P'), -1 if none
		# - The event description
		# - The event text and notes (including event reference notes)
		# - A list of the event media index, in the form:
		#   - media index (in table 'M')
		#   - media thumbnail path
		#   - [x1, y1, x2, y2] of the media reference
		#   - notes of the media reference
		#   - list of the media reference source citations index (in table 'C')
		# - A list of the event source citations index (in table 'C')
		event_ref_list = object.get_event_ref_list()
		if not event_ref_list: return("")
		rows = []
		for event_ref in event_ref_list:
			# Skip events that are not part of the report selection
			if (event_ref.ref not in self.obj_dict[Event]): continue
			event = self.database.get_event_from_handle(event_ref.ref)
			if (not event): continue
			trow = "\t["
			evt_type = str(event.get_type())
			# Append the role to the event name for non-default roles
			event_role = event_ref.get_role()
			if (event_role != EventRoleType.PRIMARY and event_role != EventRoleType.FAMILY):
				evt_type += " (%s)" % event_role
			place_index = -1
			place_handle = event.get_place_handle()
			if (place_handle and (place_handle in self.obj_dict[Place])):
				place_index = self.obj_dict[Place][place_handle][OBJDICT_INDEX]
			evt_desc = event.get_description()
			trow += "\"" + self.obj_dict[Event][event_ref.ref][OBJDICT_GID] + "\","
			trow += "\"" + script_escape(html_escape(evt_type)) + "\","
			evt_date = format_date(event.get_date_object())
			trow += "\"" + script_escape(html_escape(evt_date)) + "\","
			# NOTE(review): this passes gedcom=True, not iso=True (the ISO
			# export in format_date is still a TODO) — confirm intended
			evt_date = format_date(event.get_date_object(), True)
			trow += "\"" + script_escape(html_escape(evt_date)) + "\","
			trow += str(place_index) + ","
			if (evt_desc is None): evt_desc = ""
			trow += "\"" + script_escape(html_escape(evt_desc)) + "\","
			# Get event notes (event notes + event reference notes)
			notelist = event.get_note_list()
			notelist.extend(event_ref.get_note_list())
			attrlist = event.get_attribute_list()
			attrlist.extend(event_ref.get_attribute_list())
			trow += "\"" + script_escape(self.get_notes_attributes_text(notelist, attrlist)) + "\","
			# Get event media
			trow += self._data_media_reference_index(event)
			trow += ","
			# Get event sources (event + event reference + attributes citations)
			citationlist = event.get_citation_list()
			citationlist.extend(event_ref.get_citation_list())
			for attr in attrlist: citationlist.extend(attr.get_citation_list())
			trow += self._data_source_citation_index_from_list(citationlist)
			#
			trow += "]"
			rows.append(trow)
		return(",\n".join(rows))
	def _data_addresses(self, object):
		"""
		Export addresses data related to L{object} in a string representing a Javascript Array
		L{object} could be: a person or a repository
		@return: addresses as a string representing a Javascript Array
		"""
		# Builds an address list that gives for each address:
		# - The address date
		# - The address date in ISO format (sortable)
		# - The address place in the form:
		#   [street, locality, parish, city, state, county, zip, country, phone]
		# - The address notes
		# - A list of the address source citations index (in table 'C')
		if (not self.inc_addresses): return("")
		addrlist = object.get_address_list()
		if not addrlist: return("")
		rows = []
		for addr in addrlist:
			text = "\t["
			addr_date = format_date(addr.get_date_object())
			text += "\"" + script_escape(html_escape(addr_date)) + "\","
			# NOTE(review): gedcom=True used as the "sortable" form (ISO
			# export in format_date is still a TODO)
			addr_date = format_date(addr.get_date_object(), True)
			text += "\"" + script_escape(html_escape(addr_date)) + "\","
			addr_data = [
				addr.get_street(),
				addr.get_locality(),
				"",
				addr.get_city(),
				addr.get_state(),
				addr.get_county(),
				addr.get_postal_code(),
				addr.get_country(),
				addr.get_phone(),
			]
			text += "[\"" + "\",\"".join([script_escape(data) for data in addr_data]) + "\"],"
			# Get address notes
			text += "\"" + script_escape(self.get_notes_text(addr)) + "\","
			# Get address sources
			text += self._data_source_citation_index(addr) + "]"
			rows.append(text)
		return(",\n".join(rows))
	def _export_sources(self):
		"""
		Export sources data in Javascript file
		The sources data is stored in the Javascript Array "S"
		"""
		sw = StringIO()
		# The leading comment block below is written into the generated
		# Javascript file and documents the layout of each "S" entry.
		sw.write(
			"// This file is generated\n\n"
			"// 'S' is sorted by source title\n"
			"// 'S' gives for each source:\n"
			"// - Gramps ID\n"
			"// - The source title\n"
			"// - The source text (author, etc.)\n"
			"// - The source notes\n"
			"// - A list of the source media references, in the form:\n"
			"// - media index (in table 'M')\n"
			"// - media thumbnail path\n"
			"// - [x1, y1, x2, y2] of the media reference\n"
			"// - notes of the media reference\n"
			"// - list of the media reference source citations index (in table 'C')\n"
			"// - A list of the citations index (in table 'C') referencing this source\n"
			"// - A list of the repositories for this source, in the form:\n"
			"// - repository index (in table 'R')\n"
			"// - media type\n"
			"// - call number\n"
			"// - notes of the repository reference\n"
			"// - The list of the sources attributes in the form:\n"
			"// [attribute, value, note, list of citations]\n"
			"S = [")
		sep = "\n"
		source_list = list(self.obj_dict[Source])
		if (not self.inc_sources): source_list = []
		source_list.sort(key = lambda x: self.obj_dict[Source][x][OBJDICT_INDEX])
		for source_handle in source_list:
			source = self.database.get_source_from_handle(source_handle)
			sw.write(sep)
			sw.write("[\"" + self.obj_dict[Source][source_handle][OBJDICT_GID] + "\",")
			title = source.get_title() or ""
			sw.write("\"" + script_escape(html_escape(title)) + "\",\n")
			# Source text: one HTML paragraph per non-empty field
			sw.write("\"")
			for (label, value) in [
				(_("Author"), source.get_author()),
				(_("Abbreviation"), source.get_abbreviation()),
				(_("Publication information"), source.get_publication_info())]:
				if value:
					html = Html("p") + Html("b", label + ": ") + value
					sw.write(script_escape(html_text(html)))
			sw.write("\",\n")
			# Get source notes
			sw.write("\"" + script_escape(self.get_notes_text(source)) + "\",\n")
			# Get source media
			sw.write(self._data_media_reference_index(source))
			sw.write(",\n")
			# Get source citations
			sw.write(self._data_bkref_index(Source, source_handle, Citation))
			sw.write(",\n")
			# Get repositories references
			sw.write(self._data_repo_reference_index(source))
			sw.write(",\n")
			# Get source attributes
			# NOTE(review): the DWR_VERSION_410 flag guards an API only
			# available from a given Gramps version — confirm where defined
			if (DWR_VERSION_410):
				sw.write(self._data_attributes_src(source))
			else:
				sw.write("[]")
			sw.write("\n]")
			sep = ",\n"
		sw.write("\n];\n")
		self.update_file("dwr_db_sour.js", sw.getvalue())
	def _export_citations(self):
		"""
		Export citations data in Javascript file
		The citations data is stored in the Javascript Array "C"
		"""
		sw = StringIO()
		# The leading comment block below is written into the generated
		# Javascript file and documents the layout of each "C" entry.
		sw.write(
			"// This file is generated\n\n"
			"// 'C' gives for each source citation:\n"
			"// - Gramps ID\n"
			"// - The source index (in table 'S')\n"
			"// - The citation text (page, etc.)\n"
			"// - The citation notes\n"
			"// - A list of the citation media references, in the form:\n"
			"// - media index (in table 'M')\n"
			"// - media thumbnail path\n"
			"// - [x1, y1, x2, y2] of the media reference\n"
			"// - A list of the person index (in table 'I') referencing this citation\n"
			"// (including the person events referencing this citation)\n"
			"// - A list of the family index (in table 'F') referencing this citation\n"
			"// (including the family events referencing this citation)\n"
			"// - A list of the media index (in table 'M') referencing this citation\n"
			"// (including the media references referencing this citation)\n"
			"// - A list of the place index (in table 'P') referencing this citation\n"
			"// (including the media references referencing this citation)\n"
			"// - A list of the repository index (in table 'R') referencing this citation\n"
			"C = [")
		sep = "\n"
		citation_list = list(self.obj_dict[Citation])
		if (not self.inc_sources): citation_list = []
		citation_list.sort(key = lambda x: self.obj_dict[Citation][x][OBJDICT_INDEX])
		for citation_handle in citation_list:
			citation = self.database.get_citation_from_handle(citation_handle)
			source_handle = citation.get_reference_handle()
			sw.write(sep)
			sw.write("[\"" + self.obj_dict[Citation][citation_handle][OBJDICT_GID] + "\",")
			sw.write(str(self.obj_dict[Source][source_handle][OBJDICT_INDEX])+ ",\n")
			# Citation text: one HTML paragraph per non-empty field;
			# the confidence is only printed when it differs from "normal"
			sw.write("\"")
			confidence = citation.get_confidence_level()
			if ((confidence in conf_strings) and confidence != Citation.CONF_NORMAL):
				confidence = _(conf_strings[confidence])
			else:
				confidence = None
			for (label, value) in [
				(_("Date"), format_date(citation.get_date_object())),
				(_("Page"), citation.get_page()),
				(_("Confidence"), confidence),
			]:
				if value:
					html = Html("p") + Html("b", label + ": ") + value
					sw.write(script_escape(html_text(html)))
			sw.write("\",\n")
			# Get citation notes
			sw.write("\"" + script_escape(self.get_notes_text(citation)) + "\",\n")
			# Get citation media
			sw.write(self._data_media_reference_index(citation))
			sw.write(",\n")
			# Get references (back-references from persons, families,
			# media, places and repositories)
			sw.write(self._data_bkref_index(Citation, citation_handle, Person))
			sw.write(",\n")
			sw.write(self._data_bkref_index(Citation, citation_handle, Family))
			sw.write(",\n")
			sw.write(self._data_bkref_index(Citation, citation_handle, MediaObject))
			sw.write(",\n")
			sw.write(self._data_bkref_index(Citation, citation_handle, Place))
			sw.write(",\n")
			sw.write(self._data_bkref_index(Citation, citation_handle, Repository))
			sw.write("\n]")
			sep = ",\n"
		sw.write("\n];\n")
		self.update_file("dwr_db_cita.js", sw.getvalue())
media\n\t\t\tsw.write(self._data_media_reference_index(citation))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get references\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Person))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Family))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, MediaObject))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Place))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Citation, citation_handle, Repository))\n\t\t\tsw.write(\"\\n]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_cita.js\", sw.getvalue())\n\n\n\tdef _export_repositories(self):\n\t\t\"\"\"\n\t\tExport repositories data in Javascript file\n\t\tThe repositories data is stored in the Javascript Array \"R\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'R' is sorted by repository name\\n\"\n\t\t\t\"// 'R' gives for each repository:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The repository name\\n\"\n\t\t\t\"// - The repository type\\n\"\n\t\t\t\"// - A list of addresses, with for each address:\\n\"\n\t\t\t\"// - The address date\\n\"\n\t\t\t\"// - The address date in ISO format (sortable)\\n\"\n\t\t\t\"// - The address place in the form:\\n\"\n\t\t\t\"// [street, locality, parish, city, state, county, zip, country]\\n\"\n\t\t\t\"// - The address notes\\n\"\n\t\t\t\"// - A list of the address source citations index (in table 'C')\\n\"\n\t\t\t\"// - The repository notes\\n\"\n\t\t\t\"// - The list of the repository URL in the form:\\n\"\n\t\t\t\"// [type, url, description]\\n\"\n\t\t\t\"// - A list of the sources referencing this repository, in the form:\\n\"\n\t\t\t\"// - source index (in table 'S')\\n\"\n\t\t\t\"// - media type\\n\"\n\t\t\t\"// - call number\\n\"\n\t\t\t\"// - notes of the repository 
reference\\n\"\n\t\t\t\"R = [\")\n\t\tsep = \"\\n\"\n\t\trepo_list = list(self.obj_dict[Repository])\n\t\tif (not self.inc_repositories): repo_list = []\n\t\trepo_list.sort(key = lambda x: self.obj_dict[Repository][x][OBJDICT_INDEX])\n\t\tfor repo_handle in repo_list:\n\t\t\trepo = self.database.get_repository_from_handle(repo_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Repository][repo_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\tname = repo.get_name() or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(name) + \"\\\",\\n\")\n\t\t\ttype = repo.get_type() or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(str(type)) + \"\\\",\\n\")\n\t\t\t# Addresses\n\t\t\tsw.write(\"[\\n\" + self._data_addresses(repo) + \"\\n],\\n\")\n\t\t\t# Get repository notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(repo)) + \"\\\",\\n\")\n\t\t\t# Get repository URL\n\t\t\tsw.write(self._data_url_list(repo))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get source references\n\t\t\tsw.write(self._data_repo_backref_index(repo, Source))\n\t\t\tsw.write(\"\\n]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_repo.js\", sw.getvalue())\n\n\n\tdef _export_media(self):\n\t\t\"\"\"\n\t\tExport media data in Javascript file\n\t\tThe media data is stored in the Javascript Array \"M\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'M' is sorted by media title\\n\"\n\t\t\t\"// 'M' gives for each media object:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The media title\\n\"\n\t\t\t\"// - The media path in Gramps\\n\"\n\t\t\t\"// - The media path were the media is really located\\n\"\n\t\t\t\"// - The media MIME type\\n\"\n\t\t\t\"// - The media date\\n\"\n\t\t\t\"// - The media date in ISO format (sortable)\\n\"\n\t\t\t\"// - The media notes\\n\"\n\t\t\t\"// - A list of the media source citations index (in table 'C')\\n\"\n\t\t\t\"// - The list of the media attributes in the 
form:\\n\"\n\t\t\t\"// [attribute, value, note, list of citations]\\n\"\n\t\t\t\"// - Media thumbnail path\\n\"\n\t\t\t\"// - A list of the person referencing this media (including the person events referencing this media), in the form:\\n\"\n\t\t\t\"// - person index (in table 'I')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the family referencing this media (including the family events referencing this media), in the form:\\n\"\n\t\t\t\"// - family index (in table 'F')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the source referencing this media (including the source citations referencing this media), in the form:\\n\"\n\t\t\t\"// - source index (in table 'S')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"// - A list of the places referencing this media, in the form:\\n\"\n\t\t\t\"// - place index (in table 'P')\\n\"\n\t\t\t\"// - media thumbnail path\\n\"\n\t\t\t\"// - [x1, y1, x2, y2] of the media reference\\n\"\n\t\t\t\"// - notes of the media reference\\n\"\n\t\t\t\"// - list of the media reference source citations index (in table 'C')\\n\"\n\t\t\t\"M = [\")\n\t\tsep = \"\\n\"\n\t\tmedia_list = list(self.obj_dict[MediaObject])\n\t\tif (not self.inc_gallery): media_list = []\n\t\tmedia_list.sort(key = lambda x: self.obj_dict[MediaObject][x][OBJDICT_INDEX])\n\t\tfor media_handle in media_list:\n\t\t\tmedia = 
self.database.get_object_from_handle(media_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[MediaObject][media_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\ttitle = media.get_description() or \"\"\n\t\t\tsw.write(\"\\\"\" + script_escape(html_escape(title)) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + script_escape(media.get_path()) + \"\\\",\\n\")\n\t\t\tpath = self.get_media_web_path(media)\n\t\t\tsw.write(\"\\\"\" + script_escape(path) + \"\\\",\\n\")\n\t\t\tsw.write(\"\\\"\" + script_escape(media.get_mime_type()) + \"\\\",\\n\")\n\t\t\t# Get media date\n\t\t\tdate = format_date(media.get_date_object()) or \"\"\n\t\t\tsw.write(\"\\\"\" + date + \"\\\",\\n\")\n\t\t\tdate = format_date(media.get_date_object(), True) or \"\"\n\t\t\tsw.write(\"\\\"\" + date + \"\\\",\\n\")\n\t\t\t# Get media notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(media)) + \"\\\",\\n\")\n\t\t\t# Get media sources\n\t\t\tsw.write(self._data_source_citation_index(media))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get media attributes\n\t\t\tsw.write(self._data_attributes(media))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get media thumbnail\n\t\t\tsw.write(\"\\\"\" + self.copy_thumbnail(media, (0,0,100,100)) + \"\\\",\\n\")\n\t\t\t# Get media references\n\t\t\tsw.write(self._data_media_backref_index(media, Person))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_media_backref_index(media, Family))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_media_backref_index(media, Source))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_media_backref_index(media, Place))\n\t\t\tsw.write(\"\\n\")\n\t\t\tsw.write(\"]\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_media.js\", sw.getvalue())\n\n\n\tdef _export_places(self):\n\t\t\"\"\"\n\t\tExport places data in Javascript file\n\t\tThe places data is stored in the Javascript Array \"P\"\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\n\t\t\t\"// This file is 
generated\\n\\n\"\n\t\t\t\"// 'P' is sorted by place name\\n\"\n\t\t\t\"// 'P' gives for each media object:\\n\"\n\t\t\t\"// - Gramps ID\\n\"\n\t\t\t\"// - The place name\\n\"\n\t\t\t\"// - The place locations parts for the main and alternate names, in the form:\\n\"\n\t\t\t\"// (index 0 is main name, others are for alternate names)\\n\"\n\t\t\t\"// [street, locality, parish, city, state, county, zip, country]\\n\"\n\t\t\t\"// - The coordinates [latitude, longitude]\\n\\n\"\n\t\t\t\"// - The place notes\\n\"\n\t\t\t\"// - A list of the place source citations index (in table 'C')\\n\"\n\t\t\t\"// - The list of the place URL in the form:\\n\"\n\t\t\t\"// [type, url, description]\\n\"\n\t\t\t\"// - A list of the person index (in table 'I') for events referencing this place\\n\"\n\t\t\t\"// (including the persons directly referencing this place)\\n\"\n\t\t\t\"// - A list of the family index (in table 'F') for events referencing this place\\n\"\n\t\t\t\"P=[\")\n\t\tsep = \"\\n\"\n\t\tplace_list = list(self.obj_dict[Place])\n\t\tplace_list.sort(key = lambda x: self.obj_dict[Place][x][OBJDICT_INDEX])\n\t\tfor place_handle in place_list:\n\t\t\tplace = self.database.get_place_from_handle(place_handle)\n\t\t\tsw.write(sep)\n\t\t\tsw.write(\"[\\\"\" + self.obj_dict[Place][place_handle][OBJDICT_GID] + \"\\\",\")\n\t\t\tplace_name = report_utils.place_name(self.database, place_handle)\n\t\t\tsw.write(\"\\\"\" + script_escape(place_name) + \"\\\"\")\n\t\t\tif (not self.inc_places):\n\t\t\t\tsw.write(\"]\")\n\t\t\t\tsep = \",\\n\"\n\t\t\t\tcontinue\n\t\t\tsw.write(\",\\n\")\n\t\t\tlocations = []\n\t\t\tif (DWR_VERSION_410):\n\t\t\t\tml = get_main_location(self.database, place)\n\t\t\t\tloc = Location()\n\t\t\t\tloc.street = ml.get(PlaceType.STREET, '')\n\t\t\t\tloc.locality = ml.get(PlaceType.LOCALITY, '')\n\t\t\t\tloc.city = ml.get(PlaceType.CITY, '')\n\t\t\t\tloc.parish = ml.get(PlaceType.PARISH, '')\n\t\t\t\tloc.county = ml.get(PlaceType.COUNTY, '')\n\t\t\t\tloc.state = 
ml.get(PlaceType.STATE, '')\n\t\t\t\tloc.postal = place.get_code()\n\t\t\t\tloc.country = ml.get(PlaceType.COUNTRY, '')\n\t\t\t\tlocations.append(loc)\n\t\t\telse:\n\t\t\t\tif (place.main_loc):\n\t\t\t\t\tml = place.get_main_location()\n\t\t\t\t\tif (ml and not ml.is_empty()): locations.append(ml)\n\t\t\taltloc = place.get_alternate_locations()\n\t\t\tif (altloc):\n\t\t\t\taltloc = [nonempt for nonempt in altloc if (not nonempt.is_empty())]\n\t\t\t\tlocations += altloc\n\t\t\tloctabs = []\n\t\t\tfor loc in locations:\n\t\t\t\tloctab = [\n\t\t\t\t\tloc.street,\n\t\t\t\t\tloc.locality,\n\t\t\t\t\tloc.city,\n\t\t\t\t\tloc.parish,\n\t\t\t\t\tloc.county,\n\t\t\t\t\tloc.state,\n\t\t\t\t\tloc.postal,\n\t\t\t\t\tloc.country,\n\t\t\t\t]\n\t\t\t\tloctab = [(data or \"\") for data in loctab]\n\t\t\t\tloctab = [\"\\\"\" + script_escape(data) + \"\\\"\" for data in loctab]\n\t\t\t\tloctabs.append(\"[\" + \",\".join(loctab) + \"]\")\n\t\t\tsw.write(\"[\" + \",\".join(loctabs) + \"],\\n\")\n\t\t\tlatitude = place.get_latitude()\n\t\t\tlongitude = place.get_longitude()\n\t\t\tif (latitude and longitude):\n\t\t\t\tcoords = conv_lat_lon(latitude, longitude, \"D.D8\")\n\t\t\telse:\n\t\t\t\tcoords = (\"\", \"\")\n\t\t\tsw.write(\"[\\\"\" + \"\\\",\\\"\".join(coords) + \"\\\"]\\n,\")\n\t\t\t# Get place notes\n\t\t\tsw.write(\"\\\"\" + script_escape(self.get_notes_text(place)) + \"\\\",\\n\")\n\t\t\t# Get place media\n\t\t\tsw.write(self._data_media_reference_index(place))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get place sources\n\t\t\tsw.write(self._data_source_citation_index(place))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get place URL\n\t\t\tsw.write(self._data_url_list(place))\n\t\t\tsw.write(\",\\n\")\n\t\t\t# Get back references\n\t\t\tsw.write(self._data_bkref_index(Place, place_handle, Person))\n\t\t\tsw.write(\",\\n\")\n\t\t\tsw.write(self._data_bkref_index(Place, place_handle, Family))\n\t\t\tsw.write(\"\\n]\")\n\t\t\tsep = 
\",\\n\"\n\t\tsw.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_place.js\", sw.getvalue())\n\n\n\tdef get_notes_text(self, object):\n\t\tif (not self.inc_notes): return(\"\")\n\t\tnotelist = object.get_note_list()\n\t\thtmllist = self.dump_notes(notelist)\n\t\tif (not htmllist): return(\"\")\n\t\treturn(html_text(htmllist))\n\n\n\tdef get_notes_attributes_text(self, notelist, attrlist):\n\t\tif (not self.inc_notes): return(\"\")\n\t\t# Get notes\n\t\thtmllist = self.dump_notes(notelist)\n\t\t# Get attributes\n\t\tfor attr in attrlist:\n\t\t\tif (not htmllist): htmllist = Html(\"div\")\n\t\t\thtmllist.extend(Html(\n\t\t\t\t\"p\", _(\"%(type)s: %(value)s\") % {\n\t\t\t\t'type': Html(\"b\", attr.get_type()),\n\t\t\t\t'value': attr.get_value()\n\t\t\t\t}\n\t\t\t))\n\t\t\t# Also output notes attached to the attributes\n\t\t\tnotelist2 = attr.get_note_list()\n\t\t\thtmlnotelist = self.dump_notes(notelist2)\n\t\t\tif (htmlnotelist): htmllist.extend(htmlnotelist)\n\t\tif (not htmllist): return(\"\")\n\t\treturn(html_text(htmllist))\n\n\n\tdef dump_notes(self, notelist):\n\t\t\"\"\"\n\t\tdump out of list of notes with very little elements of its own\n\n\t\t@param: notelist -- list of notes\n\t\t\"\"\"\n\t\tnotesection = None\n\t\tif (not notelist): return(notesection)\n\t\tif (not self.inc_notes): return(notesection)\n\t\tfor note_handle in notelist:\n\t\t\tif (not notesection): notesection = Html(\"div\")\n\t\t\tthis_note = self.database.get_note_from_handle(note_handle)\n\t\t\tif this_note is not None:\n\t\t\t\tif (self.print_notes_type):\n\t\t\t\t\tnotesection.extend(Html(\"i\", str(this_note.type), class_=\"NoteType\"))\n\t\t\t\tnotesection.extend(self.get_note_format(this_note))\n\t\treturn(notesection)\n\n\tdef get_note_format(self, note):\n\t\t\"\"\"\n\t\twill get the note from the database, and will return either the\n\t\tstyled text or plain note\n\t\t\"\"\"\n\t\ttext = \"\"\n\t\tif note is not None:\n\t\t\t# retrieve the body of the note\n\t\t\tnote_text = 
note.get()\n\t\t\t# styled notes\n\t\t\thtmlnotetext = self.styled_note(note.get_styledtext(),\n\t\t\t\t\t\t\t\t\t\t\tnote.get_format(), contains_html =\n\t\t\t\t\t\t\t\t\t\t\tnote.get_type() == NoteType.HTML_CODE)\n\t\t\ttext = htmlnotetext or Html(\"p\", note_text)\n\t\t# return text of the note to its callers\n\t\treturn(text)\n\n\tdef styled_note(self, styledtext, format, contains_html=False):\n\t\t\"\"\"\n\t\tstyledtext : assumed a StyledText object to write\n\t\tformat : = 0 : Flowed, = 1 : Preformatted\n\t\tstyle_name : name of the style to use for default presentation\n\t\t\"\"\"\n\t\ttext = str(styledtext)\n\n\t\tif (not text): return('')\n\n\t\ts_tags = styledtext.get_tags()\n\t\thtmllist = Html(\"div\", class_=\"grampsstylednote\")\n\t\tif contains_html:\n\t\t\tmarkuptext = self._backend.add_markup_from_styled(text,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t s_tags,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t split='\\n',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t escape=False)\n\t\t\thtmllist += markuptext\n\t\telse:\n\t\t\tmarkuptext = self._backend.add_markup_from_styled(text,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t s_tags,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t split='\\n')\n\t\t\tlinelist = []\n\t\t\tlinenb = 1\n\t\t\tsigcount = 0\n\t\t\tfor line in markuptext.split('\\n'):\n\t\t\t\t[line, sigcount] = process_spaces(line, format)\n\t\t\t\tif sigcount == 0:\n\t\t\t\t\t# The rendering of an empty paragraph ''\n\t\t\t\t\t# is undefined so we use a non-breaking space\n\t\t\t\t\tif linenb == 1:\n\t\t\t\t\t\tlinelist.append(' ')\n\t\t\t\t\thtmllist.extend(Html('p') + linelist)\n\t\t\t\t\tlinelist = []\n\t\t\t\t\tlinenb = 1\n\t\t\t\telse:\n\t\t\t\t\tif linenb > 1:\n\t\t\t\t\t\tlinelist[-1] += ' '\n\t\t\t\t\tlinelist.append(line)\n\t\t\t\t\tlinenb += 1\n\t\t\tif linenb > 1:\n\t\t\t\thtmllist.extend(Html('p') + linelist)\n\t\t\t# if the last line was blank, then as well as outputting the previous para,\n\t\t\t# which we have just done,\n\t\t\t# we also output a new blank para\n\t\t\tif sigcount == 
0:\n\t\t\t\tlinelist = [\" \"]\n\t\t\t\thtmllist.extend(Html('p') + linelist)\n\t\treturn(htmllist)\n\n\n\tdef _data_source_citation_index(self, object):\n\t\t\"\"\"\n\t\tExport sources citations indexes related to L{object}\n\t\tSee L{_data_source_citation_index_from_list}\n\t\t\"\"\"\n\t\tcitationlist = object.get_citation_list()\n\t\treturn(self._data_source_citation_index_from_list(citationlist))\n\n\tdef _data_source_citation_index_from_list(self, citationlist):\n\t\t\"\"\"\n\t\tList sources citations indexes of the L{citationlist} in a string representing a Javascript Array\n\t\t@return: citations indexes as a string representing a Javascript Array\n\t\t\"\"\"\n\t\tif (not self.inc_sources): return(\"[]\")\n\t\tif not citationlist: return(\"[]\")\n\t\tsep = \"\"\n\t\ttxt = \"[\"\n\t\tfor citation_handle in citationlist:\n\t\t\tif (not txt): txt = Html(\"div\")\n\t\t\tcitation = self.database.get_citation_from_handle(citation_handle)\n\t\t\tif (citation is not None and (citation_handle in self.obj_dict[Citation])):\n\t\t\t\tsource_handle = citation.get_reference_handle()\n\t\t\t\tsource = self.database.get_source_from_handle(source_handle)\n\t\t\t\tif (source is not None and (source_handle in self.obj_dict[Source])):\n\t\t\t\t\ttitle = source.get_title()\n\t\t\t\t\tif (not title): title = source.get_gramps_id()\n\t\t\t\t\ttxt += sep + str(self.obj_dict[Citation][citation_handle][OBJDICT_INDEX])\n\t\t\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _data_repo_reference_index(self, object):\n\t\t\"\"\"\n\t\tBuild a list of the repositories references index, in the form given by L{_data_repo_ref}\n\t\t\"\"\"\n\t\tif (not self.inc_repositories): return(\"[]\")\n\t\trefs = object.get_reporef_list()\n\t\tif (not refs): return(\"[]\")\n\t\tsep = \"\\n\"\n\t\ttxt = \"[\"\n\t\tfor ref in refs:\n\t\t\trepo_handle = ref.get_reference_handle()\n\t\t\tif (repo_handle in self.obj_dict[Repository]):\n\t\t\t\ttxt += sep + \"\\t\" + self._data_repo_ref(ref, 
self.obj_dict[Repository][repo_handle][OBJDICT_INDEX])\n\t\t\t\tsep = \",\\n\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_repo_ref(self, ref, index):\n\t\t\"\"\"\n\t\tBuild a repository reference, in the form:\n\t\t - repository index (in table 'R')\n\t\t - media type\n\t\t - call number\n\t\t - notes of the repository reference\n\t\t\"\"\"\n\t\trepo_handle = ref.get_reference_handle()\n\t\trepo = self.database.get_repository_from_handle(repo_handle)\n\t\ttxt = \"[\"\n\t\ttxt += str(index) + \",\"\n\t\ttxt += \"\\\"\" + script_escape(str(ref.get_media_type())) + \"\\\",\"\n\t\ttxt += \"\\\"\" + script_escape(ref.get_call_number()) + \"\\\",\"\n\t\ttxt += \"\\\"\" + script_escape(self.get_notes_text(ref)) + \"\\\"\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _data_media_reference_index(self, object):\n\t\t\"\"\"\n\t\tBuild a list of the media references index, in the form given by L{_data_media_ref}\n\t\t\"\"\"\n\t\tif (not self.inc_gallery): return(\"[]\")\n\t\trefs = object.get_media_list()\n\t\tif (not refs): return(\"[]\")\n\t\tsep = \"\\n\"\n\t\ttxt = \"[\"\n\t\tfor ref in refs:\n\t\t\tmedia_handle = ref.get_reference_handle()\n\t\t\tif (media_handle in self.obj_dict[MediaObject]):\n\t\t\t\ttxt += sep + \"\\t\" + self._data_media_ref(ref, self.obj_dict[MediaObject][media_handle][OBJDICT_INDEX])\n\t\t\t\tsep = \",\\n\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_media_ref(self, ref, index):\n\t\t\"\"\"\n\t\tBuild a media reference, in the form:\n\t\t - media index (in table 'M')\n\t\t - media thumbnail path\n\t\t - [x1, y1, x2, y2] of the media reference\n\t\t - notes of the media reference\n\t\t - list of the media reference source citations index (in table 'C')\n\t\t\"\"\"\n\t\tmedia_handle = ref.get_reference_handle()\n\t\tmedia = self.database.get_object_from_handle(media_handle)\n\t\ttxt = \"[\"\n\t\ttxt += str(index)\n\t\ttxt += \",\\\"\"\n\t\ttxt += self.copy_thumbnail(media, ref.get_rectangle())\n\t\ttxt += \"\\\",[\"\n\t\trect = 
ref.get_rectangle() or (0,0,100,100)\n\t\ttxt += \",\".join(str(x) for x in rect)\n\t\ttxt += \"],\"\n\t\tattrlist = ref.get_attribute_list()\n\t\ttxt += \"\\\"\" + script_escape(self.get_notes_attributes_text(ref.get_note_list(), attrlist)) + \"\\\",\"\n\t\tcitationlist = ref.get_citation_list()\n\t\tfor attr in attrlist: citationlist.extend(attr.get_citation_list())\n\t\t# BUG: it seems that attribute references are given by both ref.get_citation_list and attr.get_citation_list\n\t\ttxt += self._data_source_citation_index_from_list(citationlist)\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef get_media_web_path(self, media):\n\t\t\"\"\"\n\t\tReturn the path of the media from the web pages\n\t\tThis function could be called several times for the same media\n\t\tThis function copies the media to the web pages directories if necessary\n\t\t\"\"\"\n\t\tmedia_path = media.get_path()\n\t\tif (media_path):\n\t\t\tnorm_path = media_path_full(self.database, media_path)\n\t\t\tif (os.path.isfile(norm_path)):\n\t\t\t\tif (self.copy_media):\n\t\t\t\t\text = os.path.splitext(norm_path)[1]\n\t\t\t\t\tiname = str(media.get_handle()) + ext\n\t\t\t\t\tiname = iname.lower()\n\t\t\t\t\tif (iname not in self.images_copied):\n\t\t\t\t\t\tself.copy_file(norm_path, iname, \"image\")\n\t\t\t\t\t\tself.images_copied.add(iname)\n\t\t\t\t\tweb_path = \"image/\" + iname\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tweb_path = os.path.relpath(norm_path, self.target_path)\n\t\t\t\t\t\tweb_path = web_path.replace(\"\\\\\", \"/\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tweb_path = urlparse.urljoin('file:', urllib.pathname2url(norm_path))\n\t\t\t\t\t\tlog.warning(_(\"Impossible to convert \\\"%(path)s\\\" to a relative path.\") % {\"path\": norm_path})\n\t\t\t\treturn(web_path)\n\t\tlog.warning(\"Warning: File not found \\\"%(path)s\\\"\" % {\"path\": str(media_path)})\n\t\treturn(media_path)\n\n\n\tdef copy_thumbnail(self, media, region = None):\n\t\t\"\"\"\n\t\tGiven a handle (and optional region) make 
(if needed) an\n\t\tup-to-date cache of a thumbnail, and call copy_file\n\t\tto copy the cached thumbnail to the website.\n\t\tReturn the new path to the image.\n\t\t\"\"\"\n\t\tif (region and region[0] == 0 and region[1] == 0 and region[2] == 100 and region[3] == 100):\n\t\t\tregion = None\n\t\thandle = media.get_handle()\n\t\ttname = handle + ((\"-%d,%d-%d,%d.png\" % region) if region else \".png\")\n\t\tif (media.get_mime_type()):\n\t\t\tfrom_path = get_thumbnail_path(\n\t\t\t\tmedia_path_full(self.database, media.get_path()),\n\t\t\t\tmedia.get_mime_type(),\n\t\t\t\tregion)\n\t\t\tif not os.path.isfile(from_path):\n\t\t\t\tfrom_path = os.path.join(IMAGE_DIR, \"document.png\")\n\t\telse:\n\t\t\tfrom_path = os.path.join(IMAGE_DIR, \"document.png\")\n\t\tif (tname not in self.thumbnail_created):\n\t\t\tself.copy_file(from_path, tname, \"thumb\")\n\t\t\tself.thumbnail_created.add(tname)\n\t\tweb_path = \"thumb/\" + tname\n\t\treturn(web_path)\n\n\n\tdef _data_attributes(self, object):\n\t\t\"\"\"\n\t\tBuild the list of the L{object} attributes as a Javascript string, in the form:\n\t\t [attribute, value, note, list of citations]\n\t\t\"\"\"\n\t\tattrlist = object.get_attribute_list()\n\t\ttxt = \"[\"\n\t\tsep = \"\"\n\t\tfor attr in attrlist:\n\t\t\ttxt += sep + \"[\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_type())) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_value())) + \"\\\",\"\n\t\t\t# Get attribute notes\n\t\t\ttxt += \"\\\"\" + script_escape(self.get_notes_text(attr)) + \"\\\",\"\n\t\t\t# Get attribute sources\n\t\t\ttxt += self._data_source_citation_index(attr)\n\t\t\ttxt += \"]\"\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_attributes_src(self, source):\n\t\t\"\"\"\n\t\tBuild the list of the L{source} sources attributes as a Javascript string, in the form:\n\t\t [attribute, value, \"\", []]\n\t\t\"\"\"\n\t\tattrlist = source.get_attribute_list()\n\t\ttxt = \"[\"\n\t\tsep = \"\"\n\t\tfor attr in 
attrlist:\n\t\t\ttxt += sep + \"[\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_type())) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(str(attr.get_value())) + \"\\\",\"\n\t\t\t# There aren't any attribute notes\n\t\t\ttxt += \"\\\"\\\",\"\n\t\t\t# There aren't any attribute sources\n\t\t\ttxt += \"[]\"\n\t\t\ttxt += \"]\"\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_url_list(self, object):\n\t\t\"\"\"\n\t\tBuild the list of the L{object} URL as a Javascript string, in the form:\n\t\t [type, url, description]\n\t\t\"\"\"\n\t\turllist = object.get_url_list()\n\t\ttxt = \"[\"\n\t\tsep = \"\"\n\t\tfor url in urllist:\n\t\t\t_type = url.get_type()\n\t\t\turi = url.get_path()\n\t\t\tdescr = url.get_description()\n\t\t\t# Email address\n\t\t\tif _type == UrlType.EMAIL:\n\t\t\t\tif not uri.startswith(\"mailto:\"):\n\t\t\t\t\turi = \"mailto:%(email)s\" % { 'email' : uri }\n\t\t\t# Web Site address\n\t\t\telif _type == UrlType.WEB_HOME:\n\t\t\t\tif not (uri.startswith(\"http://\") or uri.startswith(\"https://\")):\n\t\t\t\t\turi = \"http://%(website)s\" % { \"website\" : uri }\n\t\t\t# FTP server address\n\t\t\telif _type == UrlType.WEB_FTP:\n\t\t\t\tif not (uri.startswith(\"ftp://\") or uri.startswith(\"ftps://\")):\n\t\t\t\t\turi = \"ftp://%(ftpsite)s\" % { \"ftpsite\" : uri }\n\t\t\ttxt += sep + \"[\"\n\t\t\ttxt += \"\\\"\" + str(_type) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(uri) + \"\\\",\"\n\t\t\ttxt += \"\\\"\" + script_escape(descr) + \"\\\"\"\n\t\t\ttxt += \"]\"\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _export_surnames(self):\n\t\t\"\"\"\n\t\tExport surnames data in Javascript file\n\t\tThe surnames data is stored in the Javascript Array \"SN\"\n\t\t\"\"\"\n\t\t# Extract the surnames data\n\t\tsurnames = defaultdict(list) #: Dictionary giving for each surname: the list of person handles with this surname\n\t\tsortnames = {} #: Dictionary giving for each person handle: a sortable string for the 
person\n\t\tperson_list = list(self.obj_dict[Person].keys())\n\t\tfor person_handle in person_list:\n\t\t\tperson = self.database.get_person_from_handle(person_handle)\n\t\t\tprimary_name = person.get_primary_name()\n\t\t\tif (primary_name.group_as):\n\t\t\t\tsurname = primary_name.group_as\n\t\t\telse:\n\t\t\t\tsurname = self.database.get_name_group_mapping(_nd.primary_surname(primary_name))\n\t\t\t# Treat people who have no name with those whose name is just 'whitespace'\n\t\t\tif (surname is None or surname.isspace()):\n\t\t\t\tsurname = \"\"\n\t\t\tsortnames[person_handle] = _nd.sort_string(primary_name)\n\t\t\tsurnames[surname].append(person_handle)\n\t\t# Sort surnames\n\t\tsurns_keys = list(surnames.keys())\n\t\tsurns_keys.sort(key = SORT_KEY)\n\t\t# Generate the file\n\t\tsw1 = StringIO()\n\t\tsw1.write(\n\t\t\t\"// This file is generated\\n\\n\"\n\t\t\t\"// 'SN' is sorted by surname\\n\"\n\t\t\t\"// 'SN' gives for each surname:\\n\"\n\t\t\t\"// - the surname\\n\"\n\t\t\t\"// - the surname first letter\\n\"\n\t\t\t\"// - the list of persion index (in table 'I') with this surname\\n\"\n\t\t\t\"\\nSN = [\")\n\t\tsep = \"\\n\"\n\t\tfor s in surns_keys:\n\t\t\t# Sort persons\n\t\t\tsurnames[s].sort(key = lambda x: sortnames[x])\n\t\t\ttab = \",\".join([str(self.obj_dict[Person][x][OBJDICT_INDEX]) for x in surnames[s]])\n\t\t\tsw1.write(sep + \"[\\\"\" + script_escape(s) + \"\\\", \\\"\" + first_letter(s).strip() + \"\\\", [\" + tab + \"]]\")\n\t\t\tsep = \",\\n\"\n\t\tsw1.write(\"\\n];\\n\")\n\t\tself.update_file(\"dwr_db_surns.js\", sw1.getvalue())\n\n\n\tdef _data_families_index(self, person):\n\t\tfams = []\n\t\tfamily_list = person.get_family_handle_list()\n\t\tif (family_list):\n\t\t\tfams = [self.obj_dict[Family][family_handle][OBJDICT_INDEX] for family_handle in family_list if (family_handle in self.obj_dict[Family])]\n\t\treturn(\n\t\t\t\"[\" +\n\t\t\t\",\".join([str(i) for i in fams]) +\n\t\t\t\"]\")\n\n\tdef _data_partners_index(self, 
family):\n\t\tindis = []\n\t\tperson_handle = family.get_father_handle()\n\t\tif (person_handle and (person_handle in self.obj_dict[Person])):\n\t\t\tindis.append(self.obj_dict[Person][person_handle][OBJDICT_INDEX])\n\t\tperson_handle = family.get_mother_handle()\n\t\tif (person_handle and (person_handle in self.obj_dict[Person])):\n\t\t\tindis.append(self.obj_dict[Person][person_handle][OBJDICT_INDEX])\n\t\treturn(\n\t\t\t\"[\" +\n\t\t\t\",\".join([str(i) for i in indis]) +\n\t\t\t\"]\")\n\n\tdef _data_parents_families_index(self, person):\n\t\tlinks = []\n\t\tfamily_list = person.get_parent_family_handle_list()\n\t\tif (family_list):\n\t\t\tfor family_handle in family_list:\n\t\t\t\tif (family_handle not in self.obj_dict[Family]): continue\n\t\t\t\tfamily = self.database.get_family_from_handle(family_handle)\n\t\t\t\tchild_refs = [\n\t\t\t\t\tchild_ref\n\t\t\t\t\tfor child_ref in family.get_child_ref_list()\n\t\t\t\t\tif (child_ref.ref == person.get_handle())\n\t\t\t\t]\n\t\t\t\tif (len(child_refs) >= 1):\n\t\t\t\t\tindex = self.obj_dict[Family][family_handle][OBJDICT_INDEX]\n\t\t\t\t\tlinks.append(self._data_child_ref(index, child_refs[0]))\n\t\treturn(\"[\" + \",\".join(links) + \"]\")\n\n\tdef _data_children_index(self, family):\n\t\tlinks = [\n\t\t\tself._data_child_ref(self.obj_dict[Person][child_ref.ref][OBJDICT_INDEX], child_ref)\n\t\t\tfor child_ref in family.get_child_ref_list()\n\t\t\tif (child_ref.ref in self.obj_dict[Person])\n\t\t]\n\t\treturn(\"[\" + \",\".join(links) + \"]\")\n\n\tdef _data_child_ref(self, index, child_ref):\n\t\t# Child reference in the form:\n\t\t# [index, relation to father, relation to mother, notes, list of citations]\n\t\ttxt = \"[\"\n\t\ttxt += str(index) + \",\"\n\t\ttxt += \"\\\"\" + script_escape(str(child_ref.get_father_relation())) + \"\\\",\"\n\t\ttxt += \"\\\"\" + script_escape(str(child_ref.get_mother_relation())) + \"\\\",\"\n\t\t# Get child reference notes\n\t\ttxt += \"\\\"\" + 
script_escape(self.get_notes_text(child_ref)) + \"\\\",\"\n\t\t# Get child reference sources\n\t\ttxt += self._data_source_citation_index(child_ref)\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef _data_associations(self, person):\n\t\tassoclist = person.get_person_ref_list()\n\t\trels = []\n\t\tfor person_ref in assoclist:\n\t\t\ttxt = \"[\"\n\t\t\tif (person_ref.ref not in self.obj_dict[Person]): continue\n\t\t\ttxt += \"%i,\" % self.obj_dict[Person][person_ref.ref][OBJDICT_INDEX]\n\t\t\ttxt += \"\\\"\" + script_escape(str(person_ref.get_relation())) + \"\\\",\"\n\t\t\t# Get association notes\n\t\t\ttxt += \"\\\"\" + script_escape(self.get_notes_text(person_ref)) + \"\\\",\"\n\t\t\t# Get association sources\n\t\t\ttxt += self._data_source_citation_index(person_ref)\n\t\t\ttxt += \"]\"\n\t\t\trels.append(txt)\n\t\treturn(\"[\" + \",\".join(rels) + \"]\")\n\n\n\tdef get_birth_year(self, person):\n\t\tev = get_birth_or_fallback(self.database, person)\n\t\treturn(self._get_year_text(ev) or \"?\")\n\tdef get_death_year(self, person):\n\t\tev = get_death_or_fallback(self.database, person)\n\t\treturn(self._get_year_text(ev))\n\tdef get_marriage_year(self, family):\n\t\tev = get_marriage_or_fallback(self.database, family)\n\t\treturn(self._get_year_text(ev))\n\tdef _get_year_text(self, event):\n\t\ty = \"\"\n\t\tif (event):\n\t\t\ty = \"?\"\n\t\t\tdate = event.get_date_object()\n\t\t\tmod = date.get_modifier()\n\t\t\tstart = date.get_start_date()\n\t\t\tif (mod == Date.MOD_NONE and start != Date.EMPTY):\n\t\t\t\ty = str(start[2])\n\t\treturn(y)\n\n\tdef get_birth_place(self, person):\n\t\tev = get_birth_or_fallback(self.database, person)\n\t\treturn(self._get_place_text(ev))\n\tdef get_death_place(self, person):\n\t\tev = get_death_or_fallback(self.database, person)\n\t\treturn(self._get_place_text(ev))\n\tdef get_marriage_place(self, family):\n\t\tev = get_marriage_or_fallback(self.database, family)\n\t\treturn(self._get_place_text(ev))\n\tdef _get_place_text(self, 
event):\n\t\tplace_name = \"\"\n\t\tif (event):\n\t\t\tplace_handle = event.get_place_handle()\n\t\t\tif (place_handle and (place_handle in self.obj_dict[Place])):\n\t\t\t\tplace_name = report_utils.place_name(self.database, place_handle)\n\t\treturn(place_name)\n\n\tdef get_death_age(self, person):\n\t\tev_birth = get_birth_or_fallback(self.database, person)\n\t\tbirth_date = None\n\t\tif (ev_birth): birth_date = ev_birth.get_date_object()\n\t\tev_death = get_death_or_fallback(self.database, person)\n\t\tdeath_date = None\n\t\tif (ev_death): death_date = ev_death.get_date_object()\n\t\tif (birth_date):\n\t\t\talive = probably_alive(person, self.database, Today())\n\t\t\tif (not alive and death_date):\n\t\t\t\tnyears = death_date - birth_date\n\t\t\t\tnyears.format(precision = 3)\n\t\t\t\treturn(str(nyears))\n\t\treturn(\"\");\n\n\n\tdef _export_pages(self):\n\t\t\"\"\"\n\t\tGenerate the HTML pages\n\t\t\"\"\"\n\t\t\n\t\t# Check pages configuration (in the options)\n\t\tpcset = set(self.page_content)\n\t\tif (len(pcset) != len(self.page_content)):\n\t\t\tlog.error(_(\"The pages configuration is not valid: several pages have the same content\"))\n\t\t\treturn\n\t\t\t\n\t\t# Export the script containing the web pages configuration\n\t\tself._export_script_configuration()\n\t\t\n\t\t# List of the scripts and CSS stylesheets used in the HTML pages\n\t\t# Note: other scripts and stylesheets are dynamically loaded in \"dwr_start.js\"\n\t\t# \"dwr_start.js\" is loaded in all pages uncontitionally (see L{write_header})\n\t\tdbscripts = [\"dwr_db_indi.js\", \"dwr_db_fam.js\", \"dwr_db_sour.js\", \"dwr_db_cita.js\", \"dwr_db_media.js\", \"dwr_db_place.js\", \"dwr_db_repo.js\", \"dwr_db_surns.js\"] #: list of the scripts to embed in the HTML\n\t\tmapscripts = [] #: list of the scripts to embed in the HTML pages that show a map\n\t\tmapstyles = [] #: list of the CSS stylesheets to embed in the HTML pages that show a map\n\t\tif (self.options['placemappages'] or 
self.options['familymappages']):\n\t\t\tif (self.options['mapservice'] == \"Google\"):\n\t\t\t\tmapscripts = [\"http://maps.googleapis.com/maps/api/js?sensor=false\"]\n\t\t\telse:\n\t\t\t\tmapscripts = [\"http://openlayers.org/en/v3.0.0/build/ol.js\"]\n\t\t\t\tmapstyles = [\"http://openlayers.org/en/v3.0.0/css/ol.css\"]\n\t\t\t\t# mapscripts = [\"ol.js\"]\n\t\t\t\t# mapstyles = [\"ol.css\"]\n\t\t\t\t# mapscripts = [\"http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.js\"]\n\t\t\t\t# mapstyles = [\"http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.css\"]\n\t\t\t\t\n\t\t#: List of page to generate: index in L{PAGES_NAMES}, Javascript code for generating the page\n\t\tparts = {\n\t\t\tPAGE_PERSON: (dbscripts, \"DwrMain(PAGE_INDI);\"),\n\t\t\tPAGE_SURNAMES: (dbscripts, \"printSurnamesIndex();\"),\n\t\t\tPAGE_PERSON_INDEX: (dbscripts, \"printPersonsIndex();\"),\n\t\t\tPAGE_FAMILY_INDEX: (dbscripts, \"printFamiliesIndex();\"),\n\t\t\tPAGE_SOURCE_INDEX: (dbscripts, \"printSourcesIndex();\"),\n\t\t\tPAGE_MEDIA_INDEX: (dbscripts, \"printMediaIndex();\"),\n\t\t\tPAGE_PLACE_INDEX: (dbscripts, \"printPlacesIndex();\"),\n\t\t\tPAGE_ADDRESS_INDEX: (dbscripts, \"printAddressesIndex();\"),\n\t\t\tPAGE_REPOSITORY_INDEX: (dbscripts, \"printReposIndex();\"),\n\t\t\tPAGE_SVG_TREE: (dbscripts, \"DwrMain(PAGE_SVG_TREE);\"),\n\t\t}\n\t\t\n\t\t# Export the HTML pages listed in L{PAGES_NAMES}\n\t\tfor i in range(self.pages_number):\n\t\t\tpc = self.page_content[i] # Get the page i contents defined in the options\n\t\t\tfilename = PAGES_NAMES[pc][2]\n\t\t\ttitle = self.page_name[pc]\n\t\t\tif (pc in parts):\n\t\t\t\t# The page is not a custom page\n\t\t\t\t(scripts, cmd) = parts[pc]\n\t\t\t\tself._export_html_page(filename, title, cmd, True, scripts)\n\t\t\telse:\n\t\t\t\t# The page is a custom page\n\t\t\t\ti_cst = pc - PAGE_CUSTOM\n\t\t\t\tself._export_custom_page(filename, title, self.custom_menu[i_cst], self.custom_note[i_cst])\n\n\t\t# The person page is required\n\t\tif (PAGE_PERSON not in 
self.page_content):\n\t\t\tself._export_html_page(\"person.html\", self.page_name[PAGE_PERSON], \"DwrMain(PAGE_INDI);\", True, dbscripts)\n\n\t\t# The search results page is required\n\t\tself._export_html_page(\"search.html\", _(\"Search results\"), \"DwrMain(PAGE_SEARCH);\", True, dbscripts)\n\n\t\t# Page for printing a family (if needed)\n\t\tif (self.inc_families):\n\t\t\tself._export_html_page(\"family.html\", self.page_name[PAGE_FAMILY_INDEX], \"DwrMain(PAGE_FAM);\", True, dbscripts + mapscripts , mapstyles)\n\t\t\n\t\t# Generate page surnames pages (if surnames page is used)\n\t\tif (PAGE_SURNAMES in self.page_content):\n\t\t\t# Page for persons with a given surname\n\t\t\tself._export_html_page(\"surname.html\", self.page_name[PAGE_SURNAMES], \"printSurnameIndex();\", True, dbscripts)\n\t\t\t# Page for surnames sorted by quantity\n\t\t\tself._export_html_page(\"surnames2.html\", self.page_name[PAGE_SURNAMES], \"printSurnamesIndex2();\", True, dbscripts)\n\n\t\t# Page for a single family (if needed)\n\t\tif (self.inc_sources):\n\t\t\tself._export_html_page(\"source.html\", self.page_name[PAGE_SOURCE_INDEX], \"DwrMain(PAGE_SOURCE);\", True, dbscripts)\n\n\t\t# Page for a single media (if needed)\n\t\tif (self.inc_gallery):\n\t\t\tself._export_html_page(\"media.html\", self.page_name[PAGE_MEDIA_INDEX], \"DwrMain(PAGE_MEDIA);\", True, dbscripts)\n\n\t\t# Page for a single place (if needed)\n\t\tif (self.inc_places):\n\t\t\tself._export_html_page(\"place.html\", self.page_name[PAGE_PLACE_INDEX], \"DwrMain(PAGE_PLACE);\", True, dbscripts + mapscripts , mapstyles)\n\n\t\t# Page for a single repository (if needed)\n\t\tif (self.inc_repositories):\n\t\t\tself._export_html_page(\"repository.html\", self.page_name[PAGE_REPOSITORY_INDEX], \"DwrMain(PAGE_REPO);\", True, dbscripts)\n\n\t\t# Page for full-screen SVG graph (if SVG graph is used)\n\t\tif (PAGE_SVG_TREE in self.page_content):\n\t\t\tself._export_html_page(\"tree_svg_full.html\", 
self.page_name[PAGE_SVG_TREE], \"DwrMain(PAGE_SVG_TREE_FULL);\", False, dbscripts)\n\t\t\tself._export_html_page(\"tree_svg_conf.html\", self.page_name[PAGE_SVG_TREE], \"DwrMain(PAGE_SVG_TREE_CONF);\", True, dbscripts)\n\t\t\tself._export_html_page(\"tree_svg_save.html\", self.page_name[PAGE_SVG_TREE], \"DwrMain(PAGE_SVG_TREE_SAVE);\", True, dbscripts)\n\n\n\tdef _export_script_configuration(self):\n\t\t\"\"\"\n\t\tGenerate \"dwr_conf.js\", which contains:\n\t\t - The pages configuration (mostly extract of the report options),\n\t\t - The localization (translated strings)\n\t\t - Gramps constants that could be used in the Javascript\n\t\t\"\"\"\n\t\tsw = StringIO()\n\t\tsw.write(\"// This file is generated\\n\\n\")\n\t\tsw.write(\"NB_GENERATIONS_MAX = %i;\\n\" % int(self.options[\"graphgens\"]))\n\t\tsw.write(\"PAGES_TITLE = [\")\n\t\tsw.write(\", \".join([\n\t\t\t(\"\\\"\" + script_escape(self.page_name[self.page_content[i]]).replace(\" \", \" \") + \"\\\"\")\n\t\t\tfor i in range(self.pages_number)]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"PAGES_FILE = [\")\n\t\tsw.write(\", \".join([\n\t\t\t(\"\\\"\" + script_escape(PAGES_NAMES[self.page_content[i]][2]) + \"\\\"\")\n\t\t\tfor i in range(self.pages_number)]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_TYPES_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_TYPES]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_SHAPES_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_SHAPES]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_DISTRIB_ASC_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_DISTRIB_ASC]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_DISTRIB_DSC_NAMES = [\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_DISTRIB_DSC]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_BACKGROUND_NAMES = 
[\")\n\t\tsw.write(\", \".join([(\"\\\"\" + script_escape(n) + \"\\\"\") for n in SVG_TREE_BACKGROUNDS]))\n\t\tsw.write(\"];\\n\")\n\t\tsw.write(\"SVG_TREE_TYPE = %s;\\n\" % self.options['svg_tree_type'])\n\t\tsw.write(\"SVG_TREE_SHAPE = %s;\\n\" % self.options['svg_tree_shape'])\n\t\tsw.write(\"SVG_TREE_DISTRIB_ASC = %s;\\n\" % self.options['svg_tree_distrib_asc'])\n\t\tsw.write(\"SVG_TREE_DISTRIB_DSC = %s;\\n\" % self.options['svg_tree_distrib_dsc'])\n\t\tsw.write(\"SVG_TREE_BACKGROUND = %s;\\n\" % self.options['svg_tree_background'])\n\t\tsw.write(\"SVG_TREE_COLOR1 = \\\"%s\\\";\\n\" % self.options['svg_tree_color1'])\n\t\tsw.write(\"SVG_TREE_COLOR2 = \\\"%s\\\";\\n\" % self.options['svg_tree_color2'])\n\t\tsw.write(\"SVG_TREE_SHOW_DUP = \" + (\"true\" if (self.options['svg_tree_dup']) else \"false\") + \";\\n\")\n\t\tsw.write(\"SVG_TREE_COLOR_DUP = \\\"%s\\\";\\n\" % self.options['svg_tree_color_dup'])\n\t\tsw.write(\"GRAMPS_PREFERENCES = [];\\n\")\n\t\tfor pref in [\n\t\t\t'bordercolor-gender-female-alive',\n\t\t\t'bordercolor-gender-female-death',\n\t\t\t'bordercolor-gender-male-alive',\n\t\t\t'bordercolor-gender-male-death',\n\t\t\t'bordercolor-gender-unknown-alive',\n\t\t\t'bordercolor-gender-unknown-death',\n\t\t\t'color-gender-female-alive',\n\t\t\t'color-gender-female-death',\n\t\t\t'color-gender-male-alive',\n\t\t\t'color-gender-male-death',\n\t\t\t'color-gender-unknown-alive',\n\t\t\t'color-gender-unknown-death',\n\t\t\t]:\n\t\t\tsw.write(\"GRAMPS_PREFERENCES['%s'] = \\\"%s\\\";\\n\" % (pref, config.get('preferences.%s' % pref)))\n\t\tsw.write(\"SVG_TREE_COLOR_SCHEME0 = [\" + \", \".join(\n\t\t\t[(\"\\\"#%02x%02x%02x\\\"\" % (r, g, b)) for (r, g, b) in GENCOLOR[BACKGROUND_WHITE]])\n\t\t\t+ \"];\\n\")\n\t\tsw.write(\"SVG_TREE_COLOR_SCHEME1 = [\" + \", \".join(\n\t\t\t[(\"\\\"#%02x%02x%02x\\\"\" % (r, g, b)) for (r, g, b) in GENCOLOR[BACKGROUND_SCHEME1]])\n\t\t\t+ \"];\\n\")\n\t\tsw.write(\"SVG_TREE_COLOR_SCHEME2 = [\" + \", 
\".join(\n\t\t\t[(\"\\\"#%02x%02x%02x\\\"\" % (r, g, b)) for (r, g, b) in GENCOLOR[BACKGROUND_SCHEME2]])\n\t\t\t+ \"];\\n\")\n\t\tsw.write(\"FOOTER=\\\"\" + script_escape(self.get_header_footer_notes(\"footernote\")) + \"\\\";\\n\")\n\t\tsw.write(\"HEADER=\\\"\" + script_escape(self.get_header_footer_notes(\"headernote\")) + \"\\\";\\n\")\n\t\tsw.write(\"COPYRIGHT=\\\"\" + script_escape(self.get_copyright_license()) + \"\\\";\\n\")\n\t\tsw.write(\"INDEX_SHOW_BIRTH=\" + (\"true\" if (self.options['showbirth']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_DEATH=\" + (\"true\" if (self.options['showdeath']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_MARRIAGE=\" + (\"true\" if (self.options['showmarriage']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_PARTNER=\" + (\"true\" if (self.options['showpartner']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_PARENTS=\" + (\"true\" if (self.options['showparents']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_ALL_SIBLINGS=\" + (\"true\" if (self.options['birthorder']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INDEX_SHOW_BKREF_TYPE=\" + (\"true\" if (self.options['bkref_type']) else \"false\") + \";\\n\")\n\t\tsw.write(\"SORT_CHILDREN=\" + (\"true\" if (self.options['showallsiblings']) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_EVENTS=\" + (\"true\" if (self.inc_events) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_FAMILIES=\" + (\"true\" if (self.inc_families) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_SOURCES=\" + (\"true\" if (self.inc_sources) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_MEDIA=\" + (\"true\" if (self.inc_gallery) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_PLACES=\" + (\"true\" if (self.inc_places) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_REPOSITORIES=\" + (\"true\" if (self.inc_repositories) else \"false\") + \";\\n\")\n\t\tsw.write(\"INC_NOTES=\" + (\"true\" if (self.inc_notes) else \"false\") + 
\";\\n\")\n\t\tsw.write(\"INC_ADDRESSES=\" + (\"true\" if (self.inc_addresses) else \"false\") + \";\\n\")\n\t\tsw.write(\"MAP_PLACE=\" + (\"true\" if (self.options['placemappages']) else \"false\") + \";\\n\")\n\t\tsw.write(\"MAP_FAMILY=\" + (\"true\" if (self.options['familymappages']) else \"false\") + \";\\n\")\n\t\tsw.write(\"MAP_SERVICE=\\\"\" + script_escape(self.options['mapservice']) + \"\\\";\\n\")\n\t\tsw.write(\"__ = {\")\n\t\tsep = \"\\n\"\n\t\tfor (s, translated) in (\n\t\t\t(\"(filtered from _MAX_ total entries)\", _(\"(filtered from _MAX_ total entries)\")),\n\t\t\t(\"(sort by name)\", _(\"(sort by name)\")),\n\t\t\t(\"(sort by quantity)\", _(\"(sort by quantity)\")),\n\t\t\t(\": activate to sort column ascending\", _(\": activate to sort column ascending\")),\n\t\t\t(\": activate to sort column descending\", _(\": activate to sort column descending\")),\n\t\t\t(\"
Click on a person to center the graph on this person. When clicking on the center person, the person page is shown.
The type of graph could be selected in the list (on the top left side of the graph)
The number of ascending end descending generations could also be adjusted.
Use the mouse wheel or the buttons to zoom in and out.
The graph could also be shown full-screen.\", _(\"
Click on a person to center the graph on this person. When clicking on the center person, the person page is shown.
The type of graph could be selected in the list (on the top left side of the graph)
The number of ascending end descending generations could also be adjusted.
Use the mouse wheel or the buttons to zoom in and out.
The graph could also be shown full-screen.\")),\n\t\t\t(\"
This page provides the SVG raw code. Copy the contents into a text editor and save as an SVG file. Make sure that the text editor encoding is UTF-8.
\", _(\"
This page provides the SVG raw code. Copy the contents into a text editor and save as an SVG file. Make sure that the text editor encoding is UTF-8.
\")),\n\t\t\t(\"Address\", _(\"Address\")),\n\t\t\t(\"Addresses\", _(\"Addresses\")),\n\t\t\t(\"Age at Death\", _(\"Age at Death\")),\n\t\t\t(\"Alternate Name\", _(\"Alternate Name\")),\n\t\t\t(\"Ancestors\", _(\"Ancestors\")),\n\t\t\t(\"Associations\", _(\"Associations\")),\n\t\t\t(\"Attribute\", _(\"Attribute\")),\n\t\t\t(\"Attributes\", _(\"Attributes\")),\n\t\t\t(\"Background\", _(\"Background\")),\n\t\t\t(\"Call Name\", _(\"Call Name\")),\n\t\t\t(\"Call Number\", _(\"Call Number\")),\n\t\t\t(\"Children\", _(\"Children\")),\n\t\t\t(\"Church Parish\", _(\"Church Parish\")),\n\t\t\t(\"Citation\", _(\"Citation\")),\n\t\t\t(\"Citations\", _(\"Citations\")),\n\t\t\t(\"City\", _(\"City\")),\n\t\t\t(\"Click on the map to show it full-screen\", _(\"Click on the map to show it full-screen\")),\n\t\t\t(\"Configuration\", _(\"Configuration\")),\n\t\t\t(\"Country\", _(\"Country\")),\n\t\t\t(\"County\", _(\"County\")),\n\t\t\t(\"Date\", _(\"Date\")),\n\t\t\t(\"Descendants\", _(\"Descendants\")),\n\t\t\t(\"Description\", _(\"Description\")),\n\t\t\t(\"Event\", _(\"Event\")),\n\t\t\t(\"Events\", _(\"Events\")),\n\t\t\t(\"Expand\", _(\"Expand\")),\n\t\t\t(\"Families Index\", _(\"Families Index\")),\n\t\t\t(\"Family Nick Name\", _(\"Family Nick Name\")),\n\t\t\t(\"Father\", _(\"Father\")),\n\t\t\t(\"Female\", _(\"Female\")),\n\t\t\t(\"File ready\", _(\"File ready\")),\n\t\t\t(\"Gender\", _(\"Gender\")),\n\t\t\t(\"Graph help\", _(\"Graph help\")),\n\t\t\t(\"Latitude\", _(\"Latitude\")),\n\t\t\t(\"Link\", _(\"Link\")),\n\t\t\t(\"Loading...\", _(\"Loading...\")),\n\t\t\t(\"Locality\", _(\"Locality\")),\n\t\t\t(\"Location\", _(\"Location\")),\n\t\t\t(\"Longitude\", _(\"Longitude\")),\n\t\t\t(\"Male\", _(\"Male\")),\n\t\t\t(\"Map\", _(\"Map\")),\n\t\t\t(\"Maximize\", _(\"Maximize\")),\n\t\t\t(\"Media found:\", _(\"Media found:\")),\n\t\t\t(\"Media Index\", _(\"Media Index\")),\n\t\t\t(\"Media Type\", _(\"Media Type\")),\n\t\t\t(\"Media\", _(\"Media\")),\n\t\t\t(\"Mother\", 
_(\"Mother\")),\n\t\t\t(\"Name\", _(\"Name\")),\n\t\t\t(\"Nick Name\", _(\"Nick Name\")),\n\t\t\t(\"No data available in table\", _(\"No data available in table\")),\n\t\t\t(\"No matching records found\", _(\"No matching records found\")),\n\t\t\t(\"No matching surname.\", _(\"No matching surname.\")),\n\t\t\t(\"None.\", _(\"None.\")),\n\t\t\t(\"Notes\", _(\"Notes\")),\n\t\t\t(\"OK\", _(\"OK\")),\n\t\t\t(\"Parents\", _(\"Parents\")),\n\t\t\t(\"Path\", _(\"Path\")),\n\t\t\t(\"Person page\", _(\"Person page\")),\n\t\t\t(\"Person to search for\", _(\"Person to search for\")),\n\t\t\t(\"Person\", _(\"Person\")),\n\t\t\t(\"Persons found:\", _(\"Persons found:\")),\n\t\t\t(\"Persons Index\", _(\"Persons Index\")),\n\t\t\t(\"Phone\", _(\"Phone\")),\n\t\t\t(\"Place\", _(\"Place\")),\n\t\t\t(\"Places found:\", _(\"Places found:\")),\n\t\t\t(\"Places Index\", _(\"Places Index\")),\n\t\t\t(\"Postal Code\", _(\"Postal Code\")),\n\t\t\t(\"Preparing file ...\", _(\"Preparing file ...\")),\n\t\t\t(\"Processing...\", _(\"Processing...\")),\n\t\t\t(\"References\", _(\"References\")),\n\t\t\t(\"Relationship to Father\", _(\"Relationship to Father\")),\n\t\t\t(\"Relationship to Mother\", _(\"Relationship to Mother\")),\n\t\t\t(\"Relationship\", _(\"Relationship\")),\n\t\t\t(\"Repositories\", _(\"Repositories\")),\n\t\t\t(\"Repository\", _(\"Repository\")),\n\t\t\t(\"Restore\", _(\"Restore\")),\n\t\t\t(\"Save tree as file\", _(\"Save tree as file\")),\n\t\t\t(\"Search:\", _(\"Search:\")),\n\t\t\t(\"Select the background color scheme\", _(\"Select the background color scheme\")),\n\t\t\t(\"Select the children distribution (fan charts only)\", _(\"Select the children distribution (fan charts only)\")),\n\t\t\t(\"Select the number of ascending generations\", _(\"Select the number of ascending generations\")),\n\t\t\t(\"Select the number of descending generations\", _(\"Select the number of descending generations\")),\n\t\t\t(\"Select the parents distribution (fan charts only)\", 
_(\"Select the parents distribution (fan charts only)\")),\n\t\t\t(\"Select the shape of graph\", _(\"Select the shape of graph\")),\n\t\t\t(\"Select the type of graph\", _(\"Select the type of graph\")),\n\t\t\t(\"Several matches. Precise your search or choose in the lists below.\", _(\"Several matches. Precise your search or choose in the lists below.\")),\n\t\t\t(\"Show _MENU_ entries\", _(\"Show _MENU_ entries\")),\n\t\t\t(\"Showing 0 to 0 of 0 entries\", _(\"Showing 0 to 0 of 0 entries\")),\n\t\t\t(\"Showing _START_ to _END_ of _TOTAL_ entries\", _(\"Showing _START_ to _END_ of _TOTAL_ entries\")),\n\t\t\t(\"Siblings\", _(\"Siblings\")),\n\t\t\t(\"Source\", _(\"Source\")),\n\t\t\t(\"Sources found:\", _(\"Sources found:\")),\n\t\t\t(\"Sources Index\", _(\"Sources Index\")),\n\t\t\t(\"Sources\", _(\"Sources\")),\n\t\t\t(\"Spouses\", _(\"Spouses\")),\n\t\t\t(\"State/ Province\", _(\"State/ Province\")),\n\t\t\t(\"Street\", _(\"Street\")),\n\t\t\t(\"Surnames Index\", _(\"Surnames Index\")),\n\t\t\t(\"SVG tree children distribution\", _(\"SVG tree children distribution\")),\n\t\t\t(\"SVG tree graph shape\", _(\"SVG tree graph shape\")),\n\t\t\t(\"SVG tree graph type\", _(\"SVG tree graph type\")),\n\t\t\t(\"SVG tree parents distribution\", _(\"SVG tree parents distribution\")),\n\t\t\t(\"There is no matching name.\", _(\"There is no matching name.\")),\n\t\t\t(\"Title\", _(\"Title\")),\n\t\t\t(\"Type\", _(\"Type\")),\n\t\t\t(\"Unknown\", _(\"Unknown\")),\n\t\t\t(\"Use the search box above in order to find a person. Women are listed with their maiden name.\", _(\"Use the search box above in order to find a person. 
Women are listed with their maiden name.\")),\n\t\t\t(\"Used for family\", _(\"Used for family\")),\n\t\t\t(\"Used for media\", _(\"Used for media\")),\n\t\t\t(\"Used for person\", _(\"Used for person\")),\n\t\t\t(\"Used for place\", _(\"Used for place\")),\n\t\t\t(\"Used for source\", _(\"Used for source\")),\n\t\t\t(\"Value\", _(\"Value\")),\n\t\t\t(\"Web Link\", _(\"Web Link\")),\n\t\t\t(\"Web Links\", _(\"Web Links\")),\n\t\t\t(\"Without surname\", _(\"Without surname\")),\n\t\t\t(\"Zoom in\", _(\"Zoom in\")),\n\t\t\t(\"Zoom out\", _(\"Zoom out\")),\n\t\t\t):\n\t\t\tsw.write(sep + \"\\\"\" + script_escape(s) + \"\\\": \\\"\" + script_escape(translated) + \"\\\"\")\n\t\t\tsep = \",\\n\"\n\t\tfor (code, translated, s) in EventType._DATAMAP:\n\t\t\tsw.write(sep + \"\\\"\" + script_escape(s) + \"\\\": \\\"\" + script_escape(translated) + \"\\\"\")\n\t\t\tsep = \",\\n\"\n\t\tsw.write(\"\\n};\\n\")\n\t\tsw.write(\n\t\t\t(\"URLTYPE_UNKNOWN = %i;\\n\" % UrlType.UNKNOWN) +\n\t\t\t(\"URLTYPE_CUSTOM = %i;\\n\" % UrlType.CUSTOM) +\n\t\t\t(\"URLTYPE_EMAIL = %i;\\n\" % UrlType.EMAIL) +\n\t\t\t(\"URLTYPE_WEB_HOME = %i;\\n\" % UrlType.WEB_HOME) +\n\t\t\t(\"URLTYPE_WEB_SEARCH = %i;\\n\" % UrlType.WEB_SEARCH) +\n\t\t\t(\"URLTYPE_WEB_FTP = %i;\\n\" % UrlType.WEB_FTP))\n\t\tself.update_file(\"dwr_conf.js\", sw.getvalue(), \"UTF-8\")\n\n\n\tdef _export_html_page(self, filename, title, cmd, menu, scripts = [], styles = []):\n\t\t\"\"\"\n\t\tGenerate an HTML page\n\t\t@param filename: output HTML file name\n\t\t@param title: Title of the page (prepended to L{self.title}\n\t\t@param cmd: Javascript code that generates the page\n\t\t@param menu: Whether to put a menu on the page\n\t\t@param scripts: Scripts embedded in the page\n\t\t@param styles: CSS stylesheets embedded in the page\n\t\t\"\"\"\n\t\t(page, head, body) = self.write_header(title, menu)\n\t\tfor style in styles:\n\t\t\thead += Html(\"link\", rel = \"stylesheet\", href = style, type = \"text/css\")\n\t\tfor script in 
scripts:\n\t\t\thead += Html(\"script\", language = \"javascript\", src = script, charset = self.encoding)\n\t\tbody += Html(\"script\", cmd, language = \"javascript\")\n\t\tself.update_file(filename, html_text(page))\n\n\n\tdef _export_custom_page(self, filename, title, menu, note):\n\t\t\"\"\"\n\t\tGenerate an HTML custom page\n\t\t@param filename: output HTML file name\n\t\t@param title: Title of the page (prepended to L{self.title}\n\t\t@param menu: Whether to put a menu on the page\n\t\t@param note: note that contains the page contents\n\t\t\"\"\"\n\t\t(page, head, body) = self.write_header(title, menu)\n\t\tif (note):\n\t\t\thtml = self.get_note_format(self.database.get_note_from_gramps_id(note))\n\t\t\tbody += self.replace_note_fields(html)\n\t\tself.update_file(filename, html_text(page))\n\n\n\tdef write_header(self, title, menu):\n\t\t\"\"\"\n\t\tGenerate an HTML page header\n\t\t@param title: Title of the page (prepended to L{self.title}\n\t\t@param menu: Whether to put a menu on the page\n\t\t@return: List of L{Html} objects as follows: (page, head, body)\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tNote. 
'title' is used as currentsection in the navigation links and\n\t\tas part of the header title.\n\t\t\"\"\"\n\t\t# Begin each html page...\n\t\txmllang = xml_lang()\n\t\t(page, head, body) = Html.page('%s - %s' % (\n\t\t\t\thtml_escape(title),\n\t\t\t\thtml_escape(self.title.strip()),\n\t\t\t),\n\t\t\tself.encoding, xmllang)\n\t\t# Header constants\n\t\thead += Html(\"meta\", attr = 'name=\"viewport\" content=\"width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=1\"')\n\t\thead += Html(\"meta\", attr = 'name=\"apple-mobile-web-app-capable\" content=\"yes\"')\n\t\thead += Html(\"meta\", attr = 'name=\"generator\" content=\"%s %s %s\"' % (PROGRAM_NAME, VERSION, URL_HOMEPAGE))\n\t\thead += Html(\"meta\", attr = 'name=\"author\" content=\"%s\"' % self.author)\n\t\t# Create script and favicon links\n\t\thead += Html(\"link\", type = \"image/x-icon\", href = \"data/favicon.ico\", rel = \"shortcut icon\")\n\t\thead += Html(\"script\", language = 'javascript', src = 'data/dwr_start.js')\n\t\t# Disable menu\n\t\tif (not menu):\n\t\t\tbody.attr = \"class='dwr-menuless'\"\n\t\treturn(page, head, body)\n\n\n\tdef get_header_footer_notes(self, item):\n\t\t\"\"\"\n\t\tGive the header/footer note converted to an HTML string\n\t\t@param item: Option giving the note. 
See options \"footernote\" and \"headernote\"\n\t\t@return: text of the note\n\t\t@rtype: L{String}\n\t\t\"\"\"\n\t\tnote = self.options[item]\n\t\tif (note):\n\t\t\thtml = self.get_note_format(self.database.get_note_from_gramps_id(note))\n\t\t\treturn(self.replace_note_fields(html))\n\t\treturn(\"\")\n\n\n\tdef replace_note_fields(self, html):\n\t\t\"\"\"\n\t\tModify the notes for HTML pages generation\n\t\tThis allow to add special features or computed data in the pages\n\t\t@param html: Note converted to HTML string\n\t\t@return: Modified string\n\t\t\"\"\"\n\t\ttext = html_text(html)\n\t\t# __SEARCH_FORM__ is replaced by a search form\n\t\ttext = text.replace(\"__SEARCH_FORM__\",\n\t\t\t\"\\n\")\n\t\t# __NB_INDIVIDUALS__ is replaced by the number of persons\n\t\t# __NB_FAMILIES__ is replaced by the number of families\n\t\t# __NB_MEDIA__ is replaced by the number of media objects\n\t\t# __NB_SOURCES__ is replaced by the number of sources\n\t\t# __NB_REPOSITORIES__ is replaced by the number of repositories\n\t\t# __NB_PLACES__ is replaced by the number of places\n\t\ttext = text.replace(\"__NB_INDIVIDUALS__\", str(len(self.obj_dict[Person])))\n\t\ttext = text.replace(\"__NB_FAMILIES__\", str(len(self.obj_dict[Family])))\n\t\ttext = text.replace(\"__NB_MEDIA__\", str(len(self.obj_dict[MediaObject])))\n\t\ttext = text.replace(\"__NB_SOURCES__\", str(len(self.obj_dict[Source])))\n\t\ttext = text.replace(\"__NB_REPOSITORIES__\", str(len(self.obj_dict[Repository])))\n\t\ttext = text.replace(\"__NB_PLACES__\", str(len(self.obj_dict[Place])))\n\t\t# __MEDIA___ is replaced by the media with gramps ID \n\t\t# __THUMB___ is replaced by the thumbnail of the media with gramps ID \n\t\ttext2 = text\n\t\tfor mo in re.finditer(r\"__(MEDIA|THUMB)_(.*?)__\", text):\n\t\t\tgid = mo.group(2)\n\t\t\tmedia = self.database.get_object_from_gramps_id(gid)\n\t\t\tif (not media): continue\n\t\t\ttm = mo.group(1)\n\t\t\tif (tm == \"THUMB\"):\n\t\t\t\tpath = 
self.copy_thumbnail(media)\n\t\t\t\ttext2 = (\n\t\t\t\t\ttext2[ : -(len(text) - mo.start(0))] +\n\t\t\t\t\t\"\" +\n\t\t\t\t\ttext2[-(len(text) - mo.end(0)) : ])\n\t\t\telse:\n\t\t\t\tpath = self.get_media_web_path(media)\n\t\t\t\ttext2 = (\n\t\t\t\t\ttext2[ : -(len(text) - mo.start(0))] +\n\t\t\t\t\t\"\" +\n\t\t\t\t\ttext2[-(len(text) - mo.end(0)) : ])\n\t\ttext = text2\n\t\t# __EXPORT_DATE__ is replaced by the current date\n\t\t# __GRAMPS_VERSION__ is replaced by the Gramps version\n\t\t# __GRAMPS_HOMEPAGE__ is replaced by the Gramps homepage\n\t\ttext = text.replace(\"__EXPORT_DATE__\", format_date(Today()))\n\t\ttext = text.replace(\"__GRAMPS_VERSION__\", VERSION)\n\t\ttext = text.replace(\"__GRAMPS_HOMEPAGE__\", \"Gramps\")\n\t\t# Relative URL are managed\n\t\ttext = text.replace(\"relative://relative.\", \"\")\n\t\t# __HOME_PERSON_NAME__ is replaced by the home person name\n\t\t# __HOME_PERSON_URL__ is replaced by the home person page URL\n\t\t# center_person = self.database.get_person_from_gramps_id(self.options['pid'])\n\t\t# if (center_person and (center_person.handle in self.obj_dict[Person])):\n\t\t\t# person_name = self.get_name(center_person)\n\t\t\t# person_url = \"person.html?idx=%i\" % self.obj_dict[Person][center_person.handle][OBJDICT_INDEX]\n\t\t\t# text = text.replace(\"__HOME_PERSON_NAME__\", person_name)\n\t\t\t# text = text.replace(\"__HOME_PERSON_URL__\", person_url)\n\t\treturn(text)\n\n\t\t\n\tdef get_copyright_license(self):\n\t\t\"\"\"\n\t\twill return either the text or image of the copyright license\n\t\t\"\"\"\n\t\ttext = \"\"\n\t\tif (self.copyright == 0):\n\t\t\tif self.author:\n\t\t\t\tyear = Today().get_year()\n\t\t\t\ttext = \"
\"\n\t\t# return text or image to its callers\n\t\treturn(text)\n\n\n\tdef update_file(self, fout, txt, encoding = None):\n\t\t\"\"\"\n\t\tWrite a string in a file.\n\t\tThe file is not overwritten if the file exists and already contains the string \n\t\t@param fout: output file name\n\t\t@param txt: file contents\n\t\t@param encoding: encoding as passed to Python function codecs.open \n\t\t\"\"\"\n\t\tif (encoding is None): encoding = self.encoding\n\t\tf = os.path.join(self.target_path, fout)\n\t\tself.created_files.append(f)\n\t\tif (os.path.exists(f)):\n\t\t\ttry:\n\t\t\t\tfr = codecs.open(f, \"r\", encoding = encoding, errors=\"xmlcharrefreplace\")\n\t\t\t\ttxtr = fr.read()\n\t\t\t\tfr.close()\n\t\t\t\tif (txtr == txt):\n\t\t\t\t\tlog.info(\"File \\\"%s\\\" not overwritten (identical)\" % fout)\n\t\t\t\t\treturn\n\t\t\texcept:\n\t\t\t\tpass\n\t\tfw = codecs.open(f, \"w\", encoding = encoding, errors=\"xmlcharrefreplace\")\n\t\tfw.write(txt)\n\t\tfw.close()\n\t\tlog.info(\"File \\\"%s\\\" generated\" % fout)\n\n\tdef copy_file(self, from_fname, to_fname, to_dir=\"\"):\n\t\t\"\"\"\n\t\tCopy a file from a source to a (report) destination.\n\t\tIf to_dir is not present and if the target is not an archive,\n\t\tthen the destination directory will be created.\n\n\t\tNormally 'to_fname' will be just a filename, without directory path.\n\n\t\t'to_dir' is the relative path name in the destination root. 
It will\n\t\tbe prepended before 'to_fname'.\n\t\t\n\t\tThe file is not copied if the contents of 'from_fname' 'to_fname' are identical\n\t\t\"\"\"\n\t\t# log.debug(\"copying '%s' to '%s/%s'\" % (from_fname, to_dir, to_fname))\n\t\tdest = os.path.join(self.target_path, to_dir, to_fname)\n\t\tdestdir = os.path.dirname(dest)\n\t\tif not os.path.isdir(destdir):\n\t\t\tos.makedirs(destdir)\n\n\t\tif from_fname != dest:\n\t\t\ttry:\n\t\t\t\tdest_temp = dest + \".temp\"\n\t\t\t\tshutil.copyfile(from_fname, dest_temp)\n\t\t\t\tself.created_files.append(dest)\n\t\t\t\tif (os.path.exists(dest)):\n\t\t\t\t\tfr = codecs.open(dest, \"rb\")\n\t\t\t\t\told_bytes = fr.read()\n\t\t\t\t\tfr.close()\n\t\t\t\t\tfr = codecs.open(dest_temp, \"rb\")\n\t\t\t\t\tnew_bytes = fr.read()\n\t\t\t\t\tfr.close()\n\t\t\t\t\tif (old_bytes == new_bytes):\n\t\t\t\t\t\tos.remove(dest_temp)\n\t\t\t\t\t\tlog.info(\"File \\\"%s\\\" not overwritten (identical)\" % dest)\n\t\t\t\t\t\treturn\n\t\t\t\t\tos.remove(dest)\n\t\t\t\tos.rename(dest_temp, dest)\n\t\t\t\tlog.info(\"File \\\"%s\\\" generated\" % dest)\n\t\t\texcept:\n\t\t\t\tlog.warning(_(\"Copying error: %(error)s\") % {\"error\": sys.exc_info()[1]})\n\t\t\t\tlog.error(_(\"Impossible to copy \\\"%(src)s\\\" to \\\"%(dst)s\\\"\") % {\"src\": from_fname, \"dst\": to_fname})\n\t\telif self.warn_dir:\n\t\t\tself.user.warn(\n\t\t\t\t_(\"Possible destination error\") + \"\\n\" +\n\t\t\t\t_(\"You appear to have set your target directory \"\n\t\t\t\t \"to a directory used for data storage. This \"\n\t\t\t\t \"could create problems with file management. 
\"\n\t\t\t\t \"It is recommended that you consider using \"\n\t\t\t\t \"a different directory to store your generated \"\n\t\t\t\t \"web pages.\"))\n\t\t\tself.warn_dir = False\n\n\n\n\tdef copy_template_files(self):\n\t\t\"\"\"\n\t\tCopy the template files to the target directory\n\t\t\n\t\tThe template files are:\n\t\t - The files contained in the chosen template directory,\n\t\t - The files contained in the default template directory, unless they are also present in the chosen template directory\n\t\t\"\"\"\n\t\t# Get template path\n\t\ttmpl_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\", WEB_TEMPLATE_LIST[self.template][0])\n\t\tdefault_tmpl_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\", WEB_TEMPLATE_LIST[0][0])\n\t\ttry:\n\t\t\t# Copy template files\n\t\t\tself.copy_template_files_sub(tmpl_path)\n\t\t\t# Copy default template files if not already copied\n\t\t\tself.copy_template_files_sub(default_tmpl_path)\n\t\texcept:\n\t\t\tlog.error(_(\"Unable to copy web site template files from \\\"%(path)s\\\"\") % {\"path\": tmpl_path})\n\t\t\traise\n\t\t\t\n\tdef copy_template_files_sub(self, tmpl_path):\n\t\t\"\"\"\n\t\tCopy the template files from L{tmpl_path} to the target directory\n\t\tThe files already present in the target directory are not overwritten\n\t\t@param tmpl_path: template directory, as listed in L{WEB_TEMPLATE_LIST}\n\t\t\"\"\"\n\t\tfor (root, dirnames, files) in os.walk(tmpl_path):\n\t\t\tdst_path = root.replace(tmpl_path, self.target_path, 1)\n\t\t\t# Create sub-directories\n\t\t\tfor dirname in dirnames:\n\t\t\t\t# Remove files that have the same name as directories\n\t\t\t\tdstdirname = os.path.join(dst_path, dirname)\n\t\t\t\tif (os.path.isfile(dstdirname) or os.path.islink(dstdirname)):\n\t\t\t\t\tos.remove(dstdirname)\n\t\t\t\t# Create directory if needed\n\t\t\t\tif (not os.path.isdir(dstdirname)):\n\t\t\t\t\tos.mkdir(dstdirname)\n\t\t\t# Copy files\n\t\t\tfor file in 
files:\n\t\t\t\tsrc = os.path.join(root, file)\n\t\t\t\tdst = os.path.join(dst_path, file)\n\t\t\t\tif (dst in self.created_files):\n\t\t\t\t\t# File was already copied\n\t\t\t\t\tcontinue\n\t\t\t\tif (os.path.isfile(dst)):\n\t\t\t\t\t# If file already exists, check dates\n\t\t\t\t\tstat_src = os.stat(src)\n\t\t\t\t\tstat_dst = os.stat(dst)\n\t\t\t\t\t# If target file is newer, do not overwrite => If target file is older, delete it\n\t\t\t\t\tif (stat_src.st_mtime >= stat_dst.st_mtime):\n\t\t\t\t\t\tos.remove(dst)\n\t\t\t\t\telse:\n\t\t\t\t\t\tlog.info(_(\"Keeping \\\"%(dst)s\\\" (newer than \\\"%(src)s\\\")\") % {'src': src, 'dst': dst})\n\t\t\t\tif (not os.path.exists(dst)):\n\t\t\t\t\tshutil.copyfile(src, dst)\n\t\t\t\t\tlog.info(_(\"Copying \\\"%(src)s\\\" to \\\"%(dst)s\\\"\") % {'src': src, 'dst': dst})\n\t\t\t\tself.created_files.append(dst)\n\n\n\tdef create_archive(self):\n\t\t\"\"\"\n\t\tCreate an archive of the whole web site\n\t\t\"\"\"\n\t\tif (not self.options['archive']): return\n\t\t\n\t\t# Get archive path and type\n\t\tarch_path = self.options['archive_file']\n\t\text = os.path.splitext(arch_path)[1].lower()\n\t\tif (ext not in [\".zip\", \".tgz\"]):\n\t\t\tarch_path += \".zip\"\n\t\t\text = \".zip\"\n\t\t\n\t\tif (os.path.isdir(arch_path)):\n\t\t\tlog.error(_('Invalid file name'))\n\t\t\tlog.error(_('The archive file must be a file, not a directory'))\n\t\t\treturn\n\t\t\t\n\t\t# Get base path for the files inside the archive\n\t\tbasepath = os.path.splitext(os.path.basename(arch_path))[0]\n\t\t\n\t\tif (ext == \".zip\"):\n\t\t\ttry:\n\t\t\t\tfzip = zipfile.ZipFile(arch_path, \"w\", zipfile.ZIP_DEFLATED, True)\n\t\t\texcept:\n\t\t\t\tlog.error(_(\"Unable to overwrite archive file \\\"%(path)s\\\"\") % {\"path\": arch_path})\n\t\t\t\traise\n\t\t\tfor file in self.created_files:\n\t\t\t\tarc_rel_path = file.replace(self.target_path, basepath, 1)\n\t\t\t\tif (sys.version_info[0] < 3):\n\t\t\t\t\tfile = file.encode(\"cp437\")\n\t\t\t\t\tarc_rel_path 
= arc_rel_path.encode(\"cp437\")\n\t\t\t\ttry:\n\t\t\t\t\tfzip.write(file, arc_rel_path)\n\t\t\t\texcept:\n\t\t\t\t\tlog.error(_(\"Unable to add file \\\"%(file)s\\\" to archive \\\"%(archive)s\\\"\") % {\"file\": file, \"archive\": arch_path})\n\t\t\t\t\traise\n\t\t\tfzip.close()\n\n\t\tif (ext == \".tgz\"):\n\t\t\ttry:\n\t\t\t\ttgz = tarfile.open(arch_path, \"w:gz\")\n\t\t\texcept:\n\t\t\t\tlog.error(_(\"Unable to overwrite archive file \\\"%(path)s\\\"\") % {\"path\": arch_path})\n\t\t\t\traise\n\t\t\tfor file in self.created_files:\n\t\t\t\tarc_rel_path = file.replace(self.target_path, basepath, 1)\n\t\t\t\ttry:\n\t\t\t\t\ttgz.add(file, arc_rel_path)\n\t\t\t\texcept:\n\t\t\t\t\tlog.error(_(\"Unable to add file \\\"%(file)s\\\" to archive \\\"%(archive)s\\\"\") % {\"file\": path, \"archive\": arch_path})\n\t\t\t\t\traise\n\t\t\ttgz.close()\n\n\n\tdef build_link(self, prop, handle, obj_class):\n\t\t\"\"\"\n\t\tBuild a link to an item.\n\t\t\n\t\tThis function is used when converting a Gramps note with hyperlinks into an HTML string\n\t\t\"\"\"\n\t\tif prop == \"gramps_id\":\n\t\t\tif obj_class in self.database.get_table_names():\n\t\t\t\tobj = self.database.get_table_metadata(obj_class)[\"gramps_id_func\"](handle)\n\t\t\t\tif obj:\n\t\t\t\t\thandle = obj.get_handle()\n\t\t\t\telse:\n\t\t\t\t\traise AttributeError(\"gramps_id '%s' not found in '%s'\" % handle, obj_class)\n\t\t\telse:\n\t\t\t\traise AttributeError(\"invalid gramps_id lookup in table name '%s'\" % obj_class)\n\t\thref = \"search.html\"\n\t\ti = -1\n\t\tif (obj_class == \"Person\"):\n\t\t\thref = \"person.html\"\n\t\t\tif (handle in self.obj_dict[Person]):\n\t\t\t\thref = \"%s?idx=%i\" % (href, self.obj_dict[Person][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Family\"):\n\t\t\thref = \"family.html\"\n\t\t\tif (handle in self.obj_dict[Family]):\n\t\t\t\thref = \"%s?fdx=%i\" % (href, self.obj_dict[Family][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Source\"):\n\t\t\thref = 
\"source.html\"\n\t\t\tif (handle in self.obj_dict[Source]):\n\t\t\t\thref = \"%s?sdx=%i\" % (href, self.obj_dict[Source][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Citation\"):\n\t\t\thref = \"source.html\"\n\t\t\tsource_handle = self.database.get_citation_from_handle(handle).get_reference_handle()\n\t\t\tif (source_handle in self.obj_dict[Source]):\n\t\t\t\thref = \"%s?sdx=%i\" % (href, self.obj_dict[Source][source_handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Repository\"):\n\t\t\thref = \"repository.html\"\n\t\t\tif (handle in self.obj_dict[Repository]):\n\t\t\t\thref = \"%s?rdx=%i\" % (href, self.obj_dict[Repository][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Media\"):\n\t\t\thref = \"media.html\"\n\t\t\tif (handle in self.obj_dict[MediaObject]):\n\t\t\t\thref = \"%s?mdx=%i\" % (href, self.obj_dict[MediaObject][handle][OBJDICT_INDEX])\n\t\telif (obj_class == \"Place\"):\n\t\t\thref = \"place.html\"\n\t\t\tif (handle in self.obj_dict[Place]):\n\t\t\t\thref = \"%s?pdx=%i\" % (href, self.obj_dict[Place][handle][OBJDICT_INDEX])\n\t\telse:\n\t\t\tprint(_(\"DynamicWebReport ignoring link type '%(class)s'\") % {\"class\": obj_class})\n\t\treturn(href)\n\n\n\tdef _data_bkref_index(self, obj_class, obj_handle, ref_class):\n\t\t\"\"\"\n\t\tBuild a list of object indexes referencing a given object\n\t\t@param obj_class: Referenced object class\n\t\t@param obj_handle: Referenced object handle\n\t\t@param ref_class: Class of the refencing objects\n\t\t@return: String representing the Javascript Array of the object indexes (of class L{ref_class}) referencing a given object (L{obj_class}, L{obj_handle})\n\t\t\"\"\"\n\t\tbkref_list = self.bkref_dict[obj_class][obj_handle]\n\t\tif (not bkref_list): return (\"[]\")\n\t\t# Sort by referenced object\n\t\tbkref_list = sorted(bkref_list, key = lambda bkref: self.obj_dict[bkref[BKREF_CLASS]][bkref[BKREF_HANDLE]][OBJDICT_NAME])\n\t\t# Filter bkref_list (keep only ref_class) and remove duplicates\n\t\tseen = 
set()\n\t\tbkref_list = [bkref_handle\n\t\t\tfor (bkref_class, bkref_handle, media_ref) in bkref_list\n\t\t\tif (bkref_class == ref_class and not (bkref_handle in seen or seen.add(bkref_handle)))]\n\t\treturn(\"[\" +\n\t\t\t\",\".join([str(self.obj_dict[ref_class][bkref_handle][OBJDICT_INDEX]) for bkref_handle in bkref_list]) +\n\t\t\t\"]\")\n\n\n\tdef _data_repo_backref_index(self, repo, ref_class):\n\t\t\"\"\"\n\t\tBuild a list of object referencing a given repository, in the form:\n\t\t - object index (in table 'I', 'F', 'S')\n\t\t - media type\n\t\t - call number\n\t\t - notes of the repository reference\n\t\t@param repo: Referenced repository\n\t\t@param ref_class: Class of the refencing objects\n\t\t@return: String representing the Javascript Array of the references to L{repo}\n\t\t\"\"\"\n\t\trepo_handle = repo.get_handle()\n\t\tif (repo_handle not in self.obj_dict[Repository]): return(\"[]\")\n\t\tbkref_list = self.bkref_dict[Repository][repo_handle]\n\t\tif (not bkref_list): return (\"[]\")\n\t\tsep = \"\"\n\t\ttxt = \"[\"\n\t\tfor (bkref_class, bkref_handle, repo_ref) in bkref_list:\n\t\t\tif (ref_class != bkref_class): continue\n\t\t\ti = self.obj_dict[ref_class][bkref_handle][OBJDICT_INDEX]\n\t\t\tobject = self.get_object_from_handle(bkref_class, bkref_handle)\n\t\t\ttxt += sep + self._data_repo_ref(repo_ref, i)\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\tdef _data_media_backref_index(self, media, ref_class):\n\t\t\"\"\"\n\t\tBuild a list of object referencing a given media, in the form:\n\t\t - object index (in table 'I', 'F', 'S')\n\t\t - media thumbnail path\n\t\t - [x1, y1, x2, y2] of the media reference\n\t\t - notes of the media reference\n\t\t - list of the media reference source citations index (in table 'C')\n\t\t@param media: Referenced repository\n\t\t@param ref_class: Class of the refencing objects\n\t\t@return: String representing the Javascript Array of the references to L{media}\n\t\t\"\"\"\n\t\tmedia_handle = 
media.get_handle()\n\t\tif (media_handle not in self.obj_dict[MediaObject]): return(\"[]\")\n\t\tbkref_list = self.bkref_dict[MediaObject][media_handle]\n\t\tif (not bkref_list): return (\"[]\")\n\t\tsep = \"\"\n\t\ttxt = \"[\"\n\t\tfor (bkref_class, bkref_handle, media_ref) in bkref_list:\n\t\t\tif (ref_class != bkref_class): continue\n\t\t\ti = self.obj_dict[ref_class][bkref_handle][OBJDICT_INDEX]\n\t\t\tobject = self.get_object_from_handle(bkref_class, bkref_handle)\n\t\t\ttxt += sep + self._data_media_ref(media_ref, i)\n\t\t\tsep = \",\"\n\t\ttxt += \"]\"\n\t\treturn(txt)\n\n\n\tdef get_object_from_handle(self, class_, handle):\n\t\t\"\"\"\n\t\tGet an object from its handle and class\n\t\t\"\"\"\n\t\tobject = None\n\t\tif (class_ == Person):\n\t\t\tobject = self.database.get_person_from_handle(handle)\n\t\telif (class_ == Family):\n\t\t\tobject = self.database.get_family_from_handle(handle)\n\t\telif (class_ == Event):\n\t\t\tobject = self.database.get_event_from_handle(handle)\n\t\telif (class_ == Source):\n\t\t\tobject = self.database.get_source_from_handle(handle)\n\t\telif (class_ == Citation):\n\t\t\tobject = self.database.get_citation_from_handle(handle)\n\t\telif (class_ == Place):\n\t\t\tobject = self.database.get_place_from_handle(handle)\n\t\telif (class_ == Repository):\n\t\t\tobject = self.database.get_repository_from_handle(handle)\n\t\treturn(object)\n\n\n\t##############################################################################################\n\t################################################################################## GENDEX data\n\t##############################################################################################\n\n\tdef build_gendex(self, ind_list):\n\t\tif (not self.inc_gendex): return\n\t\tfp_gendex = StringIO()\n\t\tfor person_handle in ind_list:\n\t\t\tself.write_gendex(fp_gendex, person_handle)\n\t\tself.update_file(\"gendex.txt\", fp_gendex.getvalue())\n\n\tdef write_gendex(self, fp, 
person_handle):\n\t\t\"\"\"\n\t\tReference|SURNAME|given name /SURNAME/|date of birth|place of birth|date of death|place of death|\n\t\t* field 1: file name of web page referring to the individual\n\t\t* field 2: surname of the individual\n\t\t* field 3: full name of the individual\n\t\t* field 4: date of birth or christening (optional)\n\t\t* field 5: place of birth or christening (optional)\n\t\t* field 6: date of death or burial (optional)\n\t\t* field 7: place of death or burial (optional) \n\t\t\"\"\"\n\t\tif (not(person_handle and (person_handle in self.obj_dict[Person]))): return\n\t\tperson = self.database.get_person_from_handle(person_handle)\n\t\turl = \"person.html?idx=%i\" % self.obj_dict[Person][person_handle][OBJDICT_INDEX]\n\t\tsurname = person.get_primary_name().get_surname()\n\t\tfullname = person.get_primary_name().get_gedcom_name()\n\t\t\n\t\t# get birth info:\n\t\t(dob, pob) = self.get_gendex_data(person.get_birth_ref())\n\t\t\n\t\t# get death info:\n\t\t(dod, pod) = self.get_gendex_data(person.get_death_ref())\n\t\tfp.write(\n\t\t\t'|'.join((url, surname, fullname, dob, pob, dod, pod)) + '|\\n')\n\n\tdef get_gendex_data(self, event_ref):\n\t\t\"\"\"\n\t\tGiven an event, return the date and place a strings\n\t\t\"\"\"\n\t\tdoe = \"\" # date of event\n\t\tpoe = \"\" # place of event\n\t\tif (event_ref):\n\t\t\tevent = self.database.get_event_from_handle(event_ref.ref)\n\t\t\tif (event):\n\t\t\t\tdate = event.get_date_object()\n\t\t\t\tdoe = format_date(date, gedcom = True)\n\t\t\t\tif (event.get_place_handle()):\n\t\t\t\t\tplace_handle = event.get_place_handle()\n\t\t\t\t\tif (place_handle):\n\t\t\t\t\t\tplace = self.database.get_place_from_handle(place_handle)\n\t\t\t\t\t\tif (place):\n\t\t\t\t\t\t\tif (DWR_VERSION_412):\n\t\t\t\t\t\t\t\tpoe = _pd.display(self.database, place)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpoe = place.get_title()\n\t\treturn(doe, 
poe)\n\t\t\n\n\t##############################################################################################\n\t##############################################################################################\n\t#\n\t# Objects dictionaries construction\n\t#\n\t##############################################################################################\n\t##############################################################################################\n\n\tdef _build_obj_dict(self):\n\t\t\"\"\"\n\t\tConstruct the dictionaries of objects to be included in the reports. There\n\t\tare two dictionaries, which have the same structure: they are two level\n\t\tdictionaries,the first key is the class of object (e.g. gen.lib.Person).\n\t\tThe second key is the handle of the object.\n\n\t\tFor the obj_dict, the value is a tuple containing:\n\t\t - the gramps_id\n\t\t - the text name for the object\n\t\t - the index (number starting at 0)\n\n\t\tFor the bkref_dict, the value is a tuple containing:\n\t\t - the class of object that refers to the 'key' object\n\t\t - the handle for the object that refers to the 'key' object\n\t\t - the reference object:\n\t\t\t- None in most cases\n\t\t\t- for media it is a MediaRef object\n\t\t\n\t\tThis method recursively calls the methods \"_add_***\"\n\t\t\"\"\"\n\t\t_obj_class_list = (Person, Family, Event, Place, Source, Citation,\n\t\t\t\t\t\t MediaObject, Repository, Note, Tag)\n\n\t\t# setup a dictionary of the required structure\n\t\tself.obj_dict = defaultdict(lambda: defaultdict(set))\n\t\tself.bkref_dict = defaultdict(lambda: defaultdict(set))\n\n\n\t\t# initialise the dictionary to empty in case no objects of any\n\t\t# particular class are included in the web report\n\t\tfor obj_class in _obj_class_list:\n\t\t\tself.obj_dict[obj_class] = defaultdict(set)\n\n\t\tind_list = self.database.iter_person_handles()\n\t\twith self.user.progress(_(\"Dynamic Web Site Report\"),\n\t\t\t\t\t\t\t\t _(\"Applying Person 
Filter...\"),\n\t\t\t\t\t\t\t\t self.database.get_number_of_people()) as step:\n\t\t\tind_list = self.filter.apply(self.database, ind_list,\n\t\t\t\t\t\t\t\t\t\t step)\n\n\t\twith self.user.progress(_(\"Dynamic Web Site Report\"),\n\t\t\t\t\t\t\t\t _(\"Constructing list of other objects...\"),\n\t\t\t\t\t\t\t\t sum(1 for _ in ind_list)) as step:\n\t\t\tfor handle in ind_list:\n\t\t\t\t# FIXME work around bug that self.database.iter under python 3\n\t\t\t\t# returns (binary) data rather than text\n\t\t\t\tif (not isinstance(handle, UNITYPE)):\n\t\t\t\t\thandle = handle.decode(\"UTF-8\")\n\t\t\t\tstep()\n\t\t\t\tself._add_person(handle, \"\", \"\")\n\n\t\tlog.debug(\"final object dictionary \\n\" +\n\t\t\t\t \"\".join((\"%s: %s\\n\" % item) for item in self.obj_dict.items()))\n\n\t\tlog.debug(\"final backref dictionary \\n\" +\n\t\t\t\t \"\".join((\"%s: %s\\n\" % item) for item in self.bkref_dict.items()))\n\n\n\tdef _add_person(self, person_handle, bkref_class, bkref_handle):\n\t\t\"\"\"\n\t\tAdd person_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Person][person_handle].add((bkref_class, bkref_handle, None))\n\t\t# Check if the person is already added\n\t\tif (person_handle in self.obj_dict[Person]): return\n\t\t# Add person in the dictionaries of objects\n\t\tperson = self.database.get_person_from_handle(person_handle)\n\t\tif (not person): return\n\t\tperson_name = self.get_person_name(person)\n\t\tself.obj_dict[Person][person_handle] = [person_name, person.gramps_id, len(self.obj_dict[Person])]\n\t\t# Person events\n\t\tevt_ref_list = person.get_event_ref_list()\n\t\tif evt_ref_list:\n\t\t\tfor evt_ref in evt_ref_list:\n\t\t\t\tself._add_event(evt_ref.ref, Person, person_handle, evt_ref)\n\t\t# Person citations\n\t\tfor citation_handle in person.get_citation_list():\n\t\t\tself._add_citation(citation_handle, Person, person_handle)\n\t\t# Person 
	def _add_person(self, person_handle, bkref_class, bkref_handle):
		"""
		Add person_handle to the L{self.obj_dict}, and recursively all referenced objects

		@param person_handle: Handle of the person to add
		@param bkref_class: Class of the object referencing this person ("" for the initial filter pass)
		@param bkref_handle: Handle of the referencing object ("" for the initial filter pass)
		"""
		# Update the dictionaries of objects back references
		self.bkref_dict[Person][person_handle].add((bkref_class, bkref_handle, None))
		# Check if the person is already added
		if (person_handle in self.obj_dict[Person]): return
		# Add person in the dictionaries of objects
		person = self.database.get_person_from_handle(person_handle)
		if (not person): return
		person_name = self.get_person_name(person)
		# Index (last element) is the insertion rank; it is recomputed later by _sort_obj_dict
		self.obj_dict[Person][person_handle] = [person_name, person.gramps_id, len(self.obj_dict[Person])]
		# Person events
		evt_ref_list = person.get_event_ref_list()
		if evt_ref_list:
			for evt_ref in evt_ref_list:
				self._add_event(evt_ref.ref, Person, person_handle, evt_ref)
		# Person citations
		for citation_handle in person.get_citation_list():
			self._add_citation(citation_handle, Person, person_handle)
		# Person name citations
		for name in [person.get_primary_name()] + \
						person.get_alternate_names():
			for citation_handle in name.get_citation_list():
				self._add_citation(citation_handle, Person, person_handle)
		# LDS Ordinance citations
		for lds_ord in person.get_lds_ord_list():
			for citation_handle in lds_ord.get_citation_list():
				self._add_citation(citation_handle, Person, person_handle)
		# Attribute citations
		for attr in person.get_attribute_list():
			for citation_handle in attr.get_citation_list():
				self._add_citation(citation_handle, Person, person_handle)
		# Person families
		family_handle_list = person.get_family_handle_list()
		if family_handle_list:
			for family_handle in person.get_family_handle_list():
				self._add_family(family_handle, Person, person_handle)
		# Person media
		for media_ref in person.get_media_list():
			media_handle = media_ref.get_reference_handle()
			self._add_media(media_handle, Person, person_handle, media_ref)
		# Association citations
		for assoc in person.get_person_ref_list():
			for citation_handle in assoc.get_citation_list():
				self._add_citation(citation_handle, Person, person_handle)
		# Addresses citations
		for addr in person.get_address_list():
			for citation_handle in addr.get_citation_list():
				self._add_citation(citation_handle, Person, person_handle)
	def _add_family(self, family_handle, bkref_class, bkref_handle):
		"""
		Add family_handle to the L{self.obj_dict}, and recursively all referenced objects

		@param family_handle: Handle of the family to add
		@param bkref_class: Class of the object referencing this family
		@param bkref_handle: Handle of the referencing object
		"""
		# Update the dictionaries of objects back references
		self.bkref_dict[Family][family_handle].add((bkref_class, bkref_handle, None))
		# Check if the family is already added
		if (family_handle in self.obj_dict[Family]): return
		# Add family in the dictionaries of objects
		family = self.database.get_family_from_handle(family_handle)
		family_name = self.get_family_name(family)
		# Index (last element) is the insertion rank; it is recomputed later by _sort_obj_dict
		self.obj_dict[Family][family_handle] = [family_name, family.gramps_id, len(self.obj_dict[Family])]
		# Family events
		evt_ref_list = family.get_event_ref_list()
		if evt_ref_list:
			for evt_ref in evt_ref_list:
				self._add_event(evt_ref.ref, Family, family_handle, evt_ref)
		# Family child references
		for child_ref in family.get_child_ref_list():
			for citation_handle in child_ref.get_citation_list():
				self._add_citation(citation_handle, Family, family_handle)
		# LDS Ordinance citations
		for lds_ord in family.get_lds_ord_list():
			for citation_handle in lds_ord.get_citation_list():
				self._add_citation(citation_handle, Family, family_handle)
		# Attributes citations
		for attr in family.get_attribute_list():
			for citation_handle in attr.get_citation_list():
				self._add_citation(citation_handle, Family, family_handle)
		# Family citations
		for citation_handle in family.get_citation_list():
			self._add_citation(citation_handle, Family, family_handle)
		# Family media
		for media_ref in family.get_media_list():
			media_handle = media_ref.get_reference_handle()
			self._add_media(media_handle, Family, family_handle, media_ref)
'Family of John Doe and Jane Doe')\n\t\t@param: family -- family object from database\n\t\t\"\"\"\n\t\thusband_handle = family.get_father_handle()\n\t\tspouse_handle = family.get_mother_handle()\n\n\t\thusband = self.database.get_person_from_handle(husband_handle)\n\t\tspouse = self.database.get_person_from_handle(spouse_handle)\n\n\t\tif husband and spouse:\n\t\t\thusband_name = self.get_person_name(husband)\n\t\t\tspouse_name = self.get_person_name(spouse)\n\t\t\ttitle_str = _(\"Family of %(husband)s and %(spouse)s\") % {\n\t\t\t\t\"husband\": husband_name,\n\t\t\t\t\"spouse\": spouse_name}\n\t\telif husband:\n\t\t\thusband_name = self.get_person_name(husband)\n\t\t\t# Only the name of the husband is known\n\t\t\ttitle_str = _(\"Family of %(father)s\") % {\"father\": husband_name}\n\t\telif spouse:\n\t\t\tspouse_name = self.get_person_name(spouse)\n\t\t\t# Only the name of the wife is known\n\t\t\ttitle_str = _(\"Family of %(mother)s\") % {\"mother\": spouse_name}\n\t\telse:\n\t\t\ttitle_str = \"\"\n\n\t\treturn title_str\n\n\n\tdef _add_event(self, event_handle, bkref_class, bkref_handle, event_ref):\n\t\t\"\"\"\n\t\tAdd event_handle to the L{self.obj_dict}, and recursively all referenced objects\n\t\t\"\"\"\n\t\t# Check if event reference already added\n\t\trefs = []\n\t\tif (event_handle in self.bkref_dict[Event]):\n\t\t\trefs = [bkref[BKREF_REFOBJ] for bkref in self.bkref_dict[Event][event_handle]]\n\t\t\t# The event reference is already recorded\n\t\t\tif (event_ref in refs): return\n\t\t# Update the dictionaries of objects back references\n\t\tself.bkref_dict[Event][event_handle].add((bkref_class, bkref_handle, event_ref))\n\t\t# Event reference attributes citations\n\t\tfor attr in event_ref.get_attribute_list():\n\t\t\tfor citation_handle in attr.get_citation_list():\n\t\t\t\tself._add_citation(citation_handle, bkref_class, bkref_handle)\n\t\t# Check if the event is already added\n\t\tif (event_handle in self.obj_dict[Event]): return\n\t\t# Add event in 
	def _add_event(self, event_handle, bkref_class, bkref_handle, event_ref):
		"""
		Add event_handle to the L{self.obj_dict}, and recursively all referenced objects

		@param event_handle: Handle of the event to add
		@param bkref_class: Class of the object referencing this event
		@param bkref_handle: Handle of the referencing object
		@param event_ref: The event reference object, stored in the back reference entry
		"""
		# Check if event reference already added
		refs = []
		if (event_handle in self.bkref_dict[Event]):
			refs = [bkref[BKREF_REFOBJ] for bkref in self.bkref_dict[Event][event_handle]]
			# The event reference is already recorded
			if (event_ref in refs): return
		# Update the dictionaries of objects back references
		self.bkref_dict[Event][event_handle].add((bkref_class, bkref_handle, event_ref))
		# Event reference attributes citations
		for attr in event_ref.get_attribute_list():
			for citation_handle in attr.get_citation_list():
				self._add_citation(citation_handle, bkref_class, bkref_handle)
		# Check if the event is already added
		if (event_handle in self.obj_dict[Event]): return
		# Add event in the dictionaries of objects
		event = self.database.get_event_from_handle(event_handle)
		if (not event): return
		event_name = str(event.get_type())
		event_desc = event.get_description()
		# The event description can be Y on import from GEDCOM. See the
		# following quote from the GEDCOM spec: "The occurrence of an event is
		# asserted by the presence of either a DATE tag and value or a PLACe tag
		# and value in the event structure. When neither the date value nor the
		# place value are known then a Y(es) value on the parent event tag line
		# is required to assert that the event happened.""
		if not (event_desc == "" or event_desc is None or event_desc =="Y"):
			event_name = event_name + ": " + event_desc
		self.obj_dict[Event][event_handle] = [event_name, event.gramps_id, len(self.obj_dict[Event])]
		# Event place
		place_handle = event.get_place_handle()
		if (place_handle):
			self._add_place(place_handle, bkref_class, bkref_handle)
		# Event citations
		for citation_handle in event.get_citation_list():
			self._add_citation(citation_handle, bkref_class, bkref_handle)
		# Event attributes citations
		for attr in event.get_attribute_list():
			for citation_handle in attr.get_citation_list():
				self._add_citation(citation_handle, bkref_class, bkref_handle)
		# Event media
		for media_ref in event.get_media_list():
			media_handle = media_ref.get_reference_handle()
			self._add_media(media_handle, bkref_class, bkref_handle, media_ref)
	def _add_source(self, source_handle, bkref_class, bkref_handle):
		"""
		Add source_handle to the L{self.obj_dict}, and recursively all referenced objects

		@param source_handle: Handle of the source to add
		@param bkref_class: Class of the object referencing this source
		@param bkref_handle: Handle of the referencing object
		"""
		if (not self.inc_sources): return
		# Update the dictionaries of objects back references
		self.bkref_dict[Source][source_handle].add((bkref_class, bkref_handle, None))
		# Check if the source is already added
		if (source_handle in self.obj_dict[Source]): return
		# Add source in the dictionaries of objects
		source = self.database.get_source_from_handle(source_handle)
		source_name = source.get_title()
		# Index (last element) is the insertion rank; it is recomputed later by _sort_obj_dict
		self.obj_dict[Source][source_handle] = [source_name, source.gramps_id, len(self.obj_dict[Source])]
		# Source repository
		if self.inc_repositories:
			for repo_ref in source.get_reporef_list():
				repo_handle = repo_ref.get_reference_handle()
				self._add_repository(repo_handle, Source, source_handle, repo_ref)
		# Source media
		for media_ref in source.get_media_list():
			media_handle = media_ref.get_reference_handle()
			self._add_media(media_handle, Source, source_handle, media_ref)
	def _add_citation(self, citation_handle, bkref_class, bkref_handle):
		"""
		Add citation_handle to the L{self.obj_dict}, and recursively all referenced objects

		@param citation_handle: Handle of the citation to add
		@param bkref_class: Class of the object referencing this citation
		@param bkref_handle: Handle of the referencing object
		"""
		if (not self.inc_sources): return
		# Update the dictionaries of objects back references
		self.bkref_dict[Citation][citation_handle].add((bkref_class, bkref_handle, None))
		# Check if the citation is already added
		if (citation_handle in self.obj_dict[Citation]): return
		# Add citation in the dictionaries of objects
		citation = self.database.get_citation_from_handle(citation_handle)
		citation_name = citation.get_page() or ""
		source_handle = citation.get_reference_handle()
		self.obj_dict[Citation][citation_handle] = [citation_name, citation.gramps_id, len(self.obj_dict[Citation])]
		# Citation source
		self._add_source(source_handle, Citation, citation_handle)
		# Citation media
		# NOTE(review): the media back reference below is attributed to the
		# parent Source (Source, source_handle), not to this Citation. This may
		# be deliberate -- media back references appear to be rendered only for
		# the 'I', 'F', 'S' tables (see _data_media_backref_index) -- but it
		# also looks like a copy-paste from _add_source; confirm before changing.
		for media_ref in citation.get_media_list():
			media_handle = media_ref.get_reference_handle()
			self._add_media(media_handle, Source, source_handle, media_ref)
	def _add_repository(self, repo_handle, bkref_class, bkref_handle, repo_ref):
		"""
		Add repo_handle to the L{self.obj_dict}, and recursively all referenced objects

		@param repo_handle: Handle of the repository to add
		@param bkref_class: Class of the object referencing this repository
		@param bkref_handle: Handle of the referencing object
		@param repo_ref: The repository reference object, stored in the back reference entry
		"""
		if (not self.inc_repositories): return
		# Check if repository reference already added
		refs = []
		if (repo_handle in self.bkref_dict[Repository]):
			refs = [bkref[BKREF_REFOBJ] for bkref in self.bkref_dict[Repository][repo_handle]]
			# The repository reference is already recorded
			if (repo_ref in refs): return
		# Update the dictionaries of objects back references
		self.bkref_dict[Repository][repo_handle].add((bkref_class, bkref_handle, repo_ref))
		# Check if the repository is already added
		if (repo_handle in self.obj_dict[Repository]): return
		# Add repository in the dictionaries of objects
		repo = self.database.get_repository_from_handle(repo_handle)
		repo_name = repo.name
		# Index (last element) is the insertion rank; it is recomputed later by _sort_obj_dict
		self.obj_dict[Repository][repo_handle] = [repo_name, repo.gramps_id, len(self.obj_dict[Repository])]
		# Addresses citations
		for addr in repo.get_address_list():
			for citation_handle in addr.get_citation_list():
				self._add_citation(citation_handle, Repository, repo_handle)
repo_handle)\n\n\t\t\t\t\n\t##############################################################################################\n\t##############################################################################################\n\t#\n\t# Objects dictionaries sorting\n\t#\n\t##############################################################################################\n\t##############################################################################################\n\n\n\tdef _sort_obj_dict(self):\n\t\t\"\"\"\n\t\tSort the dictionaries of objects to be included in the reports.\n\t\tThe dictionaries are sorted by name.\n\t\tThe sorting is performed by modifying the index of the objects.\n\t\t\"\"\"\n\t\t\n\t\t# Sort persons\n\t\tsortkeys = {}\n\t\tobjs = list(self.obj_dict[Person].keys())\n\t\tfor handle in objs:\n\t\t\tsortkeys[handle] = self.get_person_name_sort_key(handle)\n\t\tobjs.sort(key = lambda x: sortkeys[x])\n\t\tfor (i, x) in enumerate(objs):\n\t\t\tself.obj_dict[Person][x][OBJDICT_INDEX] = i\n\t\t\t\n\t\t# Sort families\n\t\tsortkeys = {}\n\t\tobjs = list(self.obj_dict[Family].keys())\n\t\tfor handle in objs:\n\t\t\tsortkeys[handle] = self.get_family_name_sort_key(handle)\n\t\tobjs.sort(key = lambda x: sortkeys[x])\n\t\tfor (i, x) in enumerate(objs):\n\t\t\tself.obj_dict[Family][x][OBJDICT_INDEX] = i\n\n\t\t# Sort others\n\t\tfor cls in (Source, Repository, MediaObject, Place):\n\t\t\tobjs = list(self.obj_dict[cls].keys())\n\t\t\tsortkeys = {}\n\t\t\tfor handle in objs:\n\t\t\t\tsortkeys[handle] = SORT_KEY(self.obj_dict[cls][handle][OBJDICT_NAME])\n\t\t\tobjs.sort(key = lambda x: sortkeys[x])\n\t\t\tfor (i, x) in enumerate(objs):\n\t\t\t\tself.obj_dict[cls][x][OBJDICT_INDEX] = i\n\t\t\n\n\tdef get_person_name_sort_key(self, handle):\n\t\t\"\"\"\n\t\tReturn a sort key for a person\n\t\t\"\"\"\n\t\tperson = self.database.get_person_from_handle(handle)\n\t\tprimary_name = person.get_primary_name()\n\t\tsort_str = 
_nd.sort_string(primary_name)\n\t\treturn(SORT_KEY(sort_str))\n\t\t\n\t\t\n\tdef get_family_name_sort_key(self, handle):\n\t\t\"\"\"\n\t\tReturn a sort key for a family\n\t\t\"\"\"\n\t\tfamily = self.database.get_family_from_handle(handle)\n\t\thusband_handle = family.get_father_handle()\n\t\tspouse_handle = family.get_mother_handle()\n\n\t\thusband = self.database.get_person_from_handle(husband_handle)\n\t\tspouse = self.database.get_person_from_handle(spouse_handle)\n\n\t\tif husband and spouse:\n\t\t\tsort_key = self.get_person_name_sort_key(husband_handle) + SORT_KEY(\" \") + self.get_person_name_sort_key(spouse_handle)\n\t\telif husband:\n\t\t\tsort_key = self.get_person_name_sort_key(husband_handle)\n\t\telif spouse:\n\t\t\tsort_key = self.get_person_name_sort_key(spouse_handle)\n\t\telse:\n\t\t\tsort_key = SORT_KEY(\"\")\n\n\t\treturn(sort_key)\n\t\t\n\t\t\n\t\t\n\t\t\n##################################################################################################\n##################################################################################################\n#\n# DynamicWebReport Menu Options\n#\n##################################################################################################\n##################################################################################################\n\nclass DynamicWebOptions(MenuReportOptions):\n\t\"\"\"\n\tCreates the DynamicWebReport Menu Options\n\tDefines options and provides handling interface.\n\t\n\tMethods:\n\t- add_menu_options: called by Gramps to generate the options menu. It calls all the other methods \"__add_***_options\"\n\t- __add_***_options: One method for each tab of the options menu.\n\t- __***_changed: methods called when an option impacts other options\n\t\"\"\"\n\tdef __init__(self, name, dbase):\n\t\n\t\tself.__db = dbase #: Gramps database\n\t\t\n\t\t# The data below are used when some options change the behavior of other options. 
	def add_menu_options(self, menu):
		"""
		Add options to the menu for the web site.

		It calls all the other methods "__add_***_options" (one method for each
		tab of the options menu).

		@param menu: The options menu to populate
		"""
		# One call per options tab; presumably the call order is the tab order
		# in the dialog -- confirm against MenuReportOptions before reordering.
		self.__add_report_options(menu)
		self.__add_privacy_options(menu)
		self.__add_options_options(menu)
		self.__add_pages_advanced_options(menu)
		self.__add_pages_options(menu)
		self.__add_trees_options(menu)
		self.__add_custom_pages_options(menu)
		self.__add_select_pages_options(menu)
__add_report_options(self, menu):\n\t\t\"\"\"\n\t\tOptions on the \"Report\" tab.\n\t\t\"\"\"\n\t\tcategory_name = _(\"Report\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tdbname = self.__db.get_dbname()\n\t\tdefault_dir = dbname + \"_\" + \"dynamicweb\"\n\t\ttarget = DestinationOption(_(\"Destination\"),\n\t\t\tos.path.join(config.get(\"paths.website-directory\"), default_dir))\n\t\ttarget.set_help(_(\"The destination directory for the web files\"))\n\t\ttarget.set_directory_entry(True)\n\t\taddopt(\"target\", target)\n\n\t\tself.__archive = BooleanOption(_('Store web pages in archive'), False)\n\t\tself.__archive.set_help(_(\"Whether to create an archive file (in ZIP or TGZ format) containing the web site\"))\n\t\taddopt(\"archive\", self.__archive)\n\t\tself.__archive.connect(\"value-changed\", self.__archive_changed)\n\n\t\tself.__archive_file = DestinationOption(_(\"Archive file\"),\n\t\t\tos.path.join(config.get(\"paths.website-directory\"), default_dir, \"archive.zip\"))\n\t\tself.__archive_file.set_help(_(\"The archive file name (with \\\".zip\\\" or \\\".tgz\\\" extension)\"))\n\t\tself.__archive_file.set_directory_entry(False)\n\t\taddopt(\"archive_file\", self.__archive_file)\n\n\t\tself.__archive_changed()\n\n\t\ttitle = StringOption(_(\"Web site title\"), _(\"My Family Tree\"))\n\t\ttitle.set_help(_(\"The title of the web site\"))\n\t\taddopt(\"title\", title)\n\n\t\tself.__filter = FilterOption(_(\"Filter\"), 0)\n\t\tself.__filter.set_help(\n\t\t\t _(\"Select filter to restrict people that appear on web site\"))\n\t\taddopt(\"filter\", self.__filter)\n\t\tself.__filter.connect(\"value-changed\", self.__filter_changed)\n\n\t\tself.__pid = PersonOption(_(\"Filter Person\"))\n\t\tself.__pid.set_help(_(\"The center person for the filter\"))\n\t\taddopt(\"pid\", self.__pid)\n\t\tself.__pid.connect(\"value-changed\", self.__pid_changed)\n\n\t\tself.__pid_changed()\n\n\t\t# We must figure out the value of the first option before we can 
create the EnumeratedListOption\n\t\tfmt_list = _nd.get_name_format()\n\t\tdefaultnum = _nd.get_default_format()\n\t\tdefault = 0\n\t\tfor ind, val in enumerate(fmt_list):\n\t\t\tif val[0] == defaultnum:\n\t\t\t\tdefault = ind\n\t\t\t\tbreak\n\t\tname_format = EnumeratedListOption(_(\"Name format\"), fmt_list[default][0])\n\t\tfor num, name, fmt_str, act in fmt_list:\n\t\t\tname_format.add_item(num, name)\n\t\tname_format.set_help(_(\"Select the format to display the complete names\"))\n\t\taddopt(\"name_format\", name_format)\n\t\tshort_name_format = EnumeratedListOption(_(\"Name format (short)\"), fmt_list[default][0])\n\t\tfor num, name, fmt_str, act in fmt_list:\n\t\t\tshort_name_format.add_item(num, name)\n\t\tshort_name_format.set_help(_(\"Select the format to display a shorter version of the names\"))\n\t\taddopt(\"short_name_format\", short_name_format)\n\t\t\n\t\ttemplate = EnumeratedListOption(_(\"Web site template\"), 0)\n\t\tfor (i, (directory, name)) in enumerate(WEB_TEMPLATE_LIST):\n\t\t\ttemplate.add_item(i, name)\n\t\ttemplate.set_help(_(\"Select the template of the web site\"))\n\t\taddopt(\"template\", template)\n\n\t\tcpright = EnumeratedListOption(_(\"Copyright\"), 0)\n\t\tfor index, copt in enumerate(_COPY_OPTIONS):\n\t\t\tcpright.add_item(index, copt)\n\t\tcpright.set_help( _(\"The copyright to be used for the web files\"))\n\t\taddopt(\"copyright\", cpright)\n\n\n\tdef __add_privacy_options(self, menu):\n\t\t\"\"\"\n\t\tOptions on the \"Privacy\" tab.\n\t\t\"\"\"\n\t\tcategory_name = _(\"Privacy\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tincpriv = BooleanOption(_(\"Include records marked private\"), False)\n\t\tincpriv.set_help(_(\"Whether to include private objects\"))\n\t\taddopt(\"incpriv\", incpriv)\n\n\t\tinc_notes = BooleanOption(_(\"Export notes\"), True)\n\t\tinc_notes.set_help(_(\"Whether to export notes in the web pages\"))\n\t\taddopt(\"inc_notes\", inc_notes)\n\n\t\tinc_sources = BooleanOption(_(\"Export 
sources\"), True)\n\t\tinc_sources.set_help(_(\"Whether to export sources and citations in the web pages\"))\n\t\taddopt(\"inc_sources\", inc_sources)\n\n\t\tinc_addresses = BooleanOption(_(\"Export addresses\"), True)\n\t\tinc_addresses.set_help(_(\"Whether to export addresses in the web pages\"))\n\t\taddopt(\"inc_addresses\", inc_addresses)\n\n\t\tself.__living = EnumeratedListOption(_(\"Living People\"),\n\t\t\t\t\t\t\t\t\t\t\t LivingProxyDb.MODE_EXCLUDE_ALL)\n\t\tself.__living.add_item(LivingProxyDb.MODE_EXCLUDE_ALL,\n\t\t\t\t\t\t\t _(\"Exclude\"))\n\t\tself.__living.add_item(LivingProxyDb.MODE_INCLUDE_LAST_NAME_ONLY,\n\t\t\t\t\t\t\t _(\"Include Last Name Only\"))\n\t\tself.__living.add_item(LivingProxyDb.MODE_INCLUDE_FULL_NAME_ONLY,\n\t\t\t\t\t\t\t _(\"Include Full Name Only\"))\n\t\tself.__living.add_item(INCLUDE_LIVING_VALUE,\n\t\t\t\t\t\t\t _(\"Include\"))\n\t\tself.__living.set_help(_(\"How to handle living people\"))\n\t\taddopt(\"living\", self.__living)\n\t\tself.__living.connect(\"value-changed\", self.__living_changed)\n\n\t\tself.__yearsafterdeath = NumberOption(_(\"Years from death to consider \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"living\"), 30, 0, 100)\n\t\tself.__yearsafterdeath.set_help(_(\"This allows you to restrict \"\n\t\t\t\t\t\t\t\t\t\t \"information on people who have not \"\n\t\t\t\t\t\t\t\t\t\t \"been dead for very long\"))\n\n\t\taddopt(\"yearsafterdeath\", self.__yearsafterdeath)\n\n\t\tself.__living_changed()\n\n\n\tdef __add_options_options(self, menu):\n\t\tcategory_name = _(\"Options\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tinc_repositories = BooleanOption(_('Include repository pages'), False)\n\t\tinc_repositories.set_help(_('Whether or not to include the Repository Pages.'))\n\t\taddopt(\"inc_repositories\", inc_repositories)\n\n\t\tinc_gallery = BooleanOption(_(\"Include images and media objects\"), True)\n\t\tinc_gallery.set_help(_(\"Whether to include a media objects in the web 
pages\"))\n\t\taddopt(\"inc_gallery\", inc_gallery)\n\n\t\tcopy_media = BooleanOption(_(\"Copy images and media objects\"), True)\n\t\tcopy_media.set_help(_(\"Whether to make a copy of the media objects.\"\n\t\t\t\" When the objects are not copied, they are referenced by their relative path name\"))\n\t\taddopt(\"copy_media\", copy_media)\n\n\t\tprint_notes_type = BooleanOption(_(\"Print the notes type\"), True)\n\t\tprint_notes_type.set_help(_(\"Whether to print the notes type in the notes text\"))\n\t\taddopt(\"print_notes_type\", print_notes_type)\n\n\t\tself.__inc_places = BooleanOption(_(\"Print place pages\"), True)\n\t\tself.__inc_places.set_help(_(\"Whether to show pages for the places\"))\n\t\taddopt(\"inc_places\", self.__inc_places)\n\t\tself.__inc_places.connect(\"value-changed\", self.__placemap_options_changed)\n\n\t\tself.__placemappages = BooleanOption(_(\"Include Place map on Place Pages\"), False)\n\t\tself.__placemappages.set_help(_(\n\t\t\t\"Whether to include a place map on the Place Pages, \"\n\t\t\t\"where Latitude/ Longitude are available.\"))\n\t\tself.__placemappages.connect(\"value-changed\", self.__placemap_options_changed)\n\t\taddopt(\"placemappages\", self.__placemappages)\n\n\t\tself.__familymappages = BooleanOption(_(\n\t\t\t\"Include Family Map Pages with \"\n\t\t\t\"all places shown on the map\"), False)\n\t\tself.__familymappages.set_help(_(\n\t\t\t\"Whether or not to add an individual page map \"\n\t\t\t\"showing all the places on this page. 
\"\n\t\t\t\"This will allow you to see how your family \"\n\t\t\t\"traveled around the country.\"))\n\t\tself.__familymappages.connect(\"value-changed\", self.__placemap_options_changed)\n\t\taddopt(\"familymappages\", self.__familymappages)\n\n\t\tmapopts = [\n\t\t\t[_(\"Google\"), \"Google\"],\n\t\t\t[_(\"OpenStreetMap\"), \"OpenStreetMap\"]\n\t\t]\n\t\tself.__mapservice = EnumeratedListOption(_(\"Map Service\"), mapopts[0][1])\n\t\tfor trans, opt in mapopts:\n\t\t\tself.__mapservice.add_item(opt, trans)\n\t\tself.__mapservice.set_help(_(\"Choose your choice of map service for creating the Place Map Pages\"))\n\t\tself.__mapservice.connect(\"value-changed\", self.__placemap_options_changed)\n\t\taddopt(\"mapservice\", self.__mapservice)\n\n\t\tself.__placemap_options_changed()\n\n\n\tdef __add_pages_advanced_options(self, menu):\n\t\tcategory_name = _(\"Advanced\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tencoding = EnumeratedListOption(_('Character set encoding'), _CHARACTER_SETS[0][1])\n\t\tfor eopt in _CHARACTER_SETS:\n\t\t\tencoding.add_item(eopt[1], eopt[0])\n\t\tencoding.set_help(_(\"The encoding to be used for the web files\"))\n\t\taddopt(\"encoding\", encoding)\n\n\t\tinc_families = BooleanOption(_(\"Include family pages\"), False)\n\t\tinc_families.set_help(_(\"Whether or not to include family pages\"))\n\t\taddopt(\"inc_families\", inc_families)\n\n\t\tinc_events = BooleanOption(_('Include event pages'), False)\n\t\tinc_events.set_help(_('Add a complete events list and relevant pages or not'))\n\t\taddopt(\"inc_events\", inc_events)\n\t\tinc_events.set_available(False)\n\n\t\tshowbirth = BooleanOption(_(\"Include a column for birth dates on the index pages\"), True)\n\t\tshowbirth.set_help(_('Whether to include a birth column'))\n\t\taddopt(\"showbirth\", showbirth)\n\n\t\tshowdeath = BooleanOption(_(\"Include a column for death dates on the index pages\"), False)\n\t\tshowdeath.set_help(_('Whether to include a death 
column'))\n\t\taddopt(\"showdeath\", showdeath)\n\n\t\tshowmarriage = BooleanOption(_(\"Include a column for marriage dates on the index pages\"), False)\n\t\tshowmarriage.set_help(_('Whether to include a marriage column'))\n\t\taddopt(\"showmarriage\", showmarriage)\n\n\t\tshowpartner = BooleanOption(_(\"Include a column for partners on the index pages\"), False)\n\t\tshowpartner.set_help(_('Whether to include a partners column'))\n\t\taddopt(\"showpartner\", showpartner)\n\n\t\tshowparents = BooleanOption(_(\"Include a column for parents on the index pages\"), False)\n\t\tshowparents.set_help(_('Whether to include a parents column'))\n\t\taddopt(\"showparents\", showparents)\n\n\t\tshowallsiblings = BooleanOption(_(\"Include half and/ or step-siblings on the individual pages\"), False)\n\t\tshowallsiblings.set_help(_( \"Whether to include half and/ or step-siblings with the parents and siblings\"))\n\t\taddopt('showallsiblings', showallsiblings)\n\n\t\tbirthorder = BooleanOption(_('Sort all children in birth order'), False)\n\t\tbirthorder.set_help(_('Whether to display children in birth order or in entry order?'))\n\t\taddopt(\"birthorder\", birthorder)\n\n\t\tbkref_type = BooleanOption(_('Include references in indexes'), False)\n\t\tbkref_type.set_help(_('Whether to include the references to the items in the index pages. 
For example, in the media index page, the names of the individuals, families, places, sources that reference the media.'))\n\t\taddopt(\"bkref_type\", bkref_type)\n\n\t\tinc_gendex = BooleanOption(_('Include GENDEX file (/gendex.txt)'), False)\n\t\tinc_gendex.set_help(_('Whether to include a GENDEX file or not'))\n\t\taddopt(\"inc_gendex\", inc_gendex)\n\n\n\tdef __add_trees_options(self, menu):\n\t\tcategory_name = _(\"Trees\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tpage_defs = [\n\t\t\tPAGE_SVG_TREE,\n\t\t]\n\t\tfor page_def in page_defs:\n\t\t\tname = PAGES_NAMES[page_def][0]\n\t\t\ttitle = PAGES_NAMES[page_def][1]\n\t\t\tpage_name = StringOption(_(\"Title for the tree \\\"%(name)s\\\"\") % {\"name\": name}, title)\n\t\t\tpage_name.set_help(_(\"Name for the page that shows the tree \\\"%(name)s\\\"\") % {\"name\": name})\n\t\t\taddopt(\"page_name_%i\" % page_def, page_name)\n\n\t\tgraphgens = NumberOption(_(\"Maximum number of generations\"), 10, 3, 30)\n\t\tgraphgens.set_help(_(\"The maximum number of generations to include in the ancestor and descendant trees and graphs\"))\n\t\taddopt(\"graphgens\", graphgens)\n\n\t\tsvg_tree_type = EnumeratedListOption(_(\"SVG tree graph type\"), str(DEFAULT_SVG_TREE_TYPE))\n\t\tfor (i, opt) in enumerate(SVG_TREE_TYPES):\n\t\t\tsvg_tree_type.add_item(str(i), opt)\n\t\tsvg_tree_type.set_help(_(\"Choose the default SVG tree graph type\"))\n\t\taddopt(\"svg_tree_type\", svg_tree_type)\n\t\t\n\t\tsvg_tree_shape = EnumeratedListOption(_(\"SVG tree graph shape\"), str(DEFAULT_SVG_TREE_SHAPE))\n\t\tfor (i, opt) in enumerate(SVG_TREE_SHAPES):\n\t\t\tsvg_tree_shape.add_item(str(i), opt)\n\t\tsvg_tree_shape.set_help(_(\"Choose the default SVG tree graph shape\"))\n\t\taddopt(\"svg_tree_shape\", svg_tree_shape)\n\t\t\n\t\tsvg_tree_distrib_asc = EnumeratedListOption(_(\"SVG tree parents distribution\"), str(DEFAULT_SVG_TREE_DISTRIB))\n\t\tfor (i, opt) in 
enumerate(SVG_TREE_DISTRIB_ASC):\n\t\t\tsvg_tree_distrib_asc.add_item(str(i), opt)\n\t\tsvg_tree_distrib_asc.set_help(_(\"Choose the default SVG tree parents distribution (for fan charts only)\"))\n\t\taddopt(\"svg_tree_distrib_asc\", svg_tree_distrib_asc)\n\t\t\n\t\tsvg_tree_distrib_dsc = EnumeratedListOption(_(\"SVG tree children distribution\"), str(DEFAULT_SVG_TREE_DISTRIB))\n\t\tfor (i, opt) in enumerate(SVG_TREE_DISTRIB_DSC):\n\t\t\tsvg_tree_distrib_dsc.add_item(str(i), opt)\n\t\tsvg_tree_distrib_dsc.set_help(_(\"Choose the default SVG tree children distribution (for fan charts only)\"))\n\t\taddopt(\"svg_tree_distrib_dsc\", svg_tree_distrib_dsc)\n\t\t\n\t\tsvg_tree_background = EnumeratedListOption(_(\"Background\"), str(DEFAULT_SVG_TREE_BACKGROUND))\n\t\tfor (i, opt) in enumerate(SVG_TREE_BACKGROUNDS):\n\t\t\tsvg_tree_background.add_item(str(i), opt)\n\t\tsvg_tree_background.set_help(_(\"Choose the background color scheme for the persons in the SVG tree graph\"))\n\t\taddopt(\"svg_tree_background\", svg_tree_background)\n\n\t\tsvg_tree_color1 = ColorOption(_(\"Start gradient/Main color\"), \"#EF2929\")\n\t\taddopt(\"svg_tree_color1\", svg_tree_color1)\n\n\t\tsvg_tree_color2 = ColorOption(_(\"End gradient/2nd color\"), \"#3D37E9\")\n\t\taddopt(\"svg_tree_color2\", svg_tree_color2)\n\n\t\tself.__svg_tree_dup = BooleanOption(_(\"Show duplicates\"), True)\n\t\tself.__svg_tree_dup.set_help(_(\"Whether to use a special color for the persons that appear several times in the SVG tree\"))\n\t\tself.__svg_tree_dup.connect(\"value-changed\", self.__svg_tree_dup_changed)\n\t\taddopt(\"svg_tree_dup\", self.__svg_tree_dup)\n\t\t\n\t\tself.__svg_tree_color_dup = ColorOption(_(\"Color for duplicates\"), \"#888A85\")\n\t\taddopt(\"svg_tree_color_dup\", self.__svg_tree_color_dup)\n\n\n\tdef __add_pages_options(self, menu):\n\t\tcategory_name = _(\"Pages\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\theadernote = NoteOption(_('HTML user 
header'))\n\t\theadernote.set_help( _(\"A note to be used as the page header\"))\n\t\taddopt(\"headernote\", headernote)\n\n\t\tfooternote = NoteOption(_('HTML user footer'))\n\t\tfooternote.set_help( _(\"A note to be used as the page footer\"))\n\t\taddopt(\"footernote\", footernote)\n\n\t\tpage_defs = [\n\t\t\tPAGE_PERSON,\n\t\t\tPAGE_SURNAMES,\n\t\t\tPAGE_PERSON_INDEX,\n\t\t\tPAGE_FAMILY_INDEX,\n\t\t\tPAGE_SOURCE_INDEX,\n\t\t\tPAGE_MEDIA_INDEX,\n\t\t\tPAGE_PLACE_INDEX,\n\t\t\tPAGE_ADDRESS_INDEX,\n\t\t\tPAGE_REPOSITORY_INDEX,\n\t\t]\n\t\tfor page_def in page_defs:\n\t\t\tname = PAGES_NAMES[page_def][0]\n\t\t\ttitle = PAGES_NAMES[page_def][1]\n\t\t\tpage_name = StringOption(_(\"Title for the page \\\"%(name)s\\\"\") % {\"name\": name}, title)\n\t\t\tpage_name.set_help(_(\"Name for the page \\\"%(name)s\\\"\") % {\"name\": name})\n\t\t\taddopt(\"page_name_%i\" % page_def, page_name)\n\n\n\tdef __add_custom_pages_options(self, menu):\n\t\tcategory_name = _(\"Custom pages\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tfor i in range(NB_CUSTOM_PAGES):\n\t\t\tpage_def = PAGE_CUSTOM + i\n\t\t\tpage_name = StringOption(_(\"Title for the custom page %(index)i\") % {\"index\": i + 1}, _(\"Custom page %(index)i\") % {\"index\": i + 1})\n\t\t\tpage_name.set_help(_(\"Name for the custom page %(index)i\") % {\"index\": i + 1})\n\t\t\taddopt(\"page_name_%i\" % page_def, page_name)\n\n\t\t\tcustom_note = NoteOption(_(\"Note for custom page %(index)i\") % {\"index\": i + 1})\n\t\t\tcustom_note.set_help(_(\"A note to be used for the custom page content.\\n\") + self.note_help)\n\t\t\taddopt(\"custom_note_%i\" % i, custom_note)\n\n\t\t\tcustom_menu = BooleanOption(_(\"Menu for the custom page %(index)i\") % {\"index\": i + 1}, True)\n\t\t\tcustom_menu.set_help(_(\"Whether to print a menu for the custom page\"))\n\t\t\taddopt(\"custom_menu_%i\" % i, custom_menu)\n\n\n\tdef __add_select_pages_options(self, menu):\n\t\tcategory_name = _(\"Pages 
selection\")\n\t\taddopt = partial(menu.add_option, category_name)\n\n\t\tself.__pages_number = NumberOption(_(\"Number of pages\"), 11, 1, NB_TOTAL_PAGES_MAX)\n\t\tself.__pages_number.set_help(_(\"Number pages in the web site.\"))\n\t\taddopt(\"pages_number\", self.__pages_number)\n\t\tself.__pages_number.connect(\"value-changed\", self.__pages_contents_changed)\n\n\t\tpage_defs = [\n\t\t\tPAGE_CUSTOM,\n\t\t\tPAGE_SURNAMES,\n\t\t\tPAGE_PERSON,\n\t\t\tPAGE_PERSON_INDEX,\n\t\t\tPAGE_FAMILY_INDEX,\n\t\t\tPAGE_SOURCE_INDEX,\n\t\t\tPAGE_MEDIA_INDEX,\n\t\t\tPAGE_PLACE_INDEX,\n\t\t\tPAGE_ADDRESS_INDEX,\n\t\t\tPAGE_REPOSITORY_INDEX,\n\t\t\tPAGE_SVG_TREE,\n\t\t] + [PAGE_CUSTOM + i for i in range (1, NB_CUSTOM_PAGES)\n\t\t] + [PAGE_CUSTOM] * NB_TOTAL_PAGES_MAX\n\n\t\tself.__page_content = []\n\t\tfor i in range(NB_TOTAL_PAGES_MAX):\n\t\t\tpage_def = page_defs[i]\n\t\t\tpage_content = EnumeratedListOption(_(\"Contents of page %(index)i\") % {\"index\": i + 1}, page_def)\n\t\t\tfor (j, pname) in enumerate(PAGES_NAMES):\n\t\t\t\tpage_content.add_item(j, pname[0])\n\t\t\tpage_content.set_help(_(\"Contents of the page\"))\n\t\t\taddopt(\"page_content_%i\" % i, page_content)\n\t\t\tself.__page_content.append(page_content)\n\t\t\tself.__page_content[i].connect(\"value-changed\", self.__pages_contents_changed)\n\n\t\tself.__pages_contents_changed()\n\n\n\tdef __archive_changed(self):\n\t\t\"\"\"\n\t\tDisable the archive file when archive is disabled \n\t\t\"\"\"\n\t\tenable = self.__archive.get_value()\n\t\tself.__archive_file.set_available(enable)\n\n\tdef __pid_changed(self):\n\t\t\"\"\"\n\t\tUpdate the filter list based on the selected person\n\t\t\"\"\"\n\t\tgid = self.__pid.get_value()\n\t\tperson = self.__db.get_person_from_gramps_id(gid)\n\t\tfilter_list = report_utils.get_person_filters(person, False)\n\t\tself.__filter.set_filters(filter_list)\n\n\tdef __filter_changed(self):\n\t\t\"\"\"\n\t\tHandle filter change. 
If the filter is not specific to a person,\n\t\tdisable the person option\n\t\t\"\"\"\n\t\tfilter_value = self.__filter.get_value()\n\t\tif filter_value in [1, 2, 3, 4]:\n\t\t\t# Filters 1, 2, 3 and 4 rely on the center person\n\t\t\tself.__pid.set_available(True)\n\t\telse:\n\t\t\t# The rest don't\n\t\t\tself.__pid.set_available(False)\n\n\tdef __living_changed(self):\n\t\t\"\"\"\n\t\tHandle a change in the living option\n\t\t\"\"\"\n\t\tif self.__living.get_value() == INCLUDE_LIVING_VALUE:\n\t\t\tself.__yearsafterdeath.set_available(False)\n\t\telse:\n\t\t\tself.__yearsafterdeath.set_available(True)\n\n\tdef __pages_contents_changed(self):\n\t\tnb = self.__pages_number.get_value()\n\t\tfor i in range(NB_TOTAL_PAGES_MAX):\n\t\t\tif (i < nb):\n\t\t\t\tself.__page_content[i].set_available(True)\n\t\t\telse:\n\t\t\t\tself.__page_content[i].set_available(False)\n\n\tdef __placemap_options_changed(self):\n\t\t\"\"\"\n\t\tHandles the changing nature of the place map Options\n\t\t\"\"\"\n\t\t# get values for all Place Map Options tab...\n\t\tplace_active = self.__inc_places.get_value()\n\t\tplace_map_active = self.__placemappages.get_value()\n\t\tfamily_active = self.__familymappages.get_value()\n\t\tmapservice_opts = self.__mapservice.get_value()\n\t\t# google_opts = self.__googleopts.get_value()\n\n\t\tif (place_active):\n\t\t\tself.__placemappages.set_available(True)\n\t\t\tself.__familymappages.set_available(True)\n\t\t\tself.__mapservice.set_available(True)\n\t\t\t# self.__googleopts.set_available(True)\n\n\t\tif (place_map_active or family_active):\n\t\t\tself.__mapservice.set_available(True)\n\t\telse:\n\t\t\tself.__mapservice.set_available(False)\n\n\t\t# if (family_active and mapservice_opts == \"Google\"):\n\t\t\t# self.__googleopts.set_available(True)\n\t\t# else:\n\t\t\t# self.__googleopts.set_available(False)\n\n\t\tif (not 
place_active):\n\t\t\tself.__placemappages.set_available(False)\n\t\t\tself.__familymappages.set_available(False)\n\t\t\tself.__mapservice.set_available(False)\n\t\t\t# self.__googleopts.set_available(False)\n\n\tdef __svg_tree_dup_changed(self):\n\t\t\"\"\"\n\t\tHandles the duplicate color enable\n\t\t\"\"\"\n\t\tenable = self.__svg_tree_dup.get_value()\n\t\tself.__svg_tree_color_dup.set_available(enable)\n","repo_name":"daleathan/gramps-addons-code-svn","sub_path":"contrib/DynamicWeb/dynamicweb.py","file_name":"dynamicweb.py","file_ext":"py","file_size_in_byte":148390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"2127931495","text":"from person import Person\nfrom fight import Fight\n\nclass Fighter(Person):\n\n def __init__(self, name: str, age: int = 0, wealth: int = 0, skills: dict = None) -> None:\n \"\"\"\n Constructor for the Fighter class.\n\n :type skills: object\n :param name: str\n :param age: int\n :param wealth: int\n \"\"\"\n\n Person.__init__(self, name, age, wealth)\n self.__skills = None\n self.challenged = []\n\n if age < 18:\n print(name + \" cannot be a fighter.\")\n return\n\n self.__skills = {\"spear\": 0,\n \"unarmed_combat\": 0,\n \"mace\": 0,\n \"broadsword\": 0}\n\n if skills:\n self.__skills = skills\n if self.__skills[\"spear\"] > 10 or self.__skills[\"spear\"] < 0:\n print(\"The skill level for spear is invalid.\")\n\n if self.__skills[\"unarmed_combat\"] > 10 or self.__skills[\"unarmed_combat\"] < 0:\n print(\"The skill level for unarmed_combat is invalid.\")\n\n if self.__skills[\"mace\"] > 10 or self.__skills[\"mace\"] < 0:\n print(\"The skill level for mace is invalid.\")\n\n if self.__skills[\"broadsword\"] > 10 or self.__skills[\"broadsword\"] < 0:\n print(\"The skill level for broadsword is invalid.\")\n\n self.level = 2\n\n\n @property\n def getSkills(self) -> dict:\n \"\"\"\n Returns the skills of the fighter.\n\n :return: __skills: dict\n \"\"\"\n if self.age > 18:\n return self.__skills\n\n else:\n return {}\n\n def setSkills(self, newSkills: dict) -> None:\n \"\"\"\n Sets the skills to these new skills\n\n :param newSkills: dict\n :return: None\n \"\"\"\n\n self.__skills = newSkills\n\n def challenge(self, fighter2: object, skill: str) -> None:\n \"\"\"\n Challenges another fighter.\n\n :param fighter2: object\n :param skill: str\n :return: None\n \"\"\"\n # A fighter cannot fight themselves\n\n if not self.isEqual(fighter2):\n\n if not self.getSkills and not isinstance(fighter2, Fighter):\n print(self.getName + \" and \" + fighter2.getName + \" both are not fighters and hence cannot fight.\")\n\n elif not 
self.getSkills:\n print(self.getName + \" is not a fighter.\")\n return\n\n elif not isinstance(fighter2, Fighter):\n print(fighter2.getName + \" is not a fighter.\")\n return\n\n elif not fighter2.getSkills:\n print(fighter2.getName + \" is not a fighter.\")\n return\n\n # If they are both fighters:\n\n elif self.getSkills and fighter2.getSkills:\n\n # If the wealth of both the fighters is more than 0\n\n if self.getWealth <= 0:\n print(self.getName + \" has no wealth to fight.\")\n return None\n\n if fighter2.getWealth <= 0:\n print(fighter2.getName + \" has no wealth to fight.\")\n return None\n\n # If the skill they are fighting with does not exist\n\n if skill not in self.getSkills or skill not in fighter2.getSkills:\n print(skill + \" is not a valid skill for the fighter\")\n return None\n\n # If the skill they are using is over 0 then they fight.\n\n duel = Fight(self, fighter2, skill)\n winner = duel.winner()\n\n else:\n print(\"A fighter cannot fight themselves!\")\n return None\n\n def withdraw(self, withdrawFighter: str) -> None:\n \"\"\"\n Withdraws the fighter's name from the list\n\n :param withdrawFighter: str\n :return: None\n \"\"\"\n if self.getSkills and withdrawFighter.getSkills:\n\n pos = -1\n\n for x in range(0, len(self.challenged)):\n if withdrawFighter == self.challenged[x].getFighter1 or withdrawFighter == self.challenged[x].getFighter1:\n pos = x\n else:\n pass\n\n if x > -1:\n self.challenged.remove(x)\n\n else:\n print(self.getName + \" has not challenged \" + withdrawFighter.getName)\n\n else:\n print(self.getName + \" does not have any challenges since it is not a fighter.\")\n\n def __str__(self) -> str:\n \"\"\"\n Returns the state of the Fighter.\n\n :return: str\n \"\"\"\n if self.getSkills:\n return Person.__str__(self) + \" \" + self.getName + \"'s skills are: \\n\\tSpear: \" + str(\n self.getSkills[\"spear\"]) \\\n + \"\\n\\tUnarmed Combat: \" + str(self.getSkills[\"unarmed_combat\"]) + \"\\n\\tMace: \" + str(\n 
self.getSkills[\"mace\"]) \\\n + \"\\n\\tBroadsword: \" + str(self.getSkills[\"broadsword\"])\n\n else:\n return Person.__str__(self)\n","repo_name":"Avik9/Battle-Time","sub_path":"fighter.py","file_name":"fighter.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20581016909","text":"import asyncio\r\n\r\nimport discord\r\nfrom discord import client\r\nfrom discord.ext import commands\r\n\r\n\r\n\r\n\r\nclass admin(commands.Cog):\r\n\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n\r\n @commands.command()\r\n @commands.has_permissions(ban_members=True)\r\n async def kick(self, ctx, member: discord.Member, *, reason=None):\r\n await ctx.message.delete(delay=0)\r\n await member.send(f\"You was kicked from server\")\r\n await ctx.send(f\"Member {member.mention} was kicked from this server!\")\r\n await member.kick(reason=reason)\r\n\r\n @commands.command()\r\n @commands.has_permissions(ban_members=True)\r\n async def ban(self, ctx, member: discord.Member, *, reason=None):\r\n await ctx.send(f\"Member {member.mention} was banned on this server\")\r\n await member.ban(reason=reason)\r\n await ctx.message.delete(delay=0)\r\n\r\n @commands.command()\r\n @commands.has_permissions(ban_members=True)\r\n async def unban(self, ctx, user_id: int):\r\n user = await client.fetch_user(user_id)\r\n await ctx.guild.unban(user)\r\n await ctx.message.delete(delay=0)\r\n\r\n @commands.command()\r\n @commands.has_permissions(ban_members=True)\r\n async def ping(self, ctx):\r\n await ctx.send(\"pong!\")\r\n\r\n\r\n\r\n\r\nasync def setup(client):\r\n await client.add_cog(admin(client))","repo_name":"nkplka/discord","sub_path":"cogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"12073523384","text":"#!/usr/bin/env python\n#######\n# To allow this module to be imported by other triggers\n# execute the commands below:\n# $ mkdir -p /usr/share/foreman-community/hooks\n# $ touch /usr/share/foreman-community/hooks/__init__.py\n# $ cp functions.py /usr/share/foreman-community/hooks/\n########\nimport json\nimport sys\nimport tempfile\n\nHOOK_TEMP_DIR = \"/usr/share/foreman/tmp\"\n\n# HOOK_EVENT = update, create, before_destroy etc.\n# HOOK_OBJECT = to_s representation of the object, e.g. host's fqdn\nHOOK_EVENT, HOOK_OBJECT = (sys.argv[1], sys.argv[2])\n\n\ndef get_json_hook():\n '''\n Create JSON object to be imported by hook/trigger\n Saves the data received via stdin to file.\n It does not require to save to a file, but it may be useful\n to troubleshooting.\n '''\n\n with tempfile.NamedTemporaryFile(\n dir=HOOK_TEMP_DIR,\n # set to False for troubleshooting\n delete=True,\n prefix=\"foreman_hooks.\") as hook:\n\n json_hook = sys.stdin.read()\n hook.file.write(json_hook)\n hook.file.flush()\n return json.loads(json_hook)\n","repo_name":"theforeman/foreman_hooks","sub_path":"examples/python/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"67"}
+{"seq_id":"40283674361","text":"from itertools import islice\nfrom types import SimpleNamespace\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nimport pickle\n\nclass Measurement:\n def __init__(self, params):\n self.params = params\n self.x = []\n self.w = []\n self.v = []\n self.noise = []\n self.LQG = []\n\n if params.scheme in ['noisy_lloyd_max', 'separate']:\n # Quantization index translated into bits\n self.bits = []\n # Entire history believed by the decoder (at each step)\n self.decoded_bits_history = []\n self.correctly_decoded = []\n\n def record(self, sim):\n self.x.append(sim.plant.x)\n self.w.append(sim.plant.w)\n self.v.append(sim.plant.v)\n self.noise.append(sim.channel.last_noise)\n self.LQG.append(sim.LQG.evaluate(sim.t))\n self.channel_average_power = sim.channel.average_power()\n\n if hasattr(self, 'bits'):\n self.bits = sim.encoder.get_bits_history()\n self.decoded_bits_history.append(list(\n sim.decoder.stack_decoder.first_nodes[-1].input_history()))\n self.correctly_decoded.append(\n all((word == history_word).all()\n for word, history_word in \\\n zip(self.bits, self.decoded_bits_history[-1])))\n print(\"Correctly decoded: {}\".format(self.correctly_decoded[-1]))\n\n def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load(filename):\n with open(filename, 'rb') as f:\n measurement = pickle.load(f)\n assert isinstance(measurement, Measurement)\n return measurement\n\n @staticmethod\n def average(measurements):\n new = Measurement(measurements[0].params)\n\n def average_sequence(sequences):\n sequences = [np.array(sequence).flatten() for sequence in sequences]\n slices = list(zip(*sequences))\n return np.array(list(map(np.mean, slices)))\n\n new.x = average_sequence(m.x for m in measurements)\n new.w = average_sequence(m.w for m in measurements)\n new.v = average_sequence(m.v for m in measurements)\n new.noise = average_sequence(m.noise for m in 
measurements)\n new.LQG = average_sequence(m.LQG for m in measurements)\n\n return new\n\n def get_noise_record(self):\n noise = SimpleNamespace()\n\n noise.x1 = self.x[0]\n noise.w_sequence = self.w[:]\n noise.v_sequence = self.v[:]\n noise.n_sequence = list(np.array(self.noise).flatten())\n\n return noise\n\n\n def plot(self, label=None):\n self.plot_setup()\n self.plot_LQG(label=label)\n self.plot_bounds()\n if hasattr(self, 'correctly_decoded'):\n self.plot_correctly_decoded()\n plt.legend()\n\n def plot_setup(self, label=\"Time [steps]\"):\n plt.xlabel(label)\n plt.grid()\n\n def plot_x(self):\n plt.plot(list(range(len(self.x))), self.x)\n plt.ylabel(\"Plant state\")\n\n def plot_LQG(self, label=None, *args, **kwargs):\n plt.plot(list(range(len(self.LQG))), 10 * np.log10(self.LQG),\n label=label, *args, **kwargs)\n plt.ylabel(r\"$\\bar{J}_t$ [dB]\")\n\n def plot_bounds(self, lower_label=\"Theoretical average lower bound\",\n upper_label=\"Theoretical prediction\",\n lower_args=['--'], lower_kwargs={},\n upper_args=['--'], upper_kwargs={}):\n params = self.params\n\n # Upper bound\n if params.analog and hasattr(params, 'SDR0'):\n plt.plot((1, len(self.LQG)),\n 10 * np.log10(params.LQR_inf_upper_bound()) * np.ones(2),\n *upper_args, label=upper_label, **upper_kwargs)\n\n # Lower bound\n if params.analog:\n plt.plot((1, len(self.LQG)),\n 10 * np.log10(params.LQR_inf_lower_bound()) * np.ones(2),\n *lower_args, label=lower_label, **lower_kwargs)\n\n def plot_correctly_decoded(self, y=0):\n RECTANGLE_HEIGHT = 0.8\n\n # Find intervals of consecutive Trues\n intervals = []\n start = None\n for (t, good) in enumerate(self.correctly_decoded, 1):\n if not start and not good:\n start = t\n elif start and (good or t == len(self.correctly_decoded)):\n intervals.append((start, t))\n start = None\n\n for i, (start, stop) in enumerate(intervals):\n print(\"({}, {})\".format(start, stop))\n plt.gca().add_patch(\n patches.Rectangle(\n (start, y - RECTANGLE_HEIGHT/2),\n stop - 
start,\n RECTANGLE_HEIGHT,\n label=\"Decoding errors\" if i == 0 else None,\n color='purple'\n )\n )\n","repo_name":"eliasrg/SURF2017","sub_path":"code/measurements.py","file_name":"measurements.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"72229322132","text":"from slack_ttt.base_logic_source import BaseLogicSource\nfrom slack_ttt.response_message import ResponseMessageBuilder as RMB\nfrom simple_salesforce import Salesforce\nfrom settings import (\n SFDC_USERNAME,\n SFDC_PASSWORD,\n SFDC_TOKEN\n )\n\nclass SalesforceLogicSource(BaseLogicSource):\n\n def initialize(self, params):\n self.sfdc = Salesforce(username=SFDC_USERNAME,\n password=SFDC_PASSWORD,\n security_token=SFDC_TOKEN)\n\n self.channel_id = params['channel_id'][0]\n self.requester = '@' + params['user_name'][0]\n self.command = None\n if 'text' in params:\n self.command = params['text'][0]\n\n def __sfdc_rest_call(self, resource):\n return self.sfdc.apexecute('handleTTTCommand',\n method='POST',\n data={'resource': resource,\n 'channelId': self.channel_id,\n 'requestor':self.requester,\n 'command': self.command\n })\n\n def __generate_response(self, response_map):\n print('*** RESPONSE: ',response_map);\n game_response_type = response_map['game_response_type']\n values = response_map['values'].split(',')\n slack_response_type = None\n if \"slack_response_type\" in response_map:\n slack_response_type = response_map[\"slack_response_type\"]\n else:\n slack_response_type = 'Ephemeral'\n\n response = None\n if 'response' in response_map:\n response = response_map['response']\n\n return RMB.respond(None,\n game_response_type=game_response_type,\n values=values,\n slack_response_type=slack_response_type,\n response=response)\n\n\n\n def new_game(self):\n response = self.__sfdc_rest_call('/ttt')\n return self.__generate_response(response['body'])\n\n\n def game_help(self):\n super().game_help()\n \"\"\"Help handler which provides information about the game and how to play it.\n\n :return:\n \"\"\"\n return RMB.respond(None,\n game_response_type='help_text',\n values=[])\n\n def accept_game(self):\n response = self.__sfdc_rest_call('/ttt-accept')\n return self.__generate_response(response['body'])\n\n def decline_game(self):\n 
response = self.__sfdc_rest_call('/ttt-decline')\n return self.__generate_response(response['body'])\n\n def display_board(self):\n response = self.__sfdc_rest_call('/ttt-board')\n return self.__generate_response(response['body'])\n\n def play_move(self):\n response = self.__sfdc_rest_call('/ttt-move')\n return self.__generate_response(response['body'])\n\n def end_game(self):\n response = self.__sfdc_rest_call('/ttt-end')\n return self.__generate_response(response['body'])","repo_name":"dineshrajpurohit/slack_ttt","sub_path":"slack_ttt/salesforce/salesforce_logic.py","file_name":"salesforce_logic.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"8812218353","text":"import json\nimport os\n\nimport pytest\nfrom jinja2 import DictLoader\nfrom schemachange.cli import JinjaTemplateProcessor\n\n\ndef test_from_environ_not_set():\n processor = JinjaTemplateProcessor(\"\", None)\n\n # overide the default loader\n templates = {\"test.sql\": \"some text {{ env_var('MYVAR') }}\"}\n processor.override_loader(DictLoader(templates))\n\n with pytest.raises(ValueError) as e:\n context = processor.render(\"test.sql\", None, True)\n\n assert str(e.value) == \"Could not find environmental variable MYVAR and no default value was provided\"\n\n\ndef test_from_environ_set():\n processor = JinjaTemplateProcessor(\"\", None)\n\n # set MYVAR env variable\n os.environ[\"MYVAR\"] = \"myvar_from_environment\"\n\n # overide the default loader\n templates = {\"test.sql\": \"some text {{ env_var('MYVAR') }}\"}\n processor.override_loader(DictLoader(templates))\n\n context = processor.render(\"test.sql\", None, True)\n\n # unset MYVAR env variable\n del os.environ[\"MYVAR\"]\n\n assert context == \"some text myvar_from_environment\"\n\n\ndef test_from_environ_not_set_default():\n processor = JinjaTemplateProcessor(\"\", None)\n\n # overide the default loader\n templates = {\"test.sql\": \"some text {{ env_var('MYVAR', 'myvar_default') }}\"}\n processor.override_loader(DictLoader(templates))\n\n context = processor.render(\"test.sql\", None, True)\n\n assert context == \"some text myvar_default\"\n","repo_name":"Snowflake-Labs/schemachange","sub_path":"tests/test_jinja_env_var_template.py","file_name":"test_jinja_env_var_template.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":407,"dataset":"github-code","pt":"67"}
+{"seq_id":"13679918612","text":"#!/usr/bin/python3\n'''Modules to be imported'''\nimport MySQLdb\nimport sys\n\n\ndef main(args):\n '''main function'''\n\n # check if passedargs are as needed\n if (len(args) != 5):\n print(\"checknumber of args\")\n sys.exit(1)\n\n # create a database connection\n db_connection = MySQLdb.connect(host=\"localhost\",\n port=3306,\n user=args[1],\n passwd=args[2],\n db=args[3])\n\n # create a cursor\n cur = db_connection.cursor()\n\n # sql query\n query = \"SELECT cities.name FROM cities JOIN states ON\\\n cities.state_id = states.id WHERE states.name = '{}' ORDER BY cities.id ASC\"\n\n formatted = query.format(args[4])\n\n # execute the query\n cur.execute(formatted)\n\n # loop todisplay results\n print(\", \".join(row[0] for row in cur.fetchall()))\n\n # close connections\n cur.close()\n db_connection.close()\n\n\n# Hey, I won't run when I'mimported\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"LionMara/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"15280369221","text":"import textwrap\nfrom django.db.models import Max\n\nfrom src.models import *\n\n\ndef parse_history():\n hist_list = []\n hist = HistoryItem.objects.all()\n for h in hist:\n lines = h.content\n lines = [line for line in lines.split('\\n') if line.strip()]\n ls_1_flag = 0\n ls_2_flag = 0\n for i in xrange(len(lines)):\n lines[i] = lines[i].rstrip()\n if lines[i][0] == \"#\":\n lines[i] = \"\" + lines[i][1:] + \" \"\n elif lines[i][0] != '-':\n if lines[i][0] == \"!\":\n lines[i] = \"by \" + lines[i][1:] + \"
\"\n else:\n lines[i] = \"
\" + lines[i] + \"
\"\n\n else:\n if lines[i][:2] != '-\\\\':\n lines[i] = \"