q)8 {x,sum -2#x}\1 1 / 10 Fibonacci numbers
1 1
1 1 2
1 1 2 3
1 1 2 3 5
1 1 2 3 5 8
1 1 2 3 5 8 13
1 1 2 3 5 8 13 21
1 1 2 3 5 8 13 21 34
1 1 2 3 5 8 13 21 34 55
Communication¶
Interprocess communication is ‘baked in’ to q. It requires no library code and is easy to set up.
Watch two kdb+ processes communicating through TCP/IP.
Client/server¶
Use two command shells for this. On the left, we have the server task; on the right, the client.
KDB+ 3.7t 2020.01.22 …         | KDB+ 3.7t 2020.01.22
m64/ 4()core 8192MB …          | m64/ 4()core 8192MB …
                               |
q)\p 5432                      |
                               | q)h:hopen `::5432
                               | q)h"2+2"
                               | 4
                               |
                               | q)h "system\"l /Users/sjt/q/sp.q\""
q)+`p`city!(`p$`p1`p2`p3`p4`p5 |
(`s#+(,`color)!,`s#`blue`green |
+`s`p`qty!(`s$`s1`s1`s1`s2`s3` |
                               | q)h "select from sp where s in `s2`s3"
                               | s  p  qty
                               | ---------
                               | s2 p1 300
                               | s2 p2 400
                               | s3 p2 200
                               | q)
On the left, the server task started listening on port 5432. The client task opened a socket to port 5432, getting a handle, which it dubbed h.
The client task sent to the server the expression 2+2 to be evaluated, and received the value 4 in return.
The client task told the server to load the Suppliers and Parts script. The server task session showed that script loaded. The client sent the server a qSQL query and got a table as a result.
Asynchronous calls are only slightly more complicated.
A production system requires code in the callbacks to secure communications but you can see from the above that the basics are very simple. The baked-in interprocess communications make it simple to implement systems as tasks distributed over multiple machines.
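For example, an asynchronous call is made on the negative of the handle: the message is queued and the call returns immediately. A minimal sketch against the same server, assuming it is still listening on port 5432 (the variable name a is ours, not part of any API):
q)h:hopen `::5432
q)(neg h)"a:2+2"   / asynchronous: returns immediately, server evaluates later
q)h"a"             / a subsequent synchronous call retrieves the result
4
q)hclose h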
Webserver¶
A q session can listen for HTTP requests and act as a webserver. The default callback composes a page for browsing tables in the session.
q)tables[] / Suppliers & Parts
`p`s`sp
q)\p 8090 / listen to port 8090
Browse to http://localhost:8090.
Development¶
Scripts¶
Write and load scripts to define an application. Scripts are text files.
The sp.q script defines the Suppliers & Parts database and runs some queries on it.
s:([s:`s1`s2`s3`s4`s5]
 name:`smith`jones`blake`clark`adams;
 status:20 10 30 20 30;
 city:`london`paris`paris`london`athens)
p:([p:`p1`p2`p3`p4`p5`p6]
 name:`nut`bolt`screw`screw`cam`cog;
 color:`red`green`blue`red`blue`red;
 weight:12 17 17 14 12 19;
 city:`london`paris`rome`london`paris`london)
sp:([]
 s:`s$`s1`s1`s1`s1`s4`s1`s2`s2`s3`s4`s4`s1; / fkey
 p:`p$`p1`p2`p3`p4`p5`p6`p1`p2`p2`p2`p4`p5; / fkey
 qty:300 200 400 200 100 100 300 400 200 200 300 400)
select distinct p,s.city from sp
select sum qty by p.color from sp
select from sp where s.city=p.city
In scripts, q expressions can be written across multiple lines.
IDE¶
KX Developer is a free interactive development environment (IDE) for q.
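To experiment with the script, load it into a session and run one of its queries against the tables it defines. A minimal sketch, assuming sp.q is in the session’s current directory; the result shown follows from the data above:
q)\l sp.q
q)select sum qty by p.color from sp
color| qty
-----| ----
blue | 900
green| 1000
red  | 1200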
and¶
Lesser of two values, logical AND
and is a multithreaded primitive.
Lesser
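Two quick illustrations, as a sketch: and is the keyword form of the Lesser operator & and is applied atomically.
q)1 0 1 1b and 0 1 1 0b
0010b
q)2 and 3.5
2f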
/-these parameters are only used once their value has been set with values retrieved from the WDB.
writedownmode:idbdir:savedir:currentpartition:symfilepath:`;
symsize:partitionsize:0;

/-force loads sym file
loadsym:{[]
 .lg.o[`load;"loading the sym file"];
 @[load;symfilepath; {.lg.e[`load;"failed to load sym file: ",string[symfilepath]," error: ",x]}];
 symsize::hcount symfilepath;
 };

/-force loads IDB
loadidb:{[]
 .lg.o[`load;"loading the db"];
 @[system; "l ", 1_string idbdir; {.lg.e[`load;"failed to load IDB: ",string[idbdir]," error: ",x]}];
 partitionsize::count key idbdir;
 };

/- force loads the idb and the sym file
loaddb:{[]
 starttime:.proc.ct[];
 loadsym[];
 loadidb[];
 .lg.o[`load;"IDB load has been finished for partition: ",string[currentpartition],". Time taken(ms): ",string .proc.ct[]-starttime];
 };

/- sets current partition and force loads the idb and the sym file. Called by the WDB after EOD.
rollover:{[pt]
 currentpartition::pt;
 idbdir::.Q.dd[savedir; $[writedownmode~`default;`;currentpartition]];
 .lg.o[`rollover;"IDB folder has been set to: ",string[idbdir]];
 loaddb[];
 };

/- reloads the db. Called by wdb process midday/eod.
intradayreload:{[]
 starttime:.proc.ct[];
 if[symfilehaschanged[];loadsym[]];
 if[partitioncounthaschanged[];loadidb[]];
 clearrowcountcache[];
 .lg.o[`intradayreload;"IDB reload has been finished for partition: ",string[currentpartition],". Time taken(ms): ",string .proc.ct[]-starttime];
 };

/- checks if sym file has changed since last reload of the IDB. Records new sym size if changed.
symfilehaschanged:{[]
 $[symsize<>c:hcount symfilepath;[symsize::c; 1b];0b]
 };

/- checks if count of partitions has changed since last reload of the IDB. Records new partition count if changed.
/- the default writedown method doesn't need db reloading as no new directory is being created there.
/- First check is to ensure that a single intraday partition exists (so loadidb doesn't fail)
partitioncounthaschanged:{[]
 if[(1j~partitionsize)&writedownmode~`default;:0b];
 $[partitionsize<>c:count key idbdir;[partitionsize::c; 1b];0b]
 };

/- each time data gets appended to current partition we are invalidating the row count cache
/- this makes sure running "count trade" queries will return correct row count
clearrowcountcache:{.Q.pn:.Q.pt!(count .Q.pt)#()};

setparametersfromwdb:{[wdbHandle]
 .lg.o[`init;"querying WDB, HDB locations, current partition and writedown mode from WDB"];
 params:@[wdbHandle; (each;value;`.wdb.savedir`.wdb.hdbdir`.wdb.currentpartition`.wdb.writedownmode); {.lg.e[`connection; "Failed to retrieve values from WDB."]; 'x}];
 savedir::hsym params[0];
 currentpartition::params[2];
 symfilepath::.Q.dd[hsym params[1]; `sym];
 writedownmode::params[3];
 idbdir::.Q.dd[savedir; $[writedownmode~`default;`;currentpartition]];
 .lg.o[`init;"Current settings: db folder: ",string[idbdir],", sym file: ",string[symfilepath],", writedownmode: ", string writedownmode];
 };

init:{[]
 .lg.o[`init; "searching for servers"];
 /- If no valid connection to wdb, reattempt
 .servers.startupdepcycles[`wdb;wdbconnsleepintv;wdbcheckcycles];
 .lg.o[`init;"getting connection handle to the WDB"];
 w:.servers.gethandlebytype[wdbtypes;`any];
 /-exit if no valid handle
 if[0=count w; .lg.e[`connection;"no connection to the WDB could be established... failed to initialise."];:()];
 .lg.o[`init;"found a WDB process"];
 /-setting parameters in .idb namespace from WDB
 setparametersfromwdb[w];
 .lg.o[`init;"loading the db and the sym file first time"];
 loaddb[];
 .lg.o[`init;"registering IDBs on WDB process..."];
 /-send sync message to WDB to register the existing IDBs.
 @[w;(`.servers.registerfromdiscovery;`idb;0b);{.lg.e[`connection;"Failed to register IDB with WDB."];'x}];
 .lg.o[`init; "Initialisation of the IDB is done."];
 }

\d .

/- set the reload function
reload:.idb.intradayreload;

/-Get the relevant IDB attributes
.proc.getattributes:{`partition`tables!(.idb.currentpartition;tables[])};

.idb.init[];

/- helper function to support queries against the sym column
maptoint:{[val]
 $[(abs type val) in 5 6 7h;
 /- if using an integer column, clamp value between 0 and max int (null maps to 0)
 0| 2147483647& `long$ val;
 /- if using a symbol column, enumerate against the hdb sym file
 sym?`TORQNULLSYMBOL^val]
 };


================================================================================
FILE: TorQ_code_processes_kill.q
SIZE: 873 characters
================================================================================

killhandle:{@[x:neg x;"exit 0";()]; @[x;[];()]}

// make the connections
.servers.startup[]

// if killnames is on the commandline, then only kill the servers with the specific names
// need to make sure that for each name we retrieve the type that the servers is a part of as well
$[`killnames in key .proc.params;
	[names:"S"$'.proc.params[`killnames];
	 .lg.o[`kill;"killing processes with names ",-3!names];
	 s:.servers.getservers[`procname;"S"$'.proc.params[`killnames];()!();1b;0b]];
	 s:.servers.getservers[`proctype;.servers.CONNECTIONS;()!();1b;0b]];

// exit if no connections
if[0=count s; 
.lg.o[`kill;\"Failed to find any valid connections\"]; exit 0]\n\n// kill each connection\n{.lg.o[`kill;\"Sending kill command to \",(string x`proctype),\" process with name \",(string x`procname),\" at hp \",string x`hpup];\n\tkillhandle x`w;}each s;\n\n.lg.o[`kill;\"Exiting\"]\nexit 0\n\n\n================================================================================\nFILE: TorQ_code_processes_monitor.q\nSIZE: 4,730 characters\n================================================================================\n\n/TorQ Monitor Process\n\n//configurable parameters for check monitoring\n.monitor.configcsv:@[value;`.monitor.configcsv;first .proc.getconfigfile[\"monitorconfig.csv\"]]; //name of config csv to load in\n.monitor.configstored:@[value;`.monitor.configstored;`]; //name of stored table for save and reload\n.monitor.runcheckinterval:@[value;`.monitor.runcheckinterval;0D00:00:05]; //interval to run checks \n.monitor.checkinginterval:@[value;`.monitor.checkinginterval;0D00:00:05]; //interval to make sure checks are not lagging \n.monitor.cleartrackinterval:@[value;`.monitor.cleartrackinterval;0D01:00:00]; //interval to check tracks are under certain age in checktracker\n.monitor.agecheck:@[value;`.monitor.agecheck;0D12:00:00]; //if check over agecheck, delete from tracker\n.monitor.lagtime:@[value;`.monitor.lagtime;0D00:01:00]; //if check has been running over this time, set to neg\n\n// set up the upd function to handle heartbeats\nupd:{[t;x]\n $[t=`heartbeat;\n\t [ // publish single heartbeat row to web pages \n\t .html.pub[`heartbeat;$[min (`warning`error in cols exec from x);x;[.hb.storeheartbeat[x];hb_x::x;select from .hb.hb where procname in x`procname]]]];\n t=`logmsg;\n\t [ \n insert[`logmsg;x]; \n\t // publish single logmsg row to web page\n\t .html.pub[`logmsg;x];\n // publish all lmchart data - DEV - could publish single cols and update svg internally\n .html.pub[`lmchart;lmchart[]]];\n ()]}\n\nsubscribedhandles:0 0Ni\n\n// subscribe to heartbeats and log messages on a handle\nsubscribe:{[handle]\n subscribedhandles,::handle;\n @[handle;(`.ps.subscribe;`heartbeat;`);{.lg.e[`monitor;\"failed to subscribe to heartbeat on handle \",(string x),\": \",y]}[handle]];\n @[handle;(`.ps.subscribe;`logmsg;`);{.lg.e[`monitor;\"failed to subscribe to logmsg on handle \",(string x),\": \",y]}[handle]];\n }\n \n// if a handle is closed, remove it from the list\n.dotz.set[`.z.pc;{if[y;subscribedhandles::subscribedhandles except y]; x@y}@[value;.dotz.getcommand[`.z.pc];{{[x]}}]]\n\n// Make the connections and subscribe\n.servers.startup[]\nsubscribe each (exec w from .servers.SERVERS) except subscribedhandles;\n\n// As new processes become available, try to connect \n.servers.addprocscustom:{[connectiontab;procs]\n .lg.o[`monitor;\"received process update from discovery service for process of type \",\" \" sv string procs,:()];\n .servers.retry[];\n subscribe each (exec w from .servers.SERVERS) except subscribedhandles;\n }\n.servers.connectcustom:{[connectiontab] \n .lg.o[`monitor;\"created outgoing connections\"];\n subscribe each (exec w from connectiontab) except subscribedhandles;\n }\n \n// GUI\n/- Table data functions - Return unkeyed sorted tables\nhbdata:{0!`error`warning xdesc .hb.hb}\nlmdata:{0!`time xdesc -20 sublist logmsg}\n\n/- Chart data functions - Return unkeyed chart data\nlmchart:{0!select errcount:count i by 0D00:05 xbar time from logmsg where loglevel=`ERR}\nbucketlmchartdata:{[x] x:`minute$$[x=0;1;x];0!select errcount:count i by (0D00:00+x) xbar time from logmsg where 
loglevel=`ERR}\n\n/- Data functions - These are functions that are requested by the front end\n/- start is sent on each connection and refresh. Where there are more than one table it is wise to identify each one using a dictionary as shown\nstart:{.html.wssub each `heartbeat`logmsg`lmchart;\n .html.dataformat[\"start\";(`hbtable`lmtable`lmchart)!(hbdata[];lmdata[];lmchart[])]}\nbucketlmchart:{.html.dataformat[\"bucketlmchart\";enlist bucketlmchartdata[x]]}\nmonitorui:.html.readpagereplaceHP[\"index.html\"]\n\n// initialise pubsub\n.html.init`heartbeat`logmsg`lmchart\n\n//function to iniitialise process check monitoring- checks for last saved config file\ninitcheck:{\n if[not readstoredconfig[.monitor.configstored];\n readmonitoringconfig[.monitor.configcsv]]};\n\n// specify .z.exit to save config\n// capture any prior definition\n.dotz.set[`.z.exit;{[x;y] saveconfig[.monitor.configstored;checkconfig];x@y}[@[value;.dotz.getcommand[`.z.exit];{{[x]}}]]]\n\n//initialise monitor checks\ninitcheck[]\n\n//Timers\n.timer.repeat[.proc.cp[];0Wp;.monitor.runcheckinterval;(`runnow;`);\"run the monitoring checks\"]\n.timer.repeat[.proc.cp[];0Wp;.monitor.checkinginterval;(`checkruntime;.monitor.lagtime);\"update status if running slow\"]\n.timer.repeat[.proc.cp[];0Wp;.monitor.cleartrackinterval;(`cleartracker;.monitor.agecheck);\"delete rows if over certain age\"]\n\n\n================================================================================\nFILE: TorQ_code_processes_rdb.q\nSIZE: 13,403 characters\n================================================================================\n\n/TorQ rdb process - based on r.q from kdb+tick\n/http://code.kx.com/wsvn/code/kx/kdb+tick/\n/-changes added \n/-Can specify the hdb directory rather than relying on the tickerplant\n\n/-default parameters\n\\d .rdb"}}},{"rowIdx":38,"cells":{"text":{"kind":"string","value":"Serialize a table as an object¶\nThe simplest way to serialize a table is as a single object.\nsave\nand load\n¶\nKeywords save\nand load\nlet you serialize and write any q object to a file of the same name in the working directory. That includes tables, and is the simplest way to persist one.\nq)cities:([]city:`Tokyo`Delhi`Shanghai;pop:37435191 29399141 26317104)\nq)key `:. / nothing in working directory\n`symbol$()\nq)save `cities\n`:cities\nq)key `:. / file in working directory\n,`cities\nq)delete cities from `. / delete from memory\n`.\nq)cities\n'cities\n[0] cities\n^\nq)load `cities / load from filesystem\n`cities\nq)cities\ncity pop\n-----------------\nTokyo 37435191\nDelhi 29399141\nShanghai 26317104\nPerfect for casual use. For more organized writing and reading we need the keywords used to define save\nand load\n.\nset\nand get\n¶\nKeywords set\nand get\ndiffer from save\nand load\n:\nset\nis a binary; its left argument says where in the filesystem to writeget\nreturns the table value rather than the name of the variable it has been assigned to\nNotice the similarity of reading a value from memory to reading it from the filesystem.\nq)get `:cities / from filesystem\nTokyo | 37435191\nDelhi | 29399141\nShanghai| 26317104\nq)get `cities / from memory\ncity pop\n-----------------\nTokyo 37435191\nDelhi 29399141\nShanghai 26317104\nq)`:foo/bar/bigcities set cities\n`:foo/bar/bigcities\nq)get `:foo/bar/bigcities\ncity pop\n-----------------\nTokyo 37435191\nDelhi 29399141\nShanghai 26317104\nEnumerations and foreign keys¶\nThe city\ncolumn is a symbol vector, that is, an enumeration. It is represented in memory as indexes into the sym table. 
Serialization and deserialization survives the session’s sym list.\nKDB+ 4.0 2020.10.02 Copyright (C) 1993-2020 Kx Systems\nm64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 EXPIRE ..\nq)get `:foo/bar/bigcities\ncity pop\n-----------------\nTokyo 37435191\nDelhi 29399141\nShanghai 26317104\nSimilarly for foreign keys, enumerated against another table.\nq)countries:([country:`China`India`Japan];cont:3#`Asia;code:86 91 81)\nq)cities:([]\ncity:`Tokyo`Delhi`Shanghai;\ncountry: `countries$`Japan`India`China;\npop:37435191 29399141 26317104)\nq)`:linked/countries`:linked/cities set'(countries;cities)\n`:linked/countries`:linked/cities\nq)\\\\\n❯ q\nKDB+ 4.0 2020.10.02 Copyright (C) 1993-2020 Kx Systems\nm64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 EXPIRE ..\nq)countries:get`:linked/countries\nq)cities:get`:linked/cities\nq)select city,pop,country.code from cities\ncity pop code\n----------------------\nTokyo 37435191 81\nDelhi 29399141 91\nShanghai 26317104 86\nUse cases¶\nSerialization as an object suits a table that is\n- small relative to memory\n- frequently read\n- has most of its columns required by most queries\n\nSegmented databases¶\nPartitioned tables can be distributed across multiple storage devices to\n- give them more space\n- support parallelization\nThe root of a segmented database contains only the sym list and a file par.txt\n, which is used to unify the partitions of a database, presenting them as a single database for querying.\npar.txt\n¶\nFile par.txt\ndefines the top-level partitioning of the database into directories. Each row of par.txt\nis a directory path. Each such directory is itself partitioned in the usual way, typically by date. The directories should not be empty.\nDISK 0 DISK 1 DISK 2\ndb db db\n├── par.txt ├── 2020.10.03 ├── 2020.10.04\n└── sym │ ├── quotes │ ├── quotes\n│ │ ├── price │ │ ├── price\n│ │ ├── sym │ │ ├── sym\n│ │ └── time │ │ └── time\n│ └── trades │ └── trades\n│ ├── price │ ├── price\n│ ├── sym │ ├── sym\n│ ├── time │ ├── time\n│ └── vol │ └── vol\n├── 2020.10.05 ├── 2020.10.06\n│ ├── quotes │ ├── quotes\n.. ..\npar.txt\nfor the above:\n/1/db\n/2/db\nDo not end the paths with a folder delimiter\n/0/db\nis good, but /0/db/\ncan be bad, depending on the filesystem.\nUsing symlinks¶\nSecurity related options such as reval\n, or the command line option -u 1\n, restrict access to within the current directory.\nThis can prevent users from accessing a segmented database when par.txt\ncontains references to partitions that are situated outside the current directory.\nIn order to provide access, symlinks can be used. An example of using symlinks is as follows:\n$ ln -s /db1 db1$\n$ ln -s /db2 db2$\n$ ls -l\ntotal 16\nlrwxr-xr-x 1 user kx 6 18 Sep 12:30 db1$ -> /db1\nlrwxr-xr-x 1 user kx 6 18 Sep 12:30 db2$ -> /db2\n-rw-r--r-- 1 user kx 10 18 Sep 12:30 par.txt\n-rw-r--r-- 1 user kx 48 18 Sep 12:30 sym\n$ cat par.txt\ndb1$\ndb2$\nUsing a trailing $\nin the directory name ensures the originating symlinks are not picked up by kdb+, instead using the directory referenced.\nMultithreading¶\nSegmentation is particularly useful in combination with multithreading.\nStarting kdb+ with secondary threads, with each partition in par.txt\non a separate local disk, the partitions in par.txt\nare allocated to secondary threads on a round robin.\nThat is, if kdb+ is started with n\nsecondary threads, then partition p\nis assigned to secondary thread p mod n\n. 
This gives maximum parallelization for queries over date ranges.\nEach thread gets its own disk or disks, and there should be no disk contention, i.e. not more than one thread issuing commands to any one disk.\nIdeally there is one disk per thread. This works best where the disks have fully independent access paths CPU-disk controller-disk, but may be of little use with shared access due to disk contention, e.g. with SAN/RAID.\nFor example, par.txt\nmight be:\n/0/db\n/1/db\n/2/db\n/3/db\nwith directories :\n~$ ls /0/db\n2019.06.01 2019.06.05 2019.06.11 ...\n~$ ls /1/db\n2019.06.02 2019.06.06 2019.06.12 ...\n...\nSince 4.1 2025.01.17 queries on partitioned tables in segmented databases use secondary threads if available on each segment and partition. Previously parallelism was only at the segment level.\nConsiderations¶\nPartition data correctly: data for a particular date must reside in the partition for that date.\nThe secondary/directory partitioning is for both read and write.\nThe directories pointed to in par.txt\nmay contain only appropriate database subdirectories. Any other content (file or directory) will give an error.\nThe same subdirectory name may be in multiple par.txt\npartitions. For example, this would allow symbols to be split, as in A-M on /0/db\n, N-Z on /1/db\n. Aggregations are handled correctly, as long as data is properly split (not duplicated). Note that in this case, the same day would appear on multiple partitions. There was a 2-billion row limit prior to version 3 of kdb+, which could use this method as a work around.\nMultithreading primitives\nMulti-partitioned kdb+ databases\nMultithreading in kdb+\nQ for Mortals\n§14.4 Segmented Tables\n\nExecute a q script as a shebang script¶ $ more ./test.q #!/usr/bin/env q 2+3 \\\\ $ chmod +x ./test.q $ ./test.q KDB+ 3.1 2013.11.20 Copyright (C) 1993-2013 Kx Systems l64/ ... 
5 Shebang"}}},{"rowIdx":39,"cells":{"text":{"kind":"string","value":"// @kind function\n// @category tests\n// @fileoverview Ensure that a test that is expected to pass, \n// does so with an appropriate return\n// @param function {(func;proj)} The function or projection to be tested\n// @param data {any} The data to be applied to the function as an individual item for\n// unary functions or a list of variables for multivariant functions\n// @param applyType {boolean} Is the function to be applied unary(1b) or multivariant(0b)\n// @param expectedReturn {string} The data expected to be returned on \n// execution of the function with the supplied data\n// @return {boolean} Function returned the appropriate output (1b), function failed \n// or executed with incorrect output (0b)\npassingTest:{[function;data;applyType;expectedReturn]\n // Is function to be applied unary or multivariant\n applyType:$[applyType;@;.];\n functionReturn:applyType[function;data];\n expectedReturn~functionReturn\n }\n\n================================================================================\nFILE: ml_ml_examples_code_torch_torch.q\nSIZE: 869 characters\n================================================================================\n\n\\d .torch\n\n// Example invocation of a torch model being fit using embedPy\nfitModel:{[xtrain;ytrain;model]\n optimArg:enlist[`lr]!enlist 0.9;\n optimizer:.p.import[`torch.optim][`:Adam][model[`:parameters][];pykwargs optimArg];\n criterion:.p.import[`torch.nn][`:BCEWithLogitsLoss][];\n dataX:.p.import[`torch][`:from_numpy][.p.import[`numpy][`:array][xtrain]][`:float][];\n dataY:.p.import[`torch][`:from_numpy][.p.import[`numpy][`:array][ytrain]][`:float][];\n tensorXY:.p.import[`torch.utils.data][`:TensorDataset][dataX;dataY];\n modelValues:(count first xtrain;1b;0);\n modelArgs:`batch_size`shuffle`num_workers!$[.pykx.loaded;.pykx.topy each modelValues;modelValues];\n dataLoader:.p.import[`torch.utils.data][`:DataLoader][tensorXY;pykwargs modelArgs];\n nEpochs:10|`int$(count[xtrain]%1000);\n .p.get[`runmodel][model;optimizer;criterion;dataLoader;nEpochs]\n }\n\n\n================================================================================\nFILE: ml_ml_examples_q_deploy.q\nSIZE: 4,257 characters\n================================================================================\n\n\\l init.q\n\n// Retrieve command line arguments and ensure a user is\n// cognizant that they will delete the current registry\n// if they invoke the example by accident\ncmdLine:.Q.opt .z.x\nif[not `run in key cmdLine;\n -1\"This example will delete the registry\",\n \" in your current folder, use '-run' command line arg\";\n exit 1;\n ];\n\n.[.ml.registry.delete.registry;(::;::);{}]\n\n// All models solving the clustering problem are associated with the\n// \"cluster\" experiment\nexperiment:enlist[`experimentName]!enlist \"cluster\"\n\n// Generate and format the dataset\n\nskldata:.p.import`sklearn.datasets\nblobs:skldata[`:make_blobs;<]\ndset:blobs[`n_samples pykw 1000;`centers pykw 2;`random_state pykw 500]\n\n// Generate two separate Affinity Propagation models using the ML Toolkit\nqmdl :.ml.clust.ap.fit[flip dset 0;`nege2dist;0.8;min;::]\nqmdl2:.ml.clust.ap.fit[flip dset 0;`nege2dist;0.5;min;::]\n\n// Add the two q models to the KX_ML_REGISTRY\n.ml.registry.set.model[::;\"cluster\";qmdl ;\"qAPmodel\";\"q\";enlist[`axis]!enlist 1b]\n.ml.registry.set.model[::;\"cluster\";qmdl2;\"qAPmodel\";\"q\";enlist[`axis]!enlist 1b]\n\n// Generate equivalent Affinity Propagation models using 
Scikit-Learn\nskmdl :.p.import[`sklearn.cluster][`:AffinityPropagation][`damping pykw 0.8][`:fit]dset 0\nskmdl2:.p.import[`sklearn.cluster][`:AffinityPropagation][`damping pykw 0.5][`:fit]dset 0\n\n// Add the two models to the KX_ML_REGISTRY with the second model version 2.0 not 1.1\n.ml.registry.set.model[::;\"cluster\";skmdl ;\"skAPmodel\";\"sklearn\";::]\n.ml.registry.set.model[::;\"cluster\";skmdl2;\"skAPmodel\";\"sklearn\";enlist[`major]!enlist 1b]\n\n// Generate and fit two Keras models adding these to the registry\nif[@[{.p.import[x];1b};`keras;0b];\n seq :.p.import[`keras.models][`:Sequential];\n dense:.p.import[`keras.layers][`:Dense];\n nparray:.p.import[`numpy]`:array;\n\n kerasModel:seq[];\n kerasModel[`:add]dense[4;pykwargs `input_dim`activation!(2;`relu)];\n kerasModel[`:add]dense[4;`activation pykw `relu];\n kerasModel[`:add]dense[1;`activation pykw `sigmoid];\n kerasModel[`:compile][pykwargs `loss`optimizer!`binary_crossentropy`adam];\n kerasModel[`:fit][nparray dset 0;dset 1;pykwargs `epochs`verbose!200 0];\n\n kerasModel2:seq[];\n kerasModel2[`:add]dense[4;pykwargs `input_dim`activation!(2;`relu)];\n kerasModel2[`:add]dense[4;`activation pykw `relu];\n kerasModel2[`:add]dense[1;`activation pykw `sigmoid];\n kerasModel2[`:compile][pykwargs `loss`optimizer!`mse`adam];\n kerasModel2[`:fit][nparray dset 0;dset 1;pykwargs `epochs`verbose!10 0];\n\n // Add the two models to the KX_ML_REGISTRY\n .ml.registry.set.model[::;\"cluster\";kerasModel ;\"kerasModel\";\"keras\";::];\n .ml.registry.set.model[::;\"cluster\";kerasModel2;\"kerasModel\";\"keras\";::];\n ];\n\n\n// Generate and add two Python functions to the KX_ML_REGISTRY.\n// These are not associated with a named experiment or solve the problem that\n// the above do, they are purely for demonstration\nif[@[{.p.import x;1b};`statsmodels;0b];\n pyModel :.p.import[`statsmodels.api][`:OLS];\n pyModel2:.p.import[`statsmodels.api][`:WLS];\n\n // Add the two functions to the KX_ML_REGISTRY.\n .ml.registry.set.model[::;::;pyModel ;\"pythonModel\";\"python\";::];\n .ml.registry.set.model[::;::;pyModel2;\"pythonModel\";\"python\";::]\n ]\n\n\n// Online/out-of-core Models\n\n// Generate and add two q 'online' models to the KX_ML_REGISTRY.\n// These models contain an 'update' key which allows the models to\n// be updated as new data becomes available\nonline1:.ml.online.clust.sequentialKMeans.fit[2 200#400?1f;`e2dist;3;::;::]\nonline2:.ml.online.sgd.linearRegression.fit[100 2#400?1f;100?1f;1b;::]\nonline3:.ml.online.sgd.logClassifier.fit[100 2#400?1f;100?0b;1b;::]\n\n.ml.registry.set.model[::;::;online1;\"onlineCluster\" ;\"q\";::]\n.ml.registry.set.model[::;::;online2;\"onlineRegression\";\"q\";::]\n.ml.registry.set.model[::;::;online3;\"onlineClassifier\";\"q\";::]\n\n// Generate and add two Python 'online' models to the KX_ML_REGISTRY.\n// These models must contain a 'partial_fit' method in order to be\n// considered suitable for retrieval as update functions\n\nsgdClass:.p.import[`sklearn.linear_model][`:SGDClassifier]\nsgdModel:sgdClass[pykwargs `max_iter`tol!(1000;0.003) ][`:fit] . 
dset 0 1\n\n.ml.registry.set.model[::;::;sgdModel;\"SklearnSGD\";\"sklearn\";::]\n\nexit 0\n\n\n================================================================================\nFILE: ml_ml_examples_q_registry.q\nSIZE: 2,675 characters\n================================================================================\n\n// Initialize all relevant functionality\n\\l init.q\n\n// Set the screen width/lengths for better display\n\\c 200 200\n\n// Retrieve command line arguments and ensure a user is\n// cognizant that they will delete the current registry\n// if they invoke the example by accident\ncmdLine:.Q.opt .z.x\nif[not `run in key cmdLine;\n -1\"This example will delete the registry\",\n \" in your current folder, use '-run' command line arg\";\n exit 1;\n ];\n\n.[.ml.registry.delete.registry;(::;::);{}]\n\n-1\"Generate a model registry and retrieve the 'modelStore'\";\n.ml.registry.new.registry[::;::];\n.ml.registry.get.modelStore[::;::];\nshow modelStore;\n\n-1\"\\nAdd several 'basic q models' to the registry\\n\";\nmodelName:\"basic-model\"\n// Incrementing versions from '1.0'\n.ml.registry.set.model[::;{x} ;modelName;\"q\";::]\n.ml.registry.set.model[::;{x+1};modelName;\"q\";::]\n.ml.registry.set.model[::;{x+2};modelName;\"q\";::]\n\n// Set major version and increment from '2.0'\n.ml.registry.set.model[::;{x+3};modelName;\"q\";enlist[`major]!enlist 1b]\n.ml.registry.set.model[::;{x+4};modelName;\"q\";::]\n\n// Add another version of '1.x'\n.ml.registry.set.model[::;{x+5};modelName;\"q\";enlist[`majorVersion]!enlist 1]\n\n-1\"Display the modelStore following model addition\";\nshow modelStore;\n\n-1\"\\nAdd models associated with an experiment\\n\";\nmodelName:\"new-model\"\nexperiment:enlist[`experimentName]!enlist \"testExperiment\"\n// Incrementing versions from '1.0'\n.ml.registry.set.model[::;{x} ;modelName;\"q\";experiment]\n.ml.registry.set.model[::;{x+1};modelName;\"q\";experiment,enlist[`major]!enlist 1b]\n.ml.registry.set.model[::;{x+2};modelName;\"q\";experiment]\n\n-1\"Display the modelStore following experiment addition\";\nshow modelStore;\n\n-1\"\\nRetrieve version 1.1 of the 'basic-model':\\n\";\n.ml.registry.get.model[::;::;\"basic-model\";1 1]`model\n\n-1\"\\nRetrieve the most up to date model associated with the 'testExperiment':\\n\";\n.ml.registry.get.model[::;\"testExperiment\";\"new-model\";::]`model\n\n-1\"\\nRetrieve the last model added to the registry:\\n\";\n.ml.registry.get.model[::;::;::;::]`model\n\n-1\"\\nDelete the experiment from the registry\";\n.ml.registry.delete.experiment[::;\"testExperiment\"]\n\n-1\"\\nDisplay the modelStore following experiment deletion\";\nshow modelStore\n\n-1\"\\nDelete version 1.3 of the 'basic-model'\";\n.ml.registry.delete.model[::;::;\"basic-model\";1 3];\n\n-1\"\\nDisplay the modelStore following deletion of 1.3 of the 'basic-model'\";\nshow modelStore\n\n-1\"\\nDelete all models associated with the 'basic-model'\";\n.ml.registry.delete.model[::;::;\"basic-model\";::]\n\n-1\"\\nDisplay the modelStore following deletion of 'basic-model'\";\nshow modelStore\n\n// Delete the registry\n.ml.registry.delete.registry[::;::]\n\nexit 0\n\n\n================================================================================\nFILE: ml_ml_fresh_extract.q\nSIZE: 3,084 characters\n================================================================================\n\n// fresh/extract.q - Extract features\n// Copyright (c) 2021 Kx Systems Inc\n// \n// Generate features based on params\n\n\\d .ml \n\n// @kind table\n// @category fresh\n// 
@desc Table containing .ml.fresh.feat functions\nfresh.params:update pnum:{count 1_get[fresh.feat x]1}each f,pnames:count[i]#(),\n pvals:count[i]#()from([]f:1_key fresh.feat) \nfresh.params:1!`pnum xasc update valid:pnum=count each pnames from fresh.params\n\n// @kind function\n// @category fresh\n// @desc Load in hyperparameters for FRESH functions and add to \n// .ml.fresh.params table\n// @param filePath {string} File path within ML where hyperparameter JSON \n// file is\n// @return {::} Null on success with .ml.fresh.params updated\nfresh.loadparams:{[filePath]\n hyperparamFile:.ml.path,filePath;\n p:.j.k raze read0`$hyperparamFile;\n p:inter[kp:key p;exec f from fresh.params]#p;\n fresh.params[([]f:kp);`pnames]:key each vp:value p;\n fresh.params[([]f:kp);`pvals]:{(`$x`type)$x`value}each value each vp;\n fresh.params:update valid:pnum=count each pnames from fresh.params \n where f in kp;\n }\n\n// @kind function\n// @category fresh\n// @desc Add hyperparameter values to .ml.fresh.params\nfresh.loadparams\"/fresh/hyperparameters.json\";"}}},{"rowIdx":40,"cells":{"text":{"kind":"string","value":"// Bespoke WDB config\n.merge.mergebybytelimit:0b // merge limit configuration - default is 0b row count limit 1b is byte size limit\n.merge.partlimit:1000 // limit the number of partitions in a chunk\n\\d .wdb\nignorelist:`heartbeat`logmsg // list of tables to ignore\nhdbtypes:`hdb // list of hdb types to look for and call in hdb reload\nrdbtypes:`rdb // list of rdb types to look for and call in rdb reload\nidbtypes:`idb // list of idb types to look for and call in rdb reload\nwdbtypes:() // wdb does not need to connect to itself\ngatewaytypes:`gateway // list of gateway types to inform at reload\ntickerplanttypes:`segmentedtickerplant // list of tickerplant types to try and make a connection to\nsubtabs:` // list of tables to subscribe for (` for all)\nsubsyms:` // list of syms to subscribe for (` for all)\nsavedir:hsym`$getenv[`TORQHOME],\"/wdbhdb\" // location to save wdb data\nnumrows:100000 // default number of rows\nnumtab:`quote`trade!10000 50000 // specify number of rows per table\nreplaynumrows:numrows // 0W for replaying all messages at once then flushing\nreplaynumtab:numtab // enlist[`]!enlist 0W for replaying all messages at once then flushing\nmode:`save // the wdb process can operate in three modes\t\n // 1. saveandsort: the process will subscribe for data,\n // periodically write data to disk and at EOD it will flush\n // remaining data to disk before sorting it and informing\n // GWs, RDBs and HDBs etc...\n // 2. save: the process will subscribe for data,\n // periodically write data to disk and at EOD it will flush\n // remaining data to disk. It will then inform it's respective\n // sort mode process to sort the data\n // 3. sort: the process will wait to get a trigger from it's respective\n // save mode process. When this is triggered it will sort the\n // data on disk, apply attributes and the trigger a reload on the\n // rdb and hdb processes\nwritedownmode:`default // the wdb process can periodically write data to disc and sort at EOD in two ways:\n // 1. default - the data is partitioned by [ partitiontype ]\n // at EOD the data will be sorted and given attributes according to sort.csv before being moved to hdb\n // 2. partbyattr - the data is partitioned by [ partitiontype ] and the column(s)assigned the parted attributed in sort.csv\n // at EOD the data will be merged from each partition before being moved to hdb\n // 3. 
partbyenum - the data is partitioned by [ partitiontype ] and a symbol or integer column with parted attribution assigned in sort.csv\n // at EOD the data will be merged from each partition before being moved to hdb"}}},{"rowIdx":41,"cells":{"text":{"kind":"string","value":"asc\n, iasc\n, xasc\n¶\nSort and grade: ascending\nQ chooses from a variety of algorithms, depending on the type and data distribution.\nasc\n¶\nAscending sort\nasc x asc[x]\nWhere x\nis a:\n- vector, returns its items in ascending order of value, with the sorted attribute set, indicating the list is sorted; where the argument vector is found to be in ascending order already, it is assigned the sorted attribute\n- mixed list, returns the items sorted within datatype and with the sorted attribute set\n- dictionary, returns it sorted by the values\n- table, returns it sorted by the first non-key column and with\n- the sorted attribute set on that column if there is only one non-key column; otherwise\n- the parted attribute set\nThe function is uniform. The sort is stable: it preserves order between equals.\nVector¶\nq)asc 2 1 3 4 2 1 2\n`s#1 1 2 2 2 3 4\nq)a:0 1\nq)b:a\nq)asc b / result has sorted attribute\n`s#0 1\nq)b / argument was already in ascending order\n`s#0 1\nq)a / b was a shallow copy of a\n`s#0 1\nMixed list¶\nIn the example below, the boolean is returned first, then the sorted integers, the sorted characters, and then the date.\nq)asc (1;1b;\"b\";2009.01.01;\"a\";0)\n1b\n0\n1\n\"a\"\n\"b\"\n2009.01.01\nNote how the type numbers are used.\nq)asc(2f;3;4i;5h)\n5h\n4i\n3\n2f\nq){(asc;x iasc abs t)fby t:type each x}(2f;3;4i;5h) / compare asc\n5h\n4i\n3\n2f\nDictionary¶\nq)asc `a`b`c!2 1 3\nb| 1\na| 2\nc| 3\nTable¶\nq)/ simple table\nq)asc ([]a:3 4 1;b:`a`d`s)\na b\n---\n1 s\n3 a\n4 d\nq)meta asc ([]a:3 4 1;b:`a`d`s) / sets parted attribute\nc| t f a\n-| -----\na| j p\nb| s\nq)meta asc([]a:3 4 1) / sets sorted attribute\nc| t f a\n-| -----\na| j s\nq)/ keyed table\nq)meta asc ([c1:`a`b] c2:2 1; c3:01b) / sets parted attribute\nc | t f a\n--| -----\nc1| s\nc2| j p\nc3| b\nq)meta asc ([c1:`a`b] c2:2 1) / sets sorted attribute\nc | t f a\n--| -----\nc1| s\nc2| j s\ndomain: b g x h i j e f c s p m d z n u v t\nrange: b g x h i j e f c s p m d z n u v t\niasc\n¶\nAscending grade\niasc x iasc[x]\nWhere x\nis a list or dictionary, returns the indexes needed to sort list x\nin ascending order.\nq)L:2 1 3 4 2 1 2\nq)iasc L\n1 5 0 4 6 2 3\nq)L iasc L\n1 1 2 2 2 3 4\nq)(asc L)~L iasc L\n1b\nq)iasc `a`c`b!1 2 3\n`a`c`b\nReverse a sort with iasc iasc\n:\nq)x:100?100\nq)b:100?.Q.a\nq)c:b iasc x\nq)b~c iasc iasc x\n1b\ndomain: b g x h i j e f c s p m d z n u v t\nrange: j j j j j j j j j j j j j j j j j j\nxasc\n¶\nSort a table in ascending order of specified columns.\nx xasc y xasc[x;y]\nWhere x\nis a symbol vector of column names defined in table y\n, which is passed by\n- value, returns\n- reference, updates\ny\nsorted in ascending order by x\n.\nThe sort is by the first column specified, then by the second column within the first, and so on.\nThe sorted attribute is set on the first column given (if possible). The sort is stable, i.e. 
it preserves order amongst equals.\nq)\\l sp.q\nq)s\ns | name status city\n--| -------------------\ns1| smith 20 london\ns2| jones 10 paris\ns3| blake 30 paris\ns4| clark 20 london\ns5| adams 30 athens\nq)`city xasc s / sort on city\ns | name status city\n--| -------------------\ns5| adams 30 athens\ns1| smith 20 london\ns4| clark 20 london\ns2| jones 10 paris\ns3| blake 30 paris\nq)`city`name xasc s / sort on city, and name within city\ns | name status city\n--| -------------------\ns5| adams 30 athens\ns4| clark 20 london\ns1| smith 20 london\ns3| blake 30 paris\ns2| jones 10 paris\nq)`status`city`name xasc s / sort on 3 columns, status first\ns | name status city\n--| -------------------\ns2| jones 10 paris\ns4| clark 20 london\ns1| smith 20 london\ns5| adams 30 athens\ns3| blake 30 paris\nq)`status`city`name xasc `s / table given by reference, updated in place\n`s\nq)s\ns | name status city\n--| -------------------\ns2| jones 10 paris\ns4| clark 20 london\ns1| smith 20 london\ns5| adams 30 athens\ns3| blake 30 paris\nq)meta s / status column has sorted attribute\nc | t f a\n------| -----\ns | s\nname | s\nstatus| i s\ncity | s\nDuplicate column names xasc\nsignals dup\nif it finds duplicate columns in the right argument. (Since V3.6 2019.02.19.)\nSorting data on disk¶\nxasc\ncan sort data on disk directly, without loading the entire table into memory.\nq)t:([]b:`s`g`a`s`a;c:30 10 43 13 24;g:til 5)\nq)`:dat/t/ set .Q.en[`:dat]t / write splayed table\n`:dat/t/\nq)\\ls dat/t / splayed columns\n,\"b\"\n,\"c\"\n,\"g\"\nq)`c xasc `:dat/t / sort table on disk by column c\n`:dat/t\nq)t / in-memory table is unsorted\nb c g\n------\ns 30 0\ng 10 1\na 43 2\ns 13 3\na 24 4\nq)\\l dat/t / load table from disk\n`t\nq)t / table is sorted\nb c g\n------\ng 10 1\ns 13 3\na 24 4\ns 30 0\na 43 2\nDuplicate keys in a dictionary or duplicate column names in a table cause sorts and grades to return unpredictable results.\nattr\n,\ndesc\n, idesc\n, xdesc\n,\nSet Attribute\nDictionaries & tables,\nMetadata,\nSorting\nQ for Mortals\n§8.8 Attributes\n\nasof\n¶\nAs-of join\nt1 asof t2 asof[t1;t2]\nWhere\nt1\nis a tablet2\nis a table or dictionary- the last key or column of\nt2\ncorresponds to a time column int1\nreturns the values from the last rows matching the rest of the keys and time ≤ the time in t2\n.\nq)show trade asof`sym`time!(`IBM;09:30:00.0)\nprice| 96.3e\nsize | 200\nstop | 0b\ncorr | 0\ncond | \"T\"\nex | \"D\"\nq)show trade asof([]sym:`AAPL`IBM;ex:\"TD\";time:09:30:00.0)\nprice size stop corr cond\n-------------------------\n78.14 100 0 0 T\n96.3 200 0 0 T\nThe following examples use the mas\ntable from TAQ.\nq)`date xasc`mas / sort by date\n`mas\nq)show a!mas asof a:([]sym:`A`B`C`GOOG;date:1995.01.01)\nsym date | cusip name wi ex uot\n---------------| --------------------------------------------------\nA 1995.01.01| 049870207 ATTWOODS PLC ADS REP5 ORD/5PNC 0 N 100\nB 1995.01.01| 067806109 BARNES GROUP INCORPORATED 0 N 100\nC 1995.01.01| 171196108 CHRYSLER CORP 0 N 100\nGOOG 1995.01.01| 0\nq)show a!mas asof a:([]sym:`A`B`C`GOOG;date:2006.01.01)\nsym date | cusip name wi ex uot\n---------------| ---------------------------------------------\nA 2006.01.01| 00846U101 AGILENT TECHNOLOGIES, INC 0 N 100\nB 2006.01.01| 067806109 BARNES GROUP INCORPORATED 0 N 100\nC 2006.01.01| 172967101 CITIGROUP 0 N 100\nGOOG 2006.01.01| 38259P508 GOOGLE INC CLASS A 0 T 100\nq)show a!mas asof a:([]sym:`A;date:1993.01.05 1996.05.23 2000.08.04)\nsym date | cusip name wi ex uot\n--------------| 
--------------------------------------------------\nA 1993.01.05| 049870207 ATTWOODS PLC ADS REP5 ORD/5PNC 0 N 100\nA 1996.05.23| 046298105 ASTRA AB CL-A ADS 1CL-ASEK2.50 0 N 100\nA 2000.08.04| 00846U101 AGILENT TECHNOLOGIES INC 0 N 100\nasof\nis a multithreaded primitive.\naj\n,\nwj\nJoins\nQ for Mortals\n§9.9.8 As-of Joins\n\nAssign¶\nName a value; amend a named value\nSimple assign¶\nx:y\nWhere x\nis a name and y\nis a value, the value of y\nis associated with the name x\n.\nq)a:42 / assign\nq)a\n42\nq)a:3.14159 / amend\nThe Equal operator =\ntests equality. It has nothing to do with naming or amending values.\nThere is no need to declare the type of a variable.\nA variable acquires the type of the value assigned to it. (Known as dynamic typing.)\nq)type a:til 5 / integer vector\n7h\nq)type a:3.14159 / float atom\n-9h\nIndexed assign¶\nx[i]:y\nWhere\nx\nis the name of a list, dictionary or tablei\nis a value that indexesx\ny\nis a scalar, or a list of the same length asi\nthe value of y\nis assigned to x\nat indexes i\n.\nIndexed assignment cannot change the type of x\n.\nIf x\nis a vector (has negative type) then (=). abs type each(x;y)\nmust be true.\nWhere x\nis a dictionary, assignment has upsert semantics.\nq)d:`tom`dick`harry!1 2 3\nq)d[`dick`jane]:100 200\nq)d\ntom | 1\ndick | 100\nharry| 3\njane | 200\nAssign through operator¶\nx op:y op:[x;y]\nx[i]op:y op:[x i;y]\nWhere\nop\nis a binary operator with infix syntaxx\nis an applicable value (i.e. not an atom) in the left domain ofop\ni\nis a value that indexesx\ny\nis a value in the right domain ofop\nthat conforms to eitheri\norx\nx op y\n(orx[i]op y\n) has the same type asx\nthe value of x\n(or x[i]\n) becomes x op y\n(or x[i]op y\n).\nq)s:(\"the\";\"quick\";\"brown\";\"fox\")\nq)s[1 2],:(\"er\";\"ish\")\nq)s\n\"the\"\n\"quicker\"\n\"brownish\"\n\"fox\"\nExtend Assign-through-operator to derived functions, keywords and lambdas.\nq)s:(\"the\";\"quick\";\"brown\";\"fox\")\nq)@[s;1 2;,;(\"er\";\"ish\")]\n\"the\"\n\"quicker\"\n\"brownish\"\n\"fox\"\nAmend At is more general, and extends assignment-through-operator to derived functions, keywords and lambdas.\nIf x\nis undefined, the identity element for op\nis used as a default.\nq)bar\n'bar\n[0] bar\n^\nq)bar+:1\nq)bar\n1\nPattern match¶\nSee Pattern matching\nSyntax¶\nAn expression with an assignment on the left returns no value to the console.\nq)a:til 5\nq)\nThe value of an assignment is the value assigned.\nq)3+a:til 5\n3 4 5 6 7\nq)1+a[2]+:5\n8\nq)a\n0 1 7 3 4\nAmend, Amend At\nQ for Mortals\n§4.6.2 Simple q Amend\n\nattr\n¶\nAttributes of an object\nattr x attr[x]\nWhere x\nis any object, returns its attributes as a symbol vector.\nThe possible attributes are:\n| code | attribute |\n|---|---|\n| s | sorted |\n| u | unique (hash table) |\n| p | partitioned (grouped) |\n| g | true index (dynamic attribute): enables constant time update and access for real-time tables |\nA null symbol result `\nmeans no attributes are set on x\n.\nq)attr 1 3 4\n`\nq)attr asc 1 3 4\n`s\nq)attr ({x+y})\n`\nSet Attribute\nMetadata\nQ for Mortals\n§8.8 Attributes"}}},{"rowIdx":42,"cells":{"text":{"kind":"string","value":"// @kind function\n// @category dataCheck\n// @desc Ensure that any non-default functions a user wishes to use \n// exist within the current process such that they are callable\n// @param config {dictionary} Information relating to the current run of AutoML\n// @return {::|err} Null on success, error if function invalid\ndataCheck.functions:{[config]\n // List of possible objects 
where user may input a custom function\n funcs2Search:`predictionFunction`trainTestSplit`significantFeatures,\n `scoringFunctionClassification`scoringFunctionRegression,\n `gridSearchFunction`randomSearchFunction`crossValidationFunction;\n funcs:raze config funcs2Search;\n // Ensure the custom inputs are suitably typed\n typeCheck:{$[not type[utils.qpyFuncSearch x]in(99h;100h;104h;105h);'err;0b]};\n locs:@[typeCheck;;{[err]err;1b}]each funcs;\n if[0x;\" \",raze[y],\" is\";\"s \",sv[\", \";y],\" are\"]};\n functionList:strFunc[cnt]string funcs where locs;\n '`$\"The function\",/functionList,\" not defined in your process\\n\"\n ];\n }\n\n// @kind function\n// @category dataCheck\n// @desc Ensure that NLP functionality is available\n// @param config {dictionary} Information relating to the current run of AutoML\n// @return {::|err} Null on success, error if requirements insufficient\ndataCheck.NLPLoad:{[config]\n if[not`nlp~config`featureExtractionType;:()];\n if[not(0~checkimport 3)&(::)~@[{system\"l \",x};\"nlp/nlp.q\";{0b}];\n '\"User attempting to run NLP models with insufficient requirements,\",\n \" see documentation\"\n ];\n if[(\"\"~getenv`PYTHONHASHSEED)&utils.ignoreWarnings>0;\n config[`logFunc]utils.printWarnings`pythonHashSeed\n ];\n }\n\n// @kind function\n// @category dataCheck\n// @desc Ensure data contains appropriate types for application of NLP\n// @param config {dictionary} Information relating to the current run of AutoML\n// @param features {table} Feature data as a table\n// @return {::|err} Null on success, error for inappropriate data\ndataCheck.NLPSchema:{[config;features]\n if[not`nlp~config`featureExtractionType;:()];\n if[0~count .ml.i.findCols[features;\"C\"];\n '`$\"User wishing to apply nlp functionality must pass a table containing \",\n \"a character column.\"\n ];\n }\n\n// @kind function\n// @category dataCheck\n// @desc Remove feature columns which do not conform to allowed schema\n// @param features {table} Feature data as a table\n// @param config {dictionary} Information relating to the current run of AutoML\n// @return {table} Feature dataset with inappropriate columns removed\ndataCheck.featureTypes:{[features;config]\n typ:config`featureExtractionType;\n $[typ in`tseries`normal;\n [fCols:.ml.i.findCols[features;\"sfihjbepmdznuvt\"];\n tab:flip fCols!features fCols\n ];\n typ=`fresh;\n // Ignore aggregating columns for FRESH as these can be of any type\n [apprCols:flip(aggCols:config[`aggregationColumns])_ flip features;\n cls:.ml.i.findCols[apprCols;\"sfiehjb\"];\n // Restore aggregating columns\n tab:flip(aggCols!features aggCols,:()),cls!features cls;\n fCols:cols tab\n ];\n typ=`nlp;\n [fCols:.ml.i.findCols[features;\"sfihjbepmdznuvtC\"];\n tab:flip fCols!features fCols\n ];\n '`$\"This form of feature extraction is not currently supported\"\n ];\n dataCheck.i.errColumns[cols features;fCols;typ;config];\n tab\n }\n\n// @kind function\n// @category dataCheck\n// @desc Ensure target data and final feature dataset are same length\n// @param features {table} Feature data as a table\n// @param target {number[]|symbol[]} Target data as a numeric/symbol vector \n// @param config {dictionary} Information relating to the current run of AutoML\n// @return {::|err} Null on success, error if mismatch in length\ndataCheck.length:{[features;target;config]\n typ:config`featureExtractionType;\n $[-11h=type typ;\n $[`fresh=typ;\n // Check that the number of unique aggregate equals the number of targets\n [aggcols:config`aggregationColumns;\n featAggCols:$[1=count 
aggcols;features aggcols;(,'/)features aggcols];\n if[count[target]<>count distinct featAggCols;\n '`$\"Target count must equal count of unique agg values for FRESH\"\n ];\n ];\n typ in`normal`nlp;\n if[count[target]<>count features;\n '\"Must have the same number of targets as values in table\"\n ];\n '\"Input for typ must be a supported type\"\n ];\n '\"Input for typ must be a supported symbol\"\n ];\n }\n\n// @kind function\n// @category dataCheck\n// @desc Ensure target data contains more than one unique value\n// @param target {(number[]|symbol[])} Target data as a numeric/symbol vector\n// @return {::|err} Null on success, error on unsuitable target\ndataCheck.target:{[target]\n if[1=count distinct target;'\"Target must have more than one unique value\"]\n }\n\n// @kind function\n// @category dataCheck\n// @desc Checks that the trainTestSplit size provided in config is a \n// floating value between 0 and 1\n// @param config {dictionary} Information relating to the current run of AutoML\n// @return {::|err} Null on success, error on unsuitable target\ndataCheck.ttsSize:{[config]\n if[(sz<0.)|(sz>1.)|-9h<>type sz:config`testingSize;\n '\"Testing size must be in range 0-1\"\n ]\n }\n\n\n================================================================================\nFILE: ml_automl_code_nodes_dataCheck_init.q\nSIZE: 341 characters\n================================================================================\n\n// code/nodes/dataCheck/init.q - Load dataCheck node\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Load code for dataCheck node \n\n\\d .automl\n\nloadfile`:code/nodes/dataCheck/checkimport.p\ncheckimport: .p.get[`checkimport;<]\nloadfile`:code/nodes/dataCheck/utils.q\nloadfile`:code/nodes/dataCheck/funcs.q\nloadfile`:code/nodes/dataCheck/dataCheck.q\n\n\n================================================================================\nFILE: ml_automl_code_nodes_dataCheck_utils.q\nSIZE: 9,809 characters\n================================================================================\n\n// code/nodes/dataCheck/utils.q - Utilities for the dataCheck node\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Utility functions specific the the dataCheck node implementation\n\n\\d .automl\n\n// Error presentation\n\n// @kind function\n// @category dataCheckUtility\n// @desc Print to standard out flagging the removal of inappropriate \n// columns\n// @param clist {symbol[]} List of all columns in the dataset\n// @param slist {symbol[]} Sublist of columns appropriate for the use case\n// @param typ {symbol} Feature extraction type being implemented\n// @param config {dictionary} Configuration information assigned by the user \n// and related to the current run\n// @return {::|stdout} Generic null if all columns suitable, appropriate \n// print out in the case there are outstanding issues\ndataCheck.i.errColumns:{[clist;slist;typ;config]\n if[count[clist]<>count slist;\n errString:utils.printDict[`errColumns],string typ;\n removedCols:\", \"sv string clist where not clist in slist;\n config[`logFunc] errString,\": \",removedCols\n ]\n }\n\n// Parameter retrieval functionality\n\n// @kind function\n// @category dataCheckUtility\n// @desc Retrieve default parameters and update with custom information\n// @param feat {table} The feature data as a table\n// @param config {dictionary} Configuration information assigned by the user \n// and related to the current run\n// @param default {dictionary} Default dictionary which may need to be updated\n// @param ptyp {symbol} Problem type being solved 
(`nlp/`normal/`fresh)\n// @return {dictionary} configuration dictionary modified with any custom \n// information\ndataCheck.i.getCustomConfig:{[feat;config;default;ptyp]\n dict:$[(typ:type config)in 10 -11 99h;\n [if[10h~typ;\n config:dataCheck.i.getData[config;ptyp]\n ];\n if[-11h~typ;\n config:dataCheck.i.getData[;ptyp]$[\":\"~first config;1_;]\n config:string config\n ];\n $[min key[config]in key default;\n default,config;\n '`$\"Inappropriate key provided for configuration input\"\n ]\n ];\n not any config;d;\n '`$\"config must be passed the identity `(::)`, a filepath to a \", \n \"parameter flatfile or a dictionary with appropriate key/value pairs\"\n ];\n if[ptyp=`fresh;\n aggcols:dict`aggregationColumns;\n dict[`aggregationColumns]:$[100h~typAgg:type aggcols;\n aggcols feat;\n 11h~abs typAgg;\n aggcols;\n '`$\"aggcols must be passed function or list of columns\"\n ]\n ];\n dict\n }\n\n// @kind function\n// @category dataCheckUtility\n// @desc Retrieve a json flatfile from disk \n// @param fileName {char[]} Name of the file from which the dictionary is \n// being extracted\n// @param ptype {symbol} The problem type being solved(`nlp`normal`fresh)\n// @return {dictionary} Configuration dictionary retrieved from a flatfile\ndataCheck.i.getData:{[fileName;ptype]\n customFile:cli.i.checkCustom fileName;\n customJson:.j.k raze read0 `$customFile;\n (,/)cli.i.parseParameters[customJson]each(`general;ptype)\n }\n\n// Save path generation functionality\n\n// @kind function\n// @category dataCheckUtility\n// @desc Create the folders that are required for the saving of the \n// config, models, images and reports\n// @param config {dictionary} Configuration information assigned by the user \n// and related to the current run\n// @return {dictionary} File paths relevant for saving reports/config etc to \n// file, both as full path format and truncated for use in outputs to \n// terminal\ndataCheck.i.pathConstruct:{[config]\n names:`config`models;\n if[config[`saveOption]=2;names:names,`images`report];\n pname:$[`~config`savedModelName;\n dataCheck.i.dateTimePath;\n dataCheck.i.customPath\n ]config;\n paths:pname,/:string[names],\\:\"/\";\n dictNames:`$string[names],\\:\"SavePath\";\n (dictNames!paths),enlist[`mainSavePath]!enlist pname\n }\n\n// @kind function\n// @category dataCheckUtility\n// @desc Construct save path using date and time of the run\n// @param config {dictionary} Configuration information assigned by the user \n// and related to the current run\n// @return {string} Path constructed based on run date and time \ndataCheck.i.dateTimePath:{[config]\n date:string config`startDate;\n time:string config`startTime;\n dirString:\"outputs/dateTimeModels/\",date,\"/run_\",time,\"/\";\n path,\"/\",dataCheck.i.dateTimeStr[dirString]\n }\n\n// @kind function\n// @category dataCheckUtility\n// @desc Construct save path using custom model name\n// @param config {dictionary} Configuration information assigned by the user \n// and related to the current run\n// @return {string} Path constructed based on user defined custom model name\ndataCheck.i.customPath:{[config]\n modelName:config[`savedModelName];\n modelName:$[10h=type modelName;\n modelName;\n -11h=type modelName;string modelName;\n '\"unsupported input type, model name must be a symbol atom or string\"\n ];\n config[`savedModelName]:modelName;\n path,\"/outputs/namedModels/\",modelName,\"/\"\n }\n\n// @kind function\n// @category dataCheckUtility\n// @desc Construct saved logged file path\n// @param config {dictionary} Configuration 
information assigned by the user \n// and related to the current run\n// @return {string} Path constructed to log file based on user defined paths\ndataCheck.i.logging:{[config]\n if[0~config`saveOption;\n if[`~config`loggingDir;\n -1\"\\nIf saveOption is 0 and loggingDir is not defined,\",\n \" logging is disabled.\\n\";\n .automl.utils.printing:1b;\n .automl.utils.logging:0b;\n :config\n ]\n ];\n if[10h<>type config`loggingDir;string config`loggingDir]\n printDir:$[`~config`loggingDir;\n config[`mainSavePath],\"/log/\";\n [typeLogDir:type config`loggingDir;\n loggingDir:$[10h=typeLogDir;;\n -11h=typeLogDir;string;\n '\"type must be a char array or symbol\"]config`loggingDir;\n path,\"/\",loggingDir,\"/\"\n ]\n ];\n if[`~config`loggingFile;\n date:string config`startDate;\n time:string config`startTime;\n logStr:\"logFile_\",date,\"_\",time,\".txt\";\n config[`loggingFile]:dataCheck.i.dateTimeStr logStr\n ];\n typeLoggingFile:type config[`loggingFile];\n loggingFile:$[10h=typeLoggingFile;;\n -11h=typeLoggingFile;string;\n '\"loggingFile input must be a char array or symbol\"]config`loggingFile;\n config[`printFile]:printDir,loggingFile;\n config\n }"}}},{"rowIdx":43,"cells":{"text":{"kind":"string","value":"/ return weekdays from list of dates\nwday:{x where 1=ap from r;\n r:0!select date:dt,sum bv,sum av,tv:sum ts by id from r;\n r}\n\n\n================================================================================\nFILE: reQ_examples_aoc.q\nSIZE: 1,977 characters\n================================================================================\n\n/Advent of Code example for reQ library\n/Retrieve a private leaderboard or download daily challenge input\n\n/ load reQ library\n\\l req.q\n/ load util funcs for examples\n\\l examples/util.q\n\n\\d .aoc\n\ncfg:.utl.cfg`aoc //get config details\nopts:.Q.def[`year`day`o!(`year$.z.D;`dd$.z.D;`$first system\"pwd\")] .Q.opt .z.x; //get cmd line params\nopts:string each opts; //string params\nint:.z.f like \"*aoc.q\"; //check if aoc.q on cmd line - if not, library funcs\n\n.req.addcookie[\"adventofcode.com\";\"session=\",cfg`session];\n\nboard:{[y;b]\n /* get a leaderboard for a given year */\n r:.req.g\"http://adventofcode.com/\",y,\"/leaderboard/private/view/\",b,\".json\"; //request board\n r:`name`local_score`stars`global_score`id`last_star_ts#/:value r`members; //pull out relevant fields\n :`local_score xdesc update name:(\"anon\",/:id) from r where 10h<>type each name; //fix anon users, sort\n }\n\nday:{[y;d;o]\n /* get challenge input for a given day & save locally */\n -1\"Downloading input for \",y,\" day \",d,\" to: \",string o; //log day being dowloaded & output file\n r:.req.g\"http://adventofcode.com/\",y,\"/day/\",d,\"/input\"; //download input\n o 0: -1_\"\\n\" vs r; //write to file\n }\n\n\\d .\n\n/ get leaderboard if requested\nif[.aoc.int&`board in key .aoc.opts; //do nothing if loaded as lib\n show .aoc.board . 
.aoc.opts`year`board;\n exit 0;\n ];\n\n/ otherwise download challenge input if aoc.q on cmd line\nif[.aoc.int; //do nothing if loaded as lib\n f:` sv hsym[`$.aoc.opts`o],`$\"p\",.aoc.opts`day;\n .aoc.day[.aoc.opts`year;.aoc.opts`day;f];\n exit 0;\n ]\n\n\n================================================================================\nFILE: reQ_examples_github.q\nSIZE: 2,246 characters\n================================================================================\n\n/GitHub example for reQ library\n\n/ load reQ library\n\\l req.q\n/ load util funcs for examples\n\\l examples/util.q\n\n\\d .gh\n\ncfg:.utl.cfg`github //get config details\nint:.z.f like \"*github.q\"; //check if github.q on cmd line - if not, library funcs\nurl:\"https://api.github.com/\" //basic URL\n\nrepo:{[u;r]\n r:.req.get[url,\"repos/\",u,\"/\",r;enlist[`Authorization]!enlist\"token \",cfg`token]; //get repo\n r:`name`owner`html_url`description`size`stargazers_count`watchers_count#r; //take summary info\n :@[r;`owner;@[;`login]]; //return summary of repo details\n }\n\ncreateissue:{[u;r;title;body;labels]\n ul:url,\"repos/\",u,\"/\",r,\"/issues\"; //build URL\n hd:(\"Authorization\";\"Content_Type\")!(\"token \",cfg`token;.req.ty`json); //build HTTP headers\n labels:$[-11=t:type labels;(),labels;10=t;enlist labels;labels]; //ensure list of syms/strings\n d:`title`body`labels!(title;body;labels); //build input object\n r:.req.post[ul;hd;.j.j d]; //perform API request\n :r`html_url; //return URL of new issue\n }\n\nauth:{[x]\n -1\"Please enter GitHub username & password (will be transmitted over HTTPS)\";\n -1\"WARNING: Username & password will display in plain text here:\";\n 1\"Username: \";u:read0 0;\n 1\"Password: \";p:read0 0;\n r:.req.post[\"https://\",u,\":\",p,\"@api.github.com/authorizations\";\n enlist[\"Content-Type\"]!enlist .req.ty`json;\n .j.j `scopes`note!(enlist`public_repo;\"reQ \",string .z.P)\n ];\n :r`token;\n }\n\nuser:{[u] .req.g url,\"users/\",u}\norgs:{[u] .req.g url,\"users/\",u,\"/orgs\"}\n\nif[cfg[`token]like\"{insert your token here}\";\n cfg[`token]:.gh.auth[];\n .utl.writecfg[`github] cfg\n ];\n\n\\d .\n\nif[.gh.int&first .z.x[0] like \"*/*\";\n show .gh.repo . \"/\" vs .z.x 0;\n exit 0;\n ];\n\nif[.gh.int;\n show .gh.repo . 2#.z.x;\n exit 0;\n ];\n\n\n================================================================================\nFILE: reQ_examples_jira.q\nSIZE: 1,874 characters\n================================================================================\n\n/JIRA example for reQ library\n\n/ load reQ library\n\\l req.q\n/ load util funcs for examples\n\\l examples/util.q\n\n\\d .jira\n\ncfg:.utl.cfg`jira //get config details\nint:.z.f like \"*jira.q\"; //check if jira.q on cmd line - if not, library funcs\nurl:.req.prot[cfg`url],cfg[`user],\"@\",.req.host[cfg`url],\"/rest/api/2/\"; //base URL to use"}}},{"rowIdx":44,"cells":{"text":{"kind":"string","value":"Joins¶\nKeyed: As of: ej equi aj aj0 as-of ij ijf inner ajf ajf0 lj ljf left asof simple as-of pj plus wj wj1 window uj ujf union upsert , join ^ coalesce\nA join combines data from two tables, or from a table and a dictionary.\nSome joins are keyed, in that columns in the first argument are matched with the key columns of the second argument.\nSome joins are as-of, where a time column in the first argument specifies corresponding intervals in a time column of the second argument. Such joins are not keyed.\nIn each case, the result has the merge of columns from both arguments. 
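For example (t1 and t2 below are small illustrative tables of our own, not part of the suppliers-and-parts database loaded later), uj merges the columns of both arguments:
q)t1:([]a:1 2;b:`x`y)
q)t2:([]a:enlist 3;c:enlist 10)
q)t1 uj t2
a b c
------
1 x
2 y
3   10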
Where necessary, rows are filled with nulls or zeroes.\nKeyed joins¶\n^\nCoalesce- The Coalesce operator merges keyed tables ignoring nulls\nej\nEqui join- Similar to\nij\n, where the columns to be matched are given as a parameter. ij\nijf\nInner join- Joins on the key columns of the second table. The result has one row for each row of the first table that matches the key columns of the second table.\n,\nJoin-\nThe Join operator\n,\njoins tables and dictionaries as well as lists. For tablesx\nandy\n:x,y\nisx upsert y\nx,'y\njoins records to recordsx,\\:y\nisx lj y\nlj\nljf\nLeft join- Outer join on the key columns of the second table. The result has one row for each row of the first table. Null values are used where a row of the first table has no match in the second table. This is now built-in to\n,\\:\n. (Reverse the arguments to make a right outer join.) pj\nPlus join- A variation on left join. For each matching row, values from the second table are added to the first table, instead of replacing values from the first table.\nuj\nujf\nUnion join- Uses all rows from both tables. If the second table is not keyed, the result is the catenation of the two tables. Otherwise, the result is the left join of the tables, catenated with the unmatched rows of the second table.\nupsert\n- Can be used to join two tables with matching columns (as well as add new records to a table). If the first table is keyed, any records that match on key are updated. The remaining records are appended.\nAs-of joins¶\nIn each case, the time column in the first argument specifies [) intervals in the second argument.\nwj\n,wj1\nWindow join- The most general forms of as-of join. Function parameters aggregate values in the time intervals of the second table. In\nwj\n, prevailing values on entry to each interval are considered. Inwj1\n, only values occurring within each interval are considered. aj\n,aj0\n,ajf\n,ajf0\nAs-of join- Simpler window joins where only the last value in each interval is used. In the\naj\nresult, the time column is from the first table, while in theaj0\nresult, the time column is from the second table. 
asof\n- A simpler\naj\nwhere all columns (or dictionary keys) of the second argument are used in the join.\nImplicit joins¶\nA foreign key is made by enumerating over the column/s of a keyed table.\nWhere a primary key table m\nhas a key column k\nand a table d\nhas a column c\nand foreign key linking to k\n, a left join is implicit in the query\nselect m.k, c from d\nThis generalizes to multiple foreign keys in d\n.\nSuppliers and parts database sp.q\nq)\\l sp.q\n+`p`city!(`p$`p1`p2`p3`p4`p5`p6`p1`p2;`london`london`london`london`london`lon..\n(`s#+(,`color)!,`s#`blue`green`red)!+(,`qty)!,900 1000 1200\n+`s`p`qty!(`s$`s1`s1`s1`s2`s3`s4;`p$`p1`p4`p6`p2`p2`p4;300 200 100 400 200 300)\nq)select sname:s.name, qty from sp\nsname qty\n---------\nsmith 300\nsmith 200\nsmith 400\nsmith 200\nclark 100\nsmith 100\njones 300\njones 400\nblake 200\nclark 200\nclark 300\nsmith 400\nImplicit joins extend to the situation in which the targeted keyed table itself has a foreign key to another keyed table.\nq)emaster:([eid:1001 1002 1003 1004 1005] currency:`gbp`eur`eur`gbp`eur)\nq)update eid:`emaster$1001 1002 1005 1004 1003 from `s\n`s\nq)select s.name, qty, s.eid.currency from sp\nname qty currency\n------------------\nsmith 300 gbp\nsmith 200 gbp\nsmith 400 gbp\nsmith 200 gbp\nclark 100 gbp\nsmith 100 gbp\njones 300 eur\njones 400 eur\nblake 200 eur\nclark 200 gbp\nclark 300 gbp\nsmith 400 gbp\nQ for Mortals §9.9.1 Implicit Joins\nQ for Mortals §9.9 Joins\n\nListening port¶\nUse the -p\ncommand-line option or the \\p\nsystem command to tell kdb+ to listen to a port. The command-line option and the system command take the same parameters.\n\\p [rp,][hostname:][portnumber|servicename]\n-p [rp,][hostname:](portnumber|servicename)\nWhere\nportnumber\nis an integer or long infinityservicename\nis defined in/etc/services\nkdb+ will listen to portnumber\nor the port number of servicename\non all interfaces, or on hostname\nonly if specified.\nThe port must be available and the process must have permission for the port.\nAs of 4.1t 2022.11.01 (or 4.0 2022.10.26) a port range can be specified in place of a portnumber. The range of ports is inclusive and tried in a random order. A service name can be used instead of each port number. Using 0W to choose a free ephemeral port can be more efficient (where suitable).\nq)\\p 80/85\nq)\\p\n81\nWhere no parameter is specified in the system command, the listening port is reported. The default is 0 (no listening port).\nq)\\p\n0i\nGiven a servicename, q will look up its port number in /etc/services\n.\nq)\\p commplex-main / servicename\nq)\\p\n5000i\nIf you know the process is for clients on the localhost only, choose localhost:port for maximum security.\nPreventing connections¶\nTo stop the process listening on a port at runtime, instruct it to listen on port 0:\nq)\\p 0\nBy default, kdb+ won't listen to a port unless a port is specified.\nLoad balancing¶\nOptional parameter rp\nenables the use of the SO_REUSEPORT\nsocket option, which is available in newer versions of many operating systems, including Linux (kernel version 3.9 and later). This socket option allows multiple sockets (kdb+ processes) to listen on the same IP address and port combination. The kernel then load-balances incoming connections across the processes. 
(Since V3.5.)\nSocket sharding with kdb+ and Linux\nEphemeral port¶\nA portnumber\nof 0W\nmeans pick a random available port within the range 32768–60999.\nq)\\p 5010 / set port 5010\nq)\\p\n5010\nq)\\p 0W / pick a random available port within the range 32768 - 60999\nq)\\p\n45512\nq)\\p 0 / turn off listening port\nPort range¶\nAn inclusive range of ports can be used in place of a portnumber\n, to randomly use an available port within the given range (since V3.5/3.6 2023.03.13,V4.0 2022.10.26,V4.1 2022.11.01). A service name can be used instead of a port number within the range. Note that the ephemeral port option also provides the ability to choose from a range of ports.\nq)\\p 2000/2010 / use a free port between 2000 and 2010\nq)\\p -2000/2010 / use a free port between 2000 and 2010 in multithreaded mode\nq)\\p myhost:2000/2010 / use a free port between 2000 and 2010, using given hostname\nMulti-threaded input mode¶\nA negative port sets a multi-threaded port and if used it must be the initial and only mode of operation, i.e. do not dynamically switch between positive port and negative port.\nWhen active, each IPC connection will create a new thread for its sole use.\nEach connection uses its own heap with a minimum of 64MB, the real amount depending on the working space required by the query being executed.\n\\ts\ncan be used to find the memory requirement of a query.\nIt is designed for serving in-memory static data to an externally constrained number of clients. It is not intended for use as a gateway, or serving mutable data.\nNote that there are a number of restrictions in multithreaded mode:\n- queries are unable to update globals\n- .z.po is not called on connect\n- .z.pc is not called on disconnect\n- .z.W has a view on main thread sockets only\n- Cannot send async message\n- Views can be recalculated from the main thread only\n- Uncompressed pages will not be shared between threads (i.e. same situation as with starting a separate hdb for each request).\nThe main thread is allowed to update globals. The main thread is responsible for reading from stdin (i.e. the console) and executing any loaded scripts on start-up.\nIt also invokes .z.ts on timer expiry.\nAny connections made via IPC from the main thread, can be monitored\nfor callbacks (for example via an async callback) which in turn can update globals.\nWhile the main thread is processing an update (for example, a timer firing or console input) none of the connection threads will be processing any input.\nUpdates should not be frequent, as they wait for completion of exiting queries and block new queries (using multiple-read single-write lock), thus slowing processing speeds.\nIf an attempt is made to update globals from threads other than main, a 'no update\nerror is issued.\nMultithreaded input mode supports WebSockets and HTTP (but not TLS) since 4.1t 2021.03.30. TLS support available since 4.1t 2023.12.14. A custom .z.ph which does not update global state should be used with HTTP.\nThe use of sockets from within those threads is allowed only for the one-shot sync request and HTTP client request (TLS/SSL support added in 4.1t 2023.11.10). These can be inefficient, as it opens, queries and closes each time. 
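For instance, a connection thread may issue a one-shot sync request, which opens a handle, sends the query and closes again in a single call (the port number 5010 here is illustrative):
q)`::5010 "2+2" / one-shot request: open, send query, get result, close
4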
Erroneous socket usage is blocked and signals a nosocket error.\nIn multithreaded input mode, the seed for the random-number generator used for threads other than the main thread is based on the socket descriptor for that connection; these threads are transient – destroyed when the socket is closed, and no context is carried over for new threads/connections.\nUnix domain socket¶\nSetting the listening port with -p 5000\nin addition to listening on TCP port 5000, also creates a UDS (Unix domain socket) on /tmp/kx.5000\n.\nYou can disable listening on the UDS, or change the default path from /tmp\nusing environment variable QUDSPATH\n.\nq)/ disable listening on unix domain socket\nq)system\"p 0\";setenv[`QUDSPATH;\"\"];system\"p 6000\"\nq)/ use /home/kdbuser as path\nq)system\"p 0\";setenv[`QUDSPATH;\"/home/kdbuser\"];system\"p 6000\"\nV3.5+ uses abstract namespace for Unix domain sockets on Linux to avoid file-permission issues in /tmp\n.\nN.B. hence V3.5 cannot connect to V3.4 using UDS.\nq)hopen`:unix://5000\nOn macOS:\nq)\\p 5000\nq)\\ls /tmp/kx*\n\"/tmp/kx.5000\"\nq)system\"p 0\";setenv[`QUDSPATH;\"\"];system\"p 5000\"\nq)\\ls /tmp/kx*\nls: /tmp/kx*: No such file or directory\n'os\nq)system\"p 0\";setenv[`QUDSPATH;\"/tmp/kxuds\"];system\"p 5000\"\n'cannot listen on uds /tmp/kxuds/kx.5000. OS reports: No such file or directory\n[0] system\"p 0\";setenv[`QUDSPATH;\"/tmp/kxuds\"];system\"p 5000\"\n^\nq)\\mkdir /tmp/kxuds\nq)system\"p 0\";setenv[`QUDSPATH;\"/tmp/kxuds\"];system\"p 5000\"\nq)\\ls /tmp/kxuds\n\"kx.5000\"\nSecurity¶\nOnce you open a port in q session, it is open to all connections, including HTTP requests.\nIn a production environment secure any process with an open port."}}},{"rowIdx":45,"cells":{"text":{"kind":"string","value":"Variadic syntax¶\nAn applicable value is variadic if its rank is not fixed.\nLists and dictionaries of depth ≥2 and tables are variadic.\nq)m:4 5#\"abcdefghijklmnopqrst\"\nq)m[1 3] / unary\n\"fghij\"\n\"pqrst\"\nq)m[1 3;2 4] / binary\n\"hj\"\n\"rt\"\nq)t:([]name:`Tom`Dick`Harry;city:`London`Paris`Rome)\nq)t[`name] / unary\n`Tom`Dick`Harry\nq)t 1 / unary\nname| Dick\ncity| Paris\nq)t[1;`city] / binary\n`Paris\nSome operators are variadic, for example Apply and Amend.\nEach Prior, Over and Scan applied to binary values derive variadic functions.\nq)+/[2 3 4] / unary\n9\nq)+/[1000000;2 3 4] / binary\n1000009\nq)-':[1952 1954 1960] / unary\n1952 2 6\nq)-':[1900;1952 1954 1960] / binary\n52 2 6\nKeywords defined from such extensions are also variadic.\nq)deltas / Subtract Each Prior\n-':\nq)deltas[15 27 93] / unary\n15 12 66\nq)deltas[10;15 27 93] / binary - unsupported\n5 12 66\nq)-':[10;15 27 93] / binary - supported\n5 12 66\nProjection¶\nVariadic values do not project unless the omitted argument/s are specified as nulls in the argument list.\nTo project a variadic value as a unary, use a 2-item argument list to resolve the binary form.\nq)g:+/[100;] / 2-item argument list resolves the binary form\nq)g 2 3 4 5 / the projection is unary\n114\nUnary forms of binary operators¶\nMany binary operators are variadic: they have unary forms. 
The unary form can be selected with a suffixed colon.\nq)|[2;til 5] / binary: maximum\n2 2 2 3 4\nq)|:[til 5] / unary: reverse\n4 3 2 1 0\nBinary operators are infixes.\nLike an infix extension, the unary form can be parenthesized and applied prefix.\nq)2|til 5 / maximum\n2 2 2 3 4\nq)(|:)\"zero\" / reverse\n\"orez\"\nq)2#\"zero\" / take\n\"ze\"\nq)(#:)\"zero\" / count\n4\nUnary forms can also be applied by Apply At.\nq)|:[\"zero\"] / bracket notation\n\"orez\"\nq)(|:)\"zero\" / prefix\n\"orez\"\nq)(|:)@\"zero\" / apply-at\n\"orez\"\nq)@[|:;\"zero\"] / apply-at\n\"orez\"\nUnary forms are poor q style\nThe semantics of the unary and binary forms of an operator are not always closely related.\nFor better legibility, q provides keywords for unary forms.\nGood q style prefers them.\nWrite count \"zero\"\n, not (#:)\"zero\"\n.\n\nAuto Scaling for a kdb+ realtime database¶\nAutoscaling the Real-time Database in the Cloud\nkxcontrib/cloud-autoscaling\nCloud computing has fast become the new normal as more and more organizations are migrating their IT systems to the cloud. Big cloud platforms like Amazon Web Services, Google Cloud, and Microsoft Azure have made it reliable, secure, and most importantly cost-effective.\nThe Infrastructure-as-a-Service (IaaS) model they have adopted has made it easier than ever before to provision computing resources. This model has been taken a step further with Auto Scaling technologies. Servers, storage, and networking resources can now be commissioned and decommissioned in an instant without any manual intervention. This elasticity is one of the key benefits of Cloud Computing. Customers can leverage this new technology to scale their infrastructure in order to meet system demands.\nAs these technologies become more prevalent it will become important to start incorporating them into kdb+. This article explores how we can do this while focusing on scaling the random-access memory (RAM) needed for the real-time database (RDB).\nAuto Scaling is the act of monitoring the load on a system and dynamically acquiring or shutting down resources to match this load. Incorporating this technology into an application means we no longer need to provision one large computing resource whose capacity must forever meet the application’s demand. Instead we can use clusters of smaller resources and scale them in and out to follow the demand curve.\nAuto Scaling and kdb+¶\nWhen it comes to databases there are three main types of computing resources that we can look to scale:\n- Storage\n- Compute\n- Random-access memory (RAM)\nScaling storage for our kdb+ databases can be relatively simple in the cloud. As the database grows we can provision extra storage volumes for our instances, or increase the size of the ones currently in use.\nReading and writing data are prime use cases for scaling compute power within a kdb+ application. Scaling compute for reading has been covered by Rebecca Kelly in her blog post KX in the Public Cloud: Autoscaling using kdb+. Here Rebecca demonstrates how to scale the number of historical database (HDB) servers to handle an increasing or decreasing number of queries.\nDynamically scaling the compute needed for writing can be a bit more complicated. Given we want to maintain the data’s order, the entire stream of data for a given source must go through one point in the system to be timestamped.\nThe same can be said for scaling the RAM needed for an RDB. 
For this use case the number of RDB servers will be increased throughout the day as more and more data is ingested by the tickerplant. The system must ensure that the data is not duplicated across these servers. Building a solution for this problem will be the objective of this article.\nAuto Scaling the RDB¶\nBy Auto Scaling the RDB we will improve both the cost-efficiency and the availability of our databases.\nWhy use Auto Scaling¶\nLet’s say on average we receive a total of 12GB of data which is distributed evenly throughout the day. For a regular kdb+ system we might provision one server with 16GB of RAM to allow for some contingency capacity. We then hope that the data volumes do not exceed that 16GB limit on a daily basis.\nIn a scalable cluster we can begin the day with one small server (for this example a quarter of the size, 4GB). The RAM needed to hold real-time data in memory will grow throughout the day, as it does we can step up our capacity by launching more servers.\nFigure 1.1: Capacities of regular and scalable real-time databases\nCost efficiency¶\nIn the cloud you pay only for what you use. So in a perfect system there should be no spare computing resources running idle accumulating costs. In periods of low demand like weekends or end-of-day (when the day’s data has been flushed from memory) we should have the ability to scale down. By ensuring this we can maintain the performance of a system at the lowest possible cost.\nFigure 1.2: Potential cost savings of a scalable RDB\nIt is worth noting that the number of servers you provision will have no real bearing on the overall cost. For the most part, running one server with 16GB of RAM will cost the same as running four with 4GB.\nBelow is an example of Amazon Web Service’s pricing for the varying sizes of its t3a instances. As you can see the price is largely proportional to the memory capacity of each instance.\nFigure 1.3: Amazon Web Services' t3a instance pricing\nAvailability¶\nReplacing one large server with a scalable cluster will make our system more reliable. By dynamically acquiring resources we can ensure that the load on our system never exceeds its capacity.\nFigure 1.4: Availability of a scalable RDB under high load\nThis will safeguard against unexpected spikes in data volumes crippling our systems and we can stop guessing our capacity needs. When developing a new application there is no need to estimate how much memory the RDB is going to need throughout its lifetime. Even if the estimate turns out to be correct, we will still end up provisioning resources that will lie mostly idle during periods of low demand. Demand varies and so should our capacity.\nDistributing the day’s data among multiple smaller servers will also increase the system’s resiliency. One fault will no longer mean all of the day’s data is lost. 
The smaller RDBs will also be quicker to recover from a fault as they will only have to replay a portion of the tickerplant’s log."}}},{"rowIdx":46,"cells":{"text":{"kind":"string","value":"Installing multiple versions of kdb+¶\nFor any version of q, the 64-bit and 32-bit interpreter binaries share the same q.k\nfile, located in QHOME\nfor that version.\nAll versions share the same k4.lic\nor kc.lic\nlicense-key file.\nArrange your files as in this example:\n$ tree q\nq\n├── k4.lic\n├── phrases.q\n├── sp.q\n├── trade.q\n├── v3.5\n│ ├── m32\n│ │ └── q\n│ ├── m64\n│ │ └── q\n│ └── q.k\n└── v4.0\n├── m64\n│ └── q\n└── q.k\nIn your profile export QLIC\nand define aliases as in this example:\n# versions of q\nexport QLIC=~/q\nalias q='export QHOME=~/q/v4.0; rlwrap -r $QHOME/m64/q'\nalias q3.5='export QHOME=~/q/v3.5; rlwrap -r $QHOME/m64/q'\nalias q32='export QHOME=~/q/v3.5; rlwrap -r $QHOME/m32/q'\nIn a command shell:\n$ q3.5\nKDB+ 3.5 2019.05.15 Copyright (C) 1993-2019 Kx Systems\nm64/ 8()core 16384MB sjt max.local 127.0.0.1 EXPIRE 2020.08.01…\nq)\\\\\n$\nThe 32-bit interpreter finds and reports the license-key file even though it will run without it.\n$ q32\nKDB+ 3.6 2019.03.07 Copyright (C) 1993-2019 Kx Systems\nm32/ 8()core 16384MB sjt max.local 192.168.0.10 EXPIRE 2020.08.01…\nq)\\pwd\n\"/Users/sjt\"\nq)\\echo $QLIC\n\"/Users/sjt/q\"\nq)\\echo $QHOME\n\"/Users/sjt/q/v3.6\"\nq)\\l ../sp.q\n+`p`city!(`p$`p1`p2`p3`p4`p5`p6`p1`p2;`london`london`london`london`london`lon..\n(`s#+(,`color)!,`s#`blue`green`red)!+(,`qty)!,900 1000 1200\n+`s`p`qty!(`s$`s1`s1`s1`s2`s3`s4;`p$`p1`p4`p6`p2`p2`p4;300 200 100 400 200 300)\nq)\nLoading sp.q\n, a sibling of QHOME\n, requires the relative path specified."}}},{"rowIdx":47,"cells":{"text":{"kind":"string","value":"$\nTok¶\nInterpret a string as a data value\nx$y $[x;y]\nWhere\ny\nis a stringx\nis a non-positive short or upper-case char as below (or the null symbol as a synonym for\"S\"\n)\nreturns y\nas an atom value interpreted according to x\n.\nx\nvalues for Tok:\nq){([result:key'[x$\\:()]];short:neg x;char:upper .Q.t x)}5h$where\" \"<>20#.Q.t\nresult | short char\n---------| ----------\nboolean | -1 B\nguid | -2 G\nbyte | -4 X\nshort | -5 H\nint | -6 I\nlong | -7 J\nreal | -8 E\nfloat | -9 F\nchar | -10 C\nsymbol | -11 S\ntimestamp| -12 P\nmonth | -13 M\ndate | -14 D\ndatetime | -15 Z\ntimespan | -16 N\nminute | -17 U\nsecond | -18 V\ntime | -19 T\nA left argument of 0h\nor \"*\"\nreturns the y\nstring unchanged.\nWhere x\nis a positive or zero short, a lower-case char, \"*\"\n, or a non-null symbol, see Cast.\nq)\"E\"$\"3.14\"\n3.14e\nq)-8h$\"3.14\"\n3.14e\nq)\"D\"$\"2000-12-12\"\n2000.12.12\nq)\"U\"$\"12:13:14\"\n12:13\nq)\"T\"$\"123456789\"\n12:34:56.789\nq)\"P\"$\"2015-10-28D03:55:58.6542\"\n2015.10.28D03:55:58.654200000\nOutside of domain¶\nParsing values outside of the types domain returns null.\nq)\"H\"$\"32768\"\n0Nh\nq)\"I\"$\"2147483648\"\n0Ni\nq)\"D\"$\"2147483648\"\n0Nd\nChanges since 4.1t 2021.09.03,4.0 2021.10.01\nShort converts to 0Nh instead of ±0Wh\nIteration¶\nTok is a near-atomic function. 
Implicit recursion stops at strings, not atoms.\nq)\"BXH\"$(\"42\";\"42\";\"42\")\n0b\n0x42\n42h\nq)(\"B\";\"XHI\")$(\"42\";(\"42\";\"42\";\"42\"))\n0b\n(0x42;42h;42i)\nq)\"B\"$\" Y \"\n1b\nq)\"B\"$'\" Y \"\n000100b\nSymbols¶\nUse the null symbol as a shorthand left argument for \"S\"\n.\nq)\"S\"$\"hello\"\n`hello\nq)`$\"hello\"\n`hello\nConverting a string to a symbol removes leading and trailing blanks.\nq)`$\" IBM \"\n`IBM\nTruthy characters¶\nCertain characters are recognized as boolean True:\nq)\"B\"$(\" Y \";\" N \")\n10b\nq)\" \",.Q.an\n\" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789\"\nq)\"B\"$'\" \",.Q.an\n0000000000000000000010001100000000000000000000100011000100000000b\nq).Q.an where\"B\"$'.Q.an\n\"txyTXY1\"\nContrast this with casting to boolean:\nq)\"b\"$\" \",.Q.an\n1111111111111111111111111111111111111111111111111111111111111111b\nIP address¶\nq)\"I\"$\"192.168.1.34\" /an IP address as an int\n-1062731486i\nq)\"NT\"$\\:\"123456123987654\" / since V3.4\n0D12:34:56.123987654\n12:34:56.123\nUnix timestamps¶\n(from seconds since Unix epoch), string with 9…11 digits:\nq)\"P\"$\"10129708800\"\n2290.12.31D00:00:00.000000000\nq)\"P\"$\"00000000000\"\n1970.01.01D00:00:00.000000000\nIf these digits are followed by a .\nTok will parse what follows .\nas parts of second, e.g.\nq)\"P\"$\"10129708800.123456789\"\n2290.12.31D00:00:00.123456789\nq)\"P\"$\"00000000000.123456789\"\n1970.01.01D00:00:00.123456789\nq)\"PZ\"$\\:\"20191122-11:11:11.123\"\n2019.11.22D11:11:11.123000000\n2019.11.22T11:11:11.123\nDate formats¶\n\"D\"$\nwill Tok dates with varied formats:\n[yy]yymmdd\nddMMM[yy]yy\nyyyy/[mm|MMM]/dd\n[mm|MMM]/dd/[yy]yy / \\z 0\ndd/[mm|MMM]/[yy]yy / \\z 1\nCommand-line option -z\n(date format)\nSystem command \\z\n(date format)\nCast\nOverloads of $\n.h.iso8601\nISO 8601 timestamp\nCasting\nQ for Mortals\n§7.3.3 Parsing Data from Strings\n\ntrim\n, ltrim\n, rtrim\n¶\nRemove leading or trailing nulls from a list\ntrim x trim[x]\nltrim x ltrim[x]\nrtrim x rtrim[x]\nWhere x\nis a vector or non-null atom, returns x\nwithout leading (ltrim\n) or trailing (rtrim\n) nulls or without either (trim\n).\nq)trim \" IBM \"\n\"IBM\"\nq)trim 0N 0N 1 2 3 0N 0N 4 5 0N 0N\n1 2 3 0N 0N 4 5\nq)ltrim\" IBM \"\n\"IBM \"\nq)rtrim\" IBM \"\n\" IBM\"\nq)trim\"a\"\n\"a\"\nq)trim 42\n42\nImplicit iteration¶\ntrim\n, ltrim\n, and rtrim\nare string-atomic and apply to dictionaries and tables.\nq)trim((\"fox\";(\"jumps \";\"over \"));(\"a\";\"dog \"))\n\"fox\" (\"jumps\";\"over\")\n\"a\" \"dog\"\nq)ltrim`a`b!((\"fox\";(\"jumps \";\"over \"));(\"a\";\"dog \"))\na| \"fox\" (\"jumps \";\"over \")\nb| \"a\" \"dog \"\nq)rtrim ([]a:(\"fox\";(\"jumps \";\"over \"));b:(\"a\";\"dog \"))\na b\n----------------------\n\"fox\" \"a\"\n(\"jumps\";\"over\") \"dog\"\nDomain and range¶\ndomain: b g x h i j e f c s p m d z n u v t\nrange: b g x h i j e f c s p m d z n u v t\n\ntype\n¶\nType of an object\ntype x type[x]\nWhere x\nis any object, returns its type.\nThe type is a short int:\n- zero for a general list\n- negative for atoms of basic datatypes\n- positive for everything else\nq)type 5 / integer atom\n-7h\nq)type 2 3 5 / integer vector\n7h\nq)type (2 3 5;\"hello\") / general list\n0h\nq)type () / general list\n0h\nq)type each (2;3 5;\"hello\") / int atom; int vector; string\n-7 7 10h\nq)type (+) / function\n102h\nq)type (0|+) / composition\n105h\n\nuj\n, ujf\n¶\nUnion join\nx uj y uj [x;y]\nx ujf y ujf[x;y]\nWhere x\nand y\nare both keyed or both unkeyed tables, returns the union of the columns, filled with 
nulls where necessary:\n- if\nx\nandy\nhave matching key column/s, then records iny\nupdate matching records inx\n- otherwise,\ny\nrecords are inserted.\nq)show s:([]a:1 2;b:2 3;c:5 7)\na b c\n-----\n1 2 5\n2 3 7\nq)show t:([]a:1 2 3;b:2 3 7;c:10 20 30;d:\"ABC\")\na b c d\n--------\n1 2 10 A\n2 3 20 B\n3 7 30 C\nq)s,t / tables do not conform for ,\n'mismatch\nq)s uj t / simple, so second table is inserted\na b c d\n--------\n1 2 5\n2 3 7\n1 2 10 A\n2 3 20 B\n3 7 30 C\nq)(2!s) uj 2!t / keyed, so matching records are updated\na b| c d\n---| ----\n1 2| 10 A\n2 3| 20 B\n3 7| 30 C\nuj\nis a multithreaded primitive.\nuj\ngeneralizes the ,\nJoin operator.\nChanges in V3.0\nThe union join of two keyed tables is equivalent to a left join of the two tables with the catenation of unmatched rows from the second table.\nAs a result a change in the behavior of lj\ncauses a change in the behavior of uj\n:\nq)show x:([a:1 2]b:`x`y;c:10 20)\na| b c\n-| ----\n1| x 10\n2| y 20\nq)show y:([a:1 2]b:``z;c:1 0N)\na| b c\n-| ---\n1| 1\n2| z\nq)x uj y / kdb+ 3.0\na| b c\n-| ---\n1| 1\n2| z\nq)x uj y / kdb+ 2.8\na| b c\n-| ----\n1| x 1\n2| z 20\nSince 2017.04.10, the earlier version is available in all V3.5 and later versions as ujf\n.\nJoins\nQ for Mortals\n§9.9.7 Union Join\n\nungroup\n¶\nungroup x ungroup[x]\nWhere x\nis a table, in which some cells are lists, but for any row, all lists are of the same length, returns the normalized table, with one row for each item of a lists.\nq)p:((enlist 2);5 7 11;13 17)\nq)r:((enlist\"A\");\"CDE\";\"FG\")\nq)show t:([]s:`a`b`c;p;q:10 20 30;r)\ns p q r\n-----------------\na ,2 10 ,\"A\"\nb 5 7 11 20 \"CDE\"\nc 13 17 30 \"FG\"\nq)ungroup t / flatten lists p and r\ns p q r\n---------\na 2 10 A\nb 5 20 C\nb 7 20 D\nb 11 20 E\nc 13 30 F\nc 17 30 G\nTypically used on the result of xgroup\nor select\n.\nq)\\l sp.q\nq)show t:select p,qty by s from sp where qty>200\ns | p qty\n--| ------------------------\ns1| `p$`p1`p3`p5 300 400 400\ns2| `p$`p1`p2 300 400\ns4| `p$,`p4 ,300\nq)ungroup t\ns p qty\n---------\ns1 p1 300\ns1 p3 400\ns1 p5 400\ns2 p1 300\ns2 p2 400\ns4 p4 300\nungroup\nis not the exact inverse of grouping\nGrouping sorts on the keys, so a subsequent ungroup\nreturns the original records sorted by the grouped column/s.\ngroup\n,\nselect\n,\nxgroup\nQ for Mortals\n§9.3.4.2 Grouping without Aggregation\n\nunion\n¶\nUnion of two lists\nx union y union[x;y]\nWhere x\nand y\nare lists or atoms, returns a list of the distinct items of its combined arguments, i.e. 
distinct x,y\n.\nq)1 2 3 3 6 union 2 4 6 8\n1 2 3 6 4 8\nq)distinct 1 2 3 3 6, 2 4 6 8 / same as distinct on join\n1 2 3 6 4 8\nq)t0:([]x:2 3 5;y:\"abc\")\nq)t1:([]x:2 4;y:\"ad\")\nq)t0 union t1 / also on tables\nx y\n---\n2 a\n3 b\n5 c\n4 d\nq)(distinct t0,t1)~t0 union t1\n1b\n\nupdate\n¶\nAdd or amend rows or columns of a table or entries in a dictionary\nupdate\nis a qSQL query template and varies from regular q syntax.\nFor the Update operator !\n, see\nFunctional SQL\nSince 4.1t 2021.06.04 updates from splayed table and path@tablename now leverage peach to load columns (when running with secondary threads).\nq)update x:0 from get`:mysplay\nSyntax¶\nupdate ps [by pb] from texp [where pw]\nFrom phrase¶\nupdate\nwill not modify a splayed table on disk.\nSelect phrase¶\nNames in the Select phrase refer to new or modified columns in the table expression.\nq)t:([] name:`tom`dick`harry; age:28 29 35)\nq)update eye:`blue`brown`green from t\nname age eye\n---------------\ntom 28 blue\ndick 29 brown\nharry 35 green\nWhere phrase¶\nThe Where phrase restricts the scope of updates.\nq)t:([] name:`tom`dick`harry; hair:`fair`dark`fair; eye:`green`brown`gray)\nq)t\nname hair eye\n----------------\ntom fair green\ndick dark brown\nharry fair gray\nq)update eye:`blue from t where hair=`fair\nname hair eye\n----------------\ntom fair blue\ndick dark brown\nharry fair blue\nNew values must have the type of the column being amended.\nIf the query adds a new column it will have values only as determined by the Where phrase. At other positions, it will have nulls of the column’s type.\nBy phrase¶\nThe By phrase applies the update along groups. This is most useful with aggregate and uniform functions.\nWith an aggregate function, the entire group gets the value of the aggregation on the group.\nq)update avg weight by city from p\np | name color weight city\n--| -------------------------\np1| nut red 15 london\np2| bolt green 14.5 paris\np3| screw blue 17 rome\np4| screw red 15 london\np5| cam blue 14.5 paris\np6| cog red 15 london\nA uniform function is applied along the group in place. This can be used, for example, to compute cumulative volume of orders.\nq)update cumqty:sums qty by s from sp\ns p qty cumqty\n---------------\n0 p1 300 300\n0 p2 200 500\n0 p3 400 900\n0 p4 200 1100\n3 p5 100 100\n0 p6 100 1200\n1 p1 300 300\n1 p2 400 700\n2 p2 200 200\n3 p2 200 300\n3 p4 300 600\n0 p5 400 1600\nSince 4.1 2024.04.29 throws type\nerror if dictionary update contains by clause (previously ignored).\nCond¶\nCond is not supported inside query templates: see qSQL.\ndelete\n,\nexec\n,\nselect\nqSQL,\nFunctional SQL\nQ for Mortals\n§9.5 The update\ntemplate"}}},{"rowIdx":48,"cells":{"text":{"kind":"string","value":"File compression¶\nkdb+ can compress data as it is written to disk. 
Q operators and keywords read both compressed and uncompressed files.\nWrite compressed files¶\nUse set\nwith a left argument that specifies the file or splay target, and the compression parameters.\n(For a splayed table, you can specify the compression of each column.)\nq)`:a set 1000#enlist asc 1000?10 / uncompressed file\n`:a\nq)(`:za;17;2;9)set get`:a / compressed file\n`:za\nq)get[`:a]~get`:za\n1b\nUsing real NYSE trade data, we observed the gzip\nalgorithm at level 9 compressing to 15% of original size, and the IPC compression algorithm compressing to 33% of original size.\nThe compressed file allows random access to the data.\nSource and target file on the same drive might run slowly\nCompression reads from the source file, compresses the data and writes to the target file. The disk is likely receiving many seek requests.\nIf you move the target file to a different physical disk, you will reduce the number of seeks needed.\nCautions:\n- Do not use streaming compression with log files. After a crash, the log file would be unusable as it will be missing meta information from the end of the file. Streaming compression maintains the last block in memory and compresses/purges it as needed or latest on close of file handle.\n- When a nested data column file, e.g.\nname\n, is compressed, its companion filename#\norname##\nis also compressed: do not try to compress it explicitly. - Use\nset\nand not gzip: they produce different results.\nCompression parameters¶\nCompression is specified by three integers representing logical block size, algorithm, and compression level.\n- Logical block size\n-\nA power of 2 between 12 and 20: pageSize or allocation granularity to 1MB.\n-\nPageSize for AMD64 is 4kB, SPARC is 8kB. Windows seems to have a default allocation granularity of 64kB. Apple Silicon is 16kB.\n-\nWhen choosing the logical block size, consider the minimum of all the platforms that will access the files directly – otherwise you may encounter\ndisk compression - bad logicalBlockSize\n. -\nThis value affects both compression speed and compression ratio: larger blocks can be slower and better compressed.\n- Algorithm and compression level\n-\nPick from:\nalg algorithm level since ---------------------------- 0 none 0 1 q IPC 0 2 gzip 0-9 3 snappy 0 V3.4 4 lz4hc 0-16† V3.6 5 zstd -7-22 V4.1\nLevel 0 for lz4hc\ndefault compression; level>16 behaves the same as 16\nAlgorithm is also used to specify the encryption algorithm which can be used with compression\nSelective compression¶\nYou can choose which files to compress, and which algorithm/level to use per file.\nQ operators read both compressed and uncompressed files. 
So files that do not compress well, or have an access pattern that does not perform well with compression, can be left uncompressed.\nCompression statistics¶\nThe -21!\ninternal function returns a dictionary of compression statistics, or an empty dictionary if the file is not compressed.\nhcount\nreturns the uncompressed file length.\nCompression by default¶\nkdb+ can write compressed files by default.\nThis is governed by the zip defaults .z.zd\n.\nSet this as an integer vector, e.g.\n.z.zd:17 2 6\nand set\nwill write files (with no extension) compressed in this way unless given different parameters.\nTo disable compression by default, set .z.zd\nto 3#0\n, or expunge it.\n.z.zd:3#0 / no compression\n\\x .z.zd / no compression\nBy default, .z.zd\nis undefined and q writes files uncompressed.\nAppend to a compressed file or splay¶\nq)(`:zippedTest;17;2;6) set 100000?10\n`:zippedTest\nq)`:zippedTest upsert 100000?10\n`:zippedTest\nq)-21!`:zippedTest\ncompressedLength | 148946\nuncompressedLength| 1600016\nalgorithm | 2i\nlogicalBlockSize | 17i\nzipLevel | 6i\nAppending to files with an attribute (e.g. `p#\non sym) causes the whole file to be read and rewritten.\nAppending to compressed enum files in V3.0 2012.05.17\nAppending to compressed enum files was blocked in V3.0 2012.05.17 due to potential concurrency issues, hence these files should not be compressed.\nDecompression¶\nDecompression is implicit: q operators and keywords read both compressed and uncompressed files.\nget`:compressedFile\n\\x .z.zd / write uncompressed by default\n`:uncompressedFile set get `:compressedFile / store again decompressed\nFiles are mapped or unmapped on demand during a query. Only the areas of the file that are touched are decompressed, i.e. kdb+ uses random access. Decompressed data is cached while a file is mapped. Columns are mapped for the duration of the select.\nFor example, say you are querying by date and sum over a date-partitioned table, with each partition parted by sym. The query decompresses only the parts of the column data for the syms in the query predicate.\nConcurrently open files¶\nThe number of concurrently open files is limited by the environment/OS only (e.g. ulimit -n\n).\nPrior to V3.2\nV3.2+ uses two file descriptors per file: you might need to increase the ulimit -n\nvalue used in prior versions.\nPrior to V3.1 2013.02.21 no more than 4096 compressed files could be open concurrently.\nThere is no practical internal limit on the number of uncompressed files.\nMemory allocation¶\nkdb+ allocates enough memory to decompress the whole vector, regardless of how much it finally uses. 
This reservation is required as there is no backing store for the decompressed data, unlike with mapped files of uncompressed data, which can always read the pages from file again should they have been dropped.\nThis is reservation only, and can be accommodated by increasing the swap space available: even though the swap should never actually be written to, the OS has to be assured that in the worst-case scenario of decompressing the data in full, it could swap it out if needed.\nIf you experience wsfull\neven with sufficient swap space configured, check whether you have any soft/hard limits imposed with ulimit -v\n.\nMemory overcommit settings on Linux\n/proc/sys/vm/overcommit_memory\nand /proc/sys/vm/overcommit_ratio\n– these control how careful Linux is when allocating address space with respect to available physical memory plus swap.\nPerformance¶\nThere are three key aspects of compression algorithms:\n- Compression ratio: This indicates how much the final data file size is reduced. A high compression ratio means smaller files and lower storage, I/O costs. If the column files are smaller, we can store more data on a storage of a given size. Similarly, more storage space costs more (especially in the cloud). Smaller files may reduce query execution time if the storage is slow because smaller files are faster read. You can check the compression ratio of a popular financial database in a case study.\n- Compression speed: This measures the time required to compress a file. Compression is typically CPU-intensive, so a high compression speed minimizes CPU usage and associated costs. High compression speed is good. The time to save a column file determines the upper bound of data ingestion. The faster we can save a file, the more a kdb+ system can ingest. In the kdb+ tick system, the RDB is unavailable for queries during write, meaning that write speed also affects system availability.\n- Decompression speed: This reflects the time taken to restore the original file from the compressed (encrypted) version. High decompression speed means faster queries.\nThere is no single best compression algorithm that outperforms all others in all aspects. You need to select compression (or avoid compression) based on your priorities:\n- Is achieving the fastest possible query execution more important to you, or do you prefer to minimize storage costs?\n- Does your kdb+ system handle a high volume of incoming data, requiring a reliable intraday write process to manage the data effectively?\n- Are you looking for a general solution that provides balanced performance across various aspects without excelling or underperforming in any particular area?\nA single thread with full use of a core can decompress approx 300MB/s, depending on data/algorithm and level.\nBenchmarking¶\nIt is difficult to estimate the impact of compression on performance. On the one hand, compression does trade CPU utilization for disk-space savings. And up to a point, if you’re willing to trade more CPU time, you can save more space. 
But by reducing the space used, you end up doing less disk I/O, which can improve overall performance if your workload is bandwidth-limited.\nThe only way to know the real impact of compression on your disk utilization and system performance is to run your workload with different levels of compression and observe the results.\nCurrently, ZFS compression probably has an edge over native kdb+ compression, due to keeping more decompressed data in cache, which is available to all processes.\nPerform your benchmarks on the same hardware setup as you would use for production and be aware of the disk cache – flush the cache before each test. The disk cache can be flushed on Linux using\nsync ; sudo echo 3 | sudo tee /proc/sys/vm/drop_caches\nand on macOS, the OS command purge\ncan be used.\nCompression parameters¶\nThe logicalBlockSize\nrepresents how much data is taken as a compression unit, and consequently the minimum size of a block to decompress. E.g. using a logicalBlockSize\nof 128kB, a file of size 128000kB would be cut into 1000 blocks, and each block compressed independently of the others. Later, if a single byte is requested from that compressed file, a minimum of 128kB would be decompressed to access that byte. Fortunately, those types of access patterns are rare, and typically you would be extracting clumps of data that make a logical block size of 128kB quite reasonable.\nExperiment to discover what suits your data, hardware and access patterns best.\nKernel settings¶\nTweaking the kernel settings on Linux may help – it really depends on the size and number of compressed files you have open at any time, and the access patterns used. For example, random access to a compressed file will use many more kernel resources than sequential access.\nLinux production notes/Compression\nMultithreading¶\nDo not read or write a compressed file concurrently from multiple threads.\nHowever, multiple files can be read or written from their own threads concurrently (one file per thread). For example, a segmented historical database with secondary threads will be using the decompression in a multithreaded mode.\nRequirements¶\nCompression libraries may already be installed on your system. kdb+ binds dynamically to the compression libraries when required.\n64-bit and 32-bit kdb+ require corresponding 64-bit and 32-bit libs\nIf in doubt, consult your system administrator for assistance.\nGzip¶\nCompression algorithm 2\nuses Gzip. Source and algorithm details can be found here.\nThe following libraries are required by kdb+:\n| Linux | macOS | Windows |\n|---|---|---|\n| libz.so.1 | libz.dylib (pre-installed) |\nzlibwapi.dll (32-bit and 64-bit versions available from WinImage) |\nGzip has very good compression ratio and average compression/decompression speed. Avoid high compression levels (like 8 and 9) if write speed is important for you. Gzip with level 5 is a good general solution.\nSnappy¶\nCompression algorithm 3\nuses Snappy. Source and algorithm details can be found here.\nThe following libraries are required by kdb+:\n| Linux | macOS | Windows |\n|---|---|---|\n| libsnappy.so.1 | libsnappy.dylib (available via package managers such as Homebrew or MacPorts) |\nsnappy.dll |\nSnappy has excellent compression and decompression speed so it is a good choice if you optimize for query speed and ingestion times. Snappy falls behind the other compression solutions in compression ratio.\nLZ4¶\nCompression algorithm 4\nuses LZ4. 
Source and algorithm details can be found here.\nThe following libraries are required by kdb+:\n| Linux | macOS | Windows |\n|---|---|---|\n| liblz4.so.1 | liblz4.dylib (available through package managers such as Homebrew or MacPorts) |\nliblz4.dll (build the liblz4-dll project on Windows as outlined in the README at GitHub) |\nCertain releases of lz4\ndo not function correctly within kdb+\nNotably, lz4-1.7.5\ndoes not compress, and lz4-1.8.0\nappears to hang the process.\nkdb+ requires at least lz4-r129\n.\nlz4-1.8.3\nworks.\nWe recommend using the latest lz4\nrelease available.\nLZ4 is great at decompression speed, but is average in compression ratio. The compression level has a significant impact on compression speed. Level 5 is a good choice if you aim fast queries and low storage costs. Avoid high compression levels (above 11).\nZstd¶\nCompression algorithm 5\nuses zstd (Zstandard). Source and algorithm details can be found here.\nThe following libraries are required by kdb+:\n| Linux | macOS | Windows |\n|---|---|---|\n| libzstd.so.1 | libzstd.1.dylib (available via package managers such as Homebrew or MacPorts) |\nlibzstd.dll |\nZstd is outstanding in compression ratio of low entropy columns. Use low compression level (like 1) if you optimize for compression (write) speed and increase level to achieve better compression ratio. Avoid high levels (above 14).\nRunning kdb+ under Gdb¶\nYou should only ever need to run Gdb (the GNU debugger) if you are debugging your own custom shared libs loaded into kdb+.\nGdb will intercept SIGSEGV which should be passed to q. To tell it to do so, issue the following command at the Gdb prompt\n(gdb) handle SIGSEGV nostop noprint\nset\nCompression in kdb+\nLinux production notes: Huge Pages and Transparent Huge Pages\n\nFirewalling¶\nTips for securing your application\nRun kdb+ as a separate (non-root) user. If you need it to run on port 80, use authbind or iptables redirect.\nDo not allow that user to write to any directory or files. If you need file access, arbitrate it via IPC with another kdb+ process. Pay attention to how that process will return values via .z.pg\nor .z.ps\nor similar.\nFirewall all ports inbound and outbound except ones explicitly used.\nFor any backend kdb+ processes, restrict them to localhost\nor a protected network (e.g. iptables --pol ipsec\n)\nSet process limits with ulimit no larger than you need them.\nRestrict input by defining at least:\n.z.pc:{}\n.z.pg:{}\n.z.ph:{}\n.z.pi:{}\n.z.pm:{}\n.z.po:{}\n.z.pp:{}\n.z.pq:{}\n.z.ps:{}\nTo allow certain IPC calls, implement only the ones you want. A denylist for functions is tricky because some otherwise useful functions may have a mode that accesses the disk which may cause information leak (e.g. key). It is much easier to use an allowlist.\nAs IPC functions either receive a parse tree or a string (that you could parse yourself), check the type of the input e.g. 
x:$[10h=type x;parse x;x]
If you use WebSockets, define:
.z.wc:{a[.z.a]-:1}
.z.wo:{$[2<;a[.z.a]+:1;hclose .z.w;1]}
When handling untrusted input, consider designing your application to wrap public entrypoints with reval.
Pay attention to the fact that each WebSocket client can open up a lot of connections (200 on Mozilla, 256 for Chrome), so limit using .z.a.
Log connections and consider using fail2ban to block suspicious traffic.
Callbacks, Using .z
Permissions with kdb+
Q for Mortals: §11.6 Interprocess Communication

System commands¶
| Command | Description | Command | Description |
|---|---|---|---|
| \a | tables | \s | number of secondary threads |
| \b | views | \S | random seed |
| \B | pending views | \t | timer |
| \c | console size | \T | timeout |
| \cd | change directory | \ts | time and space |
| \C | HTTP size | \u | reload user password file |
| \d | directory | \v | variables |
| \e | error trap clients | \w | workspace |
| \E | TLS server mode | \W | week offset |
| \f | functions | \x | expunge |
| \g | garbage collection mode | \z | date parsing |
| \l | load file or directory | \1 & \2 | redirect |
| \o | offset from UTC | \_ | hide q code |
| \p | listening port | \ | terminate |
| \P | precision | \ | toggle q/k |
| \r | replication primary | \\ | quit |
| \r | rename | | |
System commands control the q environment. They have the form:
\cmd [p]
for some command cmd, and an optional parameter list p.
Commands with optional parameters that set values will show the current values if the parameters are omitted.
Some system commands have equivalent command-line parameters.
An alternative method of executing system commands is to use the system keyword. This executes a string representation of a system command and returns its result.
\a (tables)¶
List tables
\a
\a ns
Lists tables in namespace ns – defaults to the current namespace.
q)\a
`symbol$()
q)aa:bb:23
q)\a
`symbol$()
q)tt:([]dd:12 34)
q)\a
,`tt
q).nn.vv:([]uu:12 45)
q)\a
,`tt
q)\a .n
'.n
q)\a .nn
,`vv
q)\d .nn
q.nn)\a
,`vv
q.nn)vv
uu
--
12
45
q.nn)
\b (views)¶
List dependencies
\b
\b ns
Lists dependencies (views) in namespace ns – defaults to the current namespace.
q)a::x+y
q)b::x+1
q)\b
`s#`a`b
\B (pending views)¶
List pending dependencies
\B
\B ns
Lists pending dependencies (views) in namespace ns, i.e.
dependencies not yet referenced, or not referenced after their referents have changed.\nDefaults to current namespace.\nq)a::x+1 / a depends on x\nq)\\B / the dependency is pending\n,`a\nq)x:10\nq)\\B / still pending after x is defined\n,`a\nq)a / use a\n11\nq)\\B / no longer pending\n`symbol$()\n\\c\n(console size)¶\nConsole maximum rows and columns\n\\c\n\\c size\nWhere size\nis a pair of integers: rows and columns,\nthese values determine when q truncates output with ..\n.\nThe values are coerced to the range [10,2000].\nThe default values are as set by environment variables LINES\nand COLUMNS\n.\nIf the environment variables are undefined, the defaults are\nV4.0 or less 25 80\nV4.1+ dimensions of the command-shell window\nEnvironment variables LINES\nand COLUMNS\nSee Bash documentation for shopt\nparameter checkwinsize\nto make sure they’re reset as needed.\nq)\\c\n45 160\nq)\\c 5 5\nq)\\c\n10 10\nq)til each 20+til 10\n0 1 2 3..\n0 1 2 3..\n0 1 2 3..\n0 1 2 3..\n0 1 2 3..\n0 1 2 3..\n0 1 2 3..\n..\n\\C\n(HTTP size)¶\nHTTP display maximum rows and columns\n\\C\n\\C size\nWhere size\nis a pair of integers: rows and columns,\nthe values determine when q truncates output with ..\n.\nThe default is 36 2000\n; values are coerced to the range [10,2000].\n\\cd\n(change directory)¶\nCurrent directory\n\\cd\n\\cd fp\nWhere fp\nis a filepath, sets the current directory. Creates the directory if it does not exist.\nq)\\cd\n\"/home/guest/q\"\nq)\\cd /home/guest/dev\nq)\\cd\n\"/home/guest/dev\"\nq)\\pwd\n\"/home/guest/dev\"\n\\d\n(directory)¶\nCurrent namespace\n\\d\n\\d ns\nWhere ns\nis the name of a namespace, shows or sets the current namespace, also known as directory or context. The namespace can be empty, and a new namespace is created when an object is defined in it. The q session prompt indicates the current namespace.\nq)\\d / default namespace\n`.\nq)\\d .o / change to .o\nq.o)\\f\n`Cols`Columns`FG`Fkey`Gkey`Key`Special..\nq.o)\\d . / return to default\nq)key` / lists namespaces other than .z\n`q`Q`o`h\nq)\\d .s / change to non-existent namespace\nq.s)key` / not yet created\n`q`Q`o`h\nq.s)a:1 / create object, also creates namespace\nq.s)key`\n`q`Q`o`h`s\nQ for Mortals §12.7 Working in a Context\n\\e\n(error trap clients)¶\nError trapping\n\\e\n\\e mode\nGoverns error trapping for client requests. The default mode is 0 (off).\n| mode | behavior |\n|---|---|\n| 0 | When a client request has an error, by default the server clears the stack. Appropriate for production use as it enables the server to continue processing other client requests. |\n| 1 | The server suspends on an error, and does not process other requests until the stack is cleared. Appropriate for development: enables debugging on the server. |\n| 2 | Dumps stack to stderr for untrapped errors during request from a remote. (Since V3.5 2016.10.03) |\n\\E\n(TLS server mode)¶\n\\E\nDisplays TLS server mode as an int:\n0i plain\n1i plain and TLS\n2i TLS only\nCommand-line option -E\nto set the mode\n\\f\n(functions)¶\nList functions\n\\f\n\\f ns\nWhere ns\nis the name of a namespace, lists functions in it; defaults to current namespace.\nq)f:g:h:{x+2*y}\nq)\\f\n`f`g`h\nq)\\f .h\n`cd`code`data`eb`ec`ed`es`estr`fram`ha`hb`hc`he`hn`hp`hr`ht`hta`htac`htc`html`http`hu`hu..\nq){x where x like\"ht??\"}system\"f .h\"\n`htac`html`http\n\\g\n(garbage collection mode)¶\n\\g / current garbage-collection mode\n\\g mode / set garbage-collection mode\nShow or set garbage-collection mode. The default mode is 0 (deferred). 
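For example, querying and switching the mode from a running session might look like this (the values shown are indicative):
q)\g          / show current garbage-collection mode
0
q)\g 1        / switch to immediate mode
q)system"g"   / the same query via the system keyword
1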
Setting the garbage-collection mode will automatically call .Q.gc[] after setting the provided value.
Q manages its own thread-local heap. Objects in q use reference counting. As soon as there are no references to an object, its memory is eligible to be returned to the heap.
- 0 (deferred): Returns memory to the thread-local heap. Memory is subsequently returned to the OS when either .Q.gc[] is called or an allocation fails, hence this mode has a performance advantage, but it can be more difficult to dimension or manage memory requirements.
- 1 (immediate): As memory is returned to the thread-local heap, if the object is ≥64MB then the memory is returned to the OS instead. This has an associated performance overhead. As per deferred mode, memory used by the heap may be subsequently returned to the OS when either .Q.gc[] is called or an allocation fails.
When q is denied additional address space from the OS, it invokes .Q.gc[] and retries the request to the OS.
If the subsequent attempt fails, the request exits with 'wsfull.
Notes on the allocator
Q’s allocator bins objects in power-of-two size categories, from 16b (e.g. an atom) to 64MB.
In this example, various vectors of longs (8 bytes per long) are created of different sizes using til.
The memory used for each operation is shown via \ts. Note that more bytes are reported than the pure vector size, due to other housekeeping such as the type information.
q)\ts til 800  / 800*8=6400, needs a 2^13=8192 byte slab (too big for a 2^12=4096 byte slab)
0 8368
q)\ts til 1000 / 1000*8=8000, needs a 2^13=8192 byte slab (memory same as smaller vector above)
0 8368
q)\ts til 1200 / 1200*8=9600, can't fit in a 2^13=8192 byte slab, needs a 2^14=16384 byte slab
0 16560
If there is already a slab in the object category’s freelist, it is reused. If there are no available slabs, a larger slab is recursively split in two until the needed category size is reached. If there are no free slabs available, a new 64MB slab is requested from the system. When an object is de-allocated, its memory slab is returned to the corresponding category’s freelist.
Allocations larger than 64MB are requested from the OS directly, and this is what -g 1 causes to be immediately returned.
Note that larger allocations do not cause any fragmentation and, in the case of -g 1, are always immediately returned.
It is the smaller allocations (<64MB), which typically represent the bulk of a process’s allocation workload, that can cause the heap to become fragmented.
There are two primary cases of heap fragmentation:
- split slab: Suppose that at some point q needed a 32MB allocation. It requested a new 64MB slab from the OS, split it in half, used and freed the object, and returned the two 32MB slabs to the freelist. Now if q needs to allocate 64MB, it will have to make another request to the OS. When .Q.gc is called (or an allocation fails), it would attempt to coalesce these two 32MB slabs back into one 64MB slab, which would allow it to be returned to the OS (or reused for larger allocations, if the resulting slab is <64MB).
- leftover objects\n-\nIf most of the objects allocated from a 64MB slab are freed but one remains, the slab still cannot be returned to the OS (or coalesced).\nThe following example shows freeing an object ≥64MB in deferred\nmode, while inspecting memory usage via .Q.w[]\n:\nq).Q.w[]`used`heap / original memory used and memory reserved by kdb+ at time of test\n371552 67108864\nq)a:til 10000000 / need memory ≥64MB to store value\nq).Q.w[]`used`heap / heap (memory reserved by kdb+) has grown, and used memory grown from the heap has grown\n134589328 201326592\nq)a:1 / variable assigned different value, old value no longer used\nq).Q.w[]`used`heap / heap (memory reserved by kdb+) hasn't reduced as it is kept for future use, used memory has reduced\n371616 201326592\nq)a:til 10000000 / need memory ≥64MB to store value again\nq).Q.w[]`used`heap / heap memory (no increase) as memory used has been taken from the available heap\n134589328 201326592\nThe same example will differ when using immediate\nmode, by returning memory to the OS (as the object free'd is greater than 64MB):\nq).Q.w[]`used`heap / original memory used and memory reserved by kdb+ at time of test\n371648 67108864\nq)a:til 10000000 / need memory ≥64MB to store value\nq).Q.w[]`used`heap / heap (memory reserved by kdb+) has grown, and used memory from the heap has grown\n134589424 201326592\nq)a:1 / variable assigned different value, old value no longer used\nq).Q.w[]`used`heap / heap (memory reserved by kdb+) has reduced, it has been returned to OS\n371712 67108864\nq)a:til 10000000 / need memory ≥64MB to store value again\nq).Q.w[]`used`heap / heap memory has increased (requested from OS) as memory used is more than whats available to use in heap\n134589328 201326592\nImmediate mode\nwill not return the memory to the OS when several objects less than 64MB each are freed, even though their sum may be more than 64MB.\nIn this situation, immediate\nand deferred\nmode operate identically by adding the freed memory to the heap for future use.\nThe following examples shows this effect when running in immediate mode\n.\nNo memory is returned to the OS on freeing the objects, and only when .Q.gc[]\nis run is the memory coalesced and freed.\nq).Q.w[]`used`heap / original memory used and memory reserved by kdb+ at time of test\n371648 67108864\nq)v:`a`b`c`d`e`f`g`h`i`j / create a list of 10 variable names to use\nq){set[x;til 1000000]} each v / create a global variable using each of the names in v, each containing 1000000 longs\nq).Q.w[]`used`heap / heap (memory reserved by kdb+) has grown, and used memory from the heap has grown\n84258096 134217728\nq)![`.;();0b;v] / delete all the variables and their contents\nq).Q.w[]`used`heap / used memory has been reduced, but none of the heap memory has returned to the OS\n371824 134217728\nq).Q.gc[] / running garbage collection freed over 64MB\n67108864\nCommand-line option -g\n(garbage collection mode), Command-line parameter -w\n(workspace memory limit), System command \\w\n(memory stats and workspace memory limit)\nQ for Mortals\n§13.1.10 Garbage Collection \\g\n\\l\n(load file or directory)¶\n\\l name\n\\l .\nWhere name\nis the name of a\n- q script, executes the script\n- serialized object, deserializes it into memory as variable\nname\n- directory of a splayed table, maps the table to variable\nname\n, without loading any columns into memory - directory and the value of one of the permitted partition types, the most recent partition directory is inspected for splayed directories and each such 
directory mapped into memory with the name of the splayed directory\n- directory containing a kdb+ database, recursively loads whatever it finds there: serialized objects, scripts, splayed tables, etc.\nCurrent directory When a directory is opened, it becomes the current directory.\nReload current directory You can reload the current database with \\l .\n. This will ignore scripts and reload only data.\nNever mind the dollars If a file or directory under the path being loaded has a dollar-sign suffix then it is ignored. e.g. db/tickdata/myfile$\nand db/tickdata/mydir$\nwould be ignored on \\l db/tickdata\nor on \\l .\nif db/tickdata\nis the current directory.\nq)\\l sp.q / load sp.q script\n...\nq)\\a / tables defined in sp.q\n`p`s`sp\nq)\\l db/tickdata / load the data found in db/tickdata\nq)\\a / with tables quote and trade\n`p`quote`s`sp`trade\nIf logging is enabled, the command checkpoints the .qdb\nfile and empties the log file.\nOperating systems may create hidden files, such as DS_Store\n, that block \\l\non a directory.\nload\n,\n.Q.l\n(load)\nLogging\nQ for Mortals\n§10.3 Scripts,\n§13.2.6 Logging -l\nand -L\n\\o\n(offset from UTC)¶\n\\o\n\\o n\nShow or set the local time offset, as integer n\nhours from UTC, or as minutes if abs[n]>23\n.\nThe initial value of 0N\nmeans the machine’s offset is used.\nq)\\o\n0N\nq).z.p / UTC\n2010.05.31D23:45:52.086467000\nq).z.P / local time is UTC + 8\n2010.06.01D07:45:53.830469000\nq)\\o -5 / set local time as UTC - 5\nq).z.P\n2010.05.31D18:45:58.470468000\nq)\\o 390 / set local time as UTC + 6:30\nq).z.P\n2010.06.01D06:16:06.603981000\nThis corresponds to the -o\ncommand line parameter.\n\\p\n(listening port)¶\nShow or set listening port\n\\p [rp,][hostname:][portnumber|servicename]\nSee Listening port for detail.\nhopen\n-p\ncommand-line option\nMultithreaded input mode,\nChanges in 3.5\nSocket sharding with kdb+ and Linux\n\\P\n(precision)¶\n\\P\n\\P n\nShow or set display precision for floating-point numbers, i.e. the number of digits shown.\nThe default value of n\nis 7 and possible values are integers in the range [0,17].\nA value of 0 means use maximum precision.\nThis is used when exporting to CSV files.\nq)\\P / default\n7i\nq)reciprocal 7 / 7 digits shown\n0.1428571\nq)123456789 / integers shown in full\n123456789\nq)123456789f / floats shown to 7 significant digits\n1.234568e+08\nq)\\P 3\nq)1%3\n0.333\nq)\\P 10\nq)1%3\n0.3333333333\nUse .Q.fmt\nand .q.f\nto format numbers to given width and precision\nq).Q.fmt[8;6]a / format to width 8, 6 decimal places\n\"0.142857\"\nq).Q.f[2;]each 9.996 34.3445 7817047037.90 / format to 2 decimal places\n\"10.00\"\n\"34.34\"\n\"7817047037.90\"\n.Q.f\n(precision format),\n.Q.fmt\n(precision format with length)\nPrecision,\n-P\ncommand-line option,\n-27!\nprecision format with IEEE754 rounding\nWhat Every Computer Scientist Should Know About Floating-Point Arithmetic\n\\r\n(replication primary)¶\n\\r\nThis should not be executed manually otherwise it can disrupt replication. 
It is executed automatically by the replicating process on the primary process, and returns the log file name and log file count.\n\\r\n(rename)¶\n\\r src dst\nRename file src\nto dst\n.\nIt is equivalent to the Unix mv\ncommand, or the windows move\ncommand (except that it will not rename to a different disk drive).\n\\s\n(number of secondary threads)¶\n\\s\n\\s N\nShow or , where N\nis an integer, set the number of secondary threads available for parallel processing, within the limit set by the -s\ncommand-line option.\nN\nis an integer.\nSince V3.5 2017.05.02, secondary threads can be adjusted dynamically up to the maximum specified on the command line. A negative N\nindicates processes should be used, instead of threads.\nq)0N!(\"current secondary threads\";system\"s\");system\"s 4\";0N!(\"current,max secondary threads\";system\"s\";system\"s 0N\"); / q -s 8\n(\"current secondary threads\";0i)\n(\"current,max secondary threads\";4i;8i)\nq)system\"s 0\" / disable secondary threads\nq)system\"s 0N\" / show max secondary threads\n8i\nN parallel processing uses\n------------------------------------\n>0 N threads\n<0 processes with handles in .z.pd\nFor processes:\npeach\nor':\nwill call.z.pd\nfor a list of handles to the processes, which must have been started previously- the absolute value of\n-N\nin the command line is ignored\n-s\ncommand-line option,\nParallel processing\n\\S\n(random seed)¶\n\\S\n\\S n\nWhere n\nis\n- omitted: display the last value to which the random seed was initialized\n0N\n: display the current value of the random seed (since V3.6)- non-zero integer: re-initialize the seed to\nn\nNote that \\S\ndisplays the last value to which the seed was initialized: it is not updated as the random-number generator (rng) is used.\nq)\\S / default\n-314159i\nq)5?10\n8 1 9 5 4\nq)5?10\n6 6 1 8 5\nq)\\S -314159 / restore default seed\nq)5?10 / same random numbers generated\n8 1 9 5 4\nq)\\S / seed is not updated\n-314159\nq)x:system \"S 0N\" / current value of seed\nq)r:10?10\nq)system \"S \",string x / re-initialize seed\nq)r~10?10\n1b\nAllows user to save and restore state of the rng. (Since V3.6 2017.09.26.)\nq)x:system\"S 0N\";r:10?10;system\"S \",string x;r~10?10\n1b\nThread-local\nSince V3.1 2013.08.19 random-number generation (rng) is thread-local.\n\\S 1234\nsets the seed for the rng for the main thread only.\nThe rng in a secondary thread is assigned a seed based on the secondary thread number.\nIn multithreaded input mode, the seed is based on the socket descriptor.\nInstances started on ports 20000 through 20099 (secondary threads, used with e.g. q -s -4\nhave the main thread’s default seed based on the port number.\n\\t\n(timer)¶\n\\t / show timer interval\n\\t N / set timer interval\n\\t exp / time expression\n\\t:n exp / time n repetitions of expression\nThis command has two different uses, according to the parameter. If the parameter is omitted, it shows the number of milliseconds between timer ticks: 0 means the timer is off.\nN\n(integer)-\nSet the number of milliseconds between timer ticks. If 0, the timer is disabled, otherwise the timer is enabled and the first tick given. On each tick, the function assigned to\n.z.ts\nis executed. -\nThis usage corresponds to the\n-t\ncommand-line option [:n] e\n(expression)-\nA q expression\ne\n(other than a single integer) is executed and the execution time shown in milliseconds. 
Since V3.0 2011.11.22, ifn\nis specified,e\nis executedn\ntimes.\nq)/Show or set timer ticks\nq)\\t / default off\n0\nq).z.ts:{show`second$.z.N}\nq)\\t 1000 / tick each second\nq)13:12:52\n13:12:53\n13:12:54\n\\t 0 / turn off\nq)/Time an expression\nq)\\t log til 100000 / milliseconds for log of first 100000 numbers\n3\nq)\\t:100 log til 100000 / timing for 100 repetitions\n186\nActual timer tick frequency\nThe actual timer tick frequency is determined by the timing granularity supported by the underlying operating system. This can be considerably different from a millisecond.\n\\T\n(timeout)¶\n\\T\n\\T n\nShow or set the client execution timeout, as n\n(integer) number of seconds a client call will execute before timing out.\nThe default is 0: no timeout.\nNote this is in seconds, not milliseconds like \\t\n.\n\\ts\n(time and space)¶\n\\ts exp\n\\ts:n exp\nExecutes the expression exp\nand shows the execution time in milliseconds and the space used in bytes.\n(Since 3.1 2014.02.07)\nq)\\ts log til 100000\n7 2621568\nq)\\ts:10000 log til 1000 /same as \\ts do[10000; log til 1000]\n329 24672\n\\u\n(reload user password file)¶\n\\u\nWhen q is invoked with the -u\nparameter specifying a user password file, then \\u\nwill reload the password file. This allows updates to the password file while the server is running.\n\\v\n(variables)¶\n\\v\n\\v ns\nLists the variables in namespace ns\n; defaults to current namespace.\nq)a:1+b:2\nq)\\v\n`a`b\nq)\\v .h\n`HOME`br`c0`c1`logo`sa`sb`sc`tx`ty\nq){x where x like\"????\"}system\"v .h\"\n`HOME`logo\nTo expunge a\nfrom the default namespace\ndelete a from `.\nQ for Mortals §12.5 Expunging from a Context\n\\w\n(workspace)¶\n\\w / current memory usage\n\\w 0|1 / internalized symbols\n\\w n / set workspace memory limit\nWith no parameter, returns current memory usage, as a list of 6 long integers.\n0 number of bytes from the heap that are currently in use\n1 heap size in bytes\n2 maximum heap size so far\n3 limit on thread heap size, from -w command-line option or \\w system command\n4 mapped bytes\n5 physical memory\nq)\\w\n168144 67108864 67108864 0 0 8589934592\n\\w 0\nand \\w 1\nreturn a pair of longs:\n0 number of internalized symbols\n1 corresponding memory usage\nq)\\w 0\n577 25436\nThe utility .Q.w\nformats all this information.\nRun-time increase\nSince 2017.11.06, \\w\nallows the workspace limit to be increased at run-time, if it was initialized via the\n-w\ncommand-line option. For example \\w 128\nsets the limit to 128MB if the -w\ncommand line option was specified\nwith a smaller value. 
The operation will return the current setting in bytes.\nIf the system tries to allocate more memory than allowed, it signals -w abort\nand terminates with exit code 1.\nSpecifying too large a number will fall back to the same behavior as \\w 0\nor \\w 1\n.\nq)\\w\n339168 67108864 67108864 104857600 0 8589934592\nq)\\w 0\n651 28009\nq)\\w 128\n134217728\nq)\\w 1000000000\n1048576000000000\nq)\\w 1000000000000\n651 28009\nIf the workspace limit has not been set by the command-line option -w\n, an error is signalled.\nq)\\w 3\n'-w init via cmd line\nDomain-local Since V4.0 2020.03.17 returns information for the current memory domain only.\nq)value each (\"\\\\d .m\";\"\\\\w\";\"\\\\d .\";\"\\\\w\")\n::\n353968 67108864 67108864 0 0 8589934592\n::\n354032 67108864 67108864 0 0 8589934592\n-w\nworkspace command-line option, \\g\n(garbage-collection mode)\n.m\nnamespace (DAX-enabled filesystems)\n\\W\n(week offset)¶\n\\W\n\\W n\nShow or set the start-of-week offset n\n, where 0 is Saturday. The default is 2, i.e Monday.\n\\x\n(expunge)¶\n\\x .z.p*\nBy default, callbacks like .z.po\nare not defined in the session. After they have been assigned, you can restore the default using \\x\nto delete the definition that was made.\nq).z.pi / default has no user defined function\n'.z.pi\nq).z.pi:{\">\",.Q.s value x} / assign function\nq)2+3\n>5\nq)\\x .z.pi / restore default\nWorks only for .z.p*\nvariables defined in k before q.k\nis loaded\nFor example, as .z.ph\nis defined in q.k\n, there is no default for it to be reset to.\n\\z\n(date parsing)¶\n\\z\n\\z 0|1\nShow or set the format for \"D\"$\ndate parsing. B\nis 0 for mm/dd/yyyy and 1 for dd/mm/yyyy.\nq)\\z\n0\nq)\"D\"$\"06/01/2010\"\n2010.06.01\nq)\\z 1\nq)\"D\"$\"06/01/2010\"\n2010.01.06\n\\1\n& \\2\n(redirect)¶\n\\1 filename\n\\2 filename\n\\1\nand \\2\nlet you redirect stdout and stderr to files from within the q session. The files and intermediate directories are created if necessary.\n~/q$ rm -f t1.txt t2.txt\n~/q$ l64/q\nKDB+ 4.0 2021.04.26 Copyright (C) 1993-2021 Kx Systems\n...\nq)\\1 t1.txt / stdout\nq)\\2 t2.txt / stderr\ntil 10\n2 + \"hello\"\n\\\\\n~/q$ cat t1.txt / entry in stdout\n0 1 2 3 4 5 6 7 8 9\n~/q$ cat t2.txt / entry in stderr\nq)q)'type\nOn macOS and Linux \\1 /dev/stdin\nreturns output to the default.\n\\_\n(hide q code)¶\n\\_ / show client write access\n\\_ scriptname / make runtime script\nThis command has two different uses depending on whether a parameter is given.\nIf no parameter, then \\_\nchecks if client write-access is blocked.\nq)\\_\n0b\nIf a parameter is given, it should be a scriptname and \\_ f.q\nmakes a runtime script f.q_\n. The q code loaded from a runtime script cannot be viewed or serialized.\nq)`:t1.q 0:enlist \"a:123;f:{x+2*y}\"\nq)\\_ t1.q / create locked script\n`t1.q_\nq)\\l t1.q_ / can be loaded as usual\nq)a / definitions are correct\n123\nq)f[10;1 2 3]\n12 14 16\nq)f / q code is not displayed\nlocked\nq)-8!f / or serialized\n'type\n[0] -8!f\n^\nq)read0`:t1.q\n\"a:123;f:{x+2*y}\"\nq)read0`:t1.q_ / file contents are scrambled\n\"'\\374E\\331\\207'\\262\\355\"\n\"S\\014%\\210\\0273\\245\"\n\\\n(terminate)¶\nAt the debugger’s q))\nprompt clears one level from the execution stack and (eventually) returns to the interactive session.\nq)f:{g[]}\nq)g:{'`xyz}\nq)f[]\n{g[]}\n'xyz\n@\n{'`xyz}\n::\nq))\\\nq)\nWithout a suspension, \\\ntoggles in an out of the k interpreter.\nIf there is a suspension, this exits one level of the suspension. Otherwise, it toggles between q and k mode. 
(To switch languages from inside a suspension, type "\".)
q){1+x}"hello"
{1+x}
'type
+
1
"hello"
q))\ / clear suspension (only one level)
q)\ / toggle to k mode
\ (toggle q/k)¶
In the interactive session \ toggles between the q and k interpreters.
q)\
\
!5 / this is k
0 1 2 3 4
\
q)
The k programming language is exposed infrastructure.
\\ (quit)¶
\\
- In the interactive session type \\ at the prompt to quit the session.
- Inside a function, use value"\\\\" or exit 0 for the same result.
Final comments
The text following \\ and white space is ignored by q. This is often useful in scripts where \\ can be followed by comments or usage examples.
Interrupt and terminate¶
Ctl-c signals an interrupt to the interpreter.
Some operations are coded so tightly the interrupt might not be registered.
Ctl-z will kill the q session. Nothing in memory is saved.
OS commands¶
If an expression begins with \ but is not recognized as a system command, then it is executed as an OS command.
Typos can get passed to the OS
q)\ls / usual ls command
"help.q"
"k4.lic"
"l64"
"odbc.k"
"profile.q"
"q.k"
..

ss, ssr¶
String search – and replace
ss¶
String search
x ss y    ss[x;y]
Where
- x is a string
- y is a pattern as a string (no asterisk)
returns an int vector of the position/s within x of substrings that match pattern y.
q)"We the people of the United States" ss "the"
3 17
q)s:"toronto ontario"
q)s ss "ont"
3 8
q)s ss "[ir]o"
2 13
q)s ss "t?r"
0 10
ssr¶
String search and replace
ssr[x;y;z]
Where
- x is a string
- y is a pattern as a string (no asterisk)
- z is a string or a function
returns x with each substring matching y replaced by:
- z, if z is a string
- z[Y], where z is a function and Y is the matched substring
q)s:"toronto ontario"
q)ssr[s;"ont";"x"] / replace "ont" by "x"
"torxo xario"
q)ssr[s;"t?r";upper] / replace matches by their uppercase
"TORonto onTARio"
like
Regular Expressions in q
Strings
Using regular expressions

string¶
Cast to string
string x    string[x]
Returns x as a string. Applies to all datatypes.
q)string `ibm
"ibm"
q)string 2
,"2"
q)string {x*x}
"{x*x}"
q)string (+/)
"+/"
Implicit iteration¶
string is an atomic function and iterates through dictionaries and tables.
q)string (2 3;"abc")
(,"2";,"3")
(,"a";,"b";,"c")
q)string "cat" / not the no-op you might expect
,"c"
,"a"
,"t"
q)string `a`b`c!2002 2004 2010
a| "2002"
b| "2004"
c| "2010"
q)string ([]a:1 2 3;b:`ibm`goog`aapl)
a b
-----------
,"1" "ibm"
,"2" "goog"
,"3" "aapl"
Domain and range¶
domain b g x h i j e f c s p m d z n u v t
range  c c c c c c c c c c c c c c c c c c
Range: c
.h namespace
.Q.addr (IP/host as int), .Q.f (precision format), .Q.fmt (precision format with length)
Q for Mortals §7.3.1 Data to Strings

sublist¶
Select a sublist of a list
x sublist y    sublist[x;y]
Where
- x is an integer atom or pair
- y is a list
returns a sublist of y.
The result contains no more items than are available in y\n.\nHead or tail¶\nWhere x\nis an integer atom returns up to x\nitems from the beginning of y\nif positive, or from the end if negative\nq)p:2 3 5 7 11\nq)3 sublist p / 3 from the front\n2 3 5\nq)10 sublist p / only available values\n2 3 5 7 11\nq)2 sublist `a`b`c!(1 2 3;\"xyz\";2 3 5) / 2 keys from a dictionary\na| 1 2 3\nb| x y z\nq)-3 sublist sp / last 3 rows of a table\ns p qty\n-------\n3 1 200\n3 3 300\n0 4 400\nTaking a sample from the beginning of string can go wrong if the string turns out to be shorter than the sample taken.\nq)10#\"take me\"\n\"take metak\"\nInstead, compose Pad with sublist\n.\nq){x$x sublist y}[10;]\"take me\"\n\"take me \"\nSlice¶\nWhere x\nis an integer pair returns up to x[1]\nitems from y\n, starting at item x[0]\n.\nq)1 2 sublist p / 2 items starting from position 1\n3 5\n\n-\nSubtract¶\nx-y -[x;y]\nWhere x\nand y\nare numerics or temporals, returns their\ndifference.\nq)3 4 5-2\n1 2 3\nq)2000.11.22 - 03:44:55.666\n2000.11.21D20:15:04.334000000\n-\nis a multithreaded primitive.\nImplicit iteration¶\nSubtract is an atomic function.\nq)(10;20 30)-(2;3 4)\n8\n17 26\nIt applies to dictionaries and tables.\nq)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 -21 3;4 5 -6)\nq)d-1\na| 9 -22 2\nb| 3 4 -7\nq)d-`b`c!(10 20 30;1000*1 2 3) / upsert semantics\na| 10 -21 3\nb| -6 -15 -36\nc| -1000 -2000 -3000\nq)t-100\na b\n---------\n-90 -96\n-121 -95\n-97 -106\nq)k-k\nk | a b\n---| ---\nabc| 0 0\ndef| 0 0\nghi| 0 0\nAdd is generally faster than Subtract.\nRange and domains¶\nb g x h i j e f c s p m d z n u v t\n----------------------------------------\nb | i . i i i j e f i . p m d z n u v t\ng | . . . . . . . . . . . . . . . . . .\nx | i . i i i j e f i . p m d z n u v t\nh | i . i i i j e f i . p m d z n u v t\ni | i . i i i j e f i . p m d z n u v t\nj | j . j j j j e f j . p m d z n u v t\ne | e . e e e e e f e . p m d z n u v t\nf | f . f f f f f f f . f f z z f f f f\nc | . . . . . . . f . . p m d z n u v t\ns | . . . . . . . . . . . . . . . . . .\np | p . p p p p p f p . n . . . p p p p\nm | m . m m m m m f m . . i . . p p p p\nd | d . d d d d d z d . . . i . p p p p\nz | z . z z z z z z z . . . . f p z z z\nn | n . n n n n n f n . p p p p n n n n\nu | u . u u u u u f u . p p p z n u v t\nv | v . v v v v v f v . p p p z n v v t\nt | t . t t t t t f t . p p p z n t t t\nRange: defijmnptuvz\nAdd,\ndeltas\n,\ndiffer\n,\n.Q.addmonths\nDatatypes,\nMathematics\nHow to handle temporal data in q\nQ for Mortals\n§4.9.2 Temporal Arithmetic\n\nsum\n, sums\n, msum\n, wsum\n¶\nTotals – simple, running, moving, and weighted\nsum\n¶\nTotal\nsum x sum[x]\nWhere x\nis\n- a simple numeric list, returns the sums of its items\n- an atom, returns\nx\n- a list of numeric lists, returns their sums\n- a dictionary with numeric values\nNulls are treated as zeros.\nq)sum 7 / sum atom (returned unchanged)\n7\nq)sum 2 3 5 7 / sum list\n17\nq)sum 2 3 0N 7 / 0N is treated as 0\n12\nq)sum (1 2 3 4;2 3 5 7) / sum list of lists\n3 5 8 11 / same as 1 2 3 4 + 2 3 5 7\nq)sum `a`b`c!1 2 3\n6\nq)\\l sp.q\nq)select sum qty by s from sp / use in select statement\ns | qty\n--| ----\ns1| 1600\ns2| 700\ns3| 200\ns4| 600\nq)sum \"abc\" / type error if list is not numeric\n'type\nq)sum (0n 8;8 0n) / n.b. 
sum list of vectors does not ignore nulls\n0n 0n\nq)sum 0n 8 / the vector case was modified to match sql92 (ignore nulls)\n8f\nq)sum each flip(0n 8;8 0n) /do this to fall back to vector case\n8 8f\nsum\nis an aggregate function, equivalent to +/\n.\nFloating-point addition is not associative\nDifferent results may be obtained by changing the order of the summation.\n❯ q -s 4\nKDB+ 4.0 2021.01.20 Copyright (C) 1993-2021 Kx Systems\nm64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 ..\nq)\\s 0\nq)a:100000000?1.\nq)\\P 0\nq)sum a\n49999897.181930684\nq)sum reverse a\n49999897.181931004\nThe order of summation changes when the primitive is able to use threads.\nq)\\s 4\nq)sum a\n49999897.181933172\nsum\nis a multithreaded primitive.\nsums\n¶\nRunning totals\nsums x sums[x]\nWhere x\nis a numeric or temporal list, returns the cumulative sums of the items of x\n.\nThe sum of an atom is itself. Nulls are treated as zeros.\nq)sums 7 / cumulative sum atom (returned unchanged)\n7\nq)sums 2 3 5 7 / cumulative sum list\n2 5 10 17\nq)sums 2 3 0N 7 / 0N is treated as 0\n2 5 5 12\nq)sums (1 2 3;2 3 5) / cumulative sum list of lists\n1 2 3 / same as (1 2 3;1 2 3 + 2 3 5)\n3 5 8\nq)\\l sp.q\nq)select sums qty by s from sp / use in select statement\ns | qty\n--| --------------------------\ns1| 300 500 900 1100 1200 1600\ns2| 300 700\ns3| ,200\ns4| 100 300 600\nq)sums \"abc\" / type error if list is not numeric\n'type\nsums\nis a uniform function, equivalent to +\\\n.\nmsum\n¶\nMoving sums\nx msum y msum[x;y]\nWhere\nx\nis a positive int atomy\nis a numeric list\nreturns the x\n-item moving sums of y\n, with nulls replaced by zero. The first x\nitems of the result are the sums of the terms so far, and thereafter the result is the moving sum.\nq)3 msum 1 2 3 5 7 11\n1 3 6 10 15 23\nq)3 msum 0N 2 3 5 0N 11 / nulls treated as zero\n0 2 5 10 8 16\nmsum\nis a uniform function.\nwsum\n¶\nWeighted sum\nx wsum y wsum[x;y]\nWhere x\nand y\nare numeric lists, returns the weighted sum of the products of x\nand y\n. When both x\nand y\nare integer lists, they are first converted to floats.\nq)2 3 4 wsum 1 2 4 / equivalent to sum 2 3 4 * 1 2 4f\n24f\nq)2 wsum 1 2 4 / equivalent to sum 2 * 1 2 4\n14\nq)(1 2;3 4) wsum (500 400;300 200)\n1400 1600\nwsum\nis an aggregate function, equivalent to {sum x*y}\n.\nImplicit iteration¶\nsum\n, sums\n, and msum\napply to dictionaries and tables.\nwsum\napplies to dictionaries.\nq)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 21 3;4 5 6)\nq)sum d\n14 26 9\nq)sum t\na| 34\nb| 15\nq)sum k\na| 34\nb| 15\nq)sums d\na| 10 21 3\nb| 14 26 9\nq)2 msum t\na b\n-----\n10 4\n31 9\n24 11\nq)1 2 wsum d\n18 31 15\nAggregating nulls¶\navg\n, min\n, max\nand sum\nare special: they ignore nulls, in order to be similar to SQL92.\nBut for nested x\nthese functions preserve the nulls.\nq)sum (1 2;0N 4)\n0N 6\nDomains and ranges¶\nsum\nand sums\ndomain: b g x h i j e f c s p m d z n u v t\nrange: i . i i i j e f i . p m d z n u v t\nmsum\nb g x h i j e f c s p m d z n u v t\n----------------------------------------\nb | i . i i i j e f . . n i i f n u v t\ng | . . . . . . . . . . . . . . . . . .\nx | i . i i i j e f . . n i i f n u v t\nh | i . i i i j e f . . n i i f n u v t\ni | i . i i i j e f . . n i i f n u v t\nj | i . i i i j e f . . n i i f n u v t\ne | . . . . . . . . . . . . . . . . . .\nf | . . . . . . . . . . . . . . . . . .\nc | . . . . . . . . . . . . . . . . . .\ns | . . . . . . . . . . . . . . . . . .\np | . . . . . . . . . . . . . . . . . .\nm | . . . . . . . . . . . . . . . . 
. .
d | . . . . . . . . . . . . . . . . . .
z | . . . . . . . . . . . . . . . . . .
n | . . . . . . . . . . . . . . . . . .
u | . . . . . . . . . . . . . . . . . .
v | . . . . . . . . . . . . . . . . . .
t | . . . . . . . . . . . . . . . . . .
Range: efijntuv
wsum
b g x h i j e f c s p m d z n u v t
----------------------------------------
b | i . i i i j e f . . p m d z n u v t
g | . . . . . . . . . . . . . . . . . .
x | i . i i i j e f . . p m d z n u v t
h | i . i i i j e f . . p m d z n u v t
i | i . i i i j e f . . p m d z n u v t
j | j . j j j j e f . . p m d z n u v t
e | e . e e e e e f . . p m d z n u v t
f | f . f f f f f f f . f f z z f f f f
c | . . . . . . . f . . p m d z n u v t
s | . . . . . . . . . . . . . . . . . .
p | p . p p p p p f p . . . . . . . . .
m | m . m m m m m f m . . . . . . . . .
d | d . d d d d d z d . . . . . . . . .
z | z . z z z z z z z . . . . . . . . .
n | n . n n n n n f n . . . . . . . . .
u | u . u u u u u f u . . . . . . . . .
v | v . v v v v v f v . . . . . . . . .
t | t . t t t t t f t . . . . . . . . .
Range: defijmnptuvz

/ - function for loading in config csv with multiple processes in one line
duplicateconfig:{[t] update proc:raze[t `proc] from ((select from t)where count each t[`proc])};

/ - end of default parameters

/- called at every EOD by .u.end
init:{
  .lg.o[`init;"searching for servers"];
  /- Open connection to discovery. Retry until connected to dqe.
  .servers.startupdependent[`dqedb; 30];
  /- set timer to call EOD
  if[.dqe.utctime=1b;.eodtime.nextroll:.eodtime.getroll[`timestamp$.dqe.currentpartition]+(.z.T-.z.t)];
  .timer.once[.eodtime.nextroll;(`.u.end;.dqe.getpartition[]);"Running EOD on Checker"];
  /- add dqe functions to .api.detail
  .api.add .'value each .dqe.readdqeconfig[.dqe.detailcsv;"SB***"];
  .dqe.compcounter[0N]:(0N;();());

  configtable:([] action:`$(); params:(); proc:(); mode:`$(); starttime:`timespan$(); endtime:`timespan$(); period:`timespan$())
  /- Set up configtable from csv
  `.dqe.configtable upsert .dqe.duplicateconfig[update ";"vs/:proc from (.dqe.readdqeconfig[.dqe.configcsv;"S**SNNN"])];
  update checkid:til count .dqe.configtable from `.dqe.configtable;
  /- from timespan to timestamp
  update starttime:(`date$(.z.D,.z.d).dqe.utctime)+starttime from `.dqe.configtable;
  update endtime:?[0W=endtime;0Wp;(`date$(.z.D,.z.d).dqe.utctime)+endtime] from `.dqe.configtable;

  .dqe.loadtimer'[.dqe.configtable];

  /- store i numbers of rows to be saved down to DB
  .dqe.tosavedown:()!();
  .lg.o[`.dqc.init; "Starting EOD writedown."];
  /- Checking if .eodtime.nextroll is correct
  if[((.z.P,.z.p).dqe.utctime)>.eodtime.nextroll:.eodtime.getroll[((.z.P,.z.p).dqe.utctime)];system"t 0";.lg.e[`init; "Next roll is in the past."]]
  st:.dqe.writedownperiod+exec min starttime from .dqe.configtable;
  et:.eodtime.nextroll-.dqe.writedownperiod;
  /- Log the start and end times.
  .lg.o[`.dqe.init; "Start time: ",(string st),". 
End time: \",string et];\n .timer.repeat[st;et;.dqe.writedownperiod;(`.dqe.writedown;`);\"Running periodic writedown for results\"];\n .timer.repeat[st;et;.dqe.writedownperiod;(`.dqe.writedownconfig;`);\"Running periodic writedown for configtable\"];\n .lg.o[`init;\"initialization completed\"];\n }\n\nwritedown:{\n if[0=count .dqe.tosavedown`.dqe.results;:()];\n .dqe.savedata[.dqe.dqcdbdir;.dqe.getpartition[];.dqe.tosavedown`.dqe.results;`.dqe;`results];\n /- get handles for DBs that need to reload\n hdbs:distinct raze exec w from .servers.SERVERS where proctype=`dqcdb;\n /- send message for DBs to reload\n .dqe.notifyhdb[.os.pth .dqe.dqcdbdir]'[hdbs];\n }\n\nwritedownconfig:{\n if[0=count .dqe.tosavedown`.dqe.configtable;:()];\n .dqe.savedata[.dqe.dqcdbdir;.dqe.getpartition[];.dqe.tosavedown`.dqe.configtable;`.dqe;`configtable];\n /- get handles for DBsthat need to reload\n hdbs:distinct raze exec w from .servers.SERVERS where proctype=`dqcdb;\n /- send message for DB\n .dqe.notifyhdb[.os.pth .dqe.dqcdbdir]'[hdbs];\n }\n\n/- checks for unfinished runs that match the new run\ndupchk:{[runtype;idnum;params;proc]\n if[params`comp;proc:params`compresproc];\n if[`=proc;:()];\n if[count select from .dqe.results where id=idnum,procschk=proc,chkstatus=`started;\n .dqe.updresultstab[runtype;idnum;0Np;0b;\"error:fail to complete before next run\";`failed;params;proc]];\n }\n\n/- set initial values in results table\ninitstatusupd:{[runtype;idnum;funct;params;rs]\n if[idnum in exec id from .dqe.compcounter;delete from `.dqe.compcounter where id=idnum;];\n .lg.o[`initstatus;\"setting up initial record(s) for id \",(string idnum)];\n /- calls dupchk function to check if last runs chkstatus is still started\n .dqe.dupchk[runtype;idnum;params]'[rs];\n vars:params`vars;\n updvars:(key params[`vars]) where (),10h=type each value params`vars;\n if[count updvars;vars[updvars]:`$params[`vars] updvars];\n parprint:`$(\",\" sv string (raze/) (),enlist each vars params`fnpar),$[params`comp;\",comp(\",(string params[`compproc]),\",\",(string params`compallow),\")\";\"\"];\n `.dqe.results insert (idnum;funct;parprint;rs[0];rs[1];.proc.cp[];0Np;0b;\"\";`started;runtype);\n }\n\n/- updates a check in the results table\nupdresultstab:{[runtype;idnum;end;res;des;status;params;proc]\n if[1b=params`comp;proc:params`compresproc];\n /- obtain count of checks that will be updated\n if[c:count s:exec i from .dqe.results where id=idnum, procschk=proc,chkstatus=`started;\n .lg.o[`updresultstab;raze \"run check id \",(string idnum),\" update in results table with check status \",string status];\n `.dqe.results set update endtime:end,result:res,descp:enlist des,chkstatus:status,chkruntype:runtype from .dqe.results where id=idnum,procschk=proc,chkstatus=`started];\n .dqe.tosavedown[`.dqe.results],:s;\n delete from `.dqe.compcounter where id=idnum;\n params:()!();\n s2:exec i from .dqe.configtable where checkid=idnum;\n .dqe.tosavedown[`.dqe.configtable]:.dqe.tosavedown[`.dqe.configtable] union s2;\n .lg.o[`updresultstab;\"Updated check id \",(string idnum),\" in the results table with status \",string status];\n }\n\n/- compares the third atom of results when comparison is on\nchkcompare:{[runtype;idnum;params]\n /- checks if all async check results have returned - if not, exit the function\n if[params[`compcount]<>(d:.dqe.compcounter idnum)`counter;:()];\n .lg.o[`chkcompare;\"comparison started with id \",string idnum];\n /- obtain all the check returns\n a:d[`results] where not d[`procs]=params`compproc;\n 
procsforcomp:d[`procs] except params`compproc;\n /- obtain the check to compare the others to\n b:d[`results] where d[`procs]=params`compproc;\n\n /- if error in compare proc then fail check\n if[@[{all 0W=x};first b;0b];\n .dqe.updresultstab[runtype;idnum;.proc.cp[];0b;\"error: error on comparison process\";`failed;params;`];:()];\n errorprocs:d[`procs] where (),all each @[{0W=x};d`results;0b];\n /- if error in all comparison procs then fail check\n if[(count errorprocs)= count d`results;\n .dqe.updresultstab[runtype;idnum;.proc.cp[];0b;\"error: error with all comparison procs\";`failed;params;`];:()];\n matching:procsforcomp where all each params[`compallow] >= 100* abs -\\:[a;first b]%\\:first b;\n notmatching:procsforcomp except errorprocs,matching;\n .lg.o[`chkcompare;\"comparison finished with id \",string idnum];\n\n s:(string params[`compproc]),\" \";\n if[count errorprocs;s,:\" | \";s,: raze\"error \",(\",\" sv string errorprocs)];\n if[count notmatching;s,:\" | \";s,:raze\"no match \",(\",\" sv string notmatching)];\n if[count matching;s,:\" | \";s,:raze\"match \",(\",\" sv string matching)];\n\n .lg.o[`chkcompare;\"Updating descp of compare process in the results table\"];\n resbool:not(count errorprocs)|count notmatching;\n .dqe.updresultstab[runtype;idnum;.proc.cp[];resbool;s;`complete;params;`];\n }\n\n/- updates the results table with the check result\npostback:{[runtype;idnum;proc;params;result]\n .lg.o[`postback;\"postback successful for id \",(string idnum),\" from \",string proc];\n /- if comparision, add to compcounter table\n if[params`comp;\n .dqe.compcounter[idnum]:(\n 1+0^.dqe.compcounter[idnum][`counter];\n .dqe.compcounter[idnum][`procs],proc;\n /- join result to the list\n .dqe.compcounter[idnum][`results],$[3.z.t)|null expires; //select all cookies that apply\n if[not pr~\"https://\";t:delete from t where secure]; //delete HTTPS only cookies if not HTTPS request\n :\"; \"sv\"=\"sv'flip value exec name,val from t; //compile cookies into string\n }\n\n// @kind function\n// @category private\n// @fileoverview Add stored cookie(s) relevant to current query\n// @param q {dict} query object\n// @return {dict} query objeect with added cookies\naddcookies:{[q]\n if[count c:getcookies[q`url];q[`headers;`Cookie]:c];\n :q;\n }\n\n// @kind function\n// @category public\n// @fileoverview Read a Netscape/cURL format cookiejar\n// @param f {string|symbol|#hsym} filename\n// @return {table} cookie jar\nreadjar:{[f]\n j:read0 .url.hsurl f; //get hsym of input file & read\n j:j where not (\"#\"=first'[j])|0=count'[j]; //remove comments & empty lines\n t:flip`host`tailmatch`path`secure`expires`name`val!(\"*S*SJ**\";\"\\t\")0:j; //convert to a table\n t:update host:{\"*.\",x}'[host] from t where tailmatch=`TRUE; //implement tailmatching\n t:update path:{x,\"*\"}'[path] from t; //implement path matching\n t:update secure:secure=`TRUE from t; //convert secure to boolean\n t:update expires:?[0=expires;0Nz;`datetime$`timestamp$1970.01.01D00+1e9*expires] from t; //calculate expiry\n :delete tailmatch from update httponly:0b,maxage:0Nj,samesite:` from t; //add extra fields for reQ cookiejar\n }\n\n// @kind function\n// @category public\n// @fileoverview Write a Netscape/cURL format cookiejar\n// @param f {string|symbol|#hsym} filename\n// @param j {table} cookie jar\n// @return {#hsym} cookie jar filename\nwritejar:{[f;j]\n t :\"# Netscape HTTP Cookie File\\n\"; //make file header (copy cURL)\n t,:\"# https://curl.haxx.se/docs/http-cookies.html\\n\";\n t,:\"# This file was generated by 
reQ! Edit at your own risk.\\n\\n\";\n t,:\"\\n\"sv 1_\"\\t\"0:select //convert to tab delimited & drop headers\n {(\".\"=first x)_x}'[except\\:[host;\"*\"]],\n `FALSE`TRUE \"*\"=first'[host],\n except\\:[path;\"*\"],\n `FALSE`TRUE secure,\n ?[null expires;0;`long$1e-9*(`timestamp$expires)-1970.01.01D00:00], //convert expires back to epoch time\n name,\n val \n from j;\n :.url.hsurl[f] 0: \"\\n\"vs t; //write to file\n }\n\n\\d .\n\n================================================================================\nFILE: reQ_req_doh_google.q\nSIZE: 1,182 characters\n================================================================================\n\n\\d .doh\n\nENABLED:1b; //enable by default\nurl:\"dns.google.com\"; //URL for API\ncache:()!() //cache IP for URL\n\ncache[`$url]:url; //don't resolve the resolver\n\nresolve:{[url]\n /* take a URL, resolve URL to IP & return */\n uo:.url.parse0[0b;url]; //parse to object\n if[(`$h:uo`host) in key cache;\n :.url.format @[uo;`host;:;cache`$h]; //return from cache if present\n ];\n r:.j.k .req.get[\"https://dns.google.com/resolve?name=\",h;()!()]; //request from Google API\n i:first r[`Answer][`data]; //get first record\n cache[`$h]:i; //cache resovled IP\n :.url.format @[uo;`host;:;i]; //return resolved URL\n }\n\n\\d .\n\n================================================================================\nFILE: reQ_req_ext_os.q\nSIZE: 1,120 characters\n================================================================================\n\n/ os.q taken from https://github.com/jonathonmcmurray/qutil_packages @ beaabdd\n\n\\d .os\n\nes:$[.z.o like \"w*\";\" 2>NUL\";\" 2>/dev/null\"]; //error suppression dependent on os\ntest:{[x]\n /* .os.test - test if a command works on current os */\n :@[{system x;1b};x,es;0b]; //run with system & suppress error\n }\n\nhome:hsym`$getenv$[.z.o like \"w*\";`USERPROFILE;`HOME] //get home dir depending on OS\nhfile:(` sv home,) //get file path relative to home dir\n\nread:{$[1=count a;first;]a:read0 x} //read text file, single string if one line\nwrite:{x 0:$[10=type y;enlist;]y} //write text file, list of strings or single\n\nhread:{read hfile x} //read file from home dir\nhwrite:{write[hfile x;y]} //write file in home dir\n\n\\d .\n\n================================================================================\nFILE: reQ_req_init.q\nSIZE: 247 characters\n================================================================================\n\n/package code\nif[.z.K<=3.1;.utl.pkg\"json.q\"]; //add JSON support for older q versions\n.utl.pkg\"ext/os.q\"\n.utl.pkg\"url.q\"\n.utl.pkg\"cookie.q\"\n.utl.pkg\"b64.q\"\n.utl.pkg\"status.q\"\n.utl.pkg\"req.q\"\n.utl.pkg\"auth.q\"\n.utl.pkg\"multipart.q\"\n\n================================================================================\nFILE: reQ_req_json.q\nSIZE: 927 characters\n================================================================================\n\n/downloaded from https://raw.githubusercontent.com/KxSystems/kdb/master/e/json.k\n\n\\d .j\n\n/[]{} Cbg*xhijefcspmdznuvt\nk)q:\"\\\"\";s:{q,x,q};J:(($`0`1)!$`false`true;s;{$[#x;x;\"null\"]};s;{s@[x;&\".\"=8#x;:;\"-\"]};s)1 2 5 11 12 16h bin\nk)j:{$[10=abs t:@x;s@,/{$[x in r:\"\\t\\n\\r\\\"\\\\\";\"\\\\\",\"tnr\\\"\\\\\"r?x;x]}'x;99=t;\"{\",(\",\"/:(j'!x),'\":\",'j'. 
x),"}";-1*x;"F"$x;"n"=*x;0n;"t"=*x]}
k)k:{c x@&~v[x]&x in" \t\n\r"};

\

k j x:([]C:$`as`;b:01b;j:0N 2;z:0Nz,.z.z)
k j x:"\"a \\"
k"{},2]"

================================================================================
FILE: reQ_req_multipart.q
SIZE: 2,153 characters
================================================================================

\d .req

Comparing option pricing methods in q¶
In this paper, we compare the use of both Monte Carlo (MC) and Quasi-Monte Carlo (QMC) methods in the process of pricing European and Asian options. In doing so, we consider the use of two discretization schemes - standard discretization and Brownian-bridge construction. Results produced by the different methods are compared with the deterministic Black-Scholes price for each option type, using Global Sensitivity Analysis (SA). Note that the methods demonstrated below follow the work presented by S. Kucherenko et al. 2007.
S. Kucherenko et al. 2007, “The Importance of Being Global – Application of Global Sensitivity Analysis in Monte Carlo Option Pricing”, Wilmott, pp. 82–91
Black-Scholes¶
The most common model used to calculate the price of options is Black-Scholes, where the formula for each market is derived from the Black-Scholes equation. In this paper, we look specifically at the Black-Scholes models for European and Asian call options. The standard Black-Scholes model for European options assumes a payoff based on the underlying price at exercise. The modified model for Asian options assumes a payoff based on the average underlying price over a predefined time period. In each case, the Black-Scholes model produces a closed-form solution with a deterministic result.
For European call options, the price of the corresponding option at time \(t\), \(P(S_{t},t)\), is given by:
\[ P(S_{t},t) = S_{t}e^{-q(T-t)}N(d_{1}) - Ke^{-r(T-t)}N(d_{2}) \]
\[ d_{1} = \frac{\ln(S_{t}/K) + (r - q + \sigma^{2}/2)(T-t)}{\sigma\sqrt{T-t}}, \qquad d_{2} = d_{1} - \sigma\sqrt{T-t} \]
Where \(T\) is the expiry, \(S_{t}\) is the price of the underlying asset at time \(t\), \(K\) is the strike price of the option, \(\sigma\) is the volatility and \(r\) is the interest rate. Note that the price is discounted by the dividends, \(q\), throughout.
For Asian call options, we implement the same formula, using an adjusted \(S_{t}\), \(\sigma^{2}\) and drift rate, \(\mu\):
\[ \mu_{adj} = \frac{1}{2}\left(r - \frac{\sigma^{2}}{2}\right)\left(1 + \frac{1}{n}\right) \]
\[ \sigma^{2}_{adj} = \frac{\sigma^{2}}{3}\left(1 + \frac{1}{n}\right)\left(1 + \frac{1}{2n}\right) \]
\[ S_{adj} = S_{t}\exp\left[(T-t)\left(\mu_{adj} + \frac{\sigma^{2}_{adj}}{2} - r\right)\right] \]
Where \(n\) is the number of timesteps.
Monte Carlo and Quasi-Monte Carlo simulations¶
Within the financial industry, there is a need to price complex financial instruments. Despite this need, there is a lack of analytical solutions to do so. MC methods are used within the financial industry to mimic the uncertainty associated with the underlying price of an instrument and to subsequently generate a value based on the possible underlying input values. One example of where MC is used in finance is in evaluating an option on an equity. For each underlying asset, an MC simulation is used to create thousands of random price paths, each with an associated payoff. The option price is then calculated by taking the average over the future payoffs and discounting them to the present.
These models are based on pseudo-random numbers which, despite being commonly used, exhibit very slow convergence, with a rate of \(O(1/\sqrt{N})\) where \(N\) is the number of sampled points. To improve upon these models, QMC methods have been developed which use low-discrepancy sequences (LDS) to produce a rate of convergence ~ \(O(1/N)\).
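To make the plain-MC estimate described above concrete, here is a minimal, self-contained sketch of a terminal-value European call estimator. It is illustrative only: the function name is not part of the library, q's built-in ? operator is used for the uniform draws, and invcnorm is the inverse cumulative normal helper provided by the library's norm.q (loaded later).
/ naive Monte Carlo estimate of a European call from terminal prices only
/ (illustrative sketch, not the library's simulation code)
mcEuroCall:{[pd;npaths]
  v:pd`v; r:pd`r; dv:pd`q; t:pd`t;
  z:invcnorm npaths?1f;                  / standard normal draws
  drift:((r-dv)-.5*v*v)*t;               / (r - q - sigma^2/2) * T
  st:pd[`s]*exp drift+z*v*sqrt t;        / terminal asset prices
  exp[neg r*t]*avg 0|st-pd`k }           / discounted average payoff
For a large enough number of paths, mcEuroCall[pd;100000] should land close to the closed-form Black-Scholes price derived above, but it converges only at the slow MC rate.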
LDS are deterministic uniformly distributed sequences which are specifically designed to place sample points as uniformly as possible. Practical studies have shown that the most effective QMC method for application in financial engineering is based on Sobol' LDS.\nS. Kucherenko et al. 2001, “Construction and Comparison of High-Dimensional Sobol’ Generators”, Wilmott, Nov, pp. 64-79\nbroda.co.uk\nP. Jäckel 2001, Monte Carlo Methods In Finance, pp. 122.\nP. Glasserman 2003, Monte Carlo Methods in Financial Engineering, Springer.\nWiener path¶\nThe starting point for asset price simulation is the construction of a Wiener path (or Brownian motion). Such paths are built from a set of independent Gaussian variates, using either standard discretization or Brownian-bridge construction.\nIn the standard approximation, the Wiener path is found by taking the cumulative sum of the Gaussian variates.\nWhen constructing a Brownian bridge, the last step of the Wiener path is calculated first, followed by the mid-step, and then the space left between steps is bisected until all steps have been determined.\nAn example of building up a Brownian bridge is shown in the diagram below, where we have a total of 14 timesteps (from 1 to 14). Note that we also have an additional timestep 0\n, which is assumed to have a value of 0.\nThe construction of a Brownian bridge over 14 steps. See Jäckel, 2001, op. cit.\nBoth standard discretization and Brownian-bridge construction share the same variance and therefore the same resulting convergence when used with MC models. However, performance differs between the two when QMC methods are introduced, with faster convergence seen for Brownian-bridge construction.\nIn order to showcase how performant the QMC simulation is when paired with Brownian-bridge construction, we use Global SA as outlined in S. Kucherenko et al. 2007. This method allows us to estimate the contribution of individual input parameters in the final variance of the output over a number of experiments. In each experiment, we:\n- Randomly generate \\(n\\) random numbers, either pseudo-random (MC) or Sobol’ sequence (QMC). (See also broda.co.uk)\n- Convert into a normal distribution.\n- Convert into a Wiener-path random walk using standard discretization or Brownian-bridge construction.\n-\nConvert into an asset-price path based on parameters:\ns\n: Asset price at \\(t=0\\)v\n: Volatilityr\n: Interest rateq\n: Dividendst\n: Expiry\n-\nConvert into an option price based on the option type and strike price,\nk\n.\nThe prices produced are then averaged to find a final predicted price.\nImplementation¶\nIn the following sections, we compare the methods of option pricing mentioned above. The Black-Scholes price for each market is compared to an average price generated using the following combinations of simulation and discretization methods:\n- Pseudo-random number generation (MC) with standard discretization.\n- Sobol’ sequences (QMC) with standard discretization.\n- Sobol’ sequences (QMC) with Brownian-bridge construction.\nThe Black-Scholes function for each market produces a closed-form solution with a deterministic result, while the MC/QMC functions perform a number of random experiments and return an average price, based on the option type and the strike price.\nOnce both the Black-Scholes and MC/QMC prices have been calculated for each market, the root mean square error (RMSE) is calculated between the two. 
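For reference, the error measure can be written as a one-line helper along the following lines (the name rmse is illustrative; the library keeps its own version among the utility functions mentioned below):
/ root mean square error between a vector of simulated prices
/ and the Black-Scholes reference price
rmse:{[sim;bs]sqrt avg d*d:sim-bs}
It is applied to the vector of MC/QMC estimates from the repeated experiments against the corresponding Black-Scholes value for that market.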
This is demonstrated in the final example below, where the process is repeated for an increasing number of paths, with resulting errors compared.\nThe technical dependencies required for the below work are as follows:\n- Option Pricing kdb+/q library\n- embedPy\n- Sobol’ C++ library -\nSobolSeq1024\nfunction provided in the Option Pricing kdb+/q library with max dimension of 1024. - matplotlib\nUtility functions\nFor simplicity, utility functions are omitted from the code snippets below. These can be found within the Option Pricing library linked above.\nLoad scripts¶\nAs mentioned previously, the implementations of option pricing methods outlined below are based on original C++ scripts used in S. Kucherenko et al. 2007. All code is contained within the option-pricing repository:\nWrappers for the C++ pseudo-random and Sobol’ sequence number generators (see also broda.co.uk) are contained within rand.q\n, along with the cumulative and inverse cumulative normal distribution functions in norm.q\n.\nTo run the below examples, q scripts are loaded including the C++ wrappers and graphics functions used throughout.\n\\l code/q/rand.q\n\\l code/q/norm.q\n\\l notebooks/graphics/graphics.q\nBlack-Scholes option pricing¶\nThe following functions provide q implementations of the Black-Scholes formula for each market, outlined above. They take in parameter dictionary pd\nas an argument, containing the parameters s\n, v\n, r\n, q\n, k\n, and t\ndetailed in the previous section. Note that the Black-Scholes price of an Asian call option depends on the number of timesteps n\n, which must also be passed as an argument.\n/ European\nbsEuroCall:{[pd]\n/ Calculate volatility*sqrt delta T coefficient\ncoeff:(v:pd`v)*sqrt t:pd`t;\n/ Calculate d1\nd1:(log[pd[`s]%pd`k]+t*(pd[`r]-pd`q)+.5*v*v)%coeff;\n/ Calculate d2\nd2:d1-coeff;\n/ Calculate the option price - P(S,t)\n(pd[`s]*exp[neg t*pd`q]*cnorm1 d1) -\npd[`k]*exp[neg t*pd`r]*cnorm1 d2 }\n/ Asian\nbsAsiaCall:{[n;pd]\n/ Calculate adjusted drift rate\nadjmu:.5*((r:pd`r)-.5*v2:v*v:pd`v)*n1:1+1.%n;\n/ Calculate adjusted volatility squared\nadjv2:(v2%3)*n1*1+.5%n;\n/ Calculate adjusted price\nadjS :pd[`s]*exp(t:pd`t)*(hv2:.5*adjv2)+adjmu-r;\n/ Calculate d1\nd1:(log[adjS%k:pd`k]+t*(r-q:pd`q)+hv2)%rtv2:sqrt adjv2*t;\n/ Calculate d2\nd2:d1-rtv2;\n/ Calculate the option price - P(S,t)\n(adjS*exp[neg q*t]*cnorm1 d1)-k*exp[neg r*t]*cnorm1 d2 }\nThe outputs of these functions are demonstrated below for 512 timesteps.\nnsteps:512 / number of timesteps\npd:`s`k`v`r`q`t!100 100 .2 .05 0 1 / parameter dictionary\n/ Calculate BS price for European/Asian options\n\"European Black Scholes Price: \",string bseuro:bsEuroCall pd\n\"Asian Black Scholes Price: \",string bsasia:bsAsiaCall[nsteps]pd\nEuropean Black Scholes Price: 10.45058\nAsian Black Scholes Price: 5.556009\nMonte Carlo and Quasi-Monte Carlo option pricing¶\nGenerate random numbers¶\nThe first stage in predicting an option price is to generate a set of random numbers using either MC or QMC methods. In the example below we generate 512 pseudo-random and Sobol’ sequence numbers, with results plotted for comparison.\nRandom numbers are generated using the Mersenne Twister number generator which has one parameter, the number of steps.\nThe Sobol’ sequence generator takes two arguments:\n- the index of the point (0 < i < 231 - 1)\n- the dimension of the Sobol’ sequence, i.e. 
the number of steps (0 < d < 1025).\n/ Function to generate n random numbers in d dimensions\nrdmngen:{[n;d](d;n)#mtrand3 d*n}\n/ Function to generate n sobol numbers in d dimensions\nsobngen:{[n;d]flip sobolrand[d]each 1+til n}\n/ Generate n random and sobol numbers in 2D\ndata:(rdmngen;sobngen).\\:nsteps,2\nsubplot[data;(\"Random\";\"Sobol\");2;2#`scatter]\nIt is clear that the pseudo-random numbers are not evenly distributed, with points clustering together in some sections, while leaving large portions of white space in others.\nIn contrast, the Sobol’ sequence plot exhibits a much more even distribution, with few points clumping together.\nConvert to a Gaussian distribution¶\nThe generated sequences are converted from a uniform distribution to a Gaussian distribution. Following this conversion, around 68% of the values lie within one standard deviation, while two standard deviations account for around 95% and three account for 99.7%.\nGaussian distribution\nIn the example below we convert the uniform generated Sobol’ sequence to a Gaussian distribution, using the inverse cumulative normal function, invcnorm\n.\n/ Convert sobol sequence to normal distribution\nzsob:invcnorm each sob:last data\nsubplot[(sob;zsob);(\"Sobol Uniform\";\"Sobol Gaussian\");2;2#`scatter]\nThe differences between the Gaussian distributions produced for random and Sobol’ sequences are best demonstrated for a small number of timesteps, e.g. 64. Below we plot the 1-D Gaussian distributions for both random and Sobol’ number generation across 64 timesteps.\n/ Returns 1D Gaussian distribution to plot\ngausscnv:{[g;n;d]first invcnorm each$[g~`rdm;rdmngen;sobngen][n;d] }\n/ Calculates Gaussian variates for 64 steps, in 2 dimensions\ndist:gausscnv[;64;2]each`rdm`sob\nsubplot[dist;(\"Random\";\"Sobol\");2;2#`hist]\nAs expected, the Sobol’ sequence exhibits a Gaussian curve with much better statistical properties than the random-number sequence.\nConvert into a Wiener-path random walk¶\nThe q code to build both a Brownian-bridge and Wiener-path random walk is shown below.\nBrownian bridge:\n/* n = number of timesteps\n/* dt = length of timesteps\nbbridge:{[n;dt]\n/ create initial brownian bridge with indices for all timesteps\nbb:first flip(n-1).[util.initbb n]\\(`bidx`ridx`lidx!3#n-1;((n-1)#0b),1b);\n/ calculate weights and sigma value for each point in the path\nbb:update lwt:bidx-lidx,rwt:ridx-bidx,sigma:ridx-lidx from bb;\nbb:update lwt%sigma,rwt%sigma,sigma:sqrt dt*lwt*rwt%sigma from bb;\n/ create a projection for weiner path creation containing new bbridge\nutil.buildpath .[bb;(0;`sigma);:;sqrt n*dt] }\nWiener path:\n/ Performs cumulative sum\n/ or inverse cumulative normal for random/sobol numbers\n/* u = gaussian variates\n/* d = dictionary containing bbridge and boolean for sobol/random numbers\nwpath:{[u;d]$[(::)~d`bb;sums;d`bb]invcnorm u }\nAn example of how the Brownian bridge is built is shown below using bbdemo\n. The function outputs a table with n\ntimesteps.\n/ Demonstrates build up of bbridge indices\nbbdemo:{[n]\n/ Create matrix showing steps taken\n/ Step already taken = 1b, 0b otherwise\nx:1b,'enlist[n#0b],\nlast flip(n-1).[util.initbb n]\\(`bidx`ridx`lidx!3#n-1;((n-1)#0b),1b);\n/ Print \"X\" where 1b, showing path taken\nflip(`$\"i\",'string til count x)!x:flip(\" X\")x }\nAn example is shown below using 8 timesteps, showing the order in which steps are added to the path. 
Note that i0\nwas added here, where we assume that it has a value equal to 0.\nq)bbdemo 8\ni0 i1 i2 i3 i4 i5 i6 i7 i8\n--------------------------\nX\nX X\nX X X\nX X X X\nX X X X X\nX X X X X X\nX X X X X X X\nX X X X X X X X\nX X X X X X X X X\nWhen recording the order of steps in the path, we take note of the left and right weights and indexes, and the corresponding sigma value for each step in the sequence. This is shown for 512 timesteps and 1 unit of time, with the sigma value for each index in the Brownian bridge subsequently plotted.\nq)dt:1\nq)10#b:last value bbex:bbridge[nsteps;dt]\nbidx ridx lidx lwt rwt sigma\n-------------------------------\n511 511 511 22.62742\n255 511 -1 0.5 0.5 11.31371\n127 255 -1 0.5 0.5 8\n383 511 255 0.5 0.5 8\n63 127 -1 0.5 0.5 5.656854\n191 255 127 0.5 0.5 5.656854\n319 383 255 0.5 0.5 5.656854\n447 511 383 0.5 0.5 5.656854\n31 63 -1 0.5 0.5 4\n95 127 63 0.5 0.5 4\nOnce the Brownian bridge has been initialized, it can be used to transform Gaussian variates into a Wiener-path random walk. Below, a Wiener path with 512 timesteps is constructed using a Sobol’ sequence (of length 512) and the Brownian bridge constructed previously. Note that the function wpath\ntakes two arguments:\n- Sequence of generated numbers, Sobol’ or random.\n- Dictionary indicating whether to use standard discretization or Brownian-bridge construction, and whether to use Sobol’ sequences (\n1b\n) or pseudo-random numbers (0b\n). If using a Brownian bridge, the initial Brownian bridge must be passed in, if not use(::)\n.\nq)d:`bb`sobol!(bbex;1b)\nq)show w:wpath[sobolrand[nsteps;2];d]\n-0.4450092 0.06385387 -0.1017726 -1.221271 -0.9099617 -1.552524 -0.56..\nq)plt[`:title]\"Wiener path random walk\";\nq)plt[`:plot]w;\nq)plt[`:show][];\nConvert into asset price path¶\nAt this point, the Wiener path is converted into an asset-price path using the methods outlined in S. Kucherenko et al. 2007, where the asset-price path is calculated as:\nWhere \\(S_0\\) and \\(S(t)\\) are the asset prices at time \\(0\\) and \\(t\\) respectively, \\(r\\) is the interest rate, \\(\\sigma\\) is the volatility and \\(W(t)\\) is a Wiener path up to time \\(t\\).\nThis process can be done using the function spath\n, detailed below.\n/* u = gaussian variates\n/* n = number of timesteps\n/* d = dictionary with bbridge and boolean for random/sobol\n/* pd = dictionary of parameters s,v,r,t,q\nspath:{[u;n;d;pd]\n/ Original asset price\npd[`s]*\n/ Wiener path*volatility*time for one timestep\nexp(wpath[u;d]*pd[`v]*sqrt dt)+\n/ Calculate sum of interest rate (discounted by dividends)\n/ and half volitility squared for each timestep\n(1+til n)*(pd[`r]-pd[`q]+.5*v*v:pd`v)*dt:pd[`t]%n }\nHere we calculate six different asset-price paths and overplot them for comparison. We start by generating the Sobol’ sequences for 8 paths with 512 timesteps, incrementing the Sobol’ index each time. 
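For reference, the mapping from Wiener path to asset price used by spath is the standard geometric Brownian motion form, stated here in generic notation (with the dividend yield \\(q\\) included, as in the code):
\\[S(t)=S_{0}\\exp\\Big(\\big(r-q-\\tfrac{1}{2}\\sigma^{2}\\big)t+\\sigma W(t)\\Big)\\]
In the code, \\(t\\) advances in steps of \\(dt=T/n\\) through the (1+til n) term, and \\(\\sigma W(t)\\) corresponds to the wpath output scaled by pd[`v]*sqrt dt.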
Brownian-bridge approximation is also used.\n-1\"\\nGenerated sequences: \\n\";\nshow u:sobolrand[nsteps;]each 2+til 8\nplt[`:title]\"Asset Price Path\"\nplt[`:plot] each spath[;nsteps;d;pd]each u\nplt[`:show][]\nGenerated sequences\n0.25 0.75 0.25 0.75 0.25 0.75 0.25 0.25 0.75 0.75 ..\n0.75 0.25 0.75 0.25 0.75 0.25 0.75 0.75 0.25 0.25 ..\n0.375 0.625 0.125 0.875 0.875 0.125 0.625 0.125 0.875 0.625 ..\n0.875 0.125 0.625 0.375 0.375 0.625 0.125 0.625 0.375 0.125 ..\n0.125 0.375 0.375 0.125 0.625 0.875 0.875 0.375 0.125 0.375 ..\n0.625 0.875 0.875 0.625 0.125 0.375 0.375 0.875 0.625 0.875 ..\n0.3125 0.3125 0.6875 0.5625 0.1875 0.0625 0.9375 0.5625 0.0625 0.8125..\n0.8125 0.8125 0.1875 0.0625 0.6875 0.5625 0.4375 0.0625 0.5625 0.3125..\nConvert into option price¶\nLastly, to find a single option price, an average is taken across the payoffs of the simulated asset-price paths for the MC/QMC method. This allows for comparison between the predicted price and the Black-Scholes equivalent. The formulae for both the European and Asian options are outlined in S. Kucherenko et al. 2007.\nFor a European call option, the final MC/QMC price is calculated using:\nWhere \\(C\\) is the final price of the call option, \\(r\\) is the interest rate, \\(T\\) is the expiry, \\(N\\) is the finite number of simulated price paths and \\(K\\) is the strike price. Note that \\(max(S^{(i)}_{T}-K,0)\\) represents the payoff for a call option.\nThis has been translated into the below function:\n/ MC/QMC Price of European Call Option\n/* u = sequence of random numbers\n/* n = number of timesteps\n/* d = dictionary with bbridge and boolean for random/sobol\n/* pd = dictionary of parameters s,v,r,t,q\nmcEuroCall:{[u;n;d;pd]\nexp[neg pd[`r]*pd`t]*avg 0 |\n(last each spath[;n;d;pd]each u)-pd`k }\nSimilarly, for Asian call options, the below is used:\nWhere we integrate over unit hypercube \\(H^{n}\\). In this case, the payoff for a geometric-average Asian call option is calculated as the maximum between 0 and the geometric average of the underlying price, \\(S(t)\\), minus the strike price, \\(K\\).\nThe final price for an Asian call option can therefore be generated using the below function:\n/ MC/QMC Price of Asian Call Option\n/* u = sequence of random numbers\n/* n = number of timesteps\n/* d = dictionary with bbridge and boolean for random/sobol\n/* pd = dictionary of parameters s,v,r,t,q\nmcAsiaCall:{[u;n;d;pd]\nexp[neg pd[`r]*pd`t]*avg 0 |\n(prd each xexp[;1%n]spath[;n;d;pd]each u)-pd`k }\nWe also need a number-generator function for l\ntrials, m\npaths and n\nsteps which can be used with the Sobol’ or random-number generators.\nnumgen:{[ng;l;m;n]ng@''$[ng~mtrand3;(l;first m)#n;(0N;m)#1+til l*m]}\nHere we demonstrate how to run these functions below for 512 timesteps, 256 paths and 5 trials. 
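For reference, the geometric-average payoff that mcAsiaCall evaluates can be written in generic notation (again stated here for orientation rather than quoted from the paper) as:
\\[C_{Asian}=e^{-rT}\\,\\mathbb{E}\\Big[\\max\\Big(\\Big(\\prod_{i=1}^{n}S(t_{i})\\Big)^{1/n}-K,\\;0\\Big)\\Big]\\]
The prd each xexp[;1%n] term computes the geometric mean of each simulated path, avg across paths estimates the expectation, and exp[neg pd[`r]*pd`t] applies the discounting.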
Sequences are generated for Sobol’ sequences using the above numgen function which will produce a sequence for each path and each trial.\nntrials:5\nnpaths:256\n\"\\nGenerated sequences:\\n\"\n5#u:first numgen[sobolrand nsteps;ntrials;npaths;nsteps]\n\"European Monte Carlo Price: \",\nstring mcEuroCall[u;nsteps;`bb`sobol!(bbex;1b);pd]\n\"Asian Monte Carlo Price: \",\nstring mcAsiaCall[u;nsteps;`bb`sobol!(bbex;1b);pd]\nGenerated sequences:\n0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5..\n0.25 0.75 0.25 0.75 0.25 0.75 0.25 0.25 0.75 0.75 0.25 0.2..\n0.75 0.25 0.75 0.25 0.75 0.25 0.75 0.75 0.25 0.25 0.75 0.7..\n0.375 0.625 0.125 0.875 0.875 0.125 0.625 0.125 0.875 0.625 0.125 0.3..\n0.875 0.125 0.625 0.375 0.375 0.625 0.125 0.625 0.375 0.125 0.625 0.8..\nEuropean Monte Carlo Price: 10.28224\nAsian Monte Carlo Price: 5.365942\nRemembering that the Black-Scholes option prices for the same number of timesteps were:\n\"European Black Scholes Price: \",string bseuro\n\"Asian Black Scholes Price: \",string bsasia\nEuropean Black Scholes Price: 10.45058\nAsian Black Scholes Price: 5.556009\nExample¶\nIn this section we deploy all the aforementioned techniques and compare the results.\nMultiple threads\nThe example below can be run from the terminal across multiple threads using the following commands:\nq -s 8\nq)\\l op.q\nq)loadfile\\`:init.q\nq)loadfile\\`:code/q/run.q\nwhere we load in the functions contained within the Option Pricing library using the first two commands and run the example by loading in run.q\n.\nParameters¶\nAs shown previously, a dictionary of parameters is created which contains the initial asset price s\n, volatility v\n, interest rate r\n, dividends q\n, expiry t\nand strike price k\n.\nq)show pd:`s`k`v`r`q`t!100 100 .2 .05 0 1\ns| 100\nk| 100\nv| 0.2\nr| 0.05\nq| 0\nt| 1\nAdditional parameters are also initialized for the number of paths (experiments), steps and trials.\nq)l:20 / Number of trials\nq)n:1024 / Number of steps\nq)show m:\"j\"$xexp[2;3+til 8] / Number of paths\n8 16 32 64 128 256 512 1024\nGiven that the initial Brownian bridge is the same throughout, it is also initialized and passed in as an argument.\nq)10#last value bb:bbridge[n;1]\nbidx ridx lidx lwt rwt sigma\n-------------------------------\n1023 1023 1023 32\n511 1023 -1 0.5 0.5 16\n255 511 -1 0.5 0.5 11.31371\n767 1023 511 0.5 0.5 11.31371\n127 255 -1 0.5 0.5 8\n383 511 255 0.5 0.5 8\n639 767 511 0.5 0.5 8\n895 1023 767 0.5 0.5 8\n63 127 -1 0.5 0.5 5.656854\n191 255 127 0.5 0.5 5.656854\nRun experiments¶\nThe functions below calculate the RMSE between the Black-Scholes and MC/QMC prices for each market and each MC/QMC technique. 
Note that we reset the sobolrand\nindex after each set of trials have been completed.\n/ Run all techniques for option pricing\n/* bb = initial brownian bridge\n/* pd = dictionary of parameters\n/* l = number of trials\n/* n = number of timesteps\n/* m = number of paths\nrunall:{[bb;pd;l;n;m]\n/ Start timer for European options\nst:.z.p;\n/ Output column names\n0N!util.rcol;\n/ Run experiments for European options\ne:util.run[`euro;bsEuroCall pd;bb;pd;l;n]each m;\n/ Output total time taken for European\n-1\"European: time taken = \",string[.z.p-st],\"\\n\";\n/ Start timer for Asian options\nst:.z.p;\n/ Output column names\n0N!util.rcol;\n/ Run experiments for Asian options\na:util.run[`asia;bsAsiaCall[n;pd];bb;pd;l;n]each m;\n/ Output total time taken for Asian\n-1\"Asian: time taken = \",string .z.p-st;\n/ Return table with European and Asian prices and errors\ne,a }\n/ Dictionary keys\nutil.d:`bb`sobol!\n/ Output column names\nutil.rcol:`mkt`npaths`rmse_bb_sobol`rmse_std_sobol`rmse_std_rdm,\n`prx_bb_sobol`prx_std_sobol`prx_std_rdm`prx_bs\n/ RMSE\nutil.rmse:{sqrt avg x*x-:y}\n/ Run each technique for a specific market\n/* mkt = market, European/Asian\n/* bs = Black-Scholes price\nutil.run:{[mkt;bs;bb;pd;l;n;m]\n/ Create project with correct MC function for each market\nmc:$[mkt~`asia;mcAsiaCall;mcEuroCall][;n;;pd];\n/ Generate MC option price and calculate error for bbridge and sobol\nea:util.rmse[bs]a:mc[;util.d(bb;1b)]each sob:numgen[sobolrand n;l;m;n];\n/ Generate MC option price and calculate error for standard and sobol\neb:util.rmse[bs]b:mc[;util.d(::;1b)]each sob;\n/ Generate MC option price and calculate error for bbridge and random\nec:util.rmse[bs]c:mc[;util.d(bb;0b)]each numgen[mtrand3;l;m;n];\n/ Return dictionary of results\nutil.rcol!0N!(mkt;m;ea;eb;ec;last a;last b;last c;bs) }\nCompare results¶\nAt this stage it is possible to plot the results obtained for the option prices, RMSE and log RMSE values.\nq)r:runall[bb;pd;l;n;m]\nq)select from r where mkt=`euro / European RMSEs and prices\nmkt npaths rmse_bb_sobol rmse_std_sobol rmse_std_rdm prx_bb_sobol pr..\n---------------------------------------------------------------------..\neuro 8 2.218523 3.328543 4.537168 13.86836 4...\neuro 16 1.345787 2.442911 3.505794 9.206011 12..\neuro 32 0.6865024 1.623545 3.618555 9.879788 11..\neuro 64 0.3774031 0.9891046 1.93851 10.90519 11..\neuro 128 0.2089234 0.5977986 1.532864 10.34505 11..\neuro 256 0.117329 0.3648233 0.9575077 10.52265 10..\neuro 512 0.05984563 0.3127605 0.7618771 10.50504 9...\neuro 1024 0.03176637 0.2853521 0.5670563 10.43112 10..\nq)/ Asian RMSEs and prices\nq)select from r where mkt=`asia\nmkt npaths rmse_bb_sobol rmse_std_sobol rmse_std_rdm prx_bb_sobol pr..\n---------------------------------------------------------------------..\nasia 8 1.044296 2.126291 2.421675 6.461112 5...\nasia 16 0.6879741 1.37292 1.80831 4.369775 6...\nasia 32 0.3959254 0.90278 1.167337 5.445392 6...\nasia 64 0.2453828 0.4006613 0.8905137 5.641087 5...\nasia 128 0.1543742 0.3089822 0.5973851 5.473975 5...\nasia 256 0.0771557 0.2283313 0.4139539 5.590241 5...\nasia 512 0.03863931 0.1614974 0.3102061 5.576155 5...\nasia 1024 0.01975347 0.166499 0.1831748 5.544304 5...\nOption prices¶\nThe plot below shows the option prices produced for each number of paths, compared to the Black-Scholes equivalent (black-dashed line). 
It is clear that the Sobol-Brownian bridge method converges the fastest.\nq)prxerrplot[r;`prx]\nRMSE¶\nWe can also plot the RMSE produced by comparing the prices for each method as they converge to the relative Black-Scholes price. The expected result is again exhibited, where the Sobol-Brownian bridge method converges the fastest.\nq)prxerrplot[r;`rmse]\nLog RMSE¶\nLastly, we can look at the log RMSE plot as another means of comparison between the methods. Similarly, we see that the Sobol-Brownian bridge method (blue) exhibits superior performance.\nq)prxerrplot[r;`logrsme]\nConclusion¶\nIn this paper we demonstrated that it is possible to calculate option prices using both Black-Scholes and Monte Carlo/Quasi-Monte Carlo methods in q. The Monte Carlo/Quasi-Monte Carlo methods deployed different implementations of both Wiener-path approximation and random-number generation.\nLooking at the results produced, it is clear that both the option price produced and the resulting RMSE/log RMSE converged fastest when compared with the Black-Scholes price for the Quasi-Monte Carlo approach, with Sobol’ sequence number generation and Brownian-bridge construction.\nAuthor¶\nDeanna Morgan joined First Derivatives in June 2018 as a data scientist in the Capital Markets Training Program and currently works as a machine-learning engineer in London.\nAcknowledgements¶\nI gratefully acknowledge Sergei Kucherenko for allowing us to create a version of the C++ Option Pricing library in q and for providing technical knowledge throughout the project. I would additionally like to acknowledge my colleagues in the KX Machine Learning team for their guidance in the technical aspects of this paper."}}},{"rowIdx":54,"cells":{"text":{"kind":"string","value":"mod\n¶\nModulus\nx mod y mod[x;y]\nWhere x\nand y\nare numeric, returns the remainder of x%y\n.\nq)-3 -2 -1 0 1 2 3 4 mod 3\n0 1 2 0 1 2 0 1\nq)7 mod 2 3 4\n1 1 3\nq)-7 7 mod/:\\:-2.5 -2 2 2.5\n-2 -1 1 0.5\n-0.5 -1 1 2\nmod\nis a multithreaded primitive.\nImplicit iteration¶\nmod\nis an atomic function.\nq)(10;20 30)mod(7 13;-12)\n3 10\n-4 -6\nIt applies to dictionaries and keyed tables.\nq)d mod 5\na| 0 4 3\nb| 4 0 4\nq)5 mod d\na| 5 -16 2\nb| 1 0 -1\nq)k mod 5\nk | a b\n---| ---\nabc| 0 4\ndef| 4 0\nghi| 3 4\nDomain and range¶\nb g x h i j e f c s p m d z n u v t\n----------------------------------------\nb | i . i i i j e f . . p m d z n u v t\ng | . . . . . . . . . . . . . . . . . .\nx | i . i i i j e f . . p m d z n u v t\nh | i . i i i j e f . . p m d z n u v t\ni | i . i i i j e f . . p m d z n u v t\nj | j . j j j j e f . . p m d z n u v t\ne | f . f f f f f f f . f f z z f f f f\nf | f . f f f f f f f . f f z z f f f f\nc | . . . . . . . f . . p m d z n u v t\ns | . . . . . . . . . . . . . . . . . .\np | n . n n n n n f n . . . . . . . . .\nm | i . i i i i i f i . . . . . . . . .\nd | i . i i i i i . i . . . . . . . . .\nz | f . f f f f f f f . . . . . . . . .\nn | n . n n n n n f n . . . . . . . . .\nu | u . u u u u u f u . . . . . . . . .\nv | v . v v v v v f v . . . . . . . . .\nt | t . t t t t t f t . . . . . . . . 
.\nRange: defijmnptuvz\n%\nDivide, div\n, reciprocal\nMathematics\nQ for Mortals: §4.8.1 Integer Division div\nand Modulus mod\n\n*\nMultiply¶\nx*y *[x;y]\nWhere x\nand y\nare conformable numerics or temporals, returns their\nproduct.\nq)3 4 5*2.2\n6.6 8.8 11\nq)1.1*`a`b`c!5 10 20\na| 5.5\nb| 11\nc| 22\nq)t:([]price:10 20 30;qty:200 150 17)\nq)t*\\:1.15 1 /raise all prices 15%\nprice qty\n---------\n11.5 200\n23 150\n34.5 17\nq)update price:price*1+.15*qty<50 from t /raise prices 15% where stock<50\nprice qty\n---------\n10 200\n20 150\n34.5 17\n*\nis a multithreaded primitive.\nImplicit iteration¶\nMultiply is an atomic function.\nq)(10;20 30)*(2;3 4)\n20\n60 120\nIt applies to dictionaries and tables.\nq)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 -21 3;4 5 -6)\nq)d*2\na| 20 -42 6\nb| 8 10 -12\nq)d*`b`c!(10 20 30;1000*1 2 3) / upsert semantics\na| 10 -21 3\nb| 40 100 -180\nc| 1000 2000 3000\nq)t*100\na b\n----------\n1000 400\n-2100 500\n300 -600\nq)k*k\nk | a b\n---| ------\nabc| 100 16\ndef| 441 25\nghi| 9 36\nRange and domains¶\nb g x h i j e f c s p m d z n u v t\n----------------------------------------\nb | i . i i i j e f . . p m d z n u v t\ng | . . . . . . . . . . . . . . . . . .\nx | i . i i i j e f . . p m d z n u v t\nh | i . i i i j e f . . p m d z n u v t\ni | i . i i i j e f . . p m d z n u v t\nj | j . j j j j e f . . p m d z n u v t\ne | e . e e e e e f . . p m d z n u v t\nf | f . f f f f f f f . f f z z f f f f\nc | . . . . . . . f . . p m d z n u v t\ns | . . . . . . . . . . . . . . . . . .\np | p . p p p p p f p . . . . . . . . .\nm | m . m m m m m f m . . . . . . . . .\nd | d . d d d d d z d . . . . . . . . .\nz | z . z z z z z z z . . . . . . . . .\nn | n . n n n n n f n . . . . . . . . .\nu | u . u u u u u f u . . . . . . . . .\nv | v . v v v v v f v . . . . . . . . .\nt | t . t t t t t f t . . . . . . . . .\nRange: defijmnptuvz\n\nneg\n¶\nNegate\nneg x neg[x]\nReturns the negation of boolean or numeric x\n.\nA null has no sign, so is its own negation.\nq)neg -1 0 1 2\n1 0 -1 -2\nq)neg 01001b\n0 -1 0 0 -1i\nq)neg (0W;-0w;0N) / infinities and a null\n-0W\n0w\n0N\nq)neg 2000.01.01 2012.01.01 / negates the underlying data value\n2000.01.01 1988.01.01\nAn atomic function.\nneg\nis a multithreaded primitive.\nDomain and range¶\ndomain b g x h i j e f c s p m d z n u v t\nrange i . i h i j e f i . 
p m d z n u v t\nRange: ihjefpmdznuvt\nnot\n,\nSubtract\nMathematics\nQ for Mortals\n§4.3.2 Not Zero not\nQ for Mortals\n§4.9.2 Temporal Arithmetic\n\nnext\n, prev\n, xprev\n¶\nImmediate or near neighbors\nWhat if just under this layer of life you could\nfind the old one, moving forward just the same,\nand just above, what’s yet to come\n— Emily Berry, Unexhausted Time\nnext\n¶\nNext item/s in a list\nnext x next[x]\nWhere x\nis a list, for each item in x\n, returns the next item.\nFor the last item, it returns a null if the list is a vector, otherwise an empty list ()\n.\nq)next 2 3 5 7 11\n3 5 7 11 0N\nq)next (1 2;\"abc\";`ibm)\n\"abc\"\n`ibm\n`int$()\nDuration of a quote:\nq)update (next time)-time by sym from quote\nnext\nis a uniform function.\nprev\n¶\nImmediately preceding item/s in a list\nprev x prev[x]\nWhere x\nis a list, for each item, returns the previous item.\nFor the first item, it returns a null if the list is a vector, otherwise an empty list ()\n.\nq)prev 2 3 5 7 11\n0N 2 3 5 7\nq)prev (1 2;\"abc\";`ibm)\n`int$()\n1 2\n\"abc\"\nShift the times in a table:\nq)update time:prev time by sym from t\nprev\nis a uniform function.\nxprev\n¶\nNearby items in a list\nx xprev y xprev[x;y]\nWhere x\nis a long atom and y\nis a list, returns for each item of y\nthe item x\nindices before it.\nThe first x\nitems of the result are null, empty or blank as appropriate.\nThere is no xnext\nfunction.\nFortunately xprev\nwith a negative number on the left can achieve this.\nq)2 xprev 2 7 5 3 11\n0N 0N 2 7 5\nq)-2 xprev 2 7 5 3 11\n5 3 11 0N 0N\nq)1 xprev \"abcde\"\n\" abcd\"\nxprev\nis a right-uniform function.\n\nnot\n¶\nNot zero\nnot x not[x]\nReturns 0b\nwhere x\nis not equal to zero, and 1b\notherwise.\nApplies to all data types except symbol, and to items of lists, dictionary values and table columns, referring to the underlying data value.\nNulls and infinities never equal zero.\nq)not -1 0 1 2\n0100b\nq)not \"abc\",\"c\"$0\n0001b\nq)not `a`b!(-1 0 2;\"abc\",\"c\"$0)\na| 010b\nb| 0001b\nq)not 2000.01.01 2020.06.30\n10b\nq)not 00:00:00\n1b\nq)not 12:00:00.000000000\n0b\nq)not (0W;-0w;0N)\n000b\nAn atomic function.\nnot\nis a multithreaded primitive.\nneg\nLogic\nQ for Mortals\n§4.3.2 Not Zero not\n\n<>\nNot Equal¶\nx<>y <>[x;y]\nThis atomic binary operator returns 1b\nwhere (items of) x\nare not equal to y\n.\nq)(3;\"a\")<>(2 3 4;\"abc\")\n101b\n011b\nEqual =\nComparison\nQ for Mortals: §4.3.1 Equality = and Disequality <>\n\nnull\n¶\nIs null\nnull x null[x]\nReturns 1b\nwhere x\nis null.\nApplies to all data types except enums, and to items of lists, dict values and table columns.\nnull\nis an atomic function.\nq)null 0 0n 0w 1 0n\n01001b\nq)where all null ([] c1:`a`b`c; c2:0n 0n 0n; c3:10 0N 30)\n,`c2\nEnums always show as non-null.\nq)a:``a\nq)`=`a$` / non-enumerated and enumerated null symbol show as equivalent\n1b\nq)null` / null symbol behaves as expected\n1b\nq)null`a$` / enumeration of null symbol does not\n0b\nThe intention was not to have nulls in the enums. That value is used to indicate out of range. (Think of them as a way to represent foreign keys.) 
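A short illustration of this foreign-key view of enumerations (the domain and values below are arbitrary examples, not taken from the page above):
q)sym:`ibm`msft`goog           / the domain list
q)e:`sym$`msft`goog`msft       / enumerated values behave like foreign keys
q)`int$e                       / underlying indexes into sym
1 2 1i
q)value e                      / resolve back to the symbols
`msft`goog`msft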
To test for an enumeration backed by a null symbol, one can use the equality test – but at the cost of CPU cycles:\nq)a:10000000?`8\nq)v:`a$a\nq)\\ts null v\n18 16777344\nq)\\ts `=v\n66 268435648\nnull\nis a multithreaded primitive."}}},{"rowIdx":55,"cells":{"text":{"kind":"string","value":"// Set the timer to 200ms if not set already\n if[not system\"t\"; system\"t 200\"]];\nif[@[value;`.proc.lowerpowermode;0b];\n if[.timer.enabled;\n // Set the timer to 1000ms if lowpowermode\n system\"t 1000\"]];\n\\\nf:{0N!`firing;x+1}\nf1:{0N!`firing;system\"sleep \",string x}\nrepeat[.proc.cp[];.proc.cp[]+0D00:01;0D00:00:15;(f1;2);\"test timer\"]\nrep[.proc.cp[];.proc.cp[]+0D00:01;0D00:00:15;(f1;3);0h;\"test timer\";1b]\nrep[.proc.cp[];.proc.cp[]+0D00:01;0D00:00:15;(f1;4);1h;\"test timer\";1b]\n\nonce[.proc.cp[]+0D00:00:10;(`.timer.f;2);\"test once\"]\n.dotz.set[`.z.ts;run]\n\\t 500\n\n\n================================================================================\nFILE: TorQ_code_common_timezone.q\nSIZE: 1,203 characters\n================================================================================\n\n// taken from http://code.kx.com/wiki/Cookbook/Timezones\n\n\\d .tz\n\ndefault:@[value;`default;`$\"Europe/London\"]\n\n// Load the timezone info from the config directory\nt:@[get;hsym`$tzfile;{.lg.e[`init;\"failed to load timezone table from \",x,\" \",y]}[tzfile:string first .proc.getconfigfile[\"tzinfo\"]]]\n\n// local from GMT\nlg:{[tz;z] $[0>type z;first;(::)]@exec gmtDateTime+adjustment from aj[`timezoneID`gmtDateTime;([]timezoneID:tz;gmtDateTime:z,());select timezoneID,gmtDateTime,adjustment from t]};\n\n// GMT from local\ngl:{[tz;z] $[0>type z;first;(::)]@exec localDateTime-adjustment from aj[`timezoneID`localDateTime;([]timezoneID:tz;localDateTime:z,());select timezoneID,localDateTime,adjustment from t]};\n\n// timezone switch\n// d = destination time zone\n// s = source timezone\n// z = time\nttz:{[d;s;z]lg[d;gl[s;z]]}\n\n// default from GMT\ndg:lg[default]\n// GMT from default\ngd:gl[default]\n\n\\\n\\d .\n/ To recreate tzinfo from tzinfo.csv\nt:(\"SPJ\";enlist \",\")0:`:tzinfo.csv;\nt: delete offset from update adjustment:`timespan$1000000000*offset from t;\nt: update localDateTime:gmtDateTime+adjustment from t;\nt: `gmtDateTime xasc t;\nt: update `g#timezoneID from t;\n`:tzinfo set t; / save file for easy distribution\n\n\n================================================================================\nFILE: TorQ_code_common_tplogutils.q\nSIZE: 3,435 characters\n================================================================================\n\n/ - functions for checking and repairing (if required) a tickerplant log file\n\\d .tplog\n\nHEADER: 8 # -8!(`upd;`trade;());\t\t\t/ - header to build deserialisable msg\nUPDMSG: `char$10 # 8 _ -8!(`upd;`trade;());\t/ - first part of tp update msg\nCHUNK: 10 * 1024 * 1024;\t\t\t\t\t/ - size of default chunk to read (10MB)\nMAXCHUNK: 8 * CHUNK;\t\t\t\t\t\t/ - don't let single read exceed this\n\ncheck: {[logfile;lastmsgtoreplay]\n\t/ - logfile (symbol) is the handle to the logsfile\n\t/ - lastmsgtoreplay (long) is index position of the last message to be replayed from the log\n\t.lg.o[`tplog.check;\"Checking \",string[logfile],\" . Index of last message to replay is : \",string lastmsgtoreplay];\n\t/ - check if the logfile is corrupt\n\tloginfo: -11!(-2;logfile);\n\t.lg.o[`tplog.check;\"Finished running check on log file. 
Result is : \",.Q.s1 loginfo];\n\t:$[ 1 = count loginfo;\n\t\t/ - the log file is good so return the good log file handle\n\t\t[.lg.o[`tplog.check;\"The logfile is not corrupt\"];logfile];\n\t/ - elseif the number of messages to be replayed is lower than the number of good messages then don't bother repairing the log\n\tloginfo[0] <= lastmsgtoreplay + 1;\n\t\t[.lg.o[`tplog.check;\"The logfile is corrupt but the number of messages to replay (\",string[lastmsgtoreplay + 1],\") is less than the number of messages (\",string[loginfo 0],\")that can be read from the log\"];logfile];\n\t/ - else run the repair function and return out the handle for the \"good\" log\n\t\t[.lg.o[`tplog.check;\"The logfile is corrupt, attempting to write a good log\"];repair[logfile]]\n\t]\n\t};\n\t\nrepair: {[logfile]\n\t/ - append \".good\" to the \"good\" log file\n\tgoodlog: `$ string[logfile],\".good\";\n\t.lg.o[`tplog.repair;\"Writing good log to \",string goodlog];\n\t/ - create file and open handle to it\n\tgoodlogh: hopen goodlog set ();\n\t/ - loop through the file in chunks\n\t.lg.o[`tplog.repair;\"Starting to loop through the log file - \",string logfile];\n\trepairover[logfile;goodlogh] over `start`size!(0j;CHUNK);\n\t.lg.o[`tplog.repair;\"Finished looping through the log file - \",string logfile];\n\t/ - return goodlog\n\tgoodlog\n\t};\n\t\nrepairover: {[logfile;goodlogh;d]\n\t/ - logfile (symbol) is the handle to the logsfile\n\t/ - goodlogh (int) is the handle to the \"good\" log file\n\t/ - d (dictionary) has two keys start and size, the point to start reading from and size of chunk to read\n\t.lg.o[`tplog.repairover;\"Reading logfile with an offset of : \",string[d`start],\" bytes and a chunk of size : \",string[d`size],\" bytes\"];\n\tx:read1 logfile,d`start`size;\t\t\t/ - read bytes from \n\tu: ss[`char$x;UPDMSG];\t\t\t\t\t/ - find the start points of upd messages\n\tif[not count u;\t\t\t\t\t\t\t/ - nothing in this block \n\t\tif[hcount[logfile] <= sum d`start`size;:d];\t/ - EOF - we're done\n\t\t:@[d;`start;+;d`size]];\t\t\t\t/ - move on bytes\n\tm: u _ x;\t\t\t\t\t\t\t\t/ - split bytes into msgs\n\tmz: 0x0 vs' `int$ 8 + ms: count each m;\t/ - message sizes as bytes\n\thd: @[HEADER;7 6 5 4;:;] each mz;\t\t/ - set msg size at correct part of hdr\n\tg: @[(1b;)@-9!;;(0b;)@] each hd,'m;\t\t/ - try and deserialize each msg\n\tgoodlogh g[;1] where k:g[;0];\t\t\t/ - write good msgs to the \"good\" log \n\tif[not any k;\t\t\t\t\t\t\t/ - saw msg(s) but couldn't read\n\t\tif[MAXCHUNK <= d`size;\t\t\t\t/ - read as much as we dare, give up\n\t\t\t:@[d;`start`size;:;(sum d`start`size;CHUNK)]];\n\t\t:@[d;`size;*;2]];\t\t\t\t\t/ - read a bigger chunk\n\tns: d[`start] + sums[ms] last where k;\t/ - move to the end of the last good msg\n\t:@[d;`start`size;:;(ns;CHUNK)]; \n\t};\n\n================================================================================\nFILE: TorQ_code_common_u.q\nSIZE: 1,131 characters\n================================================================================\n\n/2016.07.22 torq edit - added broadcast\n/2008.09.09 .k -> .q\n/2006.05.08 add\n\n\\d .u\nbroadcast:@[value;`broadcast;1b]; // broadcast publishing is on by default. 
Availble in kdb version 3.4 or later.\n\ninit:{w::t!(count t::tables`.)#()}\n\ndel:{w[x]_:w[x;;0]?y};\n.dotz.set[`.z.pc;{del[;x]each t}];\n\nsel:{$[`~y;x;select from x where sym in y]}\n\npub:{[t;x]{[t;x;w]if[count x:sel[x]w 1;(neg first w)(`upd;t;x)]}[t;x]each w t}\n\nadd:{$[(count w x)>i:w[x;;0]?.z.w;.[`.u.w;(x;i;1);union;y];w[x],:enlist(.z.w;y)];(x;$[99=type v:value x;sel[v]y;@[0#v;`sym;`g#]])}\n\nsub:{if[x~`;:sub[;y]each t];if[not x in t;'x];del[x].z.w;add[x;y]}\n\nend:{(neg union/[w[;;0]])@\\:(`.u.end;x)}\n\n// broadcasting. will override .u.pub with -25!\nif[broadcast and .z.K>=3.4;\n // group subscribers by their sym subscription\n pub_broadcast:{[t;x]\n subgroups:flip (w[t;;0]@/:value g;key g:group w[t;;1]);\n {[t;x;w] if[count x:sel[x]w 1;-25!(w 0;(`upd;t;x))] }[t;x] each subgroups};\n\n // store the old definition\n pub_default:pub;\n // override .u.pub\n pub:pub_broadcast;\n ];\n\n================================================================================\nFILE: TorQ_code_dataaccess_checkinputs.q\nSIZE: 7,886 characters\n================================================================================\n\n\\d .dataaccess\n\n// checkinputs is the main function called when running a query - it checks:\n// (i) input format\n// (ii) whether any parameter pairs clash\n// (iii) parameter specific checks\n// The input dictionary accumulates some additional table information/inferred info\ncheckinputs:{[dict]\n if[not in[`checksperformed;key dict];dict:.checkinputs.checkinputs dict];\n dict:checktablename dict;\n if[in[`columns;key dict];.dataaccess.checkcolumns[dict`tablename;dict`columns;`columns];dict:rdbdate[dict;`columns]];\n if[in[`timecolumn;key dict];dict:.dataaccess.checktimecolumn[dict];dict:rdbdate[dict;`timecolumn]];\n dict:filldefaulttimecolumn dict;\n if[in[`instrumentcolumn ;key dict];.dataaccess.checkcolumns[dict`tablename;dict`instrumentcolumn;`instrumentcolumn ]];\n if[in[`aggregations;key dict];.dataaccess.checkaggregations dict;dict:rdbdate[dict;`aggregations]];\n if[in[`filters;key dict];.dataaccess.checkcolumns[dict`tablename;key dict`filters;`filters]];\n if[in[`grouping;key dict];.dataaccess.checkcolumns[dict`tablename;dict`grouping;`grouping];dict:rdbdate[dict;`grouping]];\n if[in[`timebar;key dict];.dataaccess.checktimebar dict;dict:rdbdate[dict;`timebar]];\n if[in[`freeformwhere;key dict];.dataaccess.checkfreeformwhere dict;dict:freeformrdbdate[dict;`freeformwhere]];\n if[in[`freeformby;key dict];.dataaccess.checkfreeformby dict;dict:freeformrdbdate[dict;`freeformby]];\n if[in[`freeformcolumn;key dict];.dataaccess.checkfreeformcolumns dict;dict:freeformrdbdate[dict;`freeformcolumn]];\n if[in[`sqlquery;key dict];'`$.checkinputs.formatstring[.schema.errors[`sqlquery;`errormessage];.proc.proctype]];\n if[in[`firstlastsort;key dict];'`$.checkinputs.formatstring[.schema.errors[`firstlastsort;`errormessage];.proc.proctype]];\n :dict;\n };\n\n// function to check the validity of tablenames\nchecktablename:{[dict]\n if[not dict[`tablename]in exec tablename from .checkinputs.tablepropertiesconfig where proctype in (.proc.proctype,`,`all);\n '`$.checkinputs.formatstring[.schema.errors[`tableexists;`errormessage];dict]];\n dict:.checkinputs.jointableproperties dict;\n :update metainfo:(metainfo,`starttime`endtime!(starttime;endtime))from dict;\n };\n\n//check that time column is of the correct type\nchecktimecolumn:{[dict]\n .dataaccess.checkcolumns[dict`tablename;dict`timecolumn;`timecolumn];\n if[dict[`timecolumn]~`date;:dict];\n if[not first (exec t from meta 
dict`tablename where c=(dict[`timecolumn])) in \"pzd\";'`$.checkinputs.formatstring[\"Parameter:`timecolumn - column:{column} in table:{table} is of type:{type}, validtypes:-12 -14 -15h\";`column`table`type!(dict`timecolumn;dict`tablename;(type( exec from dict`tablename)dict`timecolumn))]]; :dict;\n };\n\n\n// function to fill in default columns to reduce the amount of information a user has to\n// fill in\nfilldefaulttimecolumn:{[dict]\n if[not `timecolumn in key dict; \n :@[dict;`timecolumn;:;.checkinputs.getdefaulttime dict]];\n :dict;\n };\n\n// function to check the validity of columns with respect to the chosen tablename\n// parameter\n checkcolumns:{[table;columns;parameter]\n if[not all(`~columns)& parameter~`columns;\n columns,:();\n avblecols:`i`date,cols table;\n if[any not in[columns;avblecols];\n badcol:columns where not in[columns;avblecols];\n '`$.checkinputs.formatstring[.schema.errors[`checkcolumns;`errormessage];`badcol`tab`parameter!(badcol;table;parameter)]]];};"}}},{"rowIdx":56,"cells":{"text":{"kind":"string","value":"The q language¶\nQ is the programming system for working with kdb+. This corresponds to SQL for traditional databases, but unlike SQL, q is a powerful programming language in its own right.\nQ is an interpreted language. Q expressions can be entered and executed in the q console, or loaded from a q script, which is a text file with extension .q\n.\nYou need at least some familiarity with q to use kdb+. Try following the examples here in the q console interface.\nThe following pages will also be useful:\nLoading q¶\nYou load q by changing to the main q directory, then running the q executable. Note that you should not just click the q executable from the file explorer – this will load q but start in the wrong directory.\nIt is best to create a start-up batch file or script to do this, and there are examples in the q/start\ndirectory, see q.bat\n(Windows), q.sh\n(Linux) and q.app\n(macOS).\nFor example, the Windows q.bat\nis:\nc:\ncd \\q\nw32\\q.exe %*\nIn Linux/macOS, it is best to call the q executable under rlwrap\nto support line recall and edit. The Linux q.sh\nscript is:\n#!/bin/bash\ncd ~/q\nrlwrap l32/q \"$@\"\nFirst steps¶\nOnce q is loaded, you can enter expressions for execution:\nq)2 + 3\n5\nq)2 + 3 4 7\n5 6 9\nYou can confirm that you are in the QHOME\ndirectory by calling a directory list command, e.g.\nq)\\ls *.q\n...\n\"sp.q\"\n...\nq)\\dir *.q\n...\n\"sp.q\"\n...\nCommand-line options\nCommand-line options e.g. q profile.q -p 5001\n- loads script\nprofile.q\nat startup. This can in turn load other scripts. - sets listening port to 5001\nAt any prompt, enter \\\\\nto exit q.\nConsole modes¶\nThe usual prompt is q)\n. Sometimes a different prompt is given; you need to understand why this is, and how to return to the standard prompt.\n-\nIf a function is suspended, then the prompt has two or more\n)\n. In this case, enter a single\\\nto remove one level of suspension, and repeat until the prompt becomesq)\n. For example:q)f:{2+x} / define function f q)f `sym / function call fails with symbol argument {2+x} / and is left suspended 'type + 2 `sym q))\\ / prompt becomes q)). 
Enter \\ to return to usual prompt q)\n-\nIf there is no suspension, then a single\n\\\nwill toggle between q and k modes:q)count each (1 2;\"abc\") / q expression for length of each list item 2 3 q)\\ / toggle to k mode #:'(1 2;\"abc\") / equivalent k expression 2 3 \\ / toggle back to q mode q)\n-\nIf you change namespace, then the prompt includes the namespace.\nq)\\d .h / change to .h namespace q.h)\\d . / change back to default namespace q)\nBasics: System command\n\\d\nError messages¶\nError messages are terse. The format is a single quote, followed by error text:\nq)1 2 + 10 20 30 / cannot add 2 numbers to 3 numbers\n'length\nq)2 + \"hello\" / cannot add number to character\n'type\nBasics: Errors\nIntroductory examples¶\nTo gain experience with the language, enter the following examples and explain the results. Also experiment with similar expressions.\nq)x:2 5 4 7 5\nq)x\n2 5 4 7 5\nq)count x\n5\nq)8 # x\n2 5 4 7 5 2 5 4\nq)2 3 # x\n2 5 4\n7 5 2\nq)sum x\n23\nq)sums x\n2 7 11 18 23\nq)distinct x\n2 5 4 7\nq)reverse x\n5 7 4 5 2\nq)x within 4 10\n01111b\nq)x where x within 4 10\n5 4 7 5\nq)y:(x;\"abc\") / list of lists\nq)y\n2 5 4 7 5\n\"abc\"\nq)count y\n2\nq)count each y\n5 3\nThe following is a function definition, where x\nrepresents the argument:\nq)f:{2 + 3 * x}\nq)f 5\n17\nq)f til 5\n2 5 8 11 14\nQ makes essential use of a symbol datatype:\nq)a:`toronto / symbol\nq)b:\"toronto\" / character string\nq)count a\n1\nq)count b\n7\nq)a=\"o\"\n`type\nq)b=\"o\"\n0101001b\nq)a~b / a is not the same as b\n0b\nq)a~`$b / `$b converts b to symbol\n1b\nData structures¶\nQ basic data structures are atoms (singletons) and lists. Other data structures like dictionaries and tables are built from lists. For example, a simple table is just a list of column names associated with a list of corresponding column values, each of which is a list.\nq)item:`nut / atom (singleton)\nq)items:`nut`bolt`cam`cog / list\nq)sales: 6 8 0 3 / list\nq)prices: 10 20 15 20 / list\nq)(items;sales;prices) / list of lists\nnut bolt cam cog\n6 8 0 3\n10 20 15 20\nq)dict:`items`sales`prices!(items;sales;prices) / dictionary\nq)dict\nitems | nut bolt cam cog\nsales | 6 8 0 3\nprices| 10 20 15 20\nq)tab:([]items;sales;prices) / table\nq)tab\nitems sales prices\n------------------\nnut 6 10\nbolt 8 20\ncam 0 15\ncog 3 20\nNote that a table is a flip\n(transpose) of a dictionary:\nq)flip dict\nitems sales prices\n------------------\nnut 6 10\nbolt 8 20\ncam 0 15\ncog 3 20\nThe table created above is an ordinary variable in the q workspace, and could be written to disk. In general, you create tables in memory and then write to disk.\nSince it is a table, you can use SQL-like query expressions on it:\nq)select from tab where prices < 20\nitems sales prices\n------------------\nnut 6 10\ncam 0 15\nSince it is an ordinary variable, you can also index it and do other typical data manipulations:\nq)tab 1 3 / index rows 1 and 3\nitems sales prices\n------------------\nbolt 8 20\ncog 3 20\nq)tab `sales / index column sales\n6 8 0 3\nq)tab, tab / join two copies\nitems sales prices\n------------------\nnut 6 10\nbolt 8 20\ncam 0 15\ncog 3 20\nnut 6 10\nbolt 8 20\ncam 0 15\ncog 3 20\nA keyed table has one or more columns as keys:\nq)1!tab / keyed table\nitems| sales prices\n-----| ------------\nnut | 6 10\nbolt | 8 20\ncam | 0 15\ncog | 3 20\nFunctions, operators, keywords, iterators¶\nAll functions take arguments on their right in brackets. Operators can also take arguments on left and right, as in 2+2\n(infix syntax). 
Iterators take value arguments on their left (postfix syntax) and return derived functions.\nq)sales * prices / operator: *\n60 160 0 60\nq)sum sales * prices / keyword: sum\n280\nq)sumamt:{sum x*y} / define lambda: sumamt\nq)sumamt[sales;prices]\n280\nq)(sum sales*prices) % sum sales / calculate weighted average\n16.47059\nq)sales wavg prices / keyword: wavg\n16.47059\nq)sales , prices / operator: , join lists\n6 8 0 3 10 20 15 20\nq)sales ,' prices / iterator: ' join lists in pairs\n6 10\n8 20\n0 15\n3 20\nFunctions can apply to dictionaries and tables:\nq)-2 # tab\nitems sales prices\n------------------\ncam 0 15\ncog 3 20\nFunctions can be used within queries:\nq)select items,sales,prices,amount:sales*prices from tab\nitems sales prices amount\n-------------------------\nnut 6 10 60\nbolt 8 20 160\ncam 0 15 0\ncog 3 20 60\nScripts¶\nA q script is a plain text file with extension .q\n, which contains q expressions that are executed when loaded.\nFor example, load the script KxSystems/kdb/sp.q\nand display the s\ntable that it defines:\nq)\\l sp.q / load script\nq)s / display table s\ns | name status city\n--| -------------------\ns1| smith 20 london\ns2| jones 10 paris\ns3| blake 30 paris\ns4| clark 20 london\ns5| adams 30 athens\nWithin a script, a line that contains a single /\nstarts a comment block. A line with a single \\\nends the comment block, or if none, exits the script.\nA script can contain multi-line definitions. Any line that is indented is taken to be a continuation of the previous line. Blank lines, superfluous blanks, and lines that are comments (begin with /\n) are ignored in determining this. For example, if a script has contents:\na:1 2\n/ this is a comment line\n3\n+ 4\nb:\"abc\"\nThen loading this script would define a\nand b\nas:\nq)a\n5 6 7 / i.e. 1 2 3 + 4\nq)b\n\"abc\"\nMulti-line function definitions\nIn scripts, indentation allows function definitions to span multiple lines.\nfn:{[x,y]\na:x*2.5;\nb:x+til floor y;\na & b }\nThe convention entails that in a multi-line definition the closing brace must also be indented. 
It is less likely to get misplaced if suffixed to the last line.\nQ queries¶\nQ queries are similar to SQL, though often much simpler.\nLoading the script KxSystems/kdb/sp.q\nto populate tables s\n,p\nand sp\nwe can show some query examples:\n\\l sp.q\nq)select from p where weight=17\np | name color weight city\n--| ------------------------\np2| bolt green 17 paris\np3| screw blue 17 rome\nSQL statements can be entered, if prefixed with s)\n.\nq)s)select * from p where color in (red,green) / SQL query\np | name color weight city\n--| -------------------------\np1| nut red 12 london\np2| bolt green 17 paris\np4| screw red 14 london\np6| cog red 19 london\nThe q equivalent would be:\nq)select from p where color in `red`green\nSimilarly, compare:\nq)select distinct p,s.city from sp\ns)select distinct sp.p,s.city from sp,s where sp.s=s.s\nand\nq)select from sp where s.city=p.city\ns)select sp.s,sp.p,sp.qty from s,p,sp where sp.s=s.s\nand sp.p=p.p and p.city=s.city\nNote that the dot notation in q automatically references the appropriate table.\nQ results can have lists in the rows.\nq)select qty by s from sp\ns | qty\n--| -----------------------\ns1| 300 200 400 200 100 400\ns2| 300 400\ns3| ,200\ns4| 100 200 300\nungroup\nwill flatten the result.\nq)ungroup select qty by s from sp\ns qty\n------\ns1 300\ns1 200\ns1 400\ns1 200\n...\nCalculations can be performed on the intermediate results.\nq)select countqty:count qty,sumqty:sum qty by p from sp\np | countqty sumqty\n--| ---------------\np1| 2 600\np2| 4 1000\np3| 1 400\np4| 2 500\np5| 2 500\np6| 1 100"}}},{"rowIdx":57,"cells":{"text":{"kind":"string","value":"About this site¶\nThis site is the official documentation for kdb+ and the q programming language.\nIt reflects the work of the KX community since 1993, has many authors, and continues to evolve.\nSearch¶\nThe Search Box on this site is customized for the q language. Some examples:\nOperator glyphs $ ^ . <> /: ':\nand their names dollar bang at\nOperator names Drop roll Enum Extend\nKeywords xbar like ajf0 uj\nNamespace objects .z.pd .Q.dpfts\nSystem commands \\d \\ts \\_ \\\\\nCommand-line options -b -p\nInternal functions -11!\nPopular queries types datatypes\nQueries not matched by the Search Box are handled by Google Search.\nInstall man.q\nto open the Reference direct from the q session.\nGitHub¶\nTruncated GitHub URLs are prefixed with the GitHub icon and omit the https://github.com/\nprefix.\nFor example, read KxSystems/kdb\nas https://github.com/KxSystems/kdb\n.\nContribute¶\nA finished work is exactly that, requires resurrection.\n— John Cage, “Lecture on Nothing”, 1949\nThe repository for this site is KxSystems/docs. The contribution model is GitHub and Forking Workflow. To contribute, submit a pull request.\nThe repo includes a style guide for contributors.\nWe gratefully acknowledge pull requests from\nAlexander Belopolsky James Hanna\nAleks Bunin Jason Quinn\nAngus Wilson kylenarocroc\nAndrew in New York Letian Wang\nAlex Shroyer Mohammad Noor\nBob Herrmann Peter Storeng\nChris Shucksmith Sean Keevey\nCillian Reilly Sean O’Hagan\nConor McCarthy Rian Ó Cuinneagáin\nDavid Crossey Rikesh\nDavid Lu Thomas Smyth\nDavid Z. Han Sergey Vidyuk\nDeanna Morgan Simon Shanks\nDiane O’Donoghue Simon Watson\nEsperanza Lopez Aguilera Vincent Bernardoff\nGeo Carncross William Da Silva\nIan O’Dwyer\nLicense¶\nThis work is licensed under a Creative Commons Attribution 4.0 International License.\nImages¶\nThis site includes images for which KX holds neither copyright nor permission. 
These images serve as links to their original sites. We understand this to be fair use.\nIf you are a copyright holder and object to this use, please write to docs@kx.com.\nTerminology¶\nIn 2018 and 2019 we made changes to the terminology used to describe the q language.\nCitations¶\nHow to cite the q programming language:\nBibtex format\n@misc{OMS,\nauthor= {{Kx Systems, Inc.}},\nyear = {2020},\ntitle = {Documentation for kdb+ and q},\nnote = {\\url{https://code.kx.com/q/ref/},\nLast accessed on 2020-04-15},\n}\n- Chicago style\n-\n“Reference Card.” Documentation for kdb and q . Kx Systems, Inc. Accessed April 15, 2020. https://code.kx.com/q/ref/.\n- Harvard style\n-\nDocumentation for kdb+ and q. 2020. Reference Card. [online] Available at: https://code.kx.com/q/ref/ [Accessed 15 April 2020].\n- Vancouver style\n-\nReference Card [Internet]. Documentation for kdb and q. Kx Systems, Inc.; 2020 [cited 2020Apr15]. Available from: https://code.kx.com/q/ref/\nCitation Machine for other citation styles\nWiki¶\nThe KX wiki was the primary documentation for q and kdb+ until January 2017.\nThe content, which runs on Mediawiki, has been archived on GitHub.\n\nPerformance of Intel Optane persistent memory¶\nKey findings\nUse of Intel® Optane™ persistent memory (PMem) as a block-storage device with KX Streaming Analytics delivers 4× to 12x improved analytics performance compared to high-performance NVMe storage – similar performance to DRAM for query workloads. For key data-processing workloads, we found DRAM requirements were significantly reduced.\nPMem lets organizations support more demanding analytic workloads on more data with less infrastructure.\nSetup and evaluation¶\nHardware setup¶\nWe configured two systems:\n| configuration | baseline without Optane |\nwith Optane |\n|---|---|---|\n| Server and operating system | Supermicro 2029U-TN24R4T Centos 8 | |\n| RAM | 768 GB RAM (2666 MHz) LRDIMMs | |\n| CPU | 36 physical cores: 2 × Intel® Xeon® Gold 6240L Gen.2 2.6 GHz CPU Hyper-threading turned on |\n|\n| Optane Persistent Memory | n/a | 12 DIMMs × 512 GB NMA1XXD512GPS |\n| Log | RAID 50 data volume 24× NVMe P4510 NVME |\nIntel Optane² persistent memory 3 TB EXT4³ DAX |\n| Intraday Database ≤ 24 Hrs | 48 TB RAIDIX ERA¹ Raid Software, XFS 6 RAID 5 Groups, 64 chunk |\nIntel Optane persistent memory 3 TB EXT4 DAX |\n| Historical Database > 24 Hrs | RAID 50 data volume Same as baseline configuration |\nEnvironment setup and testing approach¶\nWe configured a KX Streaming Analytics system operating in a high-availability (HA) cluster, processing and analyzing semiconductor manufacturing data as follows. We ran tests on both configurations for ingestion, processing, and analytics. 
Tests were run with the same data and durations.\nPublishing and ingestion¶\n- Publish and ingest over 2.25M sensor readings in 894 messages per second, 2.5 TB per day\n- Ingest sensor trace, aggregation, event, and IoT data using four publishing clients from a semiconductor front-end processing environment\nAnalytics¶\n- 81 queries per second spanning real-time data, intraday data (< 24 hours), and historical data\n- 100 queries at a time targeting the real-time database (DRAM), intra-day database (on Intel Optane PMem), and historical database (on NVMe storage)\n- Single-threaded calculation and aggregation tests targeted at the in-memory database and intra-day database\nData processing¶\n- Perform a data-intensive process, entailing reading and writing all of the data ingested for the day\nHigh availability and replication¶\n- System ran 24×7 with real-time replication to secondary node\n- Logged all data ingested, to support data protection and recovery\n- Data fed to two nodes, mediated to ensure no data loss in event of disruption to the primary system\nData model and ingestion¶\nThe data-model workload involved multiple tables representing reference or master data, sensor reading, event and aggregation data. This relational model is used in fulfilling streaming analytics and queries spanning real-time and historical data. KX ingests raw data streams, processes and persists data into the following structure. For efficient queries and analytics, KX batches and stores data for similar time ranges together, using one or more sensor or streaming data loaders. The tables and fields used in our configuration are illustrated below.\nTest results¶\nReading and writing to disk¶\nWe used the kdb+ nano I/O benchmark for reading and writing data to a file system backed by block storage. The nano benchmark calculates basic raw I/O capability of non-volatile storage, as measured by kdb+. Note the cache is cleared after each test, unless otherwise specified.\nBypassing page cache\nWith most block storage devices, data is read into page cache to be used by the application. 
However, reads and writes to Intel Optane persistent memory configured as block storage bypass page cache (in DRAM).\nThis improves overall system performances and lowers demand on the Linux kernel for moving data in/out of page cache and for overall memory management.\nRead performance (Intel Optane persistent memory as block device vs NVMe storage):\n- 2× to 9× faster reading data from 36 different files in parallel\n- Comparable to retrieving data from page cache (near DRAM performance)\n- 41× better for reading a file in a single thread.\nWrite performance:\n- 42% slower than NVMe devices, due to striping only across 6 DIMM devices vs 24 NVME drives\n- Similar single-threaded write performance across the two configurations\n| before | after | comparison4 | ||||\n|---|---|---|---|---|---|---|\n| NVMe | PMem | PMem vs NVMe | ||||\n| Threads | 1 | 36 | 1 | 36 | 1 | 36 |\n| Total Write Rate (sync) | 1,256 | 5,112 | 1,137 | 2,952 | 0.91 | 0.58 |\n| Total create list rate | 3,297 | 40,284 | 4,059 | 30,240 | 1.23 | 0.75 |\n| Streaming Read (mapped) | 1,501 | 12,702 | 61,670 | 118,502 | 41.08 | 9.33 |\n| Walking List Rate | 2,139 | 9,269 | 3,557 | 28,657 | 1.66 | 3.09 |\n| Streaming ReRead (mapped) Rate (from DRAM for NVMe) | 35,434 | 499,842 | 101,415 | 479,194 | 2.86 | 0.96 |\n| random1m | 828 | 12,050 | 1,762 | 24,700 | 2.13 | 2.05 |\n| random64k | 627 | 8,631 | 1,905 | 36,970 | 3.04 | 4.28 |\n| random1mu | 607 | 10,216 | 1,099 | 14,679 | 1.81 | 1.44 |\n| random64ku | 489 | 6,618 | 1,065 | 8,786 | 2.18 | 1.33 |\nQuery performance¶\nWe tested query performance by targeting data that would be cached in DRAM, on Intel Optane PMem, and NVMe drives, with parallel execution of each query using multiple threads where possible.\nEach query involved retrieving trace data with a range of parameters including equipment, chamber, lot, process plan, recipe, sequence, part, sensor, time range, columns of data requested. The parameters were randomized for time range of 10 minutes.\nQuery response times using Intel Optane persistent memory were comparable to DRAM and 3.8× to 12× faster than NVMe.\n| QUERY PROCESSES | COMPARISONS | ||||\n|---|---|---|---|---|---|\n| DRAM RDB 2 | PMem IDB 8 | NVMe HDB 8 | PMem vs DRAM4 | PMem vs NVMe | |\n| 1 query at a time | |||||\n| Mean response time (ms) | 23 | 26 | 319 | 1.17 | 12.10 |\n| Mean payload size (KB) | 778 | 778 | 668 | 1 | 1 |\n| 100 queries at a time | |||||\n| Mean response time (ms) | 100 | 82 | 310 | 0.82 | 3.77 |\n| Mean payload size (KB) | 440 | 440 | 525 | 1 | 1 |\nTwo real-time database query processes were configured matching typical configurations, with each process maintaining a copy of the recent data in DRAM. (Additional real-time processes could be added to improve performance with higher query volumes at the cost of additional DRAM.)\nData-processing performance¶\nKX Streaming Analytics enables organizations to develop and execute data and storage I/O intensive processes. 
We compare the performance of a mixed PMem/NVMe storage configuration against an NVMe-only configuration when reading a significant volume of data from the intraday database and persisting it to the historical database on NVMe storage.\nBy reading data from PMem and writing to NVMe-backed storage, Optane increased data-processing throughput by 1.67× (GB/s processed) and reduced the RAM required by 37%.\n| before | after | ||\n|---|---|---|---|\n| NVMe only, no PMem | PMem & NVMe | PMem vs NVMe only4 | |\n| Data processed (GB) | 2,200 | 3,140 | 1.43 |\n| Processing Time (minutes) | 24.97 | 21.40 | 0.86 |\n| Processing rate (GB/s) | 1.47 | 2.45 | 1.67 |\n| Max DRAM Utilisation5 | 56% | 35% | 0.63 |\nSummary results¶\nAnalytics¶\n- Performed within 10% of DRAM for queries involving table joins\n- Performed 4× to 12× faster than 24 NVMe drives in a RAID configuration\n- DRAM performed 3× to 10× faster when performing single-threaded calculations and aggregations on data\nData processing and I/O operations¶\n- Processed 1.6× more data per second than NVMe-only storage when data was read from PMem and written to NVMe storage\n- 2× to 10× faster reading data from files in parallel\n- Speed of reading data similar to page cache (DRAM)\n- Single-threaded file-write performance within 10% in both configurations\n- Multithreaded file-write performance 42% slower\nInfrastructure resources¶\n- Required 37% less RAM to complete key I/O-intensive data processing\n- Required no page cache for querying or retrieving data stored in PMem\nBusiness benefits¶\n- Collect and process more data from higher-velocity sensors and assets\n- Accelerate analytics and queries on recent data by 4× to 12×\n- Reduce infrastructure cost by running fewer servers and less DRAM to support data-processing and analytic workloads\n- Align infrastructure more closely to the value of data by establishing a storage tier between DRAM and NVMe- or SSD-backed performance block storage\nOrganizations should consider Intel Optane persistent memory where there is a need to accelerate analytic performance beyond what is available with NVMe or SSD storage.\nNotes¶\n-\nWe used software RAID from RAIDIX to deliver lower latency and higher throughput for reads and writes, over and above VROC and MDRAID.\nKX Streaming Analytics platform raises its performance with RAIDIX ERA\n-\nIntel Optane persistent memory was configured in App Direct Mode as a single block device with an EXT4 volume.\n-\nIn our testing we found EXT4 performed significantly better than XFS, with EXT4 performing 1.5× to 12× better than XFS.\n-\nHigher is better. Factor of 1 = same performance. Factor of 2 = 200% faster than comparator.\n-\nMaximum DRAM utilization was measured by the operating system during the process and is primarily a function of the amount of data that needed to be maintained in RAM for query access. 
The faster the completion of the process the less RAM that is required on the system."}}},{"rowIdx":58,"cells":{"text":{"kind":"string","value":"// @private\n//\n// @overview\n// Set a Keras model within the ML Registry\n//\n// @param registryPath {string} Full/relative path to the model registry\n// @param model {any} `(<|foreign)` The Keras object to be saved as a h5 file.\n// @param modelInfo {dict} Information relating to the model which is\n// being saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.util.set.kerasModel:{[registryPath;model;modelInfo]\n $[99h=type model;\n [{[registryPath;modelInfo;sym;model]\n mlops.check.keras[model;0b];\n modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model];\n registry.util.set.write[model[`:save];modelPath,\"/\",string[sym],\"/mdl.h5\"];\n }[registryPath;modelInfo]'[key model;value model];\n ];\n [\n mlops.check.keras[model;0b];\n modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model];\n registry.util.set.write[model[`:save];modelPath,\"/mdl.h5\"];\n ]\n ]\n }\n\n// @private\n//\n// @overview\n// Set a Torch model within the ML Registry\n//\n// @param registryPath {string} Full/relative path to the model registry\n// @param model {any} `(<|foreign)` The Torch object to be saved as a h5 file.\n// @param modelInfo {dict} Information relating to the model which is\n// being saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.util.set.torchModel:{[registryPath;model;modelInfo]\n $[99h=type model;\n [{[registryPath;modelInfo;sym;model]\n mlops.check.torch[model;0b];\n modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model];\n registry.util.set.write[{.p.import[`torch][`:save][x;y]}[model];modelPath,\"/\",string[sym],\"/mdl.pt\"];\n }[registryPath;modelInfo]'[key model;value model];\n ];\n [\n mlops.check.torch[model;0b];\n modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model];\n registry.util.set.write[{.p.import[`torch][`:save][x;pydstr y]}[model];modelPath,\"/mdl.pt\"];\n ]\n ]\n }\n\n// @private\n//\n// @overview\n// Add a code file with extension '*.p','*.py','*.q' to a specific\n// model such that the code can be loaded on retrieval of the model.\n// This is required to facilitate comprehensive support for PyTorch\n// models being persisted and usable.\n//\n// @param files {symbol|symbol[]} The absolute/relative path to a file or\n// list of files that are to be added to the registry associated with a\n// model. 
These must be '*.p', '*.q' or '*.py'\n// @param registryPath {string} Full/relative path to the model registry\n// @param modelInfo {dict} Information relating to the model which is\n// being saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.util.set.code:{[files;registryPath;modelInfo]\n if[(11h<>abs type files)|all null files;:(::)];\n files:registry.util.check.code[files];\n if[0~count files;:(::)];\n codePath:registry.util.path.modelFolder[registryPath;modelInfo;`code];\n registry.util.copy.file[;hsym`$codePath]each files;\n }\n\n// @private\n//\n// @overview\n// Add a requirements file associated with a model to the versioned model\n// folder this can be either a 'pip freeze` of the current environment,\n// a user supplied list of requirements which can be pip installed or the\n// path to an existing requirements.txt file which can be used.\n//\n// 'pip freeze' is only suitable for users running within venvs and as such\n// is not supported within environments which are not inferred to be venvs as\n// running within 'well' established environments can cause irreconcilable\n// requirements.\n//\n// @param folderPath {string|null} A folder path indicating the location\n// the registry containing the model which is to be populated with a requirements\n// file\n// @param config Configuration provided by the user to\n// customize the experiment\n//\n// @return {null}\nregistry.util.set.requirements:{[config]\n requirement:config[`requirements];\n $[0b~requirement;\n :(::);\n 1b~requirement;\n registry.util.requirements.pipfreeze config;\n -11h=type requirement;\n registry.util.requirements.copyfile config;\n 0h=type requirement;\n registry.util.requirements.list config;\n logging.error\"requirements config key must be a boolean, symbol or list of strings\"\n ];\n }\n\n// @private\n//\n// @overview\n// Set the parameters to a json file\n//\n// @param paramPath {string} The path to the parameter file\n// @param params {dict|table|string} The parameters to save to file\n//\n// @return {null}\nregistry.util.set.params:{[paramPath;params]\n (hsym `$paramPath) 0: enlist .j.j params\n }\n\n// @private\n//\n// @overview\n// Set a metric associated with a model to a supported cloud\n// vendor or on-prem. 
This is a wrapper function used to facilitate\n// protected execution.\n//\n// @param storage {symbol} Type of registry storage - local or cloud\n// @param experimentName {string|null} The name of an experiment\n// @param modelName {string|null} The name of the model to be retrieved\n// @param version {long[]|null} The specific version of a named model\n// @param metricName {string} The name of the metric to be persisted\n// @param metricValue {float} The value of the metric to be persisted\n//\n// @return {null}\nregistry.util.set.metric:{[storage;experimentName;modelName;version;config;metricName;metricValue]\n modelDetails:registry.util.search.model[experimentName;modelName;version;config];\n if[not count modelDetails;\n logging.error\"No model meeting your provided conditions was available\"\n ];\n // Construct the path to metric folder containing the config to be updated\n config,:flip modelDetails;\n metricPath:registry.util.path.modelFolder[config`registryPath;config;`metrics];\n fileExists:`metric in key hsym`$metricPath;\n if[not fileExists;registry.util.create.modelMetric[metricPath]];\n registry.set.modelMetric[metricName;metricValue;metricPath];\n if[`local<>storage;\n registry.cloud.update.publish config\n ];\n }\n\n// @private\n//\n// @overview\n// Set JSON file for specified object\n//\n// @param config {dict} Information relating to the model\n// being saved, this includes version, experiment and model names\n// @param jsonTyp {symbol} `registry.util.create` function to call\n// @param jsonStr {string} Name of JSON file\n// @param args {any} Arguments to apply to `registry.util.create` function.\n//\n// @return {null}\nregistry.util.set.json:{[config;jsonTyp;jsonStr;args]\n jsonConfig:registry.util.create[jsonTyp]. args;\n if[not(::)~jsonConfig;\n (hsym `$config[`versionPath],\"/config/\",jsonStr,\".json\") 0: enlist .j.j jsonConfig\n ];\n }\n\n// @private\n//\n// @overview\n// Set Python library and q/Python language versions with persisted models\n//\n// @param modelType {string} User provided model type defining is the model was \"q\"/\"sklearn\" etc\n// @param config Information relating to the model\n// being saved, this includes version, experiment and model names along with\n// path information relating to the saved location of model\n//\n// @return {null}\nregistry.util.set.version:{[modelType;config]\n // Information about Python/q version used in model saving\n versionFile:config[`versionPath],\"/.version.info\";\n\n // Define q version used when persisting the model\n versionInfo:enlist[`q_version]!enlist \"Version: \",string[.z.K],\" | Release Date: \",string .z.k;\n\n // Add model type to version info\n versionInfo,:enlist[`model_type]!enlist modelType;\n\n // If the model isn't q save version of Python used\n if[`q<>`$modelType;versionInfo,:enlist[`python_version]!enlist .p.import[`sys;`:version]`];\n \n // Information about the Python library version used in the process of generating the model\n if[(`$modelType) in `sklearn`keras`torch`xgboost`pyspark;\n versionInfo,:enlist[`python_library_version]!enlist pygetver modelType;\n ];\n // dont allow same model with different versions of q/python\n $[count key hsym `$versionFile;\n $[(.j.k raze read0 hsym `$versionFile)~.j.k raze .j.j versionInfo;\n (hsym `$versionFile) 0: enlist .j.j versionInfo;\n '\"Error writing same model with two environments see .version.info file\"\n ];\n (hsym `$versionFile) 0: enlist .j.j versionInfo];\n 
}\n\n\n================================================================================\nFILE: ml_ml_registry_q_main_utils_update.q\nSIZE: 2,570 characters\n================================================================================\n\n// update.q - Functionality for updating information related to the registry\n// Copyright (c) 2021 Kx Systems Inc\n//\n// @overview\n// Utilities for updating registry information\n//\n// @category Model-Registry\n// @subcategory Utilities\n//\n// @end\n\n\\d .ml\n\n// @private\n//\n// @overview\n// Update the configuration supplied by a user such to include\n// all relevant information for the saving of a model and its\n// associated configuration\n//\n// @param modelName {string} The name to be associated with the model\n// @param modelType {string} The type of model that is being saved, namely\n// \"q\"|\"sklearn\"|\"keras\"\n// @param config {dict} Configuration information provided by the user\n//\n// @return {dict} Default configuration defined by\n// '.ml.registry.config.model' updated with user supplied information\nregistry.util.update.config:{[modelName;modelType;config]\n config:registry.config.model,config;\n config[`experimentName]:registry.util.check.experiment config`experimentName;\n config,:`modelName`modelType!(modelName;modelType);\n registry.util.check.modelType config;\n config,:`registrationTime`uniqueID!(enlist .z.p;-1?0Ng);\n registry.util.search.version config\n }\n\n// @private\n//\n// @overview\n// Check folder paths, storage type and configuration and prepare the\n// ML Registry for publishing to the appropriate vendor\n//\n// @param folderPath {string|null} A folder path indicating the location\n// of the registry or generic null if in the current directory\n// @param experimentName {string|null} The name of an experiment from which\n// to retrieve a model, if no modelName is provided the newest model\n// within this experiment will be used. If neither modelName or\n// experimentName are defined the newest model within the\n// \"unnamedExperiments\" section is chosen\n// @param modelName {string|null} The name of the model to be retrieved\n// in the case this is null, the newest model associated with the\n// experiment is retrieved\n// @param version {long[]|null} The specific version of a named model to retrieve\n// in the case that this is null the newest model is retrieved (major;minor)\n// @param config {dict|null} Configuration information provided by the user\n//\n// @return {dict} Updated configuration information\nregistry.util.update.checkPrep:{[folderPath;experimentName;modelName;version;config]\n config,:registry.util.check.config[folderPath;config];\n if[`local<>storage:config`storage;storage:`cloud];\n prepParams:(folderPath;experimentName;modelName;version;config);\n registry[storage;`update;`prep]. 
prepParams\n }\n\n\n================================================================================\nFILE: ml_ml_registry_tests_scripts_monitorUtils.q\nSIZE: 504 characters\n================================================================================\n\nmonitorCols:`nulls`infinity`schema`latency`psi`csi`supervised;\nmonitorFeatureChecks:{[k;r]\n all(type[r]~99h;\n count[r]~7;\n cols[r]~k;\n value[r]~1111110b\n )\n }[monitorCols]\nmonitorValueChecks:{[k;r]\n all(type[r]~99h;\n count[r]~7;\n cols[r]~k;\n key[r`nulls]~enlist`x;\n key[r`infinity]~`negInfReplace`posInfReplace;\n key[r`latency]~`avg`std;\n key[r`csi]~enlist`x;\n r[`schema]~enlist[`x]!enlist(),\"f\";\n r[`supervised]~()\n )\n }[monitorCols]\n\n\n================================================================================\nFILE: ml_ml_stats_init.q\nSIZE: 131 characters\n================================================================================\n\n// stats/init.q - Load stats library\n// Copyright (c) 2021 Kx Systems Inc\n\n.ml.loadfile`:stats/utils.q\n.ml.loadfile`:stats/stats.q\n\n\n================================================================================\nFILE: ml_ml_stats_stats.q\nSIZE: 6,255 characters\n================================================================================\n\n// stats/stats.q - Statistical tools\n// Copyright (c) 2021 Kx Systems Inc\n//\n// This statistical library contains functionality ranging from\n// descriptive statistical methods to gain more insight into a \n// users data, to linear regression estimation methods to investigate \n// unknown parameters in a model. Includes OLS, WLS, describe, \n// and percentile\n\n\\d .ml"}}},{"rowIdx":59,"cells":{"text":{"kind":"string","value":"C# client for kdb+¶\nA kdb+ interface for the C# programming language is documented and available to download from https://github.com/KxSystems/csharpkdb.\nThe interface permits connecting C# and kdb+ processes via IPC.\nA kdb+ interface for the C# programming language is documented and available to download from https://github.com/KxSystems/csharpkdb.\nThe interface permits connecting C# and kdb+ processes via IPC.\n\nWorking with Microsoft Excel™¶\nInterfacing via HTTP and CSV files¶\nAssume that a kdb+ server process is listening on port 5001. Then an HTTP client can send a request that will return a CSV file. For instance, you can type this URL into a browser\nhttp://localhost:5001/q.csv?select from trade where i < 10\nto get the first 10 trades. Depending on your browser settings, the result will be opened directly in Excel, saved to a CSV file, etc. The resulting CSV file would look something like this:\nstock,price,amount,time\ngoog,75.43086,1800,05:21:48.815\namzn,96.28739,1400,03:46:53.366\ngoog,4.82224,2700,19:21:25.970\namd,34.25556,2400,16:00:29.397\nmsft,79.84078,1800,10:46:41.918\nibm,85.37164,1700,08:51:43.909\nintel,60.03132,1900,08:17:48.629\namd,48.66041,2200,00:59:15.559\nibm,97.46072,1000,00:50:52.943\nibm,7.951954,1200,20:21:11.319\nAlternatively a command-line HTTP client, such as wget\n, can also be used:\nwget -O output.csv \"http://localhost:5001/q.csv?select from trade where i < 10\"\nThis saves the result of the query to the file output.csv, which can be loaded into Excel later.\nTable result\nThe result must be a table, so that it can be converted to a CSV file. For instance, the following is invalid:\nwget -O output.csv \"http://localhost:5001/q.csv?first trade\"\nbecause the result is a dictionary. We need 1 # trade\n. 
Notice that the #\nsymbol cannot be written literally in a URL.\nwget -O output.csv \"http://localhost:5001/q.csv?1 %23 trade\"\nInterfacing via CSV files¶\nCSV files can also be generated by a q process, without using HTTP. For instance, the result of the previous query can be saved into a table and then to a file:\nq)output: select from trade where i < 10\nq)save `:output.csv\nExcel automation add-ins¶\nWith Automation add-ins for Excel, you can use a C# function in a cell formula. This function can communicate with a kdb+ server process.\nWriting Custom Excel Worksheet Functions in C#\nReal-time interface via Excel RTD¶\nIt is possible to have Excel display changing data dynamically using the RTD (real-time data) functionality. Charles Skelton has developed a RTD server for q.\nThis server is a .NET application, and it communicates directly with a q Ticker Plant, or a chained ticker plant. The RTD feature allows real-time data coming in from the ticker plant to be displayed in Excel. The schema can be customized according to whatever table names and column names are present in the ticker plant.\nDownloading¶\nThe RTD server can be downloaded from CharlesSkelton/excelrtd\nTo install, run the setup.exe\nprogram and follow the instructions. You will need the Microsoft .NET Runtime installed.\nConfiguration¶\nChange directory to the location where you installed the program. At that location you will see the file\nK4RtdServer.dll.config\n,\nwhich is an XML file that contains config information for the server. Change the host\nand port\nkeys in this file to connect to your ticker plant or chained ticker plant. Chained ticker plants are recommended as they provide some level of protection to your primary ticker plant.\nThe name\nkey indicates the logical name of the plant, and is referenced from within the Excel worksheet.\nFor troubleshooting, a log can be activated – the log directory is specified in the config file under the logdir\nkey. The program will create log files in the format logdir/log\\_hhmmssfff.txt\n. Several levels of tracing are available:\noff\nerror\nwarning\ninfo\nverbose\nThe RTD Server can also “fill” data on your behalf – should a null value be received from your ticker plant, the RTD server will use the last non-null value received for that cell instead.\nExample RTD file¶\nThe distribution contains an example Excel file that works with the default schema for demo trade and quote schema shipped with q.\nThe format for requesting data from the RTD Server is\n=RTD(\"K4RtdServer\",,\"plantname\",\"tablename\",\"column\",\"symbol\")\nThe RTD server can also store the recent history of a cell, and this can be made available by using an index into the history as an additional parameter to the RTD call, e.g.\n=RTD(\"K4RtdServer\",,\"plantname\",\"tablename\",\"column\",\"symbol\",1)\nwill get the previous value of the cell. This can be useful for conditional formatting or perhaps triggering some other calculation. Other cells can be dependent on cells using the RTD function, as can series in charts.\nAdjusting the update rate for Excel¶\nTo set the engine to handle a larger volume of updates, in Excel complete the following:\n- In Excel, go to the Visual Basic Editor, by pressing Alt+F11 or clicking Tools > Macro > Visual Basic Editor\n- In the Immediate window (press Ctrl+G or click View > Immediate Window), type:\nApplication.RTD.ThrottleInterval = 1000\n? 
Application.RTD.ThrottleInterval\n\nFFI interface for kdb+¶\nFFI (foreign function interface) is a mechanism by which a program written in one programming language can call routines or make use of services written in another.\nThe FFI interface is an extension to kdb+ for loading and calling dynamic libraries using pure q. The main purpose of the library is to build stable interfaces on top of external libraries, or to interact with the operating system from q.\nNo compiler toolchain or writing C/C++ code is required to use this library.\nThe FFI interface is documented and available to download from https://github.com/KxSystems/ffi/\n\nGPUs¶\nThis is a quick example of calling CUDA code from q. It’s quite trivial to call out to the code.\nTo set the scene (and hopefully experts will forgive the simplifications) CUDA is a variant on C that is used to write general-purpose programs that execute on NVIDIA graphics cards. Data is copied to the card, the computation executed, and the results copied back. It is important that\n- there is significant computation work to be performed on the card – ideally this entirely dominates the execution time\n- there is enough parallelism in the computation to keep the hardware resources of the card/s busy\n- that the data set fit in the limited memory of the cards\nOn to a simple example of a function that takes an array of reals and squares it. Here we use single-precision floating point, however double can be used as well on later-model cards. Here is the annotated code:\n// Include the cuda header and the k.h interface.\n#include \n#include\"k.h\"\n// Export the function we will load into kdb+\nextern \"C\" K gpu_square(K x);\n// Define the \"Kernel\" that executes on the CUDA device in parallel\n__global__ void square_array(float *a, int N) {\nint idx = blockIdx.x * blockDim.x + threadIdx.x;\nif (idx>> (device_memory, xn);\n// Copy back the data, overwriting the input,\n// free the memory we allocated on the graphics card\ncudaMemcpy(host_memory, device_memory, size, cudaMemcpyDeviceToHost);\ncudaFree(device_memory);\nreturn 0;\n}\nThen we write test.q\n.\nsquare:`cudalib 2:(`gpu_square;1)\nnumbers: \"e\"$til 10\nsquare[numbers]\nnumbers\n\\\\\nHere’s a sample execution 64-bit Linux with an NVIDIA GTX 8800.\n$ q test.q\nKDB+ 2.4 2008.09.02 Copyright (C) 1993-2008 Kx Systems\nl64/ ...\n0 1 4 9 16 25 36 49 64 81e\nTo give a feel for real use cases, a Libor Monte-Carlo portfolio computation runs in about 26 seconds on a single core of an x86 machine, and in 0.2 seconds on the graphics card. Some companies are releasing commercial code, such as swaption volatility calculations, as libraries that use GPUs under the covers.\n\nJava client for kdb+¶\nA kdb+ interface for the Java programming language is documented and available to download from https://github.com/KxSystems/javakdb.\nThe interface comprises of the following features:\n- query kdb+\n- subscribe to a kdb+ publisher\n- publish to a kdb+ consumer\n- serialize/deserialize kdb+ formatted data\n- act as a server for a kdb+ instance"}}},{"rowIdx":60,"cells":{"text":{"kind":"string","value":"\\d .gw\n// if error & sync message, throws an error. Else passes result as normal\n// status - 1b=success, 0b=error. 
sync - 1b=sync, 0b=async\nformatresponse:{[status;sync;result]$[not[status]and sync;'result;result]};\nsynccallsallowed:0b // whether synchronous calls are allowed\nquerykeeptime:0D00:30 // the time to keep queries in the\nerrorprefix:\"error: \" // the prefix for clients to look for in error strings\nclearinactivetime:0D01:00 // the time to keep inactive handle data\n\n\\d .kxdash\nenabled:0b // Functionality for parsing and handling kx dashboard queries - disabled by default\n\n\\d .proc\nloadprocesscode:1b // whether to load the process specific code defined at ${KDBCODE}/{process type}\n\n// Server connection details\n\\d .servers\nCONNECTIONS:`rdb`hdb`idb // list of connections to make at start up\nRETRY:0D00:01 // period on which to retry dead connections. If 0, no reconnection attempts\n\n\\d .aqrest\nloadexecute:0b // Whether to reset .aqrest.execute\n\n\n================================================================================\nFILE: TorQ_config_settings_hdb.q\nSIZE: 303 characters\n================================================================================\n\n// Bespoke HDB config\n\n\\d .proc\nloadprocesscode:1b // whether to load the process specific code defined at ${KDBCODE}/{process type}\n\n// Server connection details\n\\d .servers\nCONNECTIONS:()\t\t\t// list of connections to make at start up\nSTARTUP:1b // create connections\n\n================================================================================\nFILE: TorQ_config_settings_housekeeping.q\nSIZE: 136 characters\n================================================================================\n\n//housekeeping config\n\\d .hk\n\ninputcsv:first .proc.getconfigfile[\"housekeeping.csv\"]\nruntimes:02:00:00\nrunnow:0b\n\n\\d .win\n\nversion:`w10\n\n\n================================================================================\nFILE: TorQ_config_settings_idb.q\nSIZE: 520 characters\n================================================================================\n\n// Bespoke IDB config\n\\d .idb\nwdbtypes:`wdb;\n\n// Server connection details\n\\d .servers\nCONNECTIONS:`wdb // list of connections to make at start up\nSTARTUP:1b // create connections\n\n\\d .proc\nloadprocesscode:0b // whether to load the process specific code defined at ${KDBCODE}/{process type}\n\n\n================================================================================\nFILE: TorQ_config_settings_monitor.q\nSIZE: 1,371 characters\n================================================================================\n\n// Default configuration for the monitor process\n\n\\d .monitor \nconfigcsv: first .proc.getconfigfile[\"monitorconfig.csv\"]; //filepath to checkmonitor config csv file \nconfigstored:`; //filepath to checkmonitor flat table file\nruncheckinterval:0D00:00:05; //interval to run checks \ncheckinginterval:0D00:00:07; //interval to identify that checks are not lagging \ncleartrackinterval:0D01:00:00; //interval to check tracks are under certain age in checktracker \nagecheck:0D12:00:00; //if check over agecheck, delete from tracker\nlagtime:0D00:01:00; //if check has been running over this time, set to neg\n\n\n//Enable loading\n\\d .proc\nloadprocesscode:1b; //whether to load process specific code defined at ${KDBCODE}/{process type} \n\n// Server connection details\n\\d .servers\nCONNECTIONS:`ALL\t\t // list of connections to make at start up\n\n\n================================================================================\nFILE: TorQ_config_settings_rdb.q\nSIZE: 3,042 
characters\n================================================================================\n\n// Bespoke RDB config\n\n\\d .rdb\nignorelist:`heartbeat`logmsg //list of tables to ignore when saving to disk\nhdbtypes:`hdb //list of hdb types to look for and call in hdb reload\nhdbnames:() //list of hdb names to search for and call in hdb reload\ntickerplanttypes:`segmentedtickerplant //list of tickerplant types to try and make a connection to\ngatewaytypes:`gateway //list of gateway types to try and make a connection to\nchecktpperiod:0D00:00:05 //how often to check for tickerplant connection\nonlyclearsaved:0b //if true, eod writedown will only clear tables which have been successfully saved to disk\nsubscribeto:` //a list of tables to subscribe to, default (`) means all tables\nsubscribesyms:` //a list of syms to subscribe for, (`) means all syms\nsavetables:1b //if true tables will be saved at end of day, if false tables wil not be saved, only wiped\ngarbagecollect:1b //if true .Q.gc will be called after each writedown - tradeoff: latency vs memory usage\nupd:insert //value of upd\nhdbdir:`:hdb //the location of the hdb directory\nreplaylog:1b //replay the tickerplant log file\nschema:1b //retrieve the schema from the tickerplant\ntpconnsleepintv:10 //number of seconds between attempts to connect to the tp \ngc:1b //if true .Q.gc will be called after each writedown - tradeoff: latency vs memory usage\n\nsortcsv:hsym first .proc.getconfigfile[\"sort.csv\"]\t//location of csv file\nreloadenabled:0b //if true, the RDB will not save when .u.end is called but\n //will clear it's data using reload function (called by the WDB)\nparvaluesrc:`log //where to source the rdb partition value, can be log (from tp log file name),\n //tab (from the the first value in the time column of the table that is subscribed for)\n //anything else will return a null date which is will be filled by pardefault \npardefault:.z.D //if the src defined in parvaluesrc returns null, use this default date instead\ntpcheckcycles:0W //specify the number of times the process will check for an available tickerplant\nsubfiltered:0b //allows subscription filters to be loaded and applied in the rdb\nconnectonstart:1b //rdb connects to tickerplant as soon as it is started\n\n\\d .proc\nloadprocesscode:1b // whether to load the process specific code defined at ${KDBCODE}/{process type}\n\n// Server connection details\n\\d .servers\nCONNECTIONS:`hdb // list of connections to make at start up\nSTARTUP:1b // create connections\n\n================================================================================\nFILE: TorQ_config_settings_reporter.q\nSIZE: 375 characters\n================================================================================\n\n/- Reporter config\n\n\\d .rp\n\ninputcsv:first .proc.getconfigfile[\"reporter.csv\"];\t/- Location of report configuration csv file\nflushqueryloginterval:1D00:00:00;\t\t/- How often to flush the report query log data\nwritetostdout:1b;\t\t\t \t/- whether to write query log info to standard out as well\t\n\n\\d .servers\nCONNECTIONS:`gateway`rdb`hdb\t\t\t\t/- create connections to all processes\n\n\n================================================================================\nFILE: TorQ_config_settings_segmentedchainedtickerplant.q\nSIZE: 1,666 characters\n================================================================================\n\n\\d .\n\ncreatelogs:0b; // create an stp log file (off in SCTP as createlogs does not control SCTP logging)\n\n\\d .sctp\n\nchainedtp:1b; // 
switched between STP and SCTP codebases\nloggingmode:`none; // [none|create|parent]\ntickerplantname:`stp1; // list of tickerplant names to try and make a connection to\ntpconnsleep:@[value;`tpconnsleep;10]; // number of seconds between attempts to connect to source tickerplant\ntpcheckcycles:@[value;`tpcheckcycles;0W]; // number of times the process will check for an available tickerplant\nsubscribeto:`; // list of tables to subscribe for\nsubscribesyms:`; // list of syms to subscription to\nreplay:0b; // replay the tickerplant log file\nschema:1b; // retrieve schema from tickerplant\n\n\\d .stplg\n\nmultilog:`tabperiod; // [tabperiod|none|periodic|tabular|custom]\nmultilogperiod:0D01;\nerrmode:1b;\nbatchmode:`defaultbatch; // [autobatch|defaultbatch|immediate]\ncustomcsv:hsym first .proc.getconfigfile[\"stpcustom.csv\"];\nreplayperiod:`day // [period|day|prior]\n\n\\d .proc\n\nloadcommoncode:1b;\nloadprocesscode:1b;\n\n\\d .timer\n\nenabled:1b; // enable timer"}}},{"rowIdx":61,"cells":{"text":{"kind":"string","value":"h:-2 / handle to print log\nlvl:2 / log level\nunit:\"BKMGTP\" / memory unit character\nmult:5 (1024*)\\ 1 / memory multiplier\n\n/ build memory string\nmem:{@[string\"i\"$(3#x)%mult m;2;,;unit m:mult bin x 2]}\n\n/ build log header\nhdr:{string[(.z.D;.z.T)],mem system \"w\"}\n\n/ build log message\nmsg:{if[x<=lvl;h \" \" sv hdr[],(y;$[10h=type z;z;-3!z])]}\n\n/ user level functions to log messages\nerr:msg[0;\"[E]\"]\nwrn:msg[1;\"[W]\"]\ninf:msg[2;\"[I]\"]\ndbg:msg[3;\"[D]\"]\ntrc:msg[4;\"[T]\"]\n\n\n================================================================================\nFILE: qtips_md.q\nSIZE: 1,325 characters\n================================================================================\n\n/ empty tables\nref:.util.sattr 1!flip `id`px`ts`qs`vol`rfr!\"jffjff\"$\\:()\nprices:.util.sattr flip `id`px`time!\"jfp\"$\\:()\nprice:.util.sattr 1!prices\ntrades:.util.sattr flip `id`ts`tp`time!\"jjfp\"$\\:()\ntrade:.util.sattr 1!trades\nquotes:.util.sattr flip `id`bs`bp`ap`as`time!\"jjffjp\"$\\:()\nquote:.util.sattr 1!quotes\n\n\\d .md\n\n/ update the current price for id\nupdp:{[id;tm]\n .log.dbg \"updating price for \", string id;\n p:`price id;\n r:`ref id;\n z:.stat.norminv rand 1f;\n f:.stat.gbm[r `vol;r `rfr;(tm-p `time)%365D06;z];\n p:`id`px`time!(id;f*p `px;tm);\n `price`prices upsert\\: p;\n }\n\n/ update the current quote for id\nupdq:{[id;tm]\n .log.dbg \"updating quote for \", string id;\n px:`price[id;`px];\n r:`ref id;\n q:`id`time!(id;tm);\n q,:`bp`ap!.sim.tickrnd[r `ts] px;\n q,:`bs`as!1+2?r `qs;\n `quote`quotes upsert\\: q;\n }\n\n/ update the current trade price for id\nupdt:{[id;tm]\n if[not id in key `quote;:(::)];\n .log.dbg \"updating trade for \", string id;\n q:`quote id;\n t:`id`time!(id;tm);\n t,:`ts`tp!.sim.trd[rand 0b;rand 1f] . 
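The SCTP config above initializes several settings with the pattern @[value;`tpconnsleep;10]. This is protected application of value to a global name: if the name is already defined (for example by an earlier config layer or the command line) the existing value is kept, otherwise value signals an error and the trap returns the default. A minimal sketch of the idiom, with an illustrative variable name:
q)tpconnsleep:@[value;`tpconnsleep;10]   / not yet defined, so the default 10 is used
q)tpconnsleep
10
q)tpconnsleep:30                         / simulate a value set by an earlier config layer
q)@[value;`tpconnsleep;10]               / already defined, so the existing value is kept
30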
q `bs`bp`ap`as;\n `trade`trades upsert\\: t;\n }\n\n/ dump all md.q tables in partitioned database format\ndump:{[db;tm]\n dt:\"d\"$tm;\n .log.inf \"dumping tables in \", 1_ string ` sv db,`$string dt;\n 0!/:`price`quote`trade;\n .Q.dpft[db;dt;`id] each`price`quote`trade`prices`quotes`trades;\n 1!/:`price`quote`trade;\n }\n \n\n================================================================================\nFILE: qtips_net.q\nSIZE: 496 characters\n================================================================================\n\n/ table to hold active and inactive connection information\nhandle:.util.sattr 1!flip `h`active`user`host`address`time!\"ibss*p\"$\\:()\n\n/ record new client connection\n.z.po:{[h]`handle upsert (h;1b;.z.u;.Q.host .z.a;\"i\"$0x0 vs .z.a;.z.P);}\n.z.po 0i / simulate opening of 0\n\n/ mark client connection as inactive\n.z.pc:{[h]`handle upsert `h`active`time!(h;0b;.z.P);}\n\n/ modify log header to include user, handle and host\n\\d .log\nhdr:{string[(.z.D;.z.T;.z.u;.z.w;`handle . (.z.w;`host))],mem get\"\\\\w\"}\n\n\n================================================================================\nFILE: qtips_opt.q\nSIZE: 526 characters\n================================================================================\n\n\\d .opt\n\n/ empty getopt configuration \nconfig:1#flip `opt`def`doc!\"s**\"$\\:()\n\n/ parse x according to (c)onfig and list of (h)syms\ngetopt:{[c;h;x]\n p:(!). c`opt`def;\n p:.Q.def[p] .Q.opt x;\n p:@[p;h;hsym];\n p}\n\n/ wrap a list of (s)trings (l)eft and (r)ight text\nwrap:{[l;r;s](max count each s)$s:l,/:s,\\:r}\n\n/ print usage according to (c)onfig and (f)ile\nusage:{[c;f]\n u:enlist \"usage: q \",(string f),\" [option]...\";\n a:wrap[(7#\" \"),\"-\";\" \"] string c `opt;\n a:a,'wrap[\"<\";\"> \"] c `doc;\n a:a,'wrap[\"(\";\")\"] -3!'c `def;\n u,:a;\n u}\n\n================================================================================\nFILE: qtips_prof.q\nSIZE: 1,068 characters\n================================================================================\n\n/ empty events table\nprof.events:flip `id`pid`func`time!\"jjsn\"$\\:()\n\n/ view of profile statistics report\nprof.rpt::.prof.stats prof.events\n\n\\d .prof\n\npid:id:0\n\n/ record timing of a (f)unction with (n)ame when called with (a)rgs\ntime:{[n;f;a]\n s:.z.p;\n id:.prof.id+:1;\n pid:.prof.pid;\n .prof.pid:id;\n r:f . 
a;\n .prof.pid:pid;\n `prof.events upsert (id;pid;n;.z.p-s);\n r}\n\n/ instrument function (n)ame\ninstr:{[n]\n m:get f:get n;\n system \"d .\",string first m 3;\n n set (')[.prof.time[n;f];enlist];\n system \"d .\";\n n}\n\n/ generate list of directories\ndirs:{(` sv x,) each key[x] except `q`Q`h`j`o`prof}\n\n/ generate list of profileable functions\nlambdas:{x where 100h=(type get@) each x} \n\n/ instrument all functions\ninstrall:{instr each lambdas raze .util.tree each `.,dirs`}\n\n/ generate profile statistics report given an (e)vents table\nstats:{[e]\n c:select sum time,nc:count i by id:pid from e;\n e:e pj update neg time from c;\n s:select sum time*1e-6,n:count i,avg nc by func from e;\n s:update timepc:time%n from s;\n s:`pct xdesc update pct:100f*time%sum time from s;\n s}\n\n\n================================================================================\nFILE: qtips_qtips.q\nSIZE: 116 characters\n================================================================================\n\n\\l util.q\n\\l stat.q\n\\l sim.q\n\\l timer.q\n\\l log.q\n\\l md.q\n\\l opt.q\n\\l net.q\n\\l hist.q\n\\l deriv.q\n\\l prof.q\n\\l hist.q\n\n\n================================================================================\nFILE: qtips_sim.q\nSIZE: 1,253 characters\n================================================================================\n\n\\d .sim\n\n/ generate simulated security paths\n/ (s)igma, (r)ate, (t)ime\npath:{[s;r;t]\n z:.stat.norminv count[t]?1f;\n p:prds .stat.gbm[s;r;deltas[first t;t]] z;\n p}\n\n/ generate price path\n/ security (id), (S)pot, (s)igma, (r)ate, (d)ate/(t)i(m)e\ngenp:{[id;S;s;r;dtm]\n t:abs type dtm;\n tm:(\"np\" t in 12 14 15h)$dtm;\n p:S*path[s;r;tm%365D06];\n c:`id,`time`date[t=14h],`price;\n p:flip c!(id;dtm;p);\n p}\n \n/ round price to nearest tick (up and down)\ntickrnd:{if[99h=type x;x@:y];(y;x+y:x*floor y%x)}\n\n/ randomly delay a timeseries \ndelay:{abs[type x]$x+next deltas[x]*count[x]?1f}\n\n/ randomly throw away elements of list\nfilter:{y asc (neg\"j\"$x*n)?n:count y}\n\n/ generate bid/ask quotes\n/ (t)ick (s)ize, (q)uote (s)ize, (p)rice path\ngenq:{[ts;qs;p]\n q:p,'flip `bp`ap!tickrnd[ts] p `price;\n q:q,'flip `bs`as!1+count[p]?/:2#qs;\n q:`id`time`bs`bp`ap`as#q;\n q}\n\n/ generate trade event\n/ (b)id/ask flag, pct:percent fills\n/ (b)id (s)ize, (b)id (p)rice, (a)sk (p)rice, (a)sk (s)izie\ntrd:{[b;pct;bs;bp;ap;as](ceiling pct*?[b;bs;as];?[b;bp;ap])}\n\n/ generate trade event\n/ (q)uote table and (pct) fill rate\ngent:{[pct;q]\n q:filter[pct] raze (-1_@[;`time;delay] q@) each group q `id;\n t:q,' flip `ts`tp!trd[n?0b;(n:count q)?1f] . q `bs`bp`ap`as;\n t:`id`time`ts`tp#t;\n t}\n\n================================================================================\nFILE: qtips_stat.q\nSIZE: 2,011 characters\n================================================================================\n\n\\d .stat\n\n/ percentile\npctile:{[p;x]x iasc[x] -1+ceiling p*count x}\n\n/ 12 uniforms\nu12:{-6f+sum x cut (12*x)?1f}\n\nskew:{avg[x*x2]%sqrt m2*m2*m2:avg x2:x*x-:avg x}\nkurt:{-3f+avg[x2*x2]%x*x:avg x2:x*x-:avg x}\n\n/ box-muller\nbm:{\n if[count[x] mod 2;'`length];\n x:2 0N#x;\n r:sqrt -2f*log first x;\n theta:2f*acos[-1f]*last x;\n x: r*cos theta;\n x,:r*sin theta;\n x}\n\n/ geometric brownian motion\n/ (s)igma, (r)ate, (t)ime, z:uniform random\n/ user multiplies by (S)pot\ngbm:{[s;r;t;z]exp (t*r-.5*s*s)+z*s*sqrt t}\n\n/ inter quartile range\niqr:{(-) . 
pctile[.75 .25;x]}\n\n/ auto correlation\nac:{x%first x:x{(y#x)$neg[y]#x}/:c-til c:count x-:avg x}\n\n/ horner's method\n/ x:coefficients, y:data\nhorner:{{z+y*x}[y]/[x]}\n\n/ exponentially weighted moving average\n/ x:decay rate, y:data\newma:{first[y](1f-x)\\x*y}\n\n/ central region - normal inverse\ncnorminv:{\n a:-25.44106049637 41.39119773534 -18.61500062529 2.50662823884;\n b: 3.13082909833 -21.06224101826 23.08336743743\n -8.47351093090 1;\n x*:horner[a;s]%horner[b] s:x*x-:.5;\n x}\n\n/ tail region - normal inverse\ntnorminv:{\n a:0.0000003960315187 0.0000002888167364 0.0000321767881768\n 0.0003951896511919 0.0038405729373609 0.0276438810333863\n 0.1607979714918209 0.9761690190917186 0.3374754822726147;\n x:horner[a] log neg log 1f-x;\n x}\n\n/ beasley-springer-moro normal inverse approximation\nnorminv:{\n i:x<.5;\n x:?[i;1f-x;x];\n x:?[x<.92;cnorminv x;tnorminv x];\n x:?[i;neg x;x];\n x}\n\n/ open high low close\nohlc:{`o`h`l`c!(first;max;min;last)@\\:x}\n\n/ count, min, max, median, standard deviation of x\nsummary:{`n`mn`mx`md`dv!(count;min;max;med;sdev)@\\:x}\n\n/ error function\nerf:{\n a: 1.061405429 -1.453152027 1.421413741\n -0.284496736 0.254829592;\n t:1f%1f+0.3275911*abs x;\n t:1f-t*horner[a;t]*exp neg x*x;\n x:t*1 -1f x<0f;\n x}\n\n/ cumulative normal \ncnorm:{.5*1f+erf x%sqrt 2f}\n\n/ newton-raphson\n/ (e)rror tolerance, (f)unction\nnr:{[e;f;x]$[e>abs d:first[r]%last r:f x;x;x-d]}\n\n/ function inversion\n/ (r)oot-finding (f)unction, (f)unction\ninvert:{[rf;f;y;x]rf[(neg y;0f)+f@]x}\n\n\n================================================================================\nFILE: qtips_timer.q\nSIZE: 788 characters\n================================================================================\n\n/ timer jobs\ntimer.job:flip `name`func`time!\"s*p\"$\\:()\ntimer.job,:(`;();0Wp)\n\n\\d .timer\n\n/ merge record(y) into table(x) in reverse chronological order\nmerge:`time xdesc upsert\n\n/ add new timer (f)unction with (n)ame and (t)i(m)e into (t)able\nadd:{[t;n;f;tm]\n r:(n;f;gtime tm);\n t:merge[t;$[0h>type tm;r;reverse flip r]];\n t}\n\n/ run timer job at (i)ndex from (t)able and current time tm\nrun:{[t;i;tm]\n j:t i;\n t:.[t;();_;i];\n r:value (f:j `func),ltime tm;\n if[not null r;t:merge[t;(j `name;f;tm+r)]];\n t}\n\n/ scan timer (t)able for runable jobs\nloop:{[t;tm]\n while[tm>=last tms:t `time;t:run[t;-1+count tms;tm]];\n t}\n\n/ helper function to generate repeating jobs\n/ (d)elay, (e)nd (t)ime, (f)unction, tm:current time\nuntil:{[d;et;f;tm]if[tm/) 0b vs'(x;y)} / XOR\n.finos.util.land:{0b sv (&). 
0b vs'(x;y)} / AND\n.finos.util.lnot:{0b sv not 0b vs x} / NOT\n\n.finos.util.crc32:{.finos.util.lnot(.finos.util.lnot\"i\"$x){.finos.util.xor[.finos.util.shr[8]y]x .finos.util.xor[.finos.util.land[y]255i]0x0 sv 0x000000,\"x\"$z}[{8{$[x mod 2i;.finos.util.xor -306674912i;::].finos.util.shr[1]x}/x}each\"i\"$til 256]/y}\n\n// Run and log garbage collection.\n.finos.util.free:{[].finos.log.debug\"freed \",(string .Q.gc[]),\" bytes\";}\n\n// Date from year/month/day.\n.finos.util.ymd:{\"D\"$\".\"sv\"0\"^-4 -2 -2$string(x;y;z)}'\n\n// Convert epoch seconds to (global) timestamp.\n// @param x number or number vector\n// @return timestamp or timestamp vector\n.finos.util.timestamp_from_epoch:{\"p\"$(\"j\"$1970.01.01D)+1000000000*x}\n\n// Attempt to execute a monadic function.\n// Can be replaced with {(1b;x y)} for debugging.\n// @param x monadic function\n// @param y arg\n// @return pair: (1b;result) or (0b;error)\n.finos.util.try:{@[(1b;)x@;y;(0b;)]}\n\n// Print progress, with peach and try-catch.\n// The weight function is used to measure progress more accurately when\n// different arguments will take significantly different amounts of time.\n// When this is not the case, pass a constant function (e.g. {1}).\n// E.g. to (re/de)compress files, set/unset .z.zd and pass x as hcount, y\n// as {x set get x}, and z as the files.\n// @param x monadic function: weight (e.g. hcount, {1}, etc.)\n// @param y monadic function\n// @param z list: args for y\n// @return dict: z!@[(1b;)y@;;(0b;)]peach z\n.finos.util.progress:{\n f:{[s;f;a;w;i]\n eta:{x+(abs type e)$(e:y-x)%z};\n dll:{\" \"sv(key x){\": \"sv(string x;$[10<>type y;string;]y)}'get x};\n progper:{\n paren:{\"(\",x,\")\"};\n prog:{\"/\"sv(neg count string y)$string(x;y)};\n per:{.Q.fmt[6;2;100*x],\"%\"};\n \" \"sv(prog[x;y];paren per x%y)};\n .finos.log.debug dll`now`position`work`elapsed`eta!(\n p;\n progper[i+1;count a];\n progper[w i;last w];\n p-s;\n eta[s;p:.z.P;(w i)%last w]\n );\n .finos.util.try[f]a i};\n z!f[.z.P;y;z;w:sums x peach z]peach til count z}\n\n\n================================================================================\nFILE: kdb_qdoc_src_qdoc_example_dba.q\nSIZE: 4,484 characters\n================================================================================\n\n//DBA (kdb+ datastore) utilities to manage partitioned and splayed tables.\n//Originally based on open source utils from code.kx.com\n\n///\n//Get all the partitions in a HDB.\n//Support all valid types.\n//@param path to database (hsym)\n//@return empty list if no partitions are found\n.finos.dba.getParts0:{[db]\n if[()~l:key db;'(string db),\": No such file or directory\"];\n f:{[db] d:key db;d@where d like \"[0-9]*\"};\n r:$[`par.txt in key db;\n raze f'[hsym each `$read0` sv db,`par.txt];\n f db];\n if[0=count r;:r];\n c:.finos.dba.partCastChar first r;\n c$string r}\n\n///\n//Get all the partitions in a HDB.\n//Support all valid types.\n.finos.dba.getParts:{[db]\n r:.finos.dba.getParts0 db;\n if[0=count r;'\"No partitions found at: \",string db];\n r}\n\n///\n//Load a splayed table as a dictionary.\n//Normally, `:partitionDir@`tableName would work, but for directories\n// that have dotfiles that are lexicographically earlier than .d,\n// assumptions are violated and q fails to load the table.\n//Also, tables with uneven column lengths can't be flipped.\n//@return table\n.finos.dba.loadSplayedTableDict:{[tableHsym]\n .finos.tc2.argMatch[.z.s;enlist tableHsym;enlist `:/path/to/table];\n files:key tableHsym; / Get filenames in tableDir.\n if[not `.d in 
files;\n '`notTableDir]; / Ensure there's a .d file.\n colNames:tableHsym`.d; / Read the .d file.\n /Return map-on-demand or map-immediate depending on trailing \"/\".\n colVals:$[\"/\"~last string tableHsym;\n x;\n @[tableHsym@;;()]each colNames]; / Return () if cannot read col.\n colNames!colVals}\n\n///\n//Matching column lists like wantTypes~haveTypes yields false positives\n// for empty tables that have compound columns since the empty column\n// would have a type of \" \" rather than \"C\".\n//@param wantTypes List of char.\n//@param haveTypes List of char.\n//@param cnt Number of rows in the table.\n//@return True if wantTypes~haveTypes or all \" \" columns are compound columns in wantTypes.\n.finos.dba.colTypeMatcher:{[wantTypes;haveTypes;cnt]\n matched:wantTypes~haveTypes;\n if[matched|0count haveTypes;\n :0b];\n haveTypesBlank:haveTypes=\" \";\n blankPos:where haveTypesBlank; / Positions with relaxed matching.\n nonblankPos:where not haveTypesBlank; / Positions with exact matching.\n (wantTypes[nonblankPos]~haveTypes[nonblankPos]) & all wantTypes[blankPos] in .Q.A}\n\n///\n//\"Denumerate\" (i.e. resolve enumerations in) object x (recurse if necessary)\n//@param x Object to process (simple type, list, dict or table)\n//@return \"Denumerated\" object.\n.finos.dba.priv.help,: enlist\".finos.dba.denum[list/table/dict]\";\n.finos.dba.denum:{[x]\n $[0=t:abs type x;\n .z.s'[x];\n t<20;\n x; / built-in-types\n t<=77;\n $[-11h=type key enumName:key x;\n value x; / enumeration\n '\"sym list not loaded: \",string enumName];\n t<98;\n .z.s'[x]; / compound list\n t=98;\n @[x;cols x;.z.s]; / table\n t=99;\n !/[.z.s'[(key;get)@\\:x]]; / dict\n '`unknownType]}\n\n///\n//Section: Datastore generation functions\n//Examples:\n//splay trade table, enumerate all symbol cols to `sym and use `sym column as parted column\n//> splayToPartition[`:/d/d1/data;2009.01.01;`sym;`trade]\n//splay trade table as `prints, enumerate all symbol cols to `sym and use `sym column as parted column\n//> splayDataToPartition[`:/d/d1/data;2009.01.01;`sym;`prints;trade]\n//splay trade table, enumerate named `foo`bar symbol columns individually all others against\n//`sym, sort by `foo and apply parted attribute, do not reorder columns\n//> splayToPartition[`:/d/d1/data;2009.01.01;(`foo`bar;`foo;`);`trade]\n//snapshot and copy symbol files and repoint all files to a new a new dated directory\n//copies `:/d/d1/data/sym to `:/d/d1/snapshot/sym.2009.01.01 (today's date) and\n//creates a symlink from `:/d/d1/data/sym to the snapshot\n//> snapshotSym[`:/d/d1/data;`:/d/d1/snapshot]\n.finos.dba.priv.help,: \"--- Datastore production functions ---\";\n\n///\n//Load (reload) all sym files\n//@param db DB root path (hsym)\n//@return list of sym variables\n.finos.dba.loadSyms:{[db]\n db:.finos.dba.pathAsHsym db;\n /find all sym files\n s:f where (f:key db) like string[.finos.dba.priv.SYMPREFIX],\"*\";\n /load all sym files\n s set'get each ` sv'db,'s}\n\n================================================================================\nFILE: kdb_qdoc_src_qtabledoc_example_bovespa_schema.q\nSIZE: 668 characters\n================================================================================\n\n// @table cqs_bbo\n// @owner brunk\n// @src rtdev feed journals at /path/to/feed/YYYY/MM/cqs_mdelta.*\n// @desc Per-exchange best bid and offer details for listed underliers.\n// @note Table is a merged view of redundant a-side and b-side (per-line) journal files.\n// @seealso cqs_nbbo, cts_prints\n// @col cqsID Stock ticker\n// @col line Cqs line 
id\n// @col seq Per-line sequence number\n// @col exchangeTime Exchange time\n// @col exch Exchange\n// @col bp Bid price\n// @col bs Bid size\n// @col ap Ask price\n// @col as Ask size\n// @col qc Quote condition\n// @col rcvTimeA A-side receive time. May be null\n// @col rcvTimeB A-side receive time. May be null\n\nt:([]c1:();c2:())\n\n\n================================================================================\nFILE: kdb_tests_dep_a_a1_module.q\nSIZE: 161 characters\n================================================================================\n\nif[not a2Loaded; '\"a2 not loaded but is a dependency of this module\"];\n-1\"this is a/a1\";\na1Loaded:1b;\n.finos.dep.include\"a1s1.q\";\n.finos.dep.loadScript\"a1s2.q\";\n\n\n================================================================================\nFILE: kdb_tests_dep_test1.q\nSIZE: 1,175 characters\n================================================================================\n\n.test.root:.finos.dep.cutPath[.finos.dep.currentFile[]]0;\n.finos.dep.resolvers[`test]:{\n if[x[`name]~\"a/a5\"; '\"sorry, wrong resolver\"];\n root:.test.root,\"/\",x[`name];`projectRoot`scriptPath`libPath!(root;\"\";\"\")};\n.finos.dep.resolvers[`test2]:{root:.test.root,\"/test2/\",x[`name];`projectRoot`scriptPath`libPath!(root;\"\";\"\")};\n\n.finos.dep.regModule[\"a/a1\";\"1.0\";system[\"cd\"],\"/a/a1\";\"\";\"\"];\n.finos.dep.loadModule\"a/a1\";\nif[not a1Loaded; '\"a1 not loaded\"];\nif[not a2Loaded; '\"a2 not loaded\"];\nif[not a1s1Loaded; '\"a1s1 not loaded\"];\nif[not a1s2Loaded; '\"a1s2 not loaded\"];\n.finos.dep.loadScriptIn[\"a/a1\";\"a1s3.q\"];\nif[not a1s3Loaded; '\"a1s3 not loaded\"];\n.finos.dep.loadFromRecord`name`version`resolver!(\"a/a3\";\"0.0\";`test);\nif[not a3Loaded; '\"a3 not loaded\"];\n.finos.dep.loadFromRecord`name`version`resolver!(\"a/a4\";\"0.0\";\"test\");\nif[not a4Loaded; '\"a4 not loaded\"];\n\na5Loaded:0b;\n.finos.dep.loadFromRecord`name`version`resolver`override!(\"a/a5\";\"0.0\";\"test2\";1b);\nif[a5Loaded; '\"a5 loaded when it shouldn't have\"];\n.finos.dep.loadFromRecord`name`version`resolver!(\"a/a5\";\"0.0\";\"test\");\nif[not a5Loaded; '\"a5 not loaded\"];\nif[a5Loc<>2; '\"a5 loaded from wrong location\"];\n\n\n================================================================================\nFILE: kdb_tests_inithook_inithook1.q\nSIZE: 989 characters\n================================================================================\n\n\\l timer/timer.q\n\\l inithook/inithook.q\n\n//Asynchronous inithook example.\n//Suppose we want to connect to two different services and run some code when both connections succeed.\n//In this case we can use two separate inithook symbols to indicate which connection is done and then\n//have an inithook depending on both so that it only runs when both provide calls are done.\n\ntpConnected:{\n -1\"TP connected\";\n .finos.init.provide`tpConnected;\n };"}}},{"rowIdx":64,"cells":{"text":{"kind":"string","value":"Summarize and say¶\nAnalyze a dictionary of results; map between dictionaries\nA lambda for the Look & Say sequence composes with desc\nand the Do iterator to produce the Summarize & Say sequence.\nA million integers in string form hashes with group\nand desc\nto a dictionary of 8002 unique seeds.\nA dictionary of sequences, keyed by the unique seeds:\ncomposition count distinct@\nmeasures the iterations needed to converge; max\nfinds the slowest sequences.\nReverse lookup on a dictionary finds their unique seeds; indexing the sequence and seed dictionaries with them 
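//A hypothetical continuation of the example above (added sketch): the second
//connection gets its own callback, mirroring tpConnected, and provides a second
//symbol. Only the provide side is shown - the name hdbConnected is illustrative,
//and the call that registers an inithook depending on both symbols is part of
//inithook.q and is not included in this excerpt.
hdbConnected:{
    -1"HDB connected";
    .finos.init.provide`hdbConnected;
    };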
finds the slowest sequences and all the seeds that produce them.\nSeven code lines, no loops, no counters, no control structures.\nFind for Sum & Say sequences all the seed values up to a million that take the most iterations to converge\nSee Rosetta Code for the task details.\nNote that in this context a series has converged when a value in the series is repeated.\nStart with the simple Look and Say sequence.\nq)ls:{raze(string 1_ deltas d,count x),'x d:where differ x} / look & say\nq)ls string 0\n\"10\"\nq)10 ls\\string 0\n,\"0\"\n\"10\"\n\"1110\"\n\"3110\"\n\"132110\"\n\"1113122110\"\n\"311311222110\"\n\"13211321322110\"\n\"1113122113121113222110\"\n\"31131122211311123113322110\"\n\"132113213221133112132123222110\"\nThe Look and Say sequence (or “Morris sequence”) grows indefinitely. The Summarize and Say variant converges.\nq)sumsay:ls desc@ / summarize & say\nq)15 sumsay\\string 0\n,\"0\"\n\"10\"\n\"1110\"\n\"3110\"\n\"132110\"\n\"13123110\"\n\"23124110\"\n\"1413223110\"\n\"1423224110\"\n\"2413323110\"\n\"1433223110\"\n\"1433223110\"\n\"1433223110\"\n\"1433223110\"\n\"1433223110\"\n\"1433223110\"\nUnique seeds¶\nBecause sumsay\nsorts its argument digits, variations in order produce the same sequence.\nq)sumsay each string`123`321\n\"131211\"\n\"131211\"\nSo it is not necessary to construct a sequence for every one of a million seeds.\nq)seeds:group desc each string til 1000000\nA dictionary. Its key is the unique seeds; its value their permutations.\nq)seeds string`21`9721`66633\n12 21\n1279 1297 1729 1792 1927 1972 2179 2197 2719 2791 2917 2971 7129 7192 7219 72..\n33666 36366 36636 36663 63366 63636 63663 66336 66363 66633\nSequences¶\nConstruct a 30-term sequence for each seed.\nseq:(key seeds)!30 sumsay\\'key seeds\nA dictionary of seeds and their sequences.\nIterator syntax\nAn iterator is a unary operator with postfix syntax. It takes a single argument, on its left.\nAbove, sumsay\nis the argument of \\\n(the Scan form of) the Do iterator.\nApplying Do to sumsay\nderives the function sumsay\\\n.\nWe earlier saw sumsay\\\napplied as a binary, with infix syntax and 15 as a left argument. Here the left argument is 30: thirty successive applications of sumsay\n.\nWith no right argument, 30 sumsay\\\nis a projection of sumsay\\\non its left argument 30, forming a unary that will apply sumsay\nto its argument 30 times.\nThat projection 30 sumsay\\\nis the left argument of the Each operator '\n.\nThe unary derived function 30 saysum\\'\napplies 30 saysum\\\nto each item of its argument.\nBonus points\nThe repetition in (key seeds)!30 sumsay\\'key seeds\ncan be removed by another iterator.\nThe expression n f/x\napplies f\nsuccessively n\ntimes to x\n. Its Scan form n f\\x\ndoes the same, but returns the result of each application. The result has n+1\nitems, corresponding to til n+1\napplications. The first item of the result corresponds to 0 applications; i.e. the original argument.\n0 f/\nand 0 f\\\nare identity functions for any f\n.\nWhich means… 1 f\\x\n<=> (x;f x)\n.\nq)1 reverse\\1011001b\n1011001b\n1001101b\nThe Apply operator applies a function to a list of its arguments. So for some f\nwe can define\nq)(~) . 1 reverse\\ \"madamimadam\"\n1b\nIn our case f\nis {30 sumsay\\'x}\n.\nseq:(!) . 
1{30 sumsay\\'x}\\key seeds\nConvergence¶\nCount the iterations to convergence: how many does the slowest take?\nq)max its:(count distinct@)each seq\n21\nAbove, (count distinct@)\nis a composition, equivalent to {count distinct x}\nSelecting seeds¶\nWhat unique seeds had sequences that took 21 steps to converge?\nits\nwas defined by applying (count distinct@)each\nto seq\n, so its\nis a dictionary with the same keys, and\nq)where its=max its\n\"9900\"\nis a dictionary reverse lookup, returning the keys with values of 21.\nJust that one, then. But that unique seed corresponds to several seeds:\nq)raze seeds where its=max its\n9009 9090 9900\nAbove, the reverse dictionary lookup, then the results looked up in dictionary seeds\n.\nScript¶\nPut it all together.\nls:{raze(string 1_ deltas d,count x),'x d:where differ x} / look & say\nsumsay:ls desc@ / summarize & say\nseeds:group desc each string til 1000000 / seeds for million integers\nseq:(key seeds)!30 sumsay\\'key seeds / sequences for unique seeds\ntop:max its:(count distinct@)each seq / count iterations\n/ report results\nrpt:{1 x,\": \",y,\"\\n\\n\";}\nrpt[\"Seeds\"]\" \"sv string raze seeds where its=top / all forms of top seed/s\nrpt[\"Iterations\"]string top\nrpt[\"Sequence\"]\"\\n\\n\",\"\\n\"sv raze seq where its=top\nOutput:\nSeeds: 9009 9090 9900\nIterations: 21\nSequence:\n9900\n2920\n192210\n19222110\n19323110\n1923123110\n1923224110\n191413323110\n191433125110\n19151423125110\n19251413226110\n1916151413325110\n1916251423127110\n191716151413326110\n191726151423128110\n19181716151413327110\n19182716151423129110\n29181716151413328110\n19281716151423228110\n19281716151413427110\n19182716152413228110\n19281716151413427110\n19182716152413228110\n19281716151413427110\n19182716152413228110\n19281716151413427110\n19182716152413228110\n19281716151413427110\n19182716152413228110\n19281716151413427110\n19182716152413228110\nReview¶\nDefined function ls\nfor the Look & Say sequence; sumsay\nfor the Summarize & Say sequence is just he composition ls desc@\n.\nTook a million integers as strings and used group\nto hash them into an 8002-entry dictionary keyed by their digits sorted in descending order.\nMade a dictionary of 30-item sequences for the unique seeds, using the Do and Each iterators.\nUsed composition count distinct@\nand each\nto count the number of iterations required in each sequence before it converged, and aggregator max\nto measure the longest.\nUsed reverse lookup on the dictionary of iterations to find the unique seeds for the slowest-converging sequences, then mapped them to the corresponding original seeds and to the sequences themselves. (Turns out there is just the one.)"}}},{"rowIdx":65,"cells":{"text":{"kind":"string","value":"// @kind function\n// @category preprocessing\n// @desc Transform a list of integers based on a previously generated\n// label encoding\n// @param data {int[]} Data to be reverted to original representation\n// @param map {dictionary} Maps true representation to associated integer or\n// the return from .ml.labelEncode.fit\n// @return {symbol[]} Integer values of `data` replaced by their appropriate \n// 'true' representation. 
Values that do not appear in the mapping supplied\n// by `map` are returned as null values \napplyLabelEncode:{[data;map]\n if[99h<>type map;'\"Input must be a dictionary\"];\n $[`modelInfo`transform~key map;map[`modelInfo]?;map?]data\n }\n\n// @kind function\n// @category preprocessing\n// @desc Break specified time columns into constituent components\n// @param tab {table} Contains time columns\n// @param timeCols {symbol[]} Columns to apply encoding to, if set to :: \n// all columns with date/time types will be encoded\n// @return {dictionary} All time or date types broken into labeled versions\n// of their constituent components\ntimeSplit:{[tab;timeCols]\n if[(::)~timeCols;timeCols:i.findCols[tab;\"dmntvupz\"]];\n timeDict:i.timeDict/:[tab]timeCols,:();\n flip(timeCols _ flip tab),raze timeDict\n }\n\n\n================================================================================\nFILE: ml_ml_util_utilities.q\nSIZE: 6,275 characters\n================================================================================\n\n// util/utilities.q - Utilities library\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Includes range, arange, combs, eye, iMax, iMin,\n// linearSpace, shape, trainTestSplit, tab2df,\n// df2tabTimezone, df2tab\n\n\\d .ml\n\n// @kind function\n// @category utilities\n// @desc Range of values\n// @param array {number[]} A numerical array \n// @returns {float} Range of its values\nrange:{[array]\n max[array]-min array\n }\n\n// @kind function\n// @category utilities\n// @desc Evenly-spaced values\n// @param start {number} Start of the interval (inclusive)\n// @param end {number} End of the interval (non-inclusive)\n// @param step {number} Spacing between values \n// @return {number[]} A vector of evenly-spaced values between start and end\n// in steps of length `step`\narange:{[start;end;step]\n start+step*til 0|ceiling(end-start)%step\n }\n\n// @kind function\n// @category utilities\n// @desc Unique combinations of a vector or matrix\n// @param n {int} Number of values required for combinations\n// @param degree {int} Degree of the combinations to be produced\n// @return {int[]} Unique combinations of values from the data \ncombs:{[n;degree]\n flip(degree-1)i.combFunc[n]/enlist til n\n }\n\n// @kind function\n// @category utilities\n// @desc Create identity matrix \n// @param n {int} Width/height of identity matrix\n// @return {int[]} Identity matrix of height/width n\neye:{[n]\n @[n#0.;;:;1.]each til n\n }\n\n// @kind function\n// @category utilities\n// @desc Index of the first occurance of the maximum value in a list\n// @param array {number[]} Array of values \n// @return {number} The index of the maximum element of the array\niMax:{[array]\n array?max array\n }\n\n// @kind function\n// @category utilities\n// @desc Index of minimum element of a list\n// @param array {number[]} Array of values \n// @return {number} The index of the minimum element of the array\niMin:{[array]\n array?min array\n }\n\n// @kind function\n// @category utilities\n// @desc Create an array of evenly-spaced values\n// @param start {number} Start of the interval (inclusive)\n// @param end {number} End of the interval (non-inclusive)\n// @param n {int} How many spaces are to be created\n// @return {number[]} A vector of `n` evenly-spaced values between\n// start and end\nlinearSpace:{[start;end;n]\n start+til[n]*(end-start)%n-1\n }\n\n// @kind function\n// @category utilities\n// @desc Shape of a matrix\n// @param matrix {number[]} Matrix of values\n// @return {number[]} Its shape as a list of 
dimensions\nshape:{[matrix]\n -1_count each first scan matrix\n }\n\n// @kind function\n// @category utilities\n// @desc Split data into training and test sets\n// @param data {any[]} Matrix of input values\n// @param target {any[]} A vector of target values the same count as data\n// @param size {float[]} Percentage size of the testing set\n// @return {dictionary} Contains the data matrix and target split into a\n// training and testing set\ntrainTestSplit:{[data;target;size]\n dictKeys:`xtrain`ytrain`xtest`ytest;\n n:count data;\n split:(0,floor n*1-size)_neg[n]?n;\n dictVals:raze(data;target)@\\:/:split;\n dictKeys!dictVals\n }\n\n// @kind function\n// @category utilities\n// @desc Convert q table to Pandas dataframe\n// @param tab {table} A q table\n// @return {<} a Pandas dataframe\ntab2df:{\n if[.pykx.loaded;:.pykx.eval[\"lambda x:x\"].pykx.topd x];\n keyTab:keys x;\n c:cols x:0!x;\n c1:i.findCols[x;\"bxhijef\"];\n df:i.pandasDF[{$[count y;y!x y;()!()]}[x;c1]];\n cls:c except c1;\n // Early exit if only numeric columns existed\n if[0=count cls;:df];\n updTab:@[flip x;i.findCols[x;\"c\"];enlist each];\n // Convert temporal columns to timestamps and\n // assign as datetime64[ns] columns\n timeCols:i.findCols[x;\"pmdznuvt\"];\n timeTab:?[updTab;();0b;timeCols!timeCols];\n timeTab:@[timeTab;timeCols;{(\"p\"$@[4#+[\"d\"$0];-16+type x]x)-\"p\"$1970.01m}];\n convfn:{x[`:assign][z pykw i.npArray[y z;\"datetime64[ns]\"]]};\n df:convfn/[df;count[timeCols]#enlist timeTab;timeCols];\n // Convert symbols to strings (char vector conversions are faster)\n // otherwise assign the underlying datatype\n convfn:{x[=;z;enlist $[11h=type dat:y z;string dat;dat]]}[df;updTab];\n convfn each cls except timeCols;\n // Reorder the columns based on initial input\n df:df[`:reindex][`columns pykw c];\n // Index the table if originally keyed\n $[count keyTab;\n df[`:set_index]keyTab;\n df\n ]\n }\n\n// @kind function\n// @category utilities\n// @desc Convert a pandas dataframe containing datetime timezones and\n// datetime objects (datetime.datetime, datetime.time) to a q table\n// @param tab {<} An embedPy representation of a Pandas dataframe\n// @param local {boolean} Indicates if timezone objects are to be converted\n// to local time (1b) or UTC (0b)\n// @param qObj {boolean} Indicates if python datetime.date/datetime.time\n// objects are returned as q (1b) or foreign objects (0b)\n// @return {<} a q table\ndf2tabTimezone:{[tab;local;qObj]\n index:$[enlist[::]~tab[`:index.names]`;0;tab[`:index.nlevels]`];\n tab:$[index;tab[`:reset_index][];tab];\n numpyCols:csym tab[`:columns.to_numpy][]`;\n if[`index in numpyCols;numpyCols:numpyCols except`index;index-:1];\n dataArgs:enlist[`exclude]!enlist`float32`datetime`datetimetz`timedelta;\n dict:tab[`:select_dtypes][pykwargs dataArgs][`:to_dict;`list]`;\n dateTimeData:tab[`:select_dtypes][`include pykw`datetime];\n dict,:i.dateConvert dateTimeData;\n timeDeltaData:tab[`:select_dtypes][`include pykw`timedelta];\n dict,:i.dateDict[timeDeltaData]+\"n\"$0;\n timezoneData:tab[`:select_dtypes][`include pykw`datetimetz];\n dict,:i.timezoneConvert[timezoneData;local];\n float32Data:tab[`:select_dtypes][`include pykw`float32][`:to_dict;`list]`;\n dict,:i.float32Convert[float32Data;local];\n // Check if the first value in columns are foreign\n foreign:where 112h=type each first each value dict;\n if[0k:n-j:1+last vals;\n sumVals:-1_sums@[(1+sum k i)#1;0,sums k i;:;(j,0)-0,-1+j+k i];\n (vals@\\:where k),enlist sumVals\n }\n\n// @private\n// @kind function\n// @category 
utilitiesUtility\n// @desc Convert python float32 function to produce correct precision\n// Note check for x~()!() which is required in cases where underlying \n// representation is float32 for dates/times\n// @param data {float[]} Floating point data from the dataFrame\n// @param local {boolean} Indicates if timezone objects are to be converted\n// to local time (1b) or UTC (0b)\n// @return {float[]} Python float32 objects converted to correct precision \n// in kdb\ni.float32Convert:{[data;local]\n $[(local~0b)|data~()!();\n data;\n ?[0.000001>data;\"F\"$string data;0.000001*floor 0.5+data*1000000]\n ]\n }\n\n// @private\n// @kind function\n// @category utilitiesUtility\n// @desc Convert datetime.timezone types to kdb+ date/time\n// @param tab {<} Contains columns with datetime timezone objects\n// @param local {boolean} Indicates if timezone objects are to be converted\n// to local time (1b) or UTC (0b)\n// @return {dictionary} Datetime objects are converted to kdb date/time \n// objects\ni.timezoneConvert:{[tab;local]\n $[local~0b;\n i.dateConvert tab;\n \"P\"$neg[6]_/:'cstring tab[`:astype;`str][`:to_dict;<;`list]\n ]\n }\n\n// @private\n// @kind function\n// @category utilitiesUtility\n// @desc Convert datetime/datetimetz objects to kdb timestamp\n// @param dataFrame {<} Pandas dataFrame containing datetime data\n// @return {dictionary} Datetime objects are converted to timestamps in kdb\ni.dateConvert:{[dataFrame]\n nullCols:where any each dataFrame[`:isnull;::][`:to_dict;<;`list];\n $[count nullCols;\n [npCols:csym dataFrame[`:columns.to_numpy][]`;\n dropCols:dataFrame[`:drop;npCols except nullCols;`axis pykw 1];\n nullData:\"P\"$cstring dropCols[`:astype;`str][`:to_dict;<;`list];\n nonNullData:i.dateDict dataFrame[`:drop;nullCols;`axis pykw 1];\n nullData,nonNullData+1970.01.01D0\n ];\n i.dateDict[dataFrame]+1970.01.01D0\n ]\n }\n\n// @private\n// @kind function\n// @category utilitiesUtility\n// @desc Convert datetime data to integer representation\n// @param data {<} Pandas dataframe object containing timedelta objects\n// @return {dictionary} Datetime objects are converted to integer values\ni.dateDict:{[data]\n data[`:astype;`int64][`:to_dict;<;`list]\n }\n\n// @private\n// @kind function\n// @category utilitiesUtility\n// @desc Convert datetime.date/time objects to kdb+ date/time\n// @param dateTime {<} Python datetime object\n// @param qObj {boolean} Indicates if python datetime.date/datetime.time \n// objects\n// are returned as q (1b) or foreign objects (0b)\n// @return {datetime;<} kdb date/time format or embedpy object\ni.dateTimeConvert:{[dateTime;qObj]\n $[qObj~0b;\n dateTime;\n [firstVal:.p.wrap first dateTime;\n // Convert datetime.time/date to iso string format and convert to kdb+\n // otherwise return foreign\n $[i.isInstance[firstVal;i.dateTime`:time];\n i.isoFormat[\"N\"]each dateTime;\n i.isInstance[firstVal;i.dateTime`:date];\n i.isoFormat[\"D\"]each dateTime;\n dateTime\n ]\n ]\n ]\n }\n\n// @private\n// @kind function\n// @category utilitiesUtility\n// @desc Cast python datetime object to a kdb datatype\n// @param cast {string} Data type in which python object will be cast to\n// @param dateTime {<} Python datetime object\n// @return {any} Python datetime object casted to kdb datatype \ni.isoFormat:{[cast;dateTime]\n cast$.p.wrap[dateTime][`:isoformat][]`\n }"}}},{"rowIdx":66,"cells":{"text":{"kind":"string","value":"Timezones (TZ) and Daylight Savings Time (DST)¶\nQ has two built-in functions ltime\nand gtime\nwhich can be used to get the UTC time or local 
time according to the TZ shell environment setting.\nOne solution for more comprehensive timezone calculations is to have a table that contains the timezones, their UTC offsets, and the datetime of any DST changes. e.g.\ntimezoneID gmtDateTime gmtOffset localDateTime\n----------------------------------------------------------------------------------------------\nEurope/Zurich 2006.03.26D01:00:00.000000000 0D02:00:00.000000000 2006.03.26D03:00:00.000000000\nEurope/Zurich 2006.10.29D01:00:00.000000000 0D01:00:00.000000000 2006.10.29D02:00:00.000000000\nEurope/Zurich 2007.03.25D01:00:00.000000000 0D02:00:00.000000000 2007.03.25D03:00:00.000000000\nEurope/Zurich 2007.10.28D01:00:00.000000000 0D01:00:00.000000000 2007.10.28D02:00:00.000000000\nEurope/Zurich 2008.03.30D01:00:00.000000000 0D02:00:00.000000000 2008.03.30D03:00:00.000000000\nEurope/Zurich 2008.10.26D01:00:00.000000000 0D01:00:00.000000000 2008.10.26D02:00:00.000000000\nEurope/Zurich 2009.03.29D01:00:00.000000000 0D02:00:00.000000000 2009.03.29D03:00:00.000000000\nEurope/Zurich 2009.10.25D01:00:00.000000000 0D01:00:00.000000000 2009.10.25D02:00:00.000000000\nEurope/Zurich 2010.03.28D01:00:00.000000000 0D02:00:00.000000000 2010.03.28D03:00:00.000000000\nEurope/Zurich 2010.10.31D01:00:00.000000000 0D01:00:00.000000000 2010.10.31D02:00:00.000000000\nEurope/Zurich 2011.03.27D01:00:00.000000000 0D02:00:00.000000000 2011.03.27D03:00:00.000000000\nEurope/Zurich 2011.10.30D01:00:00.000000000 0D01:00:00.000000000 2011.10.30D02:00:00.000000000\nEurope/Zurich 2012.03.25D01:00:00.000000000 0D02:00:00.000000000 2012.03.25D03:00:00.000000000\nand then, using three functions, where t\nis the timezone table:\nlg:{[tz;z] exec gmtDateTime+gmtOffset from aj[`timezoneID`gmtDateTime;([]timezoneID:tz;gmtDateTime:z);t]};\ngl:{[tz;z] exec localDateTime-gmtOffset from aj[`timezoneID`localDateTime;([]timezoneID:tz;localDateTime:z);t]};\nttz:{[d;s;z]lg[d;gl[s;z]]}\none can transform between local time and UTC and vice-versa, for any specified timezone.\nq)lg[enlist `$\"Europe/Zurich\";enlist 2010.03.28D01:00:00.000]\n,2010.03.28D03:00:00.000000000\nq)gl[enlist `$\"Europe/Zurich\";enlist 2010.03.28D03:00:00.000]\n,2010.03.28D01:00:00.000000000\nand local times between timezones\nq)show ttz[enlist `$\"America/New_York\";enlist `$\"Europe/Zurich\";enlist 2010.03.28D03:00:00.000]\n,2010.03.27D21:00:00.000000000\nq)show ttz[enlist `$\"America/New_York\";enlist `$\"Europe/Zurich\";enlist .z.P]\n,2010.01.20D07:00:08.088411000\nGenerating Reference Data¶\nVia TimeZoneDB¶\nTimeZoneDB provides a .csv\nfile generated from IANA tz database, which can be downloaded from https://timezonedb.com/download.\nPlease check any current license details from https://timezonedb.com;\nThe time_zone.csv\ncan be loaded as follows:\nq)t:flip `timezoneID`gmtDateTime`gmtOffset`dst!(\"S JIB\";csv)0:`:time_zone.csv\nq)delete from `t where gmtDateTime>=10170056837; / remove any unix timestamps greater than our max timestamp\nq)update gmtDateTime:12h$-946684800000000000+gmtDateTime*1000000000 from `t; / change datatype timestamp\nq)update gmtOffset:16h$gmtOffset*1000000000 from `t; / change datatype to timespan\nq)update localDateTime:gmtDateTime+gmtOffset from `t; / create localtime when change occurred\nq)`gmtDateTime xasc `t;\nq)update `g#timezoneID from `t;\nVia Java util¶\nThe timezone information can be generated using a brute-force approach in Java, and written to a CSV file using:\nKxSystems/cookbook/timezones/WriteTzInfo.java\nDate Period\nThe above Java code creates times between 
years 1900 and 2100, and can be edited for different date periods\nImport into kdb+ and save to a binary file using\nq)t:(\"SPJ\";enlist \",\")0:`:tzinfo.csv;\nq)update gmtOffset:`timespan$1000000000*gmtOffset from `t;\nq)update localDateTime:gmtDateTime+gmtOffset from `t;\nq)`gmtDateTime xasc `t;\nq)update `g#timezoneID from `t;\nq)`:tzinfo set t; / save file for easy distribution\nA previously generated CSV can be found at:\nKxSystems/cookbook/timezones/tzinfo.zip\n– zipped tzinfo.csv\nVia Unix zdump¶\nAlternatively you can use the zdump\nUnix command.\nValid timezones supported by the system can be found in /usr/share/zoneinfo/\ne.g.\nq)system\"zdump -v Africa/Cairo\"\n\"Africa/Cairo Fri Dec 13 20:45:52 1901 UTC = Fri Dec 13 22:45:52 1901 EET isdst=0\"\n\"Africa/Cairo Sat Dec 14 20:45:52 1901 UTC = Sat Dec 14 22:45:52 1901 EET isdst=0\"\n\"Africa/Cairo Sun Jul 14 21:59:59 1940 UTC = Sun Jul 14 23:59:59 1940 EET isdst=0\"\n\"Africa/Cairo Sun Jul 14 22:00:00 1940 UTC = Mon Jul 15 01:00:00 1940 EEST isdst=1\"\n...\nfor example, to load a table based on info from Africa/Cairo\n:\nt:([] timezoneID:(); gmtDateTime:(); gmtOffset:(); localDateTime:(); abbr:(); dst:());\nmon:`Jan`Feb`Mar`Apr`May`Jun`Jul`Aug`Sep`Oct`Nov`Dec!(\"01\";\"02\";\"03\";\"04\";\"05\";\"06\";\"07\";\"08\";\"09\";\"10\";\"11\";\"12\")\nuptz:{[x;y]\nprepend:{if[1=count x;:\"0\",x];x};\nx:\" \" vs ssr[x;\" \";\" \"];\nt1:12h$value \"\" sv (x[5];enlist\".\";mon`$x[2];enlist\".\";prepend[x[3]];enlist\"D\";x[4];\".000000000\");\nt2:12h$value \"\" sv (x[12];enlist\".\";mon`$x[9];enlist\".\";prepend[x[10]];enlist\"D\";x[11];\".000000000\");\ny upsert (`$x[0];t1;t2-t1;t2;`$x[13];1h$parse @[\"=\" vs x[14];1]);\n};\npoptz:{[x;y]uptz[;`t] each system \"zdump -v \",x;};\npoptz[\"Africa/Cairo\";`t];\n\nUnicode¶\nUnicode text can be stored in symbol, byte and character datatypes.\nSince the data is simply a sequence of bytes, any Unicode format can be stored. However, it is best to use an encoding such as UTF-8 or GBK that extends 7-bit ASCII, i.e. a single byte in the range 00\n–7f\nmeans the same thing in ASCII. kdb+ will load a script with such encoding, but it will not load other formats. Note that if using these encodings, avoid having a byte-order-mark prefix on the data.\nThe q language itself uses only 7-bit ASCII. For example, the statement 2+3\nshould be given as the three decimal bytes 50 43 51, as in:\nq)`char$50 43 51\n\"2+3\"\nq)value `char$50 43 51\n5\nFixed-width Unicode formats cannot be used, since for example, in UTF-16, 2+3\nwould be the six decimal bytes 50 0 43 0 51 0, and q does not recognize this:\nq)value `char$50 0 43 0 51 0\n'char\nThe display console should have the matching code page set or you will not be able to view the data correctly. e.g. 
if you store in UTF-8 format, ensure that your code page for the display is also UTF-8.\nTable and column names should be plain ASCII.\nFor example, the following has Chinese characters in symbol and character columns:\nsym:`apples`bananas`oranges\nname:(`$\"蘋果\";`$\"香蕉\";`$\"橙\")\ntext:(\"每日一蘋果, 醫生遠離我\";\"香蕉船是一道可口的甜品\";\"從佛羅里達州來的鮮橙很甜美\")\nt:([]sym;name;text)\nYou can work with this table as usual, but note that the q console displays the text entries as their octal character numbers:\nq)select sym,name from t\nsym name\n--------------\napples 蘋果\nbananas 香蕉\noranges 橙\nq)select from t where name=`$\"香蕉\"\nsym name text ..\n---------------------------------------------------------..\nbananas 香蕉 \"\\351\\246\\231\\350\\225\\211\\350\\210\\271\\346\\..\nDisplay with -1\nto show formatted text:\nq)-1 text 0;\n每日一蘋果, 醫生遠離我\nExample assignments using the C interface:\nint main(){\nint c=khp(\"localhost\",5001);\nk(c,\"set\",ks(\"a\"),kp(\"香蕉\"),(K)0);\nk(c,\"set\",ks(\"b\"),kp(\"\\351\\246\\231\\350\\225\\211\"),(K)0);\nclose(c);\n}"}}},{"rowIdx":67,"cells":{"text":{"kind":"string","value":"// @private\n//\n// @overview\n// Delete all folders relating to an experiment or to 1/all versions of a model\n//\n// @param config {dict} Configuration information provided by the user\n// @param objectType {symbol} ``` `experiment `allModels or `modelVersion```\n//\n// @return {null}\nregistry.util.delete.object:{[config;objectType]\n // Required variables\n folderPath:config`folderPath;\n experimentName:config`experimentName;\n modelName:config`modelName;\n version:config`version;\n // Generate modelStore and object paths based on objectType\n paths:registry.util.getObjectPaths\n [folderPath;objectType;experimentName;modelName;version;config];\n modelStorePath:paths`modelStorePath;\n checkPath:objectPath:paths`objectPath;\n objectString:1_string objectPath;\n // Check if object exists before attempting to delete\n if[\"*\"~last objectString;checkPath:hsym`$-1_objectString];\n if[emptyPath:()~key checkPath;\n logging.info\"No artifacts created for \",objectString,\". 
Unable to delete.\"\n ];\n // Where clause relative to each object type\n objectCondition:registry.util.delete.where\n [experimentName;modelName;version;objectType];\n whereClause:enlist(not;objectCondition);\n // Update the modelStore with remaining models\n newModels:?[modelStorePath;whereClause;0b;()];\n modelStorePath set newModels;\n // Delete relevant folders\n if[not emptyPath;\n logging.info\"Removing all contents of \",objectString;\n registry.util.delete.folder objectPath\n ];\n // Load new modelStore\n load modelStorePath;\n }\n\n// @private\n//\n// @overview\n// Functional where clause required to delete objects from the modelStore\n//\n// @param experimentName {string} Name of experiment\n// @param modelName {string} Name of model\n// @param version {long[]} Model version number (major;minor)\n// @param objectType {symbol} ``` `experiment `allModels or `modelVersion```\n//\n// @return {(fn;symbol;symbol)} Where clause in functional form\nregistry.util.delete.where:{[experimentName;modelName;version;objectType]\n $[objectType~`allModels;\n (like;`modelName;modelName);\n objectType~`modelVersion;\n (&;(like;`modelName;modelName);({{x~y}[y]'[x]};`version;version));\n (like;`experimentName;experimentName)\n ]\n }\n\n\n================================================================================\nFILE: ml_ml_registry_q_main_utils_get.q\nSIZE: 10,695 characters\n================================================================================\n\n// get.q - Utilties relating to retrieval of objects from the registry\n// Copyright (c) 2021 Kx Systems Inc\n//\n// @overview\n// Utilities for object retrieval within the registry\n//\n// @category Model-Registry\n// @subcategory Utilities\n//\n// @end\n\n\\d .ml\n\n// @private\n//\n// @overview\n// Retrieve a model from the registry, this is a wrapped version of\n// this functionality to facilitate protected execution in the case\n// that issues arise with retrieval and loading of a model from\n// cloud providers or an on-prem location\n//\n// @param storage {symbol} The form of storage from which the model is\n// being retrieved\n// @param experimentName {string|null} The name of an experiment from which\n// to retrieve a model, if no modelName is provided the newest model\n// within this experiment will be used. 
If neither modelName or\n// experimentName are defined the newest model within the\n// \"unnamedExperiments\" section is chosen\n// @param modelName {string|null} The name of the model to be retrieved\n// in the case this is null, the newest model associated with the\n// experiment is retrieved\n// @param version {long[]|null} The specific version of a named model to retrieve\n// in the case that this is null the newest model is retrieved (major;minor)\n// @param config {dict} Configuration containing information surrounding\n// the location of the registry and associated files\n// @param optionalKey {sym} Optional symbol for loading model\n//\n// @return {dict} The model and information related to the\n// generation of the model\nregistry.util.get.model:{[storage;experimentName;modelName;version;config;optionalKey]\n // Retrieve the model from the store meeting the user specified conditions\n modelDetails:registry.util.search.model[experimentName;modelName;version;config];\n if[not count modelDetails;\n logging.error\"No model meeting your provided conditions was available\"\n ];\n // Construct the path to model folder containing the model to be retrieved\n config,:flip modelDetails;\n configPath:registry.util.path.modelFolder[config`registryPath;config;::];\n modelPath:registry.util.path.modelFolder[config`registryPath;config;`model];\n codePath:registry.util.path.modelFolder[config`registryPath;config;`code];\n registry.util.load.code codePath;\n func:{[k;configPath;modelDetails;modelPath;config;storage]\n $[k~(::);\n modelConfig:configPath,\"/config/modelInfo.json\";\n modelConfig:configPath,\"/config/\",string[k],\"/modelInfo.json\"\n ];\n modelInfo:.j.k raze read0 hsym`$modelConfig;\n // Retrieve the model based on the form of saved model\n modelType:first`$modelDetails`modelType;\n modelPath,:$[k~(::);\"\";string[k],\"/\"],$[modelType~`q;\n \"mdl\";\n modelType~`keras;\n \"mdl.h5\";\n modelType~`torch;\n \"mdl.pt\";\n modelType~`pyspark;\n \"mdl.model\";\n \"mdl.pkl\"\n ];\n model:mlops.get[modelType] $[modelType in `q;modelPath;pydstr modelPath];\n if[registry.config.commandLine`deployType;\n axis:modelInfo[`modelInformation;`axis];\n model:mlops.wrap[`python;model;axis];\n ];\n returnInfo:`modelInfo`model!(modelInfo;model);\n returnInfo\n }[;configPath;modelDetails;modelPath;config;storage];\n if[b:()~key hsym `$configPath,\"/config/modelInfo.json\";\n k:key hsym `$configPath,\"/config\"];\n r:$[b;$[optionalKey~(::);k!func'[k];func optionalKey];func[::]];\n if[`local<>storage;registry.util.delete.folder config`folderPath];\n r\n }\n\n// @private\n//\n// @overview\n// Retrieve metrics from the registry, this is a wrapped version of this\n// functionality to facilitate protected execution in the case that issues\n// arise with retrieval or loading of metrics from cloud providers or\n// an on-prem location\n//\n// @param storage {symbol} The form of storage from which the model is\n// being retrieved\n// @param experimentName {string|null} The name of an experiment from which\n// to retrieve a model, if no modelName is provided the newest model\n// within this experiment will be used. 
If neither modelName or\n// experimentName are defined the newest model within the\n// \"unnamedExperiments\" section is chosen\n// @param modelName {string|null} The name of the model to be retrieved\n// in the case this is null, the newest model associated with the\n// experiment is retrieved\n// @param version {long[]|null} The specific version of a named model to retrieve\n// in the case that this is null the newest model is retrieved (major;minor)\n// @param config {dictionary} Configuration containing information surrounding\n// the location of the registry and associated files\n// @param param {null|dict|symbol} Search parameters for the retrieval\n// of metrics\n//\n// @return {table} The metric table for a specific model, which may\n// potentially be filtered\nregistry.util.get.metric:{[storage;experimentName;modelName;version;config;param]\n modelDetails:registry.util.search.model[experimentName;modelName;version;config];\n if[not count modelDetails;\n logging.error\"No model meeting your provided conditions was available\"\n ];\n // Construct the path to model folder containing the model to be retrieved\n config,:flip modelDetails;\n metricPath:registry.util.path.modelFolder[config`registryPath;config;`metrics];\n metricPath:metricPath,\"metric\";\n metric:1_get hsym`$metricPath;\n returnInfo:registry.util.search.metric[metric;param];\n if[`local<>storage;registry.util.delete.folder config`folderPath];\n returnInfo\n }\n\n// @private\n//\n// @overview\n// Retrieve parameters from the registry, this is a wrapped version of this\n// functionality to facilitate protected execution in the case that issues\n// arise with retrieval or loading of metrics from cloud providers or\n// an on-prem location\n//\n// @param storage {symbol} The form of storage from which the model is\n// being retrieved\n// @param experimentName {string|null} The name of an experiment from which\n// to retrieve a model, if no modelName is provided the newest model\n// within this experiment will be used. 
If neither modelName or\n// experimentName are defined the newest model within the\n// \"unnamedExperiments\" section is chosen\n// @param modelName {string|null} The name of the model to be retrieved\n// in the case this is null, the newest model associated with the\n// experiment is retrieved\n// @param version {long[]|null} The specific version of a named model to retrieve\n// in the case that this is null the newest model is retrieved (major;minor)\n// @param config {dictionary} Configuration containing information surrounding\n// the location of the registry and associated files\n// @param paramName {symbol|string} The name of the parameter to retrieve\n//\n// @return {string|dict|table|float} The value of the parameter associated\n// with a named parameter saved for the model.\nregistry.util.get.params:{[storage;experimentName;modelName;version;config;paramName]\n modelDetails:registry.util.search.model[experimentName;modelName;version;config];\n if[not count modelDetails;\n logging.error\"No model meeting your provided conditions was available\"\n ];\n // Construct the path to model folder containing the model to be retrieved\n config,:flip modelDetails;\n paramPath:registry.util.path.modelFolder[config`registryPath;config;`params];\n paramName:$[-11h=type paramName;\n string paramName;\n 10h=type paramName;\n paramName;\n logging.error\"ParamName must be of type string or symbol\"\n ];\n paramPath,:paramName,\".json\";\n returnInfo:registry.util.search.params[paramPath];\n if[`local<>storage;registry.util.delete.folder config`folderPath];\n returnInfo\n }\n\nregistry.util.get.version:{[storage;experimentName;modelName;version;config;param]\n modelDetails:registry.util.search.model[experimentName;modelName;version;config];\n if[not count modelDetails;\n logging.error\"No model meeting your provided conditions was available\"\n ];\n config,:flip modelDetails;\n rootPath:registry.util.path.modelFolder[config`registryPath;config;::];\n versionInfo:@[read0;hsym `$rootPath,\"/.version.info\";{'\"Version information not found for model\"}];\n .j.k raze versionInfo\n };\n\n\n// @private\n//\n// @overview\n// Retrieve a q/python/sklearn/keras model or parameters/metrics related to a\n// specific model from the registry.\n//\n// @todo\n// Add type checking for modelName/experimentName/version\n//\n// @param cli {dict} Command line arguments as passed to the system on\n// initialisation, this defines how the fundamental interactions of\n// the interface are expected to operate.\n// @param folderPath {dict|string|null} Registry location.\n// 1. Can be a dictionary containing the vendor and location as a string, e.g.:\n// - enlist[`local]!enlist\"myReg\"\n// - enlist[`aws]!enlist\"s3://ml-reg-test\"\n// 2. A string indicating the local path\n// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON\n// @param experimentName {string|null} The name of an experiment from which\n// to retrieve a model, if no modelName is provided the newest model\n// within this experiment will be used. 
If neither modelName or\n// experimentName are defined the newest model within the\n// \"unnamedExperiments\" section is chosen\n// @param modelName {string|null} The name of the model to be retrieved\n// in the case this is null, the newest model associated with the\n// experiment is retrieved\n// @param version {long[]|null} The specific version of a named model to retrieve\n// in the case that this is null the newest model is retrieved (major;minor)\n// @param param {null|dict|symbol|string} Parameter required for parameter/\n// metric retrieval\n// in the case when this is a string, it is converted to a symbol\n//\n// @return {dict} The model and information related to the\n// generation of the model\nregistry.util.get.object:{[typ;folderPath;experimentName;modelName;version;param]\n if[(typ~`metric)&abs[type param] in 10 11h;\n param:enlist[`metricName]!enlist $[10h=abs[type param];`$;]param\n ];\n config:registry.util.check.config[folderPath;()!()];\n if[not`local~storage:config`storage;storage:`cloud];\n // Locate/retrieve the registry locally or from the cloud\n config:$[storage~`local;\n registry.local.util.check.registry config;\n [checkFunction:registry.cloud.util.check.model;\n checkFunction[experimentName;modelName;version;config`folderPath;config]\n ]\n ];\n getParams:$[(typ~`model)&param~(::);\n (storage;experimentName;modelName;version;config;::);\n (storage;experimentName;modelName;version;config;param)\n ];\n .[registry.util.get typ;\n getParams;\n {[x;y;z]\n $[`local~x;;registry.util.delete.folder]y;\n 'z\n }[storage;config`folderPath]\n ]\n }\n\n\n================================================================================\nFILE: ml_ml_registry_q_main_utils_init.q\nSIZE: 846 characters\n================================================================================\n\n// init.q - Initialise main q utilities for the model registry\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Utilities relating to all basic interactions with the registry\n\n\\d .ml"}}},{"rowIdx":68,"cells":{"text":{"kind":"string","value":"Circles in a Circle, 1923\nEverything begins with a dot.\n— W.W. Kandinsky\n.\nApply, Index, Trap\n@\nApply At, Index At, Trap At¶\n- Apply a function to a list of arguments\n- Get items at depth in a list\n- Trap errors\n| rank | syntax | function semantics | list semantics |\n|---|---|---|---|\n| 2 | v . vx .[v;vx] |\nApply Apply v to list vx of arguments |\nIndex Get item/s vx at depth from v |\n| 2 | u @ ux @[u;ux] |\nApply At Apply unary u to argument ux |\nIndex At Get items ux from u |\n| 3 | .[g;gx;e] |\nTrap Try g . gx ; catch with e |\n|\n| 3 | @[f;fx;e] |\nTrap At Try f@fx ; catch with e |\nWhere\ne\nis an expression, typically a functionf\nis a unary function andfx\nin its domaing\nis a function of rank \\(n\\) andgx\nan atom or list of count \\(n\\) with items in the domains ofg\nv\nis a value of rank \\(n\\) (or a handle to one) andvx\na list of count \\(n\\) with items in the domains ofv\nu\nis a unary value (or a handle to one) andux\nin its domain\nAmend, Amend At¶\nFor the ternary and quaternary forms\n.[d; i; u] @[d; i; u]\n.[d; i; v; vy] @[d; i; v; vy]\nwhere\nd\nis a list or dictionary, or a handle to a list, dictionary or datafilei\nindexesd\nasd . i\nord @ i\n(must be a list for Amend)u\nis a unary withd\nin its domainv\nis a binary withd\nandvy\nin its left and right domains\nsee Amend and Amend At.\nApply, Index¶\nv . 
vx\nevaluates value v\non the \\(n\\) arguments listed in vx\n.\nq)add / addition 'table'\n0 1 2 3\n1 2 3 4\n2 3 4 5\n3 4 5 6\nq)add . 2 3 / add[2;3] (Index)\n5\nq)(+) . 2 3 / +[2;3] (Apply)\n5\nq).[+;2 3]\n5\nq).[add;2 3]\n5\nIf v\nhas rank \\(n\\), then vx\nhas \\(n\\) items and v\nis evaluated as:\nv[vx[0]; vx[1]; …; vx[-1+count vx]]\nIf v\nhas rank 2, then vx\nhas 2 items and v\nis applied to the first argument vx[0]\nand the second argument vx[1]\n.\nv[vx[0];vx[1]]\nVariadic operators\nMost binary operators such as Add have deprecated unary forms and are thus actually variadic.\nWhere v\nis such a variadic operator, parenthesize it to provide it as the left argument of Apply.\nq).[+;2 2]\n4\nq)(+) . 2 2\n4\nIf v\nhas rank 1, then vx\nhas one item and v\nis applied to the argument vx[0]\n.\nv[vx[0]]\nQ for Mortals §6.5.3 Indexing at Depth\nNullaries¶\nNullaries (functions of rank 0) are handled differently. The pattern above suggests that the empty list ()\nwould be the argument list to nullary v\n, but Apply for nullary v\nis denoted by v . enlist[::]\n, i.e. the right argument is the enlisted null.\nFor example:\nq)a: 2 3\nq)b: 10 20\nq){a + b} . enlist[::]\n12 23\nIndex¶\nd . i\nreturns an item from list or dictionary d\nas specified by successive items in list i\n. Since 4.1t 2022.03.25, d\ncan be a persisted table.\nThe result is found in d\nat depth count i\nas follows.\nThe list i\nis a list of successive indexes into d\n. i[0]\nmust be in the domain of d@\n. It selects an item of d\n, which is then indexed by i[1]\n, and so on.\n( (d@i[0]) @ i[1] ) @ i[2]\n…\nq)d\n((1 2 3;4 5 6 7) ;(8 9;10;11 12) ;(13 14;15 16 17 18;19 20))\nq)d . enlist 1 / select item 1, i.e. d@1\n8 9\n10\n11 12\nq)d . 1 2 / select item 2 of item 1\n11 12\nq)d . 1 2 0 / select item 0 of item 2 of item 1\n11\nA right argument of enlist[::]\nselects the entire left argument.\nq)d . enlist[::]\n(1 2 3;4 5 6 7)\n(8 9;10;11 12)\n(13 14;15 16 17 18;19 20)\nIndex At¶\nThe selections at each level are individual applications of Index At: first, item d@i[0]\nis selected, then (d@i[0])@i[1]\n, then ((d@i[0])@ i[1])@ i[2]\n, and so on.\nThese expressions can be rewritten using Over applied to Index At; the first is d@/i[0]\n, the second is d@/i[0 1]\n, and the third is d@/i[0 1 2]\n.\nIn general, for a vector i\nof any count, d . i\nis identical to d@/i\n.\nq)((d @ 1) @ 2) @ 0 / selection in terms of a series of @s\n11\nq)d @/ 1 2 0 / selection in terms of @-Over\n11\nCross sections¶\nIndex is cross-sectional when the items of i\nare lists. That is, items-at-depth in d\nare indexed for paths made up of all combinations of atoms of i[0]\nand atoms of i[1]\nand atoms of i[2]\n, and so on to the last item of i\n.\nThe simplest case of cross-sectional index occurs when the items of i\nare vectors. For example, d .(2 0;0 1)\nselects items 0 and 1 from both items 2 and 0:\nq)d . (2 0; 0 1)\n13 14 15 16 17 18\n1 2 3 4 5 6 7\nq)count each d . (2 0; 0 1)\n2 2\nNote that items appear in the result in the same order as the indexes appear in i\n.\nThe first item of i\nselects two items of d\n, as in d@i[0]\n. The second item of i\nselects two items from each of the two items just selected, as in (d@i[0])@'i[1]\n. 
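As a quick check of the description above, the same cross-section can be rebuilt step by step from Index At: apply the first item of i, then take items 0 and 1 of each selection (written here with Each Left). This is an illustrative session only, reusing the list d shown above.
q)i:(2 0;0 1)
q)(d . i) ~ (d @ i 0) @\: i 1   / cross-section built from Index At
1b
q)(d . 1 2 0) ~ d @/ 1 2 0      / vector case: Index At over successive items
1b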
Had there been a third vector item in i\n, say of count 5, then that item would select five items from each of the four items-at-depth 1 just selected, as in ((d@i[0])@'i[1])@''i[2]\n, and so on.\nWhen the items of i\nare vectors the result is rectangular to at least depth count i\n, depending on the regularity of d\n, and the k\nth item of its shape vector is (count i)[k]\nfor every k\nless than count i\n. That is, the first count i\nitems of the shape of the result are count each i\n.\nMore general cross-sectional indexing occurs when the items of i\nare rectangular lists, not just vectors, but the situation is much like the simpler case of vector items.\nNulls in i\n¶\nNulls in i\nmean “select all”: if i[0]\nis null, then continue on with d\nand the rest of i\n, i.e. 1_i\n; if i[1]\nis null, then for every selection made through i[0]\n, continue on with that selection and the rest of i\n, i.e. 2_i\n; and so on. For example, d .(::;0)\nmeans that the 0th item of every item of d\nis selected.\nq)d\n(1 2 3;4 5 6 7)\n(8 9;10;11 12)\n(13 14;15 16 17 18;19 20)\nq)d . (::;0)\n1 2 3\n8 9\n13 14\nAnother example, this time with i[1]\nequal to null:\nq)d . (0 2;::;1 0)\n(2 1;5 4)\n(14 13;16 15;20 19)\nNote that d .(::;0)\nis the same as d .(0 1 2;0)\n, but in the last example, there is no value that can be substituted for null in (0 2;;1 0)\nto get the same result, because when item 0 of d\nis selected, null acts like 0 1\n, but when item 2 of d\nis selected, it acts like 0 1 2\n.\nThe general case of a non-negative integer list i\n¶\nIn the general case, when the items of i\nare non-negative integer atoms or lists, or null, the structure of the result can be thought of as cascading structures of the items of i\n. That is, with nulls aside, the result is structurally like i[0]\n, except that wherever there is an atom in i[0]\n, the result is structurally like i[1]\n, except that wherever there is an atom in i[1]\n, the result is structurally like i[2]\n, and so on.\nThe general case of Index can be defined recursively in terms of Index At by partitioning the list i\ninto its first item and the rest:\nIndex:{[d;F;R]\n$[ F~::; Index[d; first R; 1 _ R];\n0 =count R; d @ F;\n0>type F; Index[d @ F; first R; 1 _ R]\nIndex[d;; R]'F ]}\nThat is, d . i\nis Index[d;first i;1_i]\n.\nTo work through the definition, start with F\nas the first item of i\nand R\nas the remainder. At each step in the recursion:\n- if\nF\nis null then select all ofd\nand continue on, with the first item of the remainderR\nas the newF\nand the remainder ofR\nas the new remainder; - otherwise, if the remainder is the empty vector apply Index At (the right argument\nF\nis now the last item ofi\n), and we are done; - otherwise, if\nF\nis an atom, apply Index At to select that item ofd\nand continue on in the same way as whenF\nis null; - otherwise, apply Index with fixed arguments\nd\nandR\n, but independently to the items of the listF\n.\nDictionaries and symbolic indexing¶\nIf i\nis a symbol atom then d\nmust be a dictionary or handle of a directory on the K-tree, and d . i\nselects the value of the entry named in i\n. For example, if:\ndir:`a`b!(2 3 4;\"abcdefg\")\nthen `dir . enlist`b\nis \"abcdefg\"\nand `dir . (`b;1 3 5)\nis \"bdf\"\n.\nIf i\nis a list whose items are non-negative integer atoms and symbol atoms, then just like the non-negative integer vector case, d . i\nis a single item at depth count i\nin d\n. 
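The dictionary example above, run as an illustrative session (dir defined exactly as in the text):
q)dir:`a`b!(2 3 4;"abcdefg")
q)`dir . enlist`b
"abcdefg"
q)`dir . (`b;1 3 5)
"bdf"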
The difference is that wherever a symbol appears in i\n, say as the kth item, the selection up to the kth item must produce a dictionary or a handle of a directory. Selection by the kth item is the value of an entry in that dictionary or directory, and further selections go on from there. For example:\nq)(1;`a`b!(2 3 4;10 20 30 40)) . (1; `b; 2)\n30\nAs we have seen above for the general case, every atom in the k\nth item of i\nmust be a valid index of all items at depth k\nselected by d . k # i\n. Moreover, symbols can only select from dictionaries and directories, and integers cannot.\nConsequently, if the k\nth item of i\ncontains a symbol atom, then all items selected by d . k # i\nmust be dictionaries or handles of directories, and therefore all atoms in the k\nth item of i\nmust be symbols.\nIt follows that each item of i\nmust be made up entirely of non-negative integer atoms, or entirely of symbol atoms, and if the k\nth item of i\nis made up of symbols, then all items at depth k\nin d\nselected by the first k\nitems of i\nmust be dictionaries.\nNote that if d\nis either a dictionary or handle to a directory then d . enlist key d\nis a list of values of all the entries.\nStep dictionaries¶\nWhere d\nis a dictionary, d@i\nor d[i]\nor d i\nreturns for each item of i\nthat is outside the domain of d\na null of the same type as the keys.\nq)d:`cat`cow`dog`sheep!`chat`vache`chien`mouton\nq)d\ncat | chat\ncow | vache\ndog | chien\nsheep| mouton\nq)d `sheep`snake`cat`ant\n`mouton``chat`\nq)\nq)e:(10*til 10)!til 10\nq)e\n0 | 0\n10| 1\n20| 2\n30| 3\n40| 4\n50| 5\n60| 6\n70| 7\n80| 8\n90| 9\nq)e 80 35 20 -10\n8 0N 2 0N\nA step dictionary has the sorted attribute set.\nIts keys are a sorted vector.\nWhere s\nis a step dictionary, and i[k]\nare the items of i\nthat are outside the domain of d\n, the value/s for d@i@k\nare the values for the highest keys that are lower than i k\n.\nq)d:`cat`cow`dog`sheep!`chat`vache`chien`mouton\nq)ds:`s#d\nq)ds~d\n1b\nq)ds `sheep`snake`cat`ant\n`mouton`mouton`chat`\nq)\nq)es:`s#e\nq)es~e\n1b\nq)es 80 35 20 -10\n8 3 2 0N\nSet Attribute\nStep Dictionaries\nApply At, Index At¶\n@\nis syntactic sugar for the case where u\nis a unary and ux\na 1-item list.\nu@ux\nis always equivalent to u . enlist ux\n.\nBrackets are syntactic sugar\nThe brackets of an argument list are also syntactic sugar. Nothing can be expressed with brackets that cannot also be expressed using .\n.\nYou can use the derived function @\\:\nto apply a list of unary values to the same argument.\nq){`o`h`l`c!(first;max;min;last)@\\:x}1 2 3 4 22 / open, high, low, close\no| 1\nh| 22\nl| 1\nc| 22\nComposition¶\nA sequence of unaries u\n, v\n, w\n… can be composed with Apply At as u@v@w@\n.\nAll but the last @\nmay be elided: u v w@\n.\nq)tc:til count@ / indexes of a list\nq)tc \"abc\"\n\"0 1 2\"\nThe last value in the sequence can have higher rank if projected as a unary by Apply.\nq)di:reciprocal(%). / divide into\nq)di 2 3 / divide 2 into 3\n1.5\nTrap¶\nIn the ternary, if evaluation of the function fails, the expression is evaluated. 
(Compare try/catch in some other languages.)\nq).[+;\"ab\";`ouch]\n`ouch\nIf the expression is a function, it is evaluated on the text of the signalled error.\nq).[+;\"ab\";{\"Wrong \",x}]\n\"Wrong type\"\nFor a successful evaluation, the ternary returns the same result as the binary.\nq).[+;2 3;{\"Wrong \",x}]\n5\nTrap At¶\n@[f;fx;e]\nis equivalent to .[f;enlist fx;e]\n.\nUse Trap At as a simpler form of Trap, for unary values.\n.Q.trp (extend trap at)\nLimit of the trap¶\nTrap catches only errors signalled in the applications of f\nor g\n. Errors in the evaluation of fx\nor gg\nthemselves are not caught.\nq)@[2+;\"42\";`err]\n`err\nq)@[2+;\"42\"+3;`err]\n'type\n[0] @[2+;\"42\"+3;`err]\n^\nWhen e\nis not a function¶\nIf e\nis a function it will be evaluated only if f\nor g\nfails. It will however be parsed before any of the other expressions are evaluated.\nq)@[2+;\"42\";{)}]\n')\n[0] @[2+;\"42\";{)}]\n^\nIf e\nis any other kind of expression it will always be evaluated – and first, in the usual right-to-left sequence. In this respect Trap and Trap At are unlike try/catch in other languages.\nq)@[string;42;a:100] / expression not a function\n\"42\"\nq)a // but a was assigned anyway\n100\nq)@[string;42;{b::99}] / expression is a function\n\"42\"\nq)b // not evaluated\n'b\n[0] b\n^\nFor most purposes, you will want e\nto be a function.\nQ for Mortals §10.1.8 Protected Evaluation\nErrors signalled¶\nindex an atom in vx or ux is not an index to an item-at-depth in d\nrank the count of vx is greater than the rank of v\ntype v or u is a symbol atom, but not a handle to an value\ntype an atom of vx or ux is not an integer, symbol or null"}}},{"rowIdx":69,"cells":{"text":{"kind":"string","value":"Connection handles¶\nkdb+ communicates with the console, stdout, stderr, file system, and other processes through connection handles.\nThere are three permanent system handles:\n0 console\n1 stdout\n2 stderr\nFile and process handles are created by hopen\nand destroyed by hclose\n.\nWrite¶\nSyntax:\nh x\nneg[h] x\nwhere h\nis a handle, writes x\nto its target as described below and returns itself.\nA handle is an int atom but is variadic. Syntactically, it can be an int atom or a unary function.\nq)1 / one is one\n1\nq)1 \"abc\\n\" / or stdout\nabc\n1\nA handle is an applicable value. 
It (and its negation) can be applied to an argument and iterated.\nConsole¶\nWhere h\nis 0 and x\nis a string or parse tree, evaluates x\nin the main thread and returns the result.\nq)0 \"1 \\\"hello\\\"\" /string\nhello1\nq)0 (+;2;2) /parse tree\n4\nFile, stdout, stderr¶\nWhere h\nis stdout, stderr, or a file handle\nh x\nappends stringx\nto the fileneg[h] x\nwherex\nis a- string, appends\nx,\"\\n\"\n- list of strings, appends\nx,'\"\\n\"\nto the file.\n- string, appends\nq)a:1 \"quick brown fox\\n\"\nquick brown fox\nq)a\n1\nq)a:-1 (\"quick\";\"brown\";\"fox\")\nquick\nbrown\nfox\nq)a\n-1\nq)f:`:tmp.txt\nq)hopen f\n3i\nq)3 \"quick brown fox\"\n3\nq)-3 (\"quick\";\"brown\";\"fox\")\n-3\nq)hclose 3\nq)read0 f\n\"quick brown foxquick\"\n\"brown\"\n\"fox\"\nq)\\ls data\nls: data: No such file or directory\n'os\n[0] \\ls data\n^\nq)h:hopen `:data/new\nq)h /handle is an integer\n3i\nq)type h /atom\n-6h\nq)h \"now is the time\" /but can be applied as a unary\n3i\nq)/and iterated\nq)h each (\" for all good men\";\" to come to the aid of the party\")\n3 3i\nq)hclose h\nq)read0 `:data/new /hopen created file path\n\"now is the time for all good men to come to the aid of the party\"\nProcess¶\nh x\nsends stringx\nas a sync request (get)neg[h] x\nsends stringx\nas an async request (set)\nRead¶\nConsole¶\nReading from the console with read0\npermits interactive input.\nq)s:{1 x;read0 0}\"Next track: \"\nNext track: Bewlay Brothers\nq)s\n\"Bewlay Brothers\""}}},{"rowIdx":70,"cells":{"text":{"kind":"string","value":"deltas\n¶\nDifferences between adjacent list items\ndeltas x deltas[x]\nWhere x\nis a numeric or temporal vector, returns differences between consecutive pairs of its items.\nq)deltas 1 4 9 16\n1 3 5 7\nIn a query to get price movements:\nupdate diff:deltas price by sym from trade\nWith signum\nto count the number of up/down/same ticks:\nq)select count i by signum deltas price from trade\nprice| x\n-----| ----\n-1 | 247\n0 | 3\n1 | 252\ndomain: b g x h i j e f c s p m d z n u v t\nrange: i . i i i j e f . . 
n i i f n u v t\nFirst predecessor¶\nThe predecessor of the first item is 0.\nq)deltas 2000 2005 2007 2012 2020\n2000 5 2 5 8\nIt may be more convenient to have 0 as the first item of the result.\nq)deltas0:{first[x]-':x}\nq)deltas0 2000 2005 2007 2012 2020\n0 5 2 5 8\nSubtract Each Prior\nThe derived function -':\n(Subtract Each Prior) used to define deltas\nis variadic and can be applied as either a unary or a binary.\nHowever, deltas\nis supported only as a unary function.\nFor binary application, use the derived function.\n\ndesc\n, idesc\n, xdesc\n¶\nSort and grade: descending\nQ chooses from a variety of algorithms, depending on the type and data distribution.\ndesc\n¶\nDescending sort\ndesc x desc[x]\nReturns x\nsorted into descending order.\nThe function is uniform.\nThe sort is stable: it preserves order between equals.\nWhere x\nis a\n- vector, it is returned sorted\n- mixed list, the result is sorted within datatype\n- dictionary, returns it sorted by the values\n- table, returns it sorted by the first non-key column and with the sorted attribute set on that column\nUnlike asc\n, which sets the parted attribute where there are other non-key columns, desc\nsets only the sorted attribute.\nq)desc 2 1 3 4 2 1 2 / vector\n4 3 2 2 2 1 1\nq)desc (1;1b;\"b\";2009.01.01;\"a\";0) / mixed list\n2009.01.01\n\"b\"\n\"a\"\n1\n0\nq)desc `a`b`c!2 1 3 / dictionary\nc| 3\na| 2\nb| 1\nq)desc([]a:3 4 1;b:`a`d`s) / table\na b\n---\n4 d\n3 a\n1 s\nq)meta desc([]a:3 4 1;b:`a`d`s)\nc| t f a\n-| -----\na| j\nb| s\ndomain: b g x h i j e f c s p m d z n u v t\nrange: b g x h i j e f c s p m d z n u v t\nidesc\n¶\nDescending grade\nidesc x idesc[x]\nWhere x\nis a list or dictionary, returns the indices needed to sort list it in descending order.\nq)L:2 1 3 4 2 1 2\nq)idesc L\n3 2 0 4 6 1 5\nq)L idesc L\n4 3 2 2 2 1 1\nq)(desc L)~L idesc L\n1b\nq)idesc `a`c`b!1 2 3\n`b`c`a\ndomain: b g x h i j e f c s p m d z n u v t\nrange: j j j j j j j j j j j j j j j j j j\nxdesc\n¶\nSorts a table in descending order of specified columns. The sort is by the first column specified, then by the second column within the first, and so on.\nx xdesc y xdesc[x;y]\nWhere x\nis a symbol vector of column names defined in y\n, which is passed by\n- value, returns\n- reference, updates\ny\nsorted in descending order by x\n.\nThe sorted attribute is not set. The sort is stable, i.e. it preserves order amongst equals.\nq)\\l sp.q\nq)s\ns | name status city\n--| -------------------\ns1| smith 20 london\ns2| jones 10 paris\ns3| blake 30 paris\ns4| clark 20 london\ns5| adams 30 athens\nq)`city xdesc s / sort descending by city\ns | name status city\n--| -------------------\ns2| jones 10 paris\ns3| blake 30 paris\ns1| smith 20 london\ns4| clark 20 london\ns5| adams 30 athens\nq)meta `city xdesc s / `s# attribute not set\nc | t f a\n------| -----\ns | s\nname | s\nstatus| i\ncity | s\nDuplicate column names xdesc\nsignals dup\nif it finds duplicate columns in the right argument. 
(Since V3.6 2019.02.19.)\nSorting data on disk¶\nxdesc\ncan sort data on disk directly, without loading the entire table into memory: see xasc\n.\nDuplicate keys in a dictionary or duplicate column names in a table will cause sorts and grades to return unpredictable results.\nasc\n, iasc\n, xasc\n,\nattr\n,\nSet Attribute\nDictionaries & tables,\nMetadata,\nSorting\nQ for Mortals\n§8.8 Attributes\n\ndev\n, mdev\n, sdev\n¶\nDeviations\ndev\n¶\nStandard deviation\ndev x dev[x]\nWhere x\nis a numeric list, returns its standard deviation (as the square root of the variance).\nApplies to all numeric data types and signals an error with temporal types, char and sym.\nq)dev 10 343 232 55\n134.3484\ndev\nis an aggregate function, equivalent to {sqrt var x}\n.\ndomain: b g x h i j e f c s p m d z n u v t\nrange: f . f f f f f f f . f f f f f f f f\nSince 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists.\nq)M:get`:m77 set m:(2 3;4 0N;1 7)\nq)dev m\n1.247219 2\nq)dev M\n1.247219 2\nq)T:get`:tab/ set t:flip`a`b!flip m\nq)dev t\na| 1.247219\nb| 2\nq)dev T\na| 1.247219\nb| 2\ndev\nis a multithreaded primitive.\nmdev\n¶\nMoving deviations\nx mdev y mdev[x;y]\nWhere\nx\nis a positive int atomy\nis a numeric list\nreturns the floating-point x\n-item moving deviations of y\n, with any nulls after the first item replaced by zero. The first x\nitems of the result are the deviations of the terms so far, and thereafter the result is the moving deviation.\nq)2 mdev 1 2 3 5 7 10\n0 0.5 0.5 1 1 1.5\nq)5 mdev 1 2 3 5 7 10\n0 0.5 0.8164966 1.47902 2.154066 2.87054\nq)5 mdev 0N 2 0N 5 7 0N / nulls after the first are replaced by 0\n0n 0 0 1.5 2.054805 2.054805\nq)t\nb c\n----\n1 45\n2 46\n3 47\nq)2 mdev t\nb c\n-------\n0 0\n0.5 0.5\n0.5 0.5\nmdev\nis a uniform function.\nDomain and range:\nb g x h i j e f c s p m d z n u v t\n----------------------------------------\nb | f . f f f f f f f . f f f f f f f f\ng | . . . . . . . . . . . . . . . . . .\nx | f . f f f f f f f . f f f f f f f f\nh | f . f f f f f f f . f f f f f f f f\ni | f . f f f f f f f . f f f f f f f f\nj | f . f f f f f f f . f f f f f f f f\ne | . . . . . . . . . . . . . . . . . .\nf | . . . . . . . . . . . . . . . . . .\nc | . . . . . . . . . . . . . . . . . .\ns | . . . . . . . . . . . . . . . . . .\np | . . . . . . . . . . . . . . . . . .\nm | . . . . . . . . . . . . . . . . . .\nd | . . . . . . . . . . . . . . . . . .\nz | . . . . . . . . . . . . . . . . . .\nn | . . . . . . . . . . . . . . . . . .\nu | . . . . . . . . . . . . . . . . . .\nv | . . . . . . . . . . . . . . . . . .\nt | . . . . . . . . . . . . . . . . . .\nRange: f\nImplicit iteration¶\nmdev\napplies to dictionaries and tables.\nq)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 21 3;4 5 6)\nq)2 mdev d\na| 0 0 0\nb| 3 8 1.5\nq)2 mdev t\na b\n-------\n0 0\n5.5 0.5\n9 0.5\nq)2 mdev k\nk | a b\n---| -------\nabc| 0 0\ndef| 5.5 0.5\nghi| 9 0.5\nsdev\n¶\nSample standard deviation\nsdev x sdev[x]\nWhere x\nis a numeric list, returns its sample standard deviation as the square root of the sample variance.\nq)sdev 10 343 232 55\n155.1322\nsdev\nis an aggregate function, equivalent to {sqrt var[x]*count[x]%-1+count x}\n.\ndomain: b g x h i j e f c s p m d z n u v t\nrange: f . f f f f f f f . 
f f f f f f f f\nSince 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists.\nq)M:get`:m77 set m:(2 3;4 0N;1 7)\nq)sdev m\n1.527525 2.828427\nq)sdev M\n1.527525 2.828427\nq)T:get`:tab/ set t:flip`a`b!flip m\nq)sdev t\na| 1.527525\nb| 2.828427\nq)sdev T\na| 1.527525\nb| 2.828427\nsdev\nis a multithreaded primitive.\nvar\n, svar\nMathematics\nSliding windows\nStandard deviation,\nVariance\nStandard deviation\n\n!\nDict¶\nMake a dictionary or keyed table; remove a key from a table\nx!y ![x;y]\nWhere\nx\nandy\nare same-length lists, returns a dictionary in whichx\nis the key andy\nis the valuey\nis a simple table andx\nis a member of1_til count y\n, returns a keyed table with the firstx\ncolumns as its keyy\nis a table andx\nis 0, returns a simple table; i.e. removes the key\nDictionary keys should be distinct (i.e. {x~distinct x}key dict)\nbut no error is signalled if that is not so.\nItems of x\nand y\ncan be of any datatype, including dictionaries and tables.\nq)`a`b`c!1 2 3\na| 1\nb| 2\nc| 3\nq)show kt:2!([]name:`Tom`Jo`Tom; city:`NYC`LA`Lagos; eye:`green`blue`brown; sex:`m`f`m)\nname city | eye sex\n----------| ---------\nTom NYC | green m\nJo LA | blue f\nTom Lagos| brown m\nq)show ku:([]name:`Tom`Jo`Tom; city:`NYC`LA`Lagos)!([]eye:`green`blue`brown; sex:`m`f`m)\nname city | eye sex\n----------| ---------\nTom NYC | green m\nJo LA | blue f\nTom Lagos| brown m\nq)kt~ku\n1b\nq)0!kt\nname city eye sex\n--------------------\nTom NYC green m\nJo LA blue f\nTom Lagos brown m\nDict is a uniform function on its right domain.\nErrors¶\n| error | cause |\n|---|---|\n| length | x and y are not same-length lists |\n| length | x is not in 1_ til count y |\n| type | y is not a simple table |\nkey\n,\nvalue\nDictionaries & tables\nQ for Mortals\n§5 Dictionaries\n\ndiffer\n¶\nFind where list items change value\ndiffer x differ[x]\nReturns a boolean list indicating where consecutive pairs of items in x\ndiffer.\nIt applies to all data types.\nIt is a uniform function.\nThe first item of the result is always 1b\n:\nr[i]=1b for i=0\nr[i]=not A[i]~A[i-1] otherwise\nq)differ`IBM`IBM`MSFT`CSCO`CSCO\n10110b\nq)differ 1 3 3 4 5 6 6\n1101110b\nSplit a table with multiple dates into a list of tables with distinct dates.\nq)d:2009.10.01+asc 100?30\nq)s:100?`IBM`MSFT`CSCO\nq)t:([]date:d;sym:s;price:100?100f;size:100?1000)\nq)i:where differ t[`date] / indices where dates differ\nq)tlist:i _ t / list of tables with one date per table\nq)tlist 0\ndate sym price size\n-----------------------------\n2009.10.01 IBM 37.95179 710\n2009.10.01 CSCO 52.908 594\n2009.10.01 MSFT 32.87258 250\n2009.10.01 CSCO 75.15704 592\nq)tlist 1\ndate sym price size\n----------------------------\n2009.10.02 MSFT 18.9035 26\n2009.10.02 CSCO 12.7531 760\ndomain: b g x h i j e f c s p m d z n u v t\nrange: b b b b b b b b b b b b b b b b b b\ndiffer\nis a multithreaded primitive.\nBinary use deprecated\nAs of V3.6 the keyword is variadic. Binary application is deprecated and may disappear in future versions. The keyword cannot be applied infix.\nFor a binary version, use Match Each Prior: ~:'\n.\nBasics: Comparison\n\n! Display¶ Write to console and return 0N!x ![0N;x] Returns x after printing its unformatted text representation to the console. q)2+0N!3 3 5 Useful for debugging, or avoiding formatting that obscures the data’s structure. 
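A short illustrative example of how the unformatted form can be clearer than the console display: 0N! prints the structure explicitly, then returns the value unchanged for the console to format as usual.
q)0N!("abc";101b)   / unformatted representation printed first, then the formatted echo
("abc";101b)
"abc"
101b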
show Debugging\n\ndistinct\n¶\nUnique items of a list\ndistinct x distinct[x]\nWhere x\nis a list returns the distinct (unique) items of x\nin the order of their first occurrence.\nThe result does not have the unique attribute set.\nq)distinct 2 3 7 3 5 3\n2 3 7 5\nReturns the distinct rows of a table.\nq)distinct flip `a`b`c!(1 2 1;2 3 2;\"aba\")\na b c\n-----\n1 2 a\n2 3 b\nIt does not use comparison tolerance\nq)\\P 14\nq)distinct 2 + 0f,10 xexp -13\n2 2.0000000000001\ndistinct\nis a multithreaded primitive.\nErrors¶\n| error | cause |\n|---|---|\n| type | x is an atom |"}}},{"rowIdx":71,"cells":{"text":{"kind":"string","value":"The .Q\nnamespace¶\nTools\nGeneral Datatype addmonths btoa b64 encode dd join symbols j10 encode binhex f precision format j12 encode base 36 fc parallel on cut ty type ff append columns x10 decode binhex fmt precision format x12 decode base 36 ft apply simple fu apply unique Database gc garbage collect chk fill HDB gz GZip dpft dpfts save table id sanitize dpt dpts save table unsorted qt is table dsftg load process save res keywords en enumerate varchar cols s plain text ens enumerate against domain s1 string representation fk foreign key sha1 SHA-1 encode hdpf save tables V table to dict l load v value ld load and group view subview li load partitions lo load without Constants M chunk size A a an alphabets qp is partitioned b6 bicameral alphanums qt is table n nA nums & alphanums Partitioned database state Debug/Profile bv build vp bt backtrace bvi build incremental vp prf0 code profiler cn count partitioned table sbt string backtrace D partitions trp extend trap at ind partitioned index trpd extend trap MAP maps partitions ts time and space par locate partition PD partition locations Environment pd modified partition locns K k version pf partition field w memory stats pn partition counts pt partitioned tables Environment (Command-line) PV partition values def command defaults pv modified partition values opt command parameters qp is partitioned x non-command parameters vp missing partitions\nIPC Segmented database state addr IP/host as int P segments fps fpn pipe streaming u date based fs fsn file streaming hg HTTP get File I/O host IP to hostname Cf create empty nested char file hp HTTP post Xf create file\nFunctions defined in q.k\nare loaded as part of the ‘bootstrap’ of kdb+. Some are exposed in the default namespace as the q language. 
Others are documented here as utility functions in the .Q\nnamespace.\nThe .Q\nnamespace is reserved for use by KX, as are all single-letter namespaces.\nConsider all undocumented functions in the namespace as exposed infrastructure – and do not use them.\nIn non-partitioned databases the partitioned database state variables remain undefined.\nA\n(upper-case alphabet)¶\na\n(lower-case alphabet)¶\nan\n(all alphanumerics)¶\n.Q.A / upper-case alphabet\n.Q.a / lower-case alphabet\n.Q.an / all alphanumerics\nStrings: upper-case Roman alphabet (.Q.A\n), lower-case Roman alphabet (.Q.a\n), and all alphanums (.Q.an\n).\nq).Q.A\n\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nq).Q.a\n\"abcdefghijklmnopqrstuvwxyz\"\nq).Q.an\n\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789\"\naddmonths\n¶\n.Q.addmonths[x;y]\nWhere x\nis a date and y\nis an int, returns x\nplus y\nmonths.\nq).Q.addmonths[2007.10.16;6 7]\n2008.04.16 2008.05.16\nIf the date x\nis near the end of the month and (x.month + y\n)’s month has fewer days than x.month\n, the result may spill over to the following month.\nq).Q.addmonths[2006.10.29;4]\n2007.03.01\nMathematics with temporals\nHow to handle temporal data in q\naddr\n(IP/host as int)¶\n.Q.addr x\nWhere x\nis a hostname or IP address as a symbol atom, returns the IP address as an integer (same format as .z.a\n)\nq).Q.addr`localhost\n2130706433i\nq).Q.host .Q.addr`localhost\n`localhost\nq).Q.addr`localhost\n2130706433i\nq)256 vs .Q.addr`localhost\n127 0 0 1\nb6\n(bicameral-alphanums)¶\n.Q.b6\nReturns upper- and lower-case alphabet and numerics.\nq).Q.b6\n\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\"\nUsed for binhex encoding and decoding.\nbt\n(backtrace)¶\n.Q.bt[]\nDumps the backtrace to stdout at any point during execution or debug.\nq)f:{{.Q.bt[];x*2}x+1}\nq)f 4\n[2] f@:{.Q.bt[];x*2}\n^\n[1] f:{{.Q.bt[];x*2}x+1}\n^\n[0] f 4\n^\n10\nq)g:{a:x*2;a+y}\nq)g[3;\"hello\"]\n'type\n[1] g:{a:x*2;a+y}\n^\nq)).Q.bt[]\n>>[1] g:{a:x*2;a+y}\n^\n[0] g[3;\"hello\"]\n^\n>>\nmarks the current stack frame. (Since V4.0 2020.03.23.)\nThe debugger itself occupies a stack frame, but its source is hidden. (Since V3.5 2017.03.15.)\nbtoa\n(b64 encode)¶\n.Q.btoa x\nq).Q.btoa\"Hello World!\"\n\"SGVsbG8gV29ybGQh\"\nSince V3.6 2018.05.18.\nbv\n(build vp)¶\n.Q.bv[]\n.Q.bv[`]\nIn partitioned DBs, construct the dictionary .Q.vp\nof table schemas for tables with missing partitions. Optionally allow tables to be missing from partitions, by scanning partitions for missing tables and taking the tables’ prototypes from the last partition.\nAfter loading/re-loading from the filesystem, invoke .Q.bv[]\nto (re)populate .Q.vt\n/.Q.vp\n, which are used inside .Q.p1\nduring the partitioned select .Q.ps\n.\n(Since V2.8 2012.01.20, modified V3.0 2012.01.26)\nIf your table exists at least in the latest partition (so there is a prototype for the schema), you could use .Q.bv[]\nto create empty tables on the fly at run-time without having to create those empties on disk.\n.Q.bv[`]\n(with argument) will use prototype from first partition instead of last. 
(Since V3.2 2014.08.22.)\nSome admins prefer to see errors instead of auto-manufactured empties for missing data, which is why .Q.bv\nis not the default behavior.\nq)n:100\nq)t:([]time:.z.T+til n;sym:n?`2;num:n)\nq).Q.dpft[`:.;;`sym;`t]each 2010.01.01+til 5\n`t`t`t`t`t\nq)tt:t\nq).Q.dpft[`:.;;`sym;`tt]last 2010.01.01+til 5\n`tt\nq)\\l .\nq)tt\n+`sym`time`num!`tt\nq)@[get;\"select from tt\";-2@]; / error\n./2010.01.01/tt/sym: No such file or directory\nq).Q.bv[]\nq).Q.vp\ntt| +`date`sym`time`num!(`date$();`sym$();`time$();`long$())\nq)@[get;\"select from tt\";-2@]; / no error\nbvi\n(build incremental vp)¶\nIt offers the same functionality as .Q.bv\n, but scans only new partitions loaded in the hdb since the last time .Q.bv\nor .Q.bvi\nwas run. Since v4.1 2024.09.13.\nCf\n(create empty nested char file)¶\nDeprecated\nDeprecated since 4.1t 2022.03.25. Using resulting files could return file format errors since 3.6.\n.Q.Cf x\nA projection of .Q.Xf\n: i.e. .Q.Xf[`char;]\nchk\n(fill HDB)¶\n.Q.chk x\nWhere x\nis a HDB as a filepath, fills tables missing from partitions using the most recent partition containing the table as a template, and reports which partitions (but not which tables) it is fixing.\nq).Q.chk[`:hdb]\n()\n()\n,`:/db/2009.01.04\n,`:/db/2009.01.03\nQ must have write permission for the HDB area to create missing tables\nIf it signals an error similar to\n'./2010.01.05/tablename/.d: No such file or directory\ncheck the process has write permissions for that filesystem.\nQ for Mortals\n§14.5.2 .Q.chk\ncn\n(count partitioned table)¶\n.Q.cn x\nWhere x\nis a partitioned table, passed by value, returns its count. Populates .Q.pn\ncache.\nD\n(partitions)¶\n.Q.D\nIn segmented DBs, contains a list of the partitions – conformant to .Q.P\n– that are present in each segment.\n.Q.P!.Q.D\ncan be used to create a dictionary of partition-to-segment information.\nq).Q.P\n`:../segments/1`:../segments/2`:../segments/3`:../segments/4\nq).Q.D\n2010.05.26 2010.05.31\n,2010.05.27\n2010.05.28 2010.05.30\n2010.05.29 2010.05.30\nq).Q.P!.Q.D\n:../segments/1| 2010.05.26 2010.05.31\n:../segments/2| ,2010.05.27\n:../segments/3| 2010.05.28 2010.05.30\n:../segments/4| 2010.05.29 2010.05.30\ndd\n(join symbols)¶\n.Q.dd[x;y]\nShorthand for ` sv x,`$string y\n. 
Useful for creating filepaths, suffixed stock symbols, etc.\nq).Q.dd[`:dir]`file\n`:dir/file\nq){x .Q.dd'key x}`:dir\n`:dir/file1`:dir/file2\nq).Q.dd[`AAPL]\"O\"\n`AAPL.O\nq)update sym:esym .Q.dd'ex from([]esym:`AAPL`IBM;ex:\"ON\")\nesym ex sym\n--------------\nAAPL O AAPL.O\nIBM N IBM.N\ndef\n(command defaults)¶\nDefault values and type checks for command-line arguments parsed with .Q.opt\n.Q.def[x;y]\nWhere x\nis a dictionary of default parameter names and values, and y\nis the output of .Q.opt\n.\nTypes are inferred from the default values provided, which must be an atom type.\n$ q -abc 123 -xyz 321\nq).Q.def[`abc`xyz`efg!(1;2.;`a)].Q.opt .z.x\nabc| 123\nxyz| 321f\nefg| `a\nIf a command-line value cannot be converted to the data type of the default value, a null is produced\n$ q -param1 11 -param2 2000.01.01 -param3 wrong\nq).Q.def[`param1`param2`param3!(1;1999.01.01;23.1)].Q.opt .z.x\nparam1| 11\nparam2| 2000.01.01\nparam3| 0n\n.z.x\n(argv), .z.X\n(raw command line), .z.f\n(file), .z.q\n(quiet mode), .Q.opt\n(command parameters), .Q.x\n(non-command parameters)\ndpft\n(save table)¶\ndpfts\n(save table with symtable)¶\ndpt\n(save table unsorted)¶\ndpts\n(save table unsorted with symtable)¶\n.Q.dpft[d;p;f;t]\n.Q.dpfts[d;p;f;t;s]\n.Q.dpt[d;p;t]\n.Q.dpts[d;p;t;s]\nWhere\nd\nis a directory handlep\nis a partition of a databasef\na field of the table (required to be present in table since 4.1t 2021.09.03) named byt\nbelowt\n, the name (as a symbol) of a simple table whose columns are vectors or compound listss\nis the handle of a symtable\nsaves t\nsplayed to partition p\n.\nThe table cannot be keyed.\nThis would signal an 'unmappable\nerror if there are columns which are not vectors or simple nested columns (e.g. char vectors for each row).\nIt also rearranges the columns of the table so that the column specified by f\nis second in the table (the first column in the table will be the virtual column determined by the partitioning e.g. date).\nReturns the table name if successful.\nq)trade:([]sym:10?`a`b`c;time:.z.T+10*til 10;price:50f+10?50f;size:100*1+10?10)\nq).Q.dpft[`:db;2007.07.23;`sym;`trade]\n`trade\nq)delete trade from `.\n`.\nq)trade\n'trade\nq)\\l db\nq)trade\ndate sym time price size\n-----------------------------------------\n2007.07.23 a 11:36:27.972 76.37383 1000\n2007.07.23 a 11:36:27.982 77.17908 200\n2007.07.23 a 11:36:28.022 75.33075 700\n2007.07.23 a 11:36:28.042 58.64531 200\n2007.07.23 b 11:36:28.002 87.46781 800\n2007.07.23 b 11:36:28.012 85.55088 400\n2007.07.23 c 11:36:27.952 78.63043 200\n2007.07.23 c 11:36:27.962 90.50059 400\n2007.07.23 c 11:36:27.992 73.05742 600\n2007.07.23 c 11:36:28.032 90.12859 600\nIf you are getting an 'unmappable\nerror, you can identify the offending columns and tables:\n/ create 2 example tables\nq)t:([]a:til 2;b:2#enlist (til 1;10)) / bad table, b is unmappable\nq)t1:([]a:til 2;b:2#til 1) / good table, b is mappable\nq)helper:{$[(type x)or not count x;1;t:type first x;all t=type each x;0]};\nq)select from (raze {([]table:enlist x;columns:enlist where not helper each flip .Q.en[`:.]`. 
x)} each tables[]) where 0= 64MB to os).\nThis has the advantage of running return faster than .Q.gc[]\n, but with the disadvantage of not defragmenting unused memory blocks of a smaller size (therefore may not free as much unused memory).\n.Q.w\n(memory stats),\n\\g\n(garbage collection mode),\n\\w\n(workspace)\ngz\n(GZip)¶\n.Q.gz[::] / zlib loaded?\n.Q.gz cbv / unzipped\n.Q.gz (cl;cbv) / zipped\nWhere\ncbv\nis a char vector (or byte vector since 4.1t 2021.09.03,4.0 2021.10.01)cl\nis compression level [1-9] as a long\nreturns, for\n- the general null, a boolean atom as whether Zlib is loaded\ncbv\n, the inflated (unzipped) vector- a 2-list, the deflated (zipped) vector\nsince V4.0 2020.04.16.\nq).Q.gz{0N!count x;x}[.Q.gz(9;10000#\"helloworld\")]\n66\n\"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhellow..\n-18!x (ipc compress bytes)\nhdpf\n(save tables)¶\n.Q.hdpf[historicalport;directory;partition;`p#field]\nThe function:\n- saves all tables to disk, by calling\n.Q.dpft\n(saves as splayed tables to a partition) - clears in-memory tables\n- sends reload message to HDB, by opening a temporary connection and sending\n\\l .\nhg\n(HTTP get)¶\n.Q.hg x\nWhere x\nis a URL as a symbol atom or (since V3.6 2018.02.10) a string, returns a string for the result of an HTTP[S] GET query.\n(Since V3.4)\nq).Q.hg`:http://www.google.com\nq)count a:.Q.hg`:http:///www.google.com\n212\nq)show a\n\"\\n\\n4..\nq).Q.hg \":http://username:password@www.google.com\"\nIf you have configured SSL/TLS, HTTPS can also be used.\nq).Q.hg \":https://www.google.com\"\n.Q.hg\nwill utilize proxy settings from the environment, lower-case versions taking precedence:\n| environment variable | use |\n|---|---|\nhttp_proxy , HTTP_PROXY |\nThe URL of the HTTP proxy to use |\nno_proxy , NO_PROXY |\nComma-separated list of domains for which to disable use of proxy |\nN.B. HTTPS is not supported across proxies which require CONNECT\n.\nSince 4.0 2019.10.22, gzip compression is supported. Requests include the HTTP header \"Accept-Encoding: gzip\". The server then decides whether to gzip the returned payload, which is uncompressed prior to .Q.hg returning.\nhost\n(IP to hostname)¶\n.Q.host x\nWhere x\nis an IP address as an int atom, returns its hostname as a symbol atom.\nq).Q.host .Q.addr`localhost\n`localhost\nq).Q.addr`localhost\n2130706433i\nq)\"I\"$\"104.130.139.23\"\n1753385751i\nq).Q.host \"I\"$\"104.130.139.23\"\n`netbox.com\nq).Q.addr `netbox.com\n1753385751i\n.Q.addr\n(IP/host as int), $\ntok (IP address as int)\nhp\n(HTTP post)¶\n.Q.hp[x;y;z]\nWhere\nx\nis a URL as a symbol handle or string (since V3.6 2018.02.10)y\nis a MIME type as a stringz\nis the POST query as a string\nReturns a string for the result of an HTTP[S] POST query. (Since V3.4)\nUses proxy settings (if defined) and compression handling, as described in hg (HTTP get).\nq).Q.hp[\"http://google.com\";.h.ty`json]\"my question\"\n\"<!DOCTYPE html>\\n<html lang=en>\\n <meta charset=utf-8>\\n <meta name=viewpo..\nid\n(sanitize)¶\n.Q.id x\nWhere x\nis\n-\na symbol atom, returns\nx\nwith items sanitized to valid q namesq).Q.id each `$(\"ab\";\"a/b\";\"two words\";\"2drifters\";\"2+2\") `ab`ab`twowords`a2drifters`a22\n-\na table, returns\nx\nwith column names sanitized by removing characters that interfere withselect/exec/update\nand adding\"1\"\nto column names which clash with commands in the.q\nnamespace. 
Updated in V3.2 to include.Q.res\nfor checking collisions.q).Q.id flip (5#.Q.res)!(5#()) in1 within1 like1 bin1 binr1 ---------------------------- q).Q.id flip(`$(\"a\";\"a/b\"))!2#() a ab ----\n-\na dictionary (since v4.1 2024.09.13), supports the same rules as\ntable\naboveq).Q.id (5#.Q.res)!(5#()) abs1 | acos1| asin1| atan1| avg1 |\nSince 4.1t 2022.03.25,4.0 2022.10.26 produces a symbol a\nwhen the input contains a single character that is not in .Q.an (it previously produced an empty sym) e.g.\nq).Q.id`$\"+\"\na / previous version returned `\nTable processing also has additional logic to cater for duplicate column names (names are now appended with 1,2,etc. when matched against previous columns) after applying previously defined rules e.g.\nq)cols .Q.id(`$(\"count+\";\"count*\";\"count1\"))xcol([]1 2;3 4;5 6)\n`count1`count11`count12 / previous version returned `count1`count1`count1\nq)cols .Q.id(`$(\"aa\";\"=\";\"+\"))xcol([]1 2;3 4;5 6)\n`aa`a`a1 / previous version returned `aa`1`1\nSince 4.1t 2022.11.01,4.0 2022.10.26, the same rule is applied when the provided name begins with either an underscore or a numerical character. Previously, it could produce an invalid column name.\nq).Q.id`$\"_\"\n`a_\nq)cols .Q.id(`$(\"3aa\";\"_aa\";\"_aa\"))xcol([]1 2;3 4;5 6)\n`a3aa`a_aa`a_aa1\nind\n(partitioned index)¶\n.Q.ind[x;y]\nWhere\nx\nis a partitioned tabley\nis a long int vector of row indexes intox\nreturns rows y\nfrom x\n.\nWhen picking individual records from an in-memory table you can simply use the special virtual field i\n:\nselect from table where i<100\nBut you cannot do that directly for a partitioned table.\n.Q.ind\ncomes to the rescue here, it takes a table and indexes into the table – and returns the appropriate rows.\n.Q.ind[trade;2 3]\nA more elaborate example that selects all the rows from a date:\nq)t:select count i by date from trade\nq)count .Q.ind[trade;(exec first sum x from t where date<2010.01.07)+til first exec x from t where date=2010.01.07]\n28160313\n/ show that this matches the full select for that date\nq)(select from trade where date=2010.01.07)~.Q.ind[trade;(exec first sum x from t where date<2010.01.07)+til first exec x from t where date=2010.01.07]\n1b\nContinuous row intervals\nIf you are selecting a continuous row interval, for example if iterating over all rows in a partition, instead of using .Q.ind\nyou might as well use\n```q q)select from trade where date=2010.01.07,i within(start;start+chunkSize) ````\nj10\n(encode binhex)¶\nx10\n(decode binhex)¶\nj12\n(encode base-36)¶\nx12\n(decode base-36)¶\n.Q.j10 s .Q.j12 s\n.Q.x10 s .Q.x12 s\nWhere s\nis a string, these functions return s\nencoded (j10\n, j12\n) or decoded (x10\n, x12\n) against restricted alphabets:\n…10\nen/decodes against the alphabet.Q.b6\n, this is a base-64 encoding - see BinHex and Base64 for more details than you ever want to know about which characters are where in the encoding. To keep the resulting number an integer the maximum length ofs\nis 10.-12\nen/decodes against.Q.nA\n, a base-36 encoding. As the alphabet is smallers\ncan be longer – maximum length 12.\nThe main use of these functions is to encode long alphanumeric identifiers (CUSIP, ORDERID..) 
so they can be quickly searched – but without filling up the symbol table with vast numbers of single-use values.\nq).Q.x10 12345\n\"AAAAAAADA5\"\nq).Q.j10 .Q.x10 12345\n12345\nq).Q.j10 each .Q.x10 each 12345+1 2 3\n12346 12347 12348\nq).Q.x12 12345\n\"0000000009IX\"\nq).Q.j12 .Q.x12 12345\n12345\nTip\nIf you don’t need the default alphabets it can be very convenient to change them to have a blank as the first character, allowing the identity 0\n<-> \" \"\n.\nIf the values are not going to be searched (or will be searched with like\n) then keeping them as nested character is probably going to be simpler.\nK\n(version date)¶\nk\n(version)¶\n.Q.K / version date\n.Q.k / version\nReturn the interpreter version date (.Q.K\n) and number (.Q.k\n) for which q.k\nhas been written:\nchecked against .z.K\nat startup.\nq).Q.K\n2020.10.02\nq).Q.k\n4f\nl\n(load)¶\n.Q.l x\nWhere x\nis a hsym or symbol atom naming a directory in the current directory, loads\nit recursively as in load\n, but into the default namespace.\n(Implements system command \\l\n.)\nld\n(load and group)¶\n.Q.ld x\nExposes logic used by \\l\nto group script lines for evaluation.\nSince 4.1t 2022.11.01,4.0 2023.03.28.\nq).Q.ld read0`:funcs.q\n1 2 5 6\n\"/ multi line func\" \"f:{\\n x+y\\n }\" \"/ single line func\" \"g:{x*y}\"\nli\n(load partitions)¶\n.Q.li[partitions]\nIn the current hdb, adds any partition(s) which are both in the list supplied and on disk. Partitions can be a list or atomic variable. For example:\nq)`:/tmp/db/2001.01.01/t/ set tt:.Q.en[`:/tmp/db]([]sym:10?`A`B`C;time:10?.z.T;price:10?10f)\nq)\\l /tmp/db\nq)`:2001.01.02/t/`:2001.01.03/t/ set\\:tt\nq)date\n,2001.01.01\nq).Q.li[2001.01.02];date\n2001.01.01 2001.01.02\nq).Q.li[2001.01.02 2001.01.03];select count i by date from t\ndate | x\n----------| --\n2001.01.01| 10\n2001.01.02| 10\n2001.01.03| 10\nSince v4.1 2024.09.20.\nlo\n(load without)¶\n.Q.lo[`:database;cd;scripts]\nWhere\ndatabase\nis a hsym or symbol atom (as per parameter to .Q.l)cd\nis a boolean flag indicating whether to cd to the database dirscripts\nis a boolean flag indicating whether to execute any scripts in the database dir\nLoad a database without changing directory and/or loading scripts in the database (since 4.1t 2023.03.01).\nq)\\cd\n\"/tmp\"\nq)key`:db/2023.02.01\n`s#,`trade\nq).Q.lo[`:db;0;0]\nq)trade\ndate sym time price\n------------------------------------\n2023.02.01 C 10:15:18.957 6.346716\n2023.02.01 B 10:15:18.958 9.672398\n2023.02.01 C 10:15:18.959 2.306385\n2023.02.01 B 10:15:18.960 9.49975\n2023.02.01 A 10:15:18.961 4.39081\nq)\\cd\n\"/tmp\"\nM\n(chunk size)¶\n.Q.M\nChunk size for dsftg\n(load-process-save).\nq)0W~.Q.M / defaults to long infinity\n1b\nMAP\n(maps partitions)¶\n.Q.MAP[]\nKeeps partitions mapped to avoid the overhead of repeated file system calls during a select\n.\n(Since V3.1.)\nFor use with partitioned HDBS, used in tandem with \\l dir\nq)\\l .\nq).Q.MAP[]\n.Q.MAP currently has the following limitations:\n-\n.Q.MAP does not work with linked columns\n-\n.Q.MAP does not work with virtual partition columns\n-\nUse of .Q.MAP with compressed files is not recommended, as the uncompressed maps will be retained in memory\nYou may need to increase the number of available file handles, and also the number of available file maps (for Linux see vm.max_map_count\n)\nSince 4.1t 2024.01.11 parallelized over tables and partitions with peach when kdb+ running with secondary threads.\nn\n(nums)¶\nnA\n(alphanums)¶\n.Q.n\n.Q.nA\nStrings: numerics (.Q.n\n) and upper-case alphabet and 
numerics (.Q.nA\n).\nq).Q.n\n\"0123456789\"\nq).Q.nA\n\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n.Q.nA\nis used for base-36 encoding and decoding.\nopt\n(command parameters)¶\n.Q.opt .z.x\nPresents command-line arguments as a dictionary, using the output of .z.x\n. Defaults can be added using .Q.def\n.\n$ q -param1 val1 -param2 val2\nq)params:.Q.opt .z.x\nq)show params\nparam1| \"val1\"\nparam2| \"val2\"\nq)params`param1\n\"val1\"\nExample of a command-line parameter with no value and a parameter with multiple values:\n$ q -param1 -param2 as asd -param3\nq).Q.opt .z.x\nparam1| ()\nparam2| (\"as\";\"asd\")\nparam3| ()\n.z.x\n(argv), .z.X\n(raw command line), .z.f\n(file), .z.q\n(quiet mode), .Q.def\n(command defaults), .Q.x\n(non-command parameters)\nP\n(segments)¶\n.Q.P\nIn segmented DBs, returns a list of the segments (i.e. the contents of par.txt\n).\nq).Q.P\n`:../segments/1`:../segments/2`:../segments/3`:../segments/4\npar\n(get expected partition location)¶\n.Q.par[dir;part;table]\nWhere\ndir\nis a directory filepathpart\nis a date\nreturns the expected location of table\n. (Sensitive to par.txt\n.)\nq).Q.par[`:.;2010.02.02;`quote]\n`:/data/taq/2010.02.02/quote\nCan assist in checking `p\nattribute is present on all partitions of a table in an HDB\nq)all{`p=attr .Q.par[`:.;x;`quote]`sym}each date\n1b\nDoes not look into the segment directories.\nThe function calculates only the path, based on the partition and the contents of par.txt\nin a round-robin fashion. It does not check the contents of the segments to see if the partition is there. See Segmented databases for details.\nPD\n(partition locations)¶\n.Q.PD\nIn partitioned DBs, a list of partition locations – conformant to .Q.PV\n– which represents the partition location for each partition.\n(In non-segmented DBs, this will be simply count[.Q.PV]#`:.\n.)\n.Q.PV!.Q.PD\ncan be used to create a dictionary of partition-to-location information.\nq).Q.PV\n2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31\nq).Q.PD\n`:../segments/1`:../segments/2`:../segments/3`:../segments/4`:../segments/3`:../segments/4`:../segments/1\nq).Q.PV!.Q.PD\n2010.05.26| :../segments/1\n2010.05.27| :../segments/2\n2010.05.28| :../segments/3\n2010.05.29| :../segments/4\n2010.05.30| :../segments/3\n2010.05.30| :../segments/4\n2010.05.31| :../segments/1\npd\n(modified partition locations)¶\n.Q.pd\nIn partitioned DBs, .Q.PD\nas modified by .Q.view\n.\npf\n(partition field)¶\n.Q.pf\nIn partitioned DBs, the partition field.\nPossible values are `date`month`year`int\n.\npn\n(partition counts)¶\n.Q.pn\nIn partitioned DBs, returns a dictionary of cached partition counts – conformant to .Q.pt\n, each conformant to .Q.pv\n– as populated by .Q.cn\n.\nCleared by .Q.view\n.\n.Q.pv!flip .Q.pn\ncan be used to create a crosstab of table-to-partition-counts once .Q.pn\nis fully populated.\nq)n:100\nq)t:([]time:.z.T+til n;sym:n?`2;num:n)\nq).Q.dpft[`:.;;`sym;`t]each 2010.01.01+til 5\n`t`t`t`t`t\nq)\\l .\nq).Q.pn\nt|\nq).Q.cn t\n100 100 100 100 100\nq).Q.pn\nt| 100 100 100 100 100\nq).Q.pv!flip .Q.pn\n| t\n----------| ---\n2010.01.01| 100\n2010.01.02| 100\n2010.01.03| 100\n2010.01.04| 100\n2010.01.05| 100\nq).Q.view 2#date\nq).Q.pn\nt|\nq).Q.cn t\n100 100\nq).Q.pn\nt| 100 100\nq).Q.pv!flip .Q.pn\n| t\n----------| ---\n2010.01.01| 100\n2010.01.02| 100\nprf0\n(code profiler)¶\n.Q.prf0 pid\nWhere pid\nis a process ID, returns a table representing a snapshot of the call stack at the time of the call in another kdb+ process pid\n, with columns\nname assigned name 
of the function\nfile path to the file containing the definition\nline line number of the definition\ncol column offset of the definition, 0-based\ntext function definition or source string\npos execution position (caret) within text\nThis process must be started from the same binary as the one running .Q.prf0\n, otherwise binary mismatch\nis signalled.\nSince 4.1t 2022.03.25, .Q.prf0 will not try to stop the process if passed a negative pid\n.\nThis should be used when a kdb+ process is already stopped under control of something other than .Q.prf0\n(for example, in a debugger or a native-code profiler).\nA negative pid\nshould not be used in a running process.\npt\n(partitioned tables)¶\n.Q.pt\nReturns a list of partitioned tables.\npv\n(modified partition values)¶\n.Q.pv\nA list of the values of the partition domain: the values corresponding to the slice directories actually found in the root.\nIn partitioned DBs, .Q.PV\nas modified by .Q.view\n.\nQ for Mortals\n§14.5.3 .Q.pv\nPV\n(partition values)¶\n.Q.PV\nIn partitioned DBs, returns a list of partition values – conformant to .Q.PD\n– which represents the partition value for each partition.\n(In a date-partitioned DB, unless the date has been modified by .Q.view\n, this is simply date.)\nq).Q.PD\n`:../segments/1`:../segments/2`:../segments/3`:../segments/4`:../segments/3`:../segments/4`:../segments/1\nq).Q.PV\n2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31\nq)date\n2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31\nq).Q.view 2010.05.28 2010.05.29 2010.05.30\nq)date\n2010.05.28 2010.05.29 2010.05.30 2010.05.30\nq).Q.PV\n2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31\nqp\n(is partitioned)¶\n.Q.qp x\nWhere x\n- is a partitioned table, returns\n1b\n- a splayed table, returns\n0b\n- anything else, returns 0\nq)\\\nB\n+`time`sym`price`size!`B\nC\n+`sym`name!`:C/\n\\\nq).Q.qp B\n1b\nq).Q.qp select from B\n0\nq).Q.qp C\n0b\nqt\n(is table)¶\n.Q.qt x\nWhere x\nis a table, returns 1b\n, else 0b\n.\nres\n(keywords)¶\n.Q.res\nReturns the control words and keywords as a symbol vector. key `.q\nreturns the functions defined to extend k to the q language. Hence to get the full list of reserved words for the current version:\nq).Q.res,key`.q\n`abs`acos`asin`atan`avg`bin`binr`cor`cos`cov`delete`dev`div`do`enlist`exec`ex..\n.Q.id\n(sanitize)\ns\n(plain text)¶\n.Q.s x\nReturns x\nformatted to plain text, as used by the console. 
Obeys console width and height set by \\c\n.\nq).Q.s ([h:1 2 3] m: 4 5 6)\n\"h| m\\n-| -\\n1| 4\\n2| 5\\n3| 6\\n\"\nOccasionally useful for undoing Studio for kdb+ tabular formatting.\ns1\n(string representation)¶\n.Q.s1 x\nReturns a string representation of x\n.\nsbt\n(string backtrace)¶\n.Q.sbt x\nWhere x\nis a backtrace object returns it as a string formatted for display.\nSince V3.5 2017.03.15.\nsha1\n(SHA-1 encode)¶\n.Q.sha1 x\nWhere x\nis a string, returns as a bytestream its SHA-1 hash.\nq).Q.sha1\"Hello World!\"\n0x2ef7bde608ce5404e97d5f042f95f89f1c232871\nSince V3.6 2018.05.18.\nt\n(type letters)¶\n.Q.t\nList of chars indexed by datatype numbers.\nq).Q.t\n\" bg xhijefcspmdznuvts\"\nq).Q.t?\"j\" / longs have datatype 7\n7\ntrp\n(extend trap at)¶\n.Q.trp[f;x;g]\nWhere\nf\nis a unary functionx\nis its argumentg\nis a binary function\nextends Trap At (@[f;x;g]\n) to collect backtrace: g\ngets called with arguments:\n- the error string\n- the backtrace object\nYou can format the backtrace object with .Q.sbt\n.\nq)f:{`hello+x}\nq) / print the formatted backtrace and error string to stderr\nq).Q.trp[f;2;{2\"error: \",x,\"\\nbacktrace:\\n\",.Q.sbt y;-1}]\nerror: type\nbacktrace:\n[2] f:{`hello+x}\n^\n[1] (.Q.trp)\n[0] .Q.trp[f;2;{2\"error: \",x,\"\\nbacktrace:\\n\",.Q.sbt y;-1}]\n^\n-1\nq)\n.Q.trp\ncan be used for remote debugging.\nq)h:hopen`::5001 / f is defined on the remote\nq)h\"f `a\"\n'type / q's IPC protocol can only get the error string back\n[0] h\"f `a\"\n^\nq) / a made up protocol: (0;result) or (1;backtrace string)\nq)h\".z.pg:{.Q.trp[(0;)@value@;x;{(1;.Q.sbt y)}]}\"\nq)h\"f 3\"\n0 / result\n,9 9 9\nq)h\"f `a\"\n1 / failure\n\" [4] f@:{x*y}\\n ^\\n [3..\nq)1@(h\"f `a\")1; / output the backtrace string to stdout\n[4] f@:{x*y}\n^\n[3] f:{{x*y}[x;3#x]}\n^\n[2] f `a\n^\n[1] (.Q.trp)\n[0] .z.pg:{.Q.trp[(0;)@enlist value@;x;{(1;.Q.sbt y)}]}\n^\nSince V3.5 2017.03.15.\ntrpd\n(extend trap)¶\n.Q.trpd[f;x;g]\nWhere\nf\nis a function of rank-\nx\nis an atom or list of count with items in the domains of f -\ng\nis a binary function\nextends Trap (.[f;x;g]\n) to collect backtrace: g\nis called with arguments:\n- the error string\n- the backtrace object\nYou can format the backtrace object with .Q.sbt\n.\nq).Q.trpd[{x+y};(1;2);{2\"error: \",x,\"\\nbacktrace:\\n\",.Q.sbt y;-1}]\n3\nq).Q.trpd[{x+y};(1;`2);{2\"error: \",x,\"\\nbacktrace:\\n\",.Q.sbt y;-1}]\nerror: type\nbacktrace:\n[2] {x+y}\n^\n[1] (.Q.trpd)\n[0] .Q.trpd[{x+y};(1;`2);{2\"error: \",x,\"\\nbacktrace:\\n\",.Q.sbt y;-1}]\n^\n-1\nUse .Q.trp as a simpler form of .Q.trpd, for unary values.\nSince 4.1 2024.03.12.\nts\n(time and space)¶\nApply, with time and space\n.Q.ts[x;y]\nWhere x\nand y\nare valid arguments to Apply returns a 2-item list:\n- time and space as\n\\ts\nwould - the result of\n.[x;y]\nq)\\ts .Q.hg `:http://www.google.com\n148 131760\nq).Q.ts[.Q.hg;enlist`:http://www.google.com]\n148 131760\n\"<!doctype html><html itemscope=\\\"\\\" itemtype=\\\"http://schema.org/WebPa\nq).Q.ts[+;2 3]\n0 80\n5\nSince V3.6 2018.05.18.\nty\n(type)¶\n.Q.ty x\nWhere x\nis a list, returns the type of x\nas a character code:\n- lower case for a vector\n- upper case for a list of uniform type\n- else blank\nq)t:([]a:3 4 5;b:\"abc\";c:(3;\"xy\";`ab);d:3 2#3 4 5;e:(\"abc\";\"de\";\"fg\"))\nq)t\na b c d e\n------------------\n3 a 3 3 4 \"abc\"\n4 b \"xy\" 5 3 \"de\"\n5 c `ab 4 5 \"fg\"\nq).Q.ty each t`a`b`c`d`e\n\"jc JC\"\n.Q.ty\nis a helper function for meta\nIf the argument is a table column, returns upper case for mappable/uniform lists of 
vectors.\nu\n(date based)¶\n.Q.u\n- In segmented DBs, returns\n1b\nif each partition is uniquely found in one segment. (E.g., true if segmenting is date-based, false if name-based.) - In partitioned DBs, returns\n1b\n.\nV\n(table to dict)¶\n.Q.V x\nWhere x\nis\n- a table, returns a dictionary of its column values.\n- a partitioned table, returns only the last partition (N.B. the partition field values themselves are not restricted to the last partition but include the whole range).\nv\n(value)¶\n.Q.v x\nWhere x\nis\n- a filepath, returns the splayed table stored at\nx\n- any other symbol, returns the global named\nx\n- anything else, returns\nx\nview\n(subview)¶\n.Q.view x\nWhere x\nis a list of partition values that serves as a filter for all queries against any partitioned table in the database, x\nis added as a constraint in the first sub-phrase of the where-clause of every query.\n.Q.view\nis handy when you are executing queries against partitioned or segmented tables. Recall that multiple tables can share the partitioning. Q.view\ncan guard against runaway queries that ask for all historical data.\n.Q.view 2#date\nSince 4.1t 2022.03.25,4.0 2023.05.26 this would signal an invalid partition filter\nerror if partition value(s) resulted in no matches with .Q.PV.\n.Q.view\n, also used when loading an hdb, now utilizes threads to load .d files (column names) since 4.1t 2023.04.17.\nQ for Mortals\n§14.5.8 Q.view\nvp\n(missing partitions)¶\n.Q.vp\nIn partitioned DBs, returns a dictionary of table schemas for tables with missing partitions, as populated by .Q.bv\n.\n(Since V3.0 2012.01.26.)\nq)n:100\nq)t:([]time:.z.T+til n;sym:n?`2;num:n)\nq).Q.dpft[`:.;;`sym;`t]each 2010.01.01+til 5\n`t`t`t`t`t\nq)tt:t\nq).Q.dpft[`:.;;`sym;`tt]last 2010.01.01+til 5\n`tt\nq)\\l .\nq)tt\n+`sym`time`num!`tt\nq)@[get;\"select from tt\";-2@]; / error\n./2010.01.01/tt/sym: No such file or directory\nq).Q.bv[]\nq).Q.vp\ntt| +`date`sym`time`num!(`date$();`sym$();`time$();`long$())\nq)@[get;\"select from tt\";-2@]; / no error\nw\n(memory stats)¶\n.Q.w[]\nReturns the memory stats from \\w\ninto a more readable dictionary. Refer to \\w\nfor an explanation of each statistic.\nq).Q.w[]\nused| 168304\nheap| 67108864\npeak| 67108864\nwmax| 0\nmmap| 0\nmphy| 8589934592\nsyms| 577\nsymw| 25436\n.Q.gc\n(garbage collect)\nCommand-line parameter -w\n(workspace memory limit)\nSystem command \\w\n(memory stats and workspace memory limit)\nXf\n(create file)¶\nDeprecated\nDeprecated since 4.1t 2022.03.25. Using resulting files could return file format errors since 3.6.\n.Q.Xf[x;y]\nWhere\nx\nis a mapped nested datatype as either an upper-case char atom, or as a short symbol (e.g.`char\n)y\nis a filepath\ncreates an empty nested-vector file at y\n.\nq).Q.Xf[\"C\";`:emptyNestedCharVector];\nq)type get`:emptyNestedCharVector\n87h\nx\n(non-command parameters)¶\n.Q.x\nSet by .Q.opt\n: a list of non-command parameters from the command line, where command parameters are prefixed by -\n.\n$ q taq.k path/to/source path/to/destn\nq)cla:.Q.opt .z.X /command-line arguments\nq).Q.x\n\"/Users/me/q/m64/q\"\n\"path/to/source\"\n\"path/to/destn\"\n.z.x\n(argv), .z.X\n(raw command line), .z.f\n(file), .z.q\n(quiet mode), .Q.opt\n(command parameters), .Q.def\n(command defaults)"}}},{"rowIdx":72,"cells":{"text":{"kind":"string","value":"Common design principles for kdb+ gateways¶\nIn the vast majority of kdb+ systems, data is stored across several processes. 
These setups can range from a single real-time and historic database on the same server to multi-site architectures where data of various forms is stored in hundreds of different processes. In either scenario there is likely to be the same requirement to access data across processes. This is typically achieved using a ‘gateway’ process.\nThe primary objective of a gateway is to act as a single interface point and separate the end user from the configuration of underlying databases or ‘services’. With this, users do not need to know where data is stored or make multiple requests to retrieve it. An optimal solution serves to assist the user without imposing any unnecessary constraints or loss in performance. While the implementation of any particular gateway is likely to differ, dependent on specific system requirements, there are a number of shared technical challenges and solutions.\nThis paper aims to outline the common design options and examine the advantages and disadvantages associated with each method. In doing so, it seeks to offer a best-practice guide on how to implement an efficient and scalable kdb+ gateway framework. Where appropriate, sample code extracts are included to illustrate techniques.\nGateway design¶\nFigure 1 outlines the general principle of how a gateway acts as a single point of contact for a client by collecting data from several underlying services, combining data sets and if necessary performing an aggregation operation before returning the result to the client.\nFigure 1: Gateway schematic\nWhilst the above diagram covers the principle of all gateways, the specific design of a gateway can vary in a number of ways according to expected use cases. The implementation of a gateway is largely determined by the following factors.\n- Number of clients or users\n- Number of services and sites\n- Requirement of data aggregation\n- Level of redundancy and failover\nIn addition the extent to which functionality is exposed to the end user can be controlled using one of the following options. The first approach serves to act as a standard kdb+ process to the user, offering ad-hoc qSQL query access. The specific implementation of this approach is outside the scope of this paper. The second and more common approach offers a number of stored procedures for specific data retrieval scenarios. This more structured API form of gateway is generally easier to implement and arguably offers a more robust solution for production kdb+ applications than supporting free form qSQL-type requests.\nLet’s consider a basic example where a user makes a request for trade data for a single stock across yesterday and today. The task of the gateway can be broken down into the following steps.\n- Check user entitlements and data-access permissions\n- Provide access to stored procedures\n- Gain access to data in the required services\n- Provide best possible service and query performance\nAs the gateway serves as the sole client interface it is the logical point for entitlement validation. Permissioning is commonly broken into two components; user level access using the .z.pw\nfunction, and execution access using either .z.pg\nor .z.ps\n. These functions can be customized to control access at several levels including by symbol, table, service or region.\nAny user requests that fail entitlement checks should be returned to the client with an appropriate message without proceeding any further. 
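By way of illustration only – the permission table, handler bodies, and the assumption that client requests arrive as (function;arguments) lists are ours, not the paper's – the two levels of checking might be sketched as:

/ hypothetical sketch of gateway entitlement checks
users:`alice`bob                                   / users allowed to connect
perms:`alice`bob!(`getTradeData`getQuoteData;enlist `getTradeData)
.z.pw:{[user;pswd] user in users}                  / connection-level check
.z.pg:{[req]                                       / execution-level check (.z.ps for async calls)
  $[first[req] in perms .z.u; value req; '"not entitled"] }

A failed check signals an error straight back to the caller, so the request never reaches the underlying services.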
At this point it is worth noting that gateways are primarily used for data retrieval and not for applying updates to data.\nOnce the user’s request has been verified, the gateway needs to retrieve the data from the real-time and historic services that store the data for the particular symbol requested. The most basic gateway would create and maintain connections to these services on startup. The gateway would then send the query to these services using a function similar to the sample outlined below, in which we make use of an access function to correctly order the constraint clause of the query.\n/ access function in RDB and HDB\n/ tlb: table to query; sd:start date; ed:end date; ids:list of ids or symbols\nselectFunc:{[tbl;sd;ed;ids]\n$[`date in cols tbl;\nselect from tbl where date within (sd;ed),sym in ids;\n[res:$[.z.D within (sd;ed); select from tbl where sym in ids;0#value tbl];\n`date xcols update date:.z.D from res]] }\n/ stored procedure in gateway.\n/ calls access function in RDB/HDB and joins results\n/ sd:start date; ed:end date; ids:list of ids or symbols\ngetTradeData:{[sd;ed;ids]\nhdb:hdbHandle(`selectFunc;`trade;sd;ed;ids);\nrdb:rdbHandle(`selectFunc;`trade;sd;ed;ids);\nhdb,rdb }\n/ client calls stored procedure in gateway\ngatewayHandle \"getTradeData[.z.D-1;.z.D;`ABC.X]\"\nWhilst the above may satisfy a single user setup with a small number of underlying services, it quickly becomes inefficient as the number of processes grows and we look to handle the processing of concurrent requests. As this example uses synchronous messaging between the client and the gateway, the gateway is effectively blocked from receiving any additional user requests until the first completes. Also as the requests made by the gateway to the services are synchronous they can only be made in succession, rather than in parallel.\nThese issues obviously reduce the performance in terms of time taken to complete user requests. By using asynchronous communication it is possible for the gateway to process a second request which may only require a RDB whilst the gateway is idle awaiting the response from a first query which may have only required a HDB.\nExtending this principle, if we then wanted to be able to process two simultaneous requests requiring the same service, we could start additional services and introduce load balancing between them. These two principles of load balancing and IPC messaging are considered further in the following sections.\nLoad balancing¶\nAs we scale the number of user requests we will need to scale both the number of gateways, to handle the processing of requests and responses, and the number of underlying services to retrieve the required data. The number of gateways required is likely to be determined by the scope of the stored procedures and the intensity of any joining or aggregation of data being performed. In practice the number of gateways required is usually small compared to the number of data services.\nIn the initial example the configuration for all services was loaded into the gateway; however in systems with multiple processes the load balancing of services is typically achieved using a separate standalone process. There are a couple of different ways in which this process can function.\nAs a pass-through¶\nIn this setup the gateway sends each individual service request to the load-balancer process, which distributes them to the least busy service in its pool of resources and returns the result to the gateway when it is received from the service. 
Figure 2 shows this flow between processes.

Figure 2: Pass-through load balancer schematic

The mserve solution written by Arthur Whitney and Simon Garland provides a sample implementation of how a load-balancing process can allocate the query to the least busy service and return the result to the client, which in this case is the gateway.
Knowledge Base: A load-balancing kdb+ server
In this script the load balancer process determines which service to issue the request to according to which has the minimum number of outstanding requests queued at the time the request is received.
Nathan Perrem has also provided an extended version of this solution, which queues outstanding requests in the load-balancer process if all services are busy and processes the queue as services complete their request. This script ensures requests are processed in the order in which they are received and provides support for scenarios where services crash before completing a request. This solution makes it easier to track the status of all incoming queries as well as allowing clients to provide callback functions for results of queries.

As a connection manager¶
In this configuration the gateway process makes a request to the load balancer for connection details of one or more services required to process the request. When the gateway receives the connection details it then sends the request directly to the service. In Figure 3, the numbers show the sequence of communications between each of the processes, before the result is returned to the client.

Figure 3: Connection manager load balancer schematic

This process can act such that it immediately returns the connection details for a service on a simple round-robin basis. Alternatively it can only return connection details to the gateway whenever a particular service is free to receive a query. The key difference with the latter method is that the load-balancing process can be used for thread management. In the basic round-robin approach the load balancer is distributing details based on a simple count of how many requests have been allocated to a particular service within a period of time.
The communication between the gateway and the load balancer is only on the initial request for connection details for the service. There is no requirement for the gateway to notify the load balancer when it has received results from the service. A sample extract of the code for this method is shown as follows.

/ table of services configuration in load balancer
t:([]
  service:`rdb`rdb`hdb`hdb;
  addr:hsym@/:`$"localhost:",/:string 5000+til 4;
  handle:4#0n;
  counter:4#0)

/ function in load balancer
requestForService:{[serv]
  det:select from t where service=serv,not null handle;
  res:(det(sum det`counter)mod count det)`addr;
  update counter:counter+1 from `t where addr=res;
  res }

/ gateway request
loadBalancerHandle "requestForService[`rdb]"

A disadvantage with this method is that an equal distribution of queries of different duration could result in one service being idle having processed its requests, whilst another is still queued.
A more intelligent solution involves both communication with the load balancer for the initial connection request and also whenever the service completes a request. The benefit of this is an efficient use of services where there will not be requests queued whenever a suitable server is idle.
The additional requirement with this approach is that if all services are currently busy, any subsequent requests need to be maintained in a queue and processed whenever a service becomes available again. The following outlines the functions and callbacks required for this approach.

/ service table and queue in load balancer
t:([]
  service:`rdb`rdb`hdb`hdb;
  addr:hsym@/:`$"localhost:",/:string 5000+til 4;
  handle:4#0n;
  inUse:4#0b)
serviceQueue:()!()

/ functions in load balancer
requestForService:{[serv]
  res:exec first addr from t where service=serv,not inUse;
  $[null res;
    addRequestToQueue[.z.w;serv];
    [update inUse:1b from `t where addr=res;
     neg[.z.w](`receiveService;res)]]; }
addRequestToQueue:{[hdl;serv]
  serviceQueue[serv]::serviceQueue[serv],hdl; }
returnOfService:{[ad] update inUse:0b from `t where addr=ad; }

/ gateway callback and requests
receiveService:{[addr]-1 "Received Service:",string[addr]; }
loadBalancerHandle(`requestForService;`rdb)
loadBalancerHandle(`returnOfService;`:localhost:5000)

Whilst both the pass-through and connection-manager methods can incorporate similar logic for service allocation, the pass-through approach has the disadvantage that it adds an additional and unnecessary IPC hop, with the data being returned via the load balancer, whereas with the latter method the data is sent directly from the service to the gateway. For a system with a smaller number of user requests the simpler round-robin approach may be sufficient, however in general the additional benefit of thread management from using the last method offers a more efficient use of services.

Synchronous vs asynchronous¶
With each of the communications, outlined in Figure 3, between the client and the gateway (1,8), gateway and load balancer (2,3), and gateway and service (4-7), we have the option of making synchronous or asynchronous requests.
In addition to the standard asynchronous request type we can also make use of blocking-asynchronous requests where the client sends a request asynchronously and blocks until it receives a callback from the server. Which method is used is largely driven by the communication between the client and the gateway.
If the client makes a synchronous request to the gateway, both the subsequent requests to the load balancer and the service are required to be synchronous or blocking-asynchronous requests. In this case each gateway is only able to process a single user request at a time. As noted earlier this is an inefficient design as the gateway will largely be idle awaiting responses from the load balancer or service.
A much more efficient design is where communication between the client and the gateway uses either asynchronous or blocking-asynchronous messaging. With this arrangement the gateway is able to make multiple asynchronous requests to the load balancer or services without having to wait on a response from either. To support multiple concurrent requests the gateway needs to track which processes have been sent a request related to the original client request. This can be easily achieved by tagging each user request with an identifier which is then passed between processes and used when handling callbacks. With the asynchronous method the gateway maintains the state of each request in terms of caching results returned from services and any outstanding service requests.
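One possible shape for this bookkeeping – the table, column and function names here are hypothetical, and error handling is omitted – is a keyed table of in-flight requests that is updated as each asynchronous callback arrives:

/ illustrative sketch: gateway-side tracking of in-flight client requests
reqs:([id:`long$()] clnt:`int$(); pending:`long$(); results:())
newRequest:{[clientHandle;serviceHandles;qry]
  rid:1+0^exec last id from reqs;                         / tag the request
  `reqs upsert (rid;clientHandle;count serviceHandles;());
  {[h;rid;qry]neg[h](`execRequest;rid;qry)}[;rid;qry] each serviceHandles; }
/ called asynchronously by each service as it completes
svcCallback:{[rid;res]
  `reqs upsert (rid;reqs[rid;`clnt];-1+reqs[rid;`pending];reqs[rid;`results],enlist res);
  if[0=reqs[rid;`pending];
    neg[reqs[rid;`clnt]](`clientCallback;raze reqs[rid;`results]);  / return joined result
    delete from `reqs where id=rid]; }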
When all results are returned and processed the gateway then invokes a callback in the client with the result set.\nData transport and aggregation¶\nAs a general principle it is more efficient to perform aggregation at the service level instead of pulling large datasets into the gateway process. By doing so we can take full advantage of the map-reduce method built in to kdb+.\nHowever there are use cases which may require gateway-level aggregation, for example the correlation of two datasets. In these cases the location of processes is important and can have a significant effect on performance. By positioning the gateway as close as possible to the underlying sources of data, we can reduce any latency caused by unnecessary data transport over IPC.\nIn practice this means it is preferable to have gateways located on the same server as the data services, which perform the aggregation locally and return the much smaller dataset to the remote client. Expanding this principle to a system where there may be multiple distinct zones, for a request requiring data from multiple services in two different zones, it may be quicker to send two separate requests to a gateway in each zone rather than one single request to a single gateway. This approach has the additional overhead of requiring the client to maintain connections to multiple gateways and to know how the data is separated – the very same problem the use of gateways seeks to avoid.\nDepending on the application and the nature of requests, it may be beneficial to use a tiered gateway setup. In this configuration a client would make a single connection to a primary-level gateway, which would break up the request into sub-requests and distribute to their relevant zones.\nAn example could be the calculation of correlation between two pairs of stocks, were the data for the first pair is split across different HDBs in one zone and the data for the second pair is split between two HDBs in another zone. In this example, each lower level gateway would process a sub-request for the single pair that is stored in that zone. By performing the aggregation close to the data source, the amount of data transport required between zones is reduced and the result can be joined in the primary gateway and returned to the client.\nFigure 4: Tiered-gateway schematic\nResilience¶\nTo offer the most reliable service to clients it is important to consider the potential for failure within any application. In the context of gateways, failure generally relates to one of the following issues.\nService failure¶\nService failure can relate to any reason which prevents a service from being available or functional. In this case we need to have a similar service available which can process the request.\nFailover can either be incorporated using a hot-hot set up where duplicate processes and data are used on an equal basis, or where redundant processes are used as a secondary alternative.\nFor the majority of systems where failover is provided by hardware of the same specification, the hot-hot setup provides the more efficient use of resources. As the load balancer process acts as a single point of failure between the gateway and service level, each gateway should be configured to use an additional failover load balancer process in the event the primary side is not available. 
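As a rough illustration (the addresses and function names below are invented, not taken from the paper), the gateway might hold handles to both load balancers and simply use the first one that is alive:

/ illustrative sketch: fail over between primary and backup load balancers
lbAddrs:`:primaryhost:6000`:backuphost:6000
lbHandles:@[hopen;;0Ni] each lbAddrs            / 0Ni marks a process we could not reach
requestService:{[serv]
  h:first lbHandles where not null lbHandles;   / first live load balancer
  if[null h;'"no load balancer available"];
  neg[h](`requestForService;serv); }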
To handle failure of any individual data service each load balancer process should have knowledge of a number of similar processes.\nIncorporating failover at each level also makes it possible to create maintenance windows whilst providing a continuous client service.\nDisconnections¶\nIn addition to service failure which may result in a sustained outage of a particular process or server, there can also be shorter-term disconnect events caused by network problems. This can occur between any of the three connections we looked at earlier, client to gateway, gateway to load balancer and gateway to service. While client-to-gateway disconnects will require failure logic to be implemented on the client side, disconnects between the gateway and the other processes should be handled seamlessly to the client.\nBy using the .z.pc\nhandler, the gateway can recognize when processes with outstanding requests disconnect before returning a result. In these scenarios the gateway can reissue the request to an equivalent process using the failover mentioned above.\nCode error¶\nWhilst every effort can be made to prevent code errors a robust gateway framework should also be designed to handle unexpected errors at each point.\nCode errors may result in an explicit error being reached, in which case protected evaluation of callbacks between processes should be used so that any error can be returned through the process flow to the client. It is generally safer to handle and return errors than reissue the same request to multiple equivalent services which are likely to return the same error. Some code errors may result in no explicit error being reached, but a failure to return a response to a process which expects it.\nThis point raises the issue of client expectation and whether it may be desirable to terminate requests at the gateway level after a period of time.\nService expectation¶\nFor systems where there may be legitimate requests that take an extended period of time at the service level, compared to an average request, it would obviously not be desirable to terminate the request, particularly when it would not be possible for the gateway to terminate the request at the service level.\nIn these cases it may be preferable to use the query timeout -T\nparameter selectively at the service level to prevent any individual requests impacting the application. In other scenarios were the quantity of requests may be too large, resulting in long queuing in the load balancer process, it can be desirable to reject the request for a service and return an error to the client. This method can provide stability where it isn’t possible for the underlying services to process the requests at the rate at which they are being made.\nData caching¶\nIn general gateways are used as a means through which underlying data servers are accessed and the gateway isn’t responsible for storing any data itself. However, there are two scenarios in which it may be efficient for it to do so. 
Depending on the nature of the application and the client requests, it may be efficient for the gateway to store aggregated responses from a number of underlying services.
In use cases where cached data won’t become ‘stale’ and where it is likely that additional users will make similar requests, there can be benefit in storing the response rather than going to the service level, reading the same data and performing the aggregation, on each request.
What data is cached in the gateway can either be determined by configuration and calculated on startup or driven dynamically by user requests for a particular type of data. It is likely that any such data would come from a HDB as opposed to a RDB where the data is constantly updating. The additional memory overhead in storing the response means this method is only appropriate for highly-aggregated data which is time-consuming to calculate from a number of different sources. For most cases where clients are frequently requesting the same aggregated data it is preferable to have this calculated in a separate service-level process which is accessed by the gateway.
The second and more common case for storing data in the gateway process is with reference data. Depending on the extent of the stored procedures available, this relatively static data can either be applied to incoming requests or to the result set. Storage of reference data at the gateway level removes the need for the same data to be stored by multiple processes at the service level as well as being used to determine which underlying services are required for a particular user request.

Conclusion¶
As outlined in the initial overview, the implementation of any particular gateway framework is likely to be different and it is important to consider all potential requirements of an application in its design. In this paper we have addressed the most common technical challenges associated with gateways and looked at the reasoning for selecting various design options. As applications can evolve over time it is also beneficial to maintain usage and performance metrics which can be used to verify framework suitability over time or identify when any changes in design may be required. In summary:
- Gateway frameworks should provide a means to load-balance data processes and achieve an efficient use of resources. The use of a separate process is an effective way of centralizing configuration and making it straightforward to scale the number of data services.
- Gateways using asynchronous messaging allow simultaneous execution of multiple requests with no unnecessary latency caused by blocking.
- The location of gateway processes is important to minimize data transport, and use of a tiered gateway arrangement can provide an optimal solution with multi-zoned applications.
- Failover procedures for process failures and disconnections are required to create a manageable environment which can provide a continuous and reliable service to clients.
All code included is using kdb+ 3.0 (2012.11.12).

Author¶
Michael McClintock has worked as a consultant on a range of kdb+ applications for hedge funds and leading investment banks. Based in New York, Michael has designed and implemented data-capture and analytics platforms across a number of different asset classes.

Linear programming¶
Linear Programming is a large topic, of which this article reviews just a few applications.
More articles on it would be very welcome: please contact docs@kx.com.\nIverson Notation and linear algebra\nQ is a descendant of the notation devised at Harvard by the Turing Award winner, mathematician Ken Iverson, when he worked with Howard Aiken and Nobel Prize winner Wassily Leontief on the computation of economic input-output tables. At Harvard, Ken Iverson and fellow Turing Award winner Fred Brooks gave the world’s first course in what was then called ‘data processing’.\nLike other descendants of Iverson Notation (e.g. A+, APL, J), q inherits compact and powerful expression of linear algebra.\nQ Math Library: zholos/qml\nProblem¶\nGiven a series of nodes and distances, find the minimum path from each node to get to each other node.\nSolution¶\nEdsger W. Dijkstra published an optimized solution in 1959 that calculated cumulative minimums. A simple Linear Algebra approach entails producing a ‘path connection matrix’ (square matrix with nodes down rows and across columns) showing the distances, which is typically symmetric. Inner product is used in repeated iterations to enhance the initial matrix to include paths possible through 1 hop (through 1 intermediate node), 2 hops and so forth by repeated calls. The optimal solution (all paths) is found by iterating until no further changes are noted in the matrix (called transitive closure).\nExample¶\nHere is a simple case for just 6 nodes and the distances between connected nodes.\nq)node6:`a`b`c`d`e`f\nq)bgn:`a`a`a`b`b`b`b`d`d`e`e`f`f`f\nq)end:`b`d`c`a`d`e`f`a`e`d`f`b`c`e\nq)far:30 40 80 21 25 16 23 12 30 23 25 17 18 22\nq)show dist6:flip `src`dst`dist!(bgn;end;far)\nsrc dst dist\n------------\na b 30\na d 40\na c 80\nb a 21\nb d 25\nb e 16\nb f 23\nd a 12\nd e 30\ne d 23\ne f 25\nf b 17\nf c 18\nf e 22\nFirst, transform the above table into a connectivity matrix of path lengths.\nSymmetry\nIn this example a->b can differ from b->a, which is more general than the problem requires, but you could make the matrix symmetric for real distances.\nFor ‘no connection’ we use infinity, so the inner product of cumulative minimums works properly over the iterations.\nq)cm[node6;dist6;`inf]\n0 30 80 40 0w 0w\n21 0 0w 25 16 23\n0w 0w 0 0w 0w 0w\n12 0w 0w 0 30 0w\n0w 0w 0w 23 0 25\n0w 17 18 0w 22 0\ncm\nis a simple function to produce the connectivity matrix.\ncm\ncreates a connectivity matrix from nodes and a distance table.- Result is a square float matrix where a cell contains distance to travel between nodes.\n- An unreachable node is marked with the infinity value for minimum path distance. (Or 0 for credit matrix – see below).\ncm:{[n;d;nopath]\nnn:count n; / number of nodes\nres:(2#nn)#(0 0w)`zero`inf?nopath; / default whole matrix to nopath\nip:flip n?/:d`src`dst; / index pairs\nres:./[res;ip;:;`float$d`dist]; / set reachable index pairs\n./[res;til[nn],'til[nn];:;0f] / zero on diagonal to exclude a node with itself\n}\nAssignment with a scattered index\nThe last two lines of cm\nboth use ./\nfor assignment with a scattered index. The second argument is a list of index pairs – co-ordinates in res\n. The fourth argument is a corresponding list of values. 
The third argument is the assignment function.\nOver for how the iterator /\nspecifies the iteration here.\ntview\nadds row and column labels.\ntview:{[mat]\n$[(`$nodes:\"node\",string[count mat])in key `.;\nnodes:value nodes;\nnodes:`$string til count mat];\n((1,1+count nodes)#`,nodes),((count[nodes],1)#nodes),'mat\n}\nTo improve the display of the connection matrix:\nq)tview cm[node6;dist6;`inf]\na b c d e f\n`a 0f 30f 80f 40f 0w 0w\n`b 21f 0f 0w 25f 16f 23f\n`c 0w 0w 0f 0w 0w 0w\n`d 12f 0w 0w 0f 30f 0w\n`e 0w 0w 0w 23f 0f 25f\n`f 0w 17f 18f 0w 22f 0f\nIn the above result note that [a;e]\nis not directly accessible.\nSo we use a bridge function to jump through one intermediate node and see new paths.\nq)tview bridge cm[node6;dist6;`inf]\na b c d e f\n`a 0f 30f 80f 40f 46f 53f\n`b 21f 0f 41f 25f 16f 23f\n`c 0w 0w 0f 0w 0w 0w\n`d 12f 42f 92f 0f 30f 55f\n`e 35f 42f 43f 23f 0f 25f\n`f 38f 17f 18f 42f 22f 0f\nWe now see a path [a;e]\nof 46, [a->b(30), then b->e(16)].\nAfter 1 hop we also see path [d;c]\nof 92, [d->a(12), then a->c(80)].\nbridge\napplies connectivity over each hop by using a Minimum.Sum inner product cumulatively:\nq)bridge\n{x & x('[min;+])\\: x}\nSo for 2 hops:\nq)tview bridge bridge cm[node6;dist6;`inf]\na b c d e f\n`a 0f 30f 71f 40f 46f 53f\n`b 21f 0f 41f 25f 16f 23f\n`c 0w 0w 0f 0w 0w 0w\n`d 12f 42f 73f 0f 30f 55f\n`e 35f 42f 43f 23f 0f 25f\n`f 38f 17f 18f 42f 22f 0f\nNote with 2 hops we improve [d;c]\nto 73 [d->e(30), then e->f(25), then f->c(18)]:\nFor ‘transitive closure’ iterate until no further improvement (i.e. optimal path lengths reached)\nq)tview (bridge/) cm[node6;dist6;`inf]\na b c d e f\n`a 0f 30f 71f 40f 46f 53f\n`b 21f 0f 41f 25f 16f 23f\n`c 0w 0w 0f 0w 0w 0w\n`d 12f 42f 73f 0f 30f 55f\n`e 35f 42f 43f 23f 0f 25f\n`f 38f 17f 18f 42f 22f 0f\nA larger example was presented in k4 listbox publicly available here:\nq)\\curl -s https://us-east.manta.joyent.com/edgemesh/public/net_dist -o dist\nq)\\l dist\n`dist\nq)dist\nsrc dst dist\n------------\n2 17 139\n2 34 131\n3 174 150\n4 226 171\n4 567 13\n7 786 130\n9 174 112\n..\nq)node:0N!distinct raze dist`src`dst\n2 3 4 7 9 12 13 14 16 17 18 20 21 22 24 26 27 29 31 34 35 37 41 42 43 44 45 4..\nRepeating the above process with this node\nand dist\nfor the optimal solution, also showing calculation time and space (using \\ts\n):\nq)\\ts opt:(bridge/) cm[node;dist;`inf]\n92 1706512\nCheck node length from node 2 to node 174.\nq)node?2 174 / Find row, col of node in optimal matrix\n0 72\nq)opt[0;72] / Cell [0;74] is path length to go from node 2 to node 174\n398f\nq)opt . node?2 174 / Or in one simple step using . 
index notation\n398f\nThis does not get the hops, although the hops could be calculated by ‘capturing’ the intermediate results in the optimal case.\nTo do this use bridge\\\ninstead of bridge/\n, then count changes between iterations, or just index in to see the path length converge …\nq)count iters:(bridge\\) cm[node;dist;`inf] / Calculate all iterations\n5\nq)/ It took 5 iterations to find the optimal paths\nNow we can see how the path length changes during the iterations: here we see it “first converges” to 398 after 1 hop for node [2;174].\nq)iters .\\: node?2 174 / Index into each iteration to see iterative path improvement\n0w 398 398 398 398\nAnother random path choice for node[2;210] does not converge until after 3 hops, also showing iterative improvement:\nq)iters .\\: node?2 210 / Path improvement for node [2;210]\n0w 0w 638 555 555\nRelated applications of this approach¶\nThe principle used can be generalized to different inner-product solutions for related problems.\nThe solution above is an instance of generalized inner-product of 2 functions f.g\nand was an example Ken Iverson often used to demonstrate how Linear Algebra can be applied to real-world problems.\nThe solution may be considered ‘expensive’ on memory and CPU, as it calculates all possible paths, but that is becoming less of an issue.\nThe bridge\nfunction above uses the inner product of Minimum.Sum\n(&\nand +\nin q), but variants can be used in similar, related problem domains.\nHere is a summary of three related use cases, starting with the above minimum-path solution.\nMinimum distances¶\nFor minimum distances in a path table (example above), using an inner product of Minimum.Sum\n, where ‘no path’ is represented by 0w\n(float infinity) to determine minimums properly.\nThis calculates the minimum of the sums of distances between nodes at each pivot. The bridge\nfunction looks like this:\nbridge:{x & x('[min;+])\\: x}\nCounterparty credit¶\nFor a counterparty credit-matrix solution, using an inner product of Maximum.Minimum\n, where no credit is represented by 0 to determine maximums properly.\nThis calculates the maximum of the minimum credit between nodes at each pivot, the bridge\nfunction looks like this;\nbridge:{x | x('[max;&])\\: x}\nThis returns the optimal possible credit by allowing credit through intermediate counterparties. For example if A only has credit with B, but B has credit with C, then after 1 hop, A actually has credit with C through B, but capped by the credit path in the same way.\nA special note here is the simple case where the credit matrix is boolean. The ‘connectivity matrix’ is now a simple yes/no\nto determine connections e.g. 
for electrical circuits.\nEach iteration improves the connections by adding additional 1s into the matrix that are now reachable in successive hops and uses the same bridge\nalgorithm.\nMatrix multiplication¶\nFor generalized matrix multiplication, using an inner product of Sum.Times\n.\nThis calculates the sum of the product between nodes at each pivot, the bridge function looks like this;\nbridge:{x + x('[sum;*])\\: x}\nGeneralization¶\nThe inner product for the above 3 bridge\nuse cases could be further generalized as projections of a cumulative inner product function.\nq)cip:{[f;g;z] f[z;] z('[f/;g])\\: z}\nq)bridgeMS:cip[&;+;] / Minimum.Sum (minimum path)\nq)bridgeCM:cip[|;&;] / Maximum.Minimum (credit matrix)\nq)bridgeMM:cip[+;*;] / Sum.Times (matrix multiplication)\nPerformance¶\nThe version of bridge\nused above shows the Linear Algebra most clearly.\nIt can be further optimized for performance, as shown here for the first case (minimum-path problem).\nAlthough all operations are atomic, flipping the argument seems to improve cache efficiency.\nbridgef:{x + x('[sum;*])/:\\: flip x}\nThe peach\nkeyword can be used to parallelize evaluation.\n/ Parallel version (multithreaded run q -s 6)\nbridgep: {x & {min each x +\\: y}[flip x;] peach x}\nThe .Q.fc\nutility uses multi-threading where possible.\n/ .Q.fc version\nbridgefc:{x & .Q.fc[{{{min x+y}[x] each y}[;y] each x}[;flip x];x]}\nA colleague, Ryan Sparks, is presently experimenting with further (significant) performance improvements by using CUDA on a graphics coprocessor for the inner-product function bridge\n.\nThis work is evolving and looks very promising. I look forward to Ryan presenting a paper and/or presentation on his results when complete as perhaps a sequel to this article.\nScript with examples from this article\nTest results¶\nRyan Sparks reports the following test results running V3.5 2017.05.02 using 6 secondary processes:\n| function | \\ts:1000 20×20 | \\ts:100 100×100 | 1000×1000 | 2000×2000 | 4000×4000 |\n|---|---|---|---|---|---|\nbridge0 |\n178 63,168 |\n689 5,330,880 |\n6,488 4,112,433,152 |\n35,068 32,833,633,920 |\nuntested |\nbridge1 |\n296 9,456 |\n1,065 159,728 |\n2,255 12,337,200 |\n11,327 49,249,968 |\nuntested |\nbridge2 |\n207 9,008 |\n1,249 157,616 |\n6,496 12,317,152 |\n40,073 49,209,824 |\nuntested |\nbridge3 |\n171 63,136. 
|\n683 5,330,848 |\n6,292 4,112,433,168 |\n32,446 32,833,633,936 |\nuntested |\nbridge4 |\n165 6,560 |\n182 106,912 |\n425 8,225,232 |\n5,967 32,834,000 |\n48,271 131,203,536 |\nbridge5 |\n612 6,656 |\n1,823 106,624 |\n1,695 8,221,360 |\n5,112 32,826,032 |\n32,915 131,187,376 |\nbridgejp |\n556 6,704 |\n1,507 106,672 |\n1,330 8,221,360 |\n3,904 32,826,032 |\n32,402 131,187,376 |\nbridgep |\n193 6,560 |\n219 106,912 |\n429 8,225,184 |\n5,922 32,833,952 |\n53,890 131,203,488 |\nbridgef |\n201 9392 |\n778 159,664 |\n2,030 1,233,713 |\n10,625 49,249,904 |\nuntested |\nbridgef2 |\n546 6,704 |\n1,807 106,672 |\n1,701 8,221,360 |\n5,552 32,826,032 |\n31,428 131,187,376 |\nbridge0:{x & (&/) each' x+/:\\: flip x}\nbridge1:{x & x(min@+)/:\\: flip x}\nbridge2:{x & x((&/)@+)\\: x}\nbridge3:k){x&&/''x+/:\\:+x}\nbridge4:k){x&(min'(+x)+\\:)':x}\nbridge5:k){x&.Q.fc[{(min y+)'x}[+x]';x]}\nbridgejp:{x & .Q.fc[{{{min x+y}[x] each y}[;y] each x}[;flip x];x]}\nbridgep:{x & {min each x +\\: y}[flip x;] peach x}\nbridgef:{x & x('[min;+])/:\\: flip x}\nbridgef2:{x & .Q.fc[{x('[min;+])/:\\: y}[;flip x];x]}\nYour mileage may vary\nAs always, optimizations need to be tested on the hardware and data in use.\nAcknowledgements¶\nMy thanks to Nion Chang, Pierre Kovalev, Jonny Press, Ryan Sparks and Stephen Taylor for contributions to this article.\nRob Hodgkinson\nrob@marketgridsystems.com"}}},{"rowIdx":74,"cells":{"text":{"kind":"string","value":"mergemode:`part // the partbyattr writedown mode can merge data from temporary storage to the hdb in three ways:\n // 1. part - the entire partition is merged to the hdb \n // 2. col - each column in the temporary partitions are merged individually \n // 3. hybrid - partitions merged by column or entire partittion based on byte limit \n\t\t\t\t\t\t\t\t\t\t\t\t\nmergenumrows:100000 // default number of rows for merge process\nmergenumtab:`quote`trade!10000 50000 // specify number of rows per table\nmergenumbytes:500000000 // default partition bytesize for merge limit in merge process (only used when .merge.mergebybytelimit=1b) \n\ntpconnsleepintv:10 // number of seconds between attempts to connect to the tp\nupd:insert // value of the upd function\nreplay:1b // replay the tickerplant log file\nschema:1b // retrieve schema from tickerplant\nsettimer:0D00:00:10 // timer to check if data needs written to disk\npartitiontype:`date // set type of partition (defaults to `date, can be `date, `month or `year)\ngetpartition:{@[value;`.wdb.currentpartition;(`date^partitiontype)$.proc.cd[]]} // function to determine the partition value\nreloadorder:`hdb`rdb // order to reload hdbs and rdbs\nhdbdir:`:hdb // move wdb database to different location\nsortcsv:hsym first .proc.getconfigfile\"sort.csv\" // location of csv file\npermitreload:1b // enable reload of hdbs/rdbs\ncompression:() // specify the compress level, empty list if no required\ngc:1b // garbage collect at appropriate points (after each table save and after sorting data)\neodwaittime:0D00:00:10.000 // time to wait for async calls to complete at eod\ntpcheckcycles:0W // number of attempts to connect to tp before process is killed\n\n// Server connection details\n\\d .servers\nSTARTUP:1b // create connections\nCONNECTIONS:`hdb`tickerplant`rdb`gateway`sort // list of connections to make at start up\n\n\\d .proc\nloadprocesscode:1b // Whether to load the process specific code defined at ${KDBCODE}/{process type}\n\n================================================================================\nFILE: 
TorQ_tests_bglaunchprocess_settings.q\nSIZE: 417 characters\n================================================================================\n\n/variables and fns to be used during the unit tests:\n/let the test port_no be 7124\n\ninput1:`procname`proctype`U`localtime`p`T`g`w`qcmd`custom`load!(\"test2\";\"test\";\"${KDBAPPCONFIG}/passwords/accesslist.txt\";\"0\";\"7124\";\"180\";\"1\";\"1000\";\"q\";\"custom_arg\";\"${TORQHOME}/tests/bglaunchprocess/settings.q\");\ninput2:`procname`proctype`load!(\"test3\";\"test\";\"${TORQHOME}/tests/bglaunchprocess/settings.q\");\n.servers.startup[];\n\n================================================================================\nFILE: TorQ_tests_chainedtp_database.q\nSIZE: 142 characters\n================================================================================\n\ntrade:flip `time`sym`price`size`stop`cond`ex`side!\"PSFIBCCS\" $\\: ();\nquote:flip `time`sym`bid`ask`bsize`asize`mode`ex`src!\"PSFFJJCCS\" $\\: ();\n\n\n================================================================================\nFILE: TorQ_tests_chainedtp_settings.q\nSIZE: 603 characters\n================================================================================\n\n// IPC connection parameters\n.servers.CONNECTIONS:`tickerplant`chainedtp`rdb;\n.servers.USERPASS:`admin:admin;\n\n// Test updates\ntesttrade:((5#`GOOG),5?`4;10?100.0;10?100i;10#0b;10?.Q.A;10?.Q.A;10#`buy);\ntestquote:(10?`4;(5?50.0),50+5?50.0;10?100.0;10?100i;10?100i;10?.Q.A;10?.Q.A;10#`3);\n\n// Paths to process CSV and test TP log directory\nprocesscsv:getenv[`KDBTESTS],\"/chainedtp/process.csv\";\ntptestlogs:getenv[`KDBTESTS],\"/chainedtp/tplogs\";\n\n// Function projections (using functions from helperfunctions.q)\nstartproc:startorstopproc[\"start\";;processcsv];\nstopproc:startorstopproc[\"stop\";;processcsv];\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_checkinputs_settings.q\nSIZE: 702 characters\n================================================================================\n\ntestpath:hsym`$getenv[`KDBTESTS],\"/dataaccess/checkinputs\";\nprocesscsv:` sv testpath,`config`process.csv;\n\n//- code to pass in a test name\n//- extract the input parameter from {testname}.csv\n//- extract the expected error from checkinputerrors.csv\n//- compare error with expected error\ncheckreturnederror:{[test]errors[test;`error]~@[.dataaccess.checkinputs;gettestparams test;::]};\ncheckreturnederrorcustom:{[test;param]errors[test;`error]~@[.dataaccess.checkinputs;param;::]};\n\n//- read dictionary of params from csv named according to the test {testname}.csv\ngettestparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv testpath,`testdata,`$string[test],\".csv\";\"s*\"]};\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_common_settings.q\nSIZE: 699 characters\n================================================================================\n\ntestpath:hsym`$getenv[`KDBTESTS],\"/dataaccess/common\";\nprocesscsv:` sv testpath,`config`process.csv;\n\n//- code to pass in a test name\n//- extract the input parameter from {testname}.csv\n//- extract the expected error from checkinputerrors.csv\n//- compare error with expected error\ncheckreturnederror:{[test]errors[test;`error]~@[.checkinputs.checkinputs;gettestparams test;::]};\ncheckreturnederrorcustom:{[test;param]errors[test;`error]~@[.checkinputs.checkinputs;param;::]};\n\n//- read dictionary of params 
from csv named according to the test {testname}.csv\ngettestparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv testpath,`testdata,`$string[test],\".csv\";\"s*\"]};\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_extractqueryparam_settings.q\nSIZE: 750 characters\n================================================================================\n\ninputpath:hsym`$getenv[`KDBTESTS],\"/dataaccess/extractqueryparam/input\";\noutputpath:hsym`$getenv[`KDBTESTS],\"/dataaccess/extractqueryparam/output\";\nprocesscsv:hsym`$getenv[`KDBTESTS],\"/dataaccess/extractqueryparam/`config`process.csv\";\n\n//- code to pass in a test name\n//- extract data from the input and output directories\n//- compare function output with expected output\n\ngetinputparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv inputpath,`$string[test],\".csv\";\"s*\"]};\n\ngetoutputparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv outputpath,`$string[test],\".csv\";\"s*\"]};\n\ntestfunction:{[test] getoutputparams[test]~.eqp.extractqueryparams[getinputparams[test];.eqp.queryparams]};\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_gwquerytest_settings.q\nSIZE: 283 characters\n================================================================================\n\n// IPC connection parameters\n.servers.CONNECTIONS:`gateway;\n.servers.USERPASS:`admin:admin;\n\ntestpath:hsym`$getenv[`KDBTESTS],\"/dataaccess/gwquerytest\";\n\nsublistvalue:2;\n\ngetdict:{exec parameter!get each parametervalue from ((\"s*\";1#\",\")0: ` sv testpath,`inputs,`$string[x],\".csv\")}\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_mockdata.q\nSIZE: 1,689 characters\n================================================================================\n\nparams:([proctype:`hdb`rdb]\n func:`generatehdb`generaterdb;\n partitiontype:`date`date;\n hdbname:`hdb`;\n n:5 5;\n tablename:`xdaily`xdaily;\n nrecord:10 10\n );"}}},{"rowIdx":75,"cells":{"text":{"kind":"string","value":"/ Deletes the specified object reference from the namespace. 
If the reference deleted is the last object in the\n/ namespace then the namespace is removed as well recursively up the namespace tree.\n/ NOTE: The namespace hierarchy removal will never remove the root namespace even if it is empty\n/ @param nsRef (Symbol) The object reference to remove from the namespace\n.ns.deleteReference:{[nsRef]\n if[not .ns.isSet nsRef;\n :(::);\n ];\n\n refSplit:`ns`ref!(-1_; last) @\\: ` vs nsRef;\n refSplit[`ns]:`.^$[0 = count refSplit`ns; `; ` sv refSplit`ns];\n\n ![refSplit`ns; (); 0b; enlist refSplit`ref];\n\n if[.type.isEmptyNamespace get refSplit`ns;\n .z.s refSplit`ns;\n ];\n };\n\n/ @returns (Symbol) A symbol reference to the function that called the function which called this function or 'anon-func' if an anonymous inner function\n.ns.getFunctionCaller:{\n bt:.Q.btx .Q.Ll `;\n\n caller:first bt[2][1];\n\n $[(\"q\";`) ~ caller;\n caller:`$\"q-prompt\";\n 0 = count caller;\n caller:`$\"anon-func\";\n / else\n caller:`$first caller\n ];\n\n :caller;\n };\n\n/ @param func (Symbol|Function) A reference to a function or an actual function\n/ @returns (Function) Resolves the function reference such that a function is always returned\n/ @throws FunctionDoesNotExistException If the reference does not exist\n/ @throws NotAFunctionException If the input value is not a function or the reference does not reference a function\n.ns.i.getFunction:{[func]\n $[.type.isFunction func;\n :func;\n not .type.isSymbol func;\n '\"NotAFunctionException\";\n not .ns.isSet func;\n '\"FunctionDoesNotExistException (\",string[func],\")\"\n ];\n\n func:get func;\n\n if[not .type.isFunction func;\n '\"NotAFunctionException\";\n ];\n\n :func;\n };\n\n\n================================================================================\nFILE: kdb-common_src_os.q\nSIZE: 9,151 characters\n================================================================================\n\n// Operating System Specific Functionality\n// Copyright (c) 2017 - 2018 Sport Trades Ltd\n\n// Documentation: https://github.com/BuaBook/kdb-common/wiki/os.q\n\n.require.lib each `util`type;\n\n\n/ The separator characters for PATH-type environment variables in all configured OSs\n.os.cfg.envPathSeparator:(`symbol$())!`char$();\n.os.cfg.envPathSeparator[`l`v`m]:\":\";\n.os.cfg.envPathSeparator[`w]:\";\";\n\n/ The PATH-type environment variable for shared object / DLL loading for all configured OSs\n.os.cfg.sharedObjectEnvVar:(`symbol$())!`symbol$();\n.os.cfg.sharedObjectEnvVar[`l`v]:`LD_LIBRARY_PATH;\n.os.cfg.sharedObjectEnvVar[`m]:`DYLD_LIBRARY_PATH;\n.os.cfg.sharedObjectEnvVar[`w]:`PATH;\n\n\n/ The current operating system, independent of architecture\n/ @see .os.i.getOsType\n.os.type:`;\n\n/ The separator character for PATH-type environment variables in the current OS\n.os.envPathSeparator:\" \";\n\n/ The environment variable containing the PATH-type environment variable for shared object / DLL loading\n.os.sharedObjectEnvVar:`;\n\n\n.os.init:{\n .os.type:.os.i.getOsType[];\n\n .os.envPathSeparator:.os.cfg.envPathSeparator .os.type;\n .os.sharedObjectEnvVar:.os.cfg.sharedObjectEnvVar .os.type;\n };\n\n/ Runs the specified command with the specified parameters. 
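/ (illustrative usage, a sketch added for clarity rather than part of the original source comments:
/  on Linux, .os.run[`mkdir;"/tmp/newdir"] resolves `mkdir through the per-OS dictionary to the
/  string "mkdir -p /tmp/newdir", and .os.run[`sleep;"5"] to "sleep 5"; the resulting command
/  string is then executed via .util.system)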
NOTE: That not\n/ every command has the equivalent parameters in each Operating System environment.\n/ @param cmd (Symbol) The OS command to run\n/ @param paramStr (String) The list of parameters to pass to the command\n/ @throws UnsupportedOsCommandException If the command specified is not supported on this OS\n/ @throws IllegalArgumentException If the parameter argument is not a string\n.os.run:{[cmd;paramStr]\n if[not cmd in .os.availableCommands[];\n '\"UnsupportedOsCommandException (\",string[cmd],\")\";\n ];\n\n if[not[.util.isEmpty paramStr] & not .type.isString paramStr;\n '\"IllegalArgumentException\";\n ];\n\n :.util.system .os[.os.type][cmd] paramStr;\n };\n\n/ @returns (SymbolList) All the available commands in the current operating system\n.os.availableCommands:{\n :key 1_ .os .os.type;\n };\n\n/ @returns (Boolean) True if the PID is valid and a process exists on the current server that matches it. Otherwise returns false\n.os.isProcessAlive:{[pid]\n osCheck:first .os.run[`pidCheck; string pid];\n\n if[`w=.os.type;\n :osCheck like \"*\",string[pid],\"*\";\n ];\n\n :not \"B\"$osCheck;\n };\n\n/ @returns (String) Current terminal window size in system \"c\" format - \"*lines* *columns*\"\n.os.getTerminalSize:{\n rawTermSize:trim .os.run[`terminalSize; \"\"];\n termSize:\"\";\n\n $[.os.type in `l`m;\n termSize:\" \" vs first rawTermSize;\n `w = .os.type;\n termSize:trim last each \":\" vs/: rawTermSize raze where each rawTermSize like/: (\"Lines:*\"; \"Columns:*\")\n ];\n\n :\" \" sv termSize;\n };\n\n/ @returns (Boolean) True if the kdb process is running in an interactive session, false otherwise\n.os.isInteractiveSession:{\n interactRes:.os.run[`isInteractive; ::];\n\n if[.os.type in `l`m;\n :not \"B\"$first interactRes;\n ];\n };\n\n/ @returns (Symbol) OS independent process architecture\n.os.getProcessArchitecture:{\n bits:\"I\"$-2#string .z.o;\n\n $[32=bits;\n :`x86;\n 64=bits;\n :`x86_64;\n / else\n '\"UnsupportedProcessArchitectureException\"\n ];\n };\n\n/ @param path (FilePath|FolderPath) The path to dereference\n/ @returns (FilePath|FolderPath) The 'real' path of specified path, removing sym links\n.os.dereferencePath:{[path]\n :hsym `$first .os.run[`readlink; 1_ string path];\n };\n\n\n.os.i.getOsType:{\n :`$first string .z.o;\n };\n\n.os.i.convertPathForWindows:{[path]\n :ssr[path;\"/\";\"\\\\\"];\n };\n\n\n// Windows Implementation\n\n.os.w.mkdir:{\n :\"mkdir \",.os.i.convertPathForWindows x;\n };\n\n.os.w.rmdir:{\n :\"rmdir \",.os.i.convertPathForWindows x;\n };\n\n.os.w.pwd:{\n :\"echo %cd%\";\n };\n\n.os.w.rm:{\n :\"del \",.os.i.convertPathForWindows x;\n };\n\n.os.w.rmF:{\n :\"del /F /Q \",.os.i.convertPathForWindows x;\n };\n\n.os.w.pidCheck:{\n :\"tasklist /FI \\\"PID eq \",x,\"\\\" /FO CSV /NH\";\n };\n\n.os.w.sigterm:{\n :\"taskkill /PID \",x;\n };\n\n.os.w.sigkill:{\n :\"taskkill /PID \",x,\" /F\";\n };\n\n.os.w.sleep:{\n :\"timeout /t \",x,\" /nobreak >nul\";\n };\n\n/ ln requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the target, 2nd argument should be the source\n.os.w.ln:{\n args:\"|\" vs x;\n :\"mklink \",.os.i.convertPathForWindows[args 0],\" \",.os.i.convertPathForWindows args 1;\n };\n\n/ mv requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the source, 2nd argument should be the target\n.os.w.mv:{\n args:\"|\" vs x;\n :\"move \",.os.i.convertPathForWindows[args 0],\" \",.os.i.convertPathForWindows args 1;\n };\n\n/ cp requires 2 arguments so pass string separated by \"|\"\n/ First 
argument should be the source, 2nd argument should be the target\n.os.w.cp:{\n args:\"|\" vs x;\n :\"copy \",.os.i.convertPathForWindows[args 0],\" \",.os.i.convertPathForWindows args 1;\n };\n\n.os.w.rmFolder:{\n :\"rd /S /Q \",.os.i.convertPathForWindows x;\n };\n\n.os.w.rmFolder:{\n :\"rd /S /Q \",.os.i.convertPathForWindows x;\n };\n\n.os.w.tail:{\n :\"type \",.os.i.convertPathForWindows x;\n };\n\n.os.w.safeRmFolder:{\n :\"rmdir \",.os.i.convertPathForWindows x;\n };\n\n.os.w.procCount:{\n :\"echo %NUMBER_OF_PROCESSORS%\";\n };\n\n.os.w.which:{\n :\"where \",x;\n };\n\n.os.w.ver:{\n :\"ver\";\n };\n\n/ cp requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the source, 2nd argument should be the target\n.os.w.cpFolder:{\n args:\"|\" vs x;\n :\"xcopy /e /y \",.os.i.convertPathForWindows[args 0],\" \",.os.i.convertPathForWindows args 1;\n };\n\n.os.w.terminalSize:{\n :\"mode con\";\n };\n\n// Linux Implementation\n\n.os.l.mkdir:{\n :\"mkdir -p \",x;\n };\n\n.os.l.rmdir:{\n :\"rmdir \",x;\n };\n\n.os.l.pwd:{\n :\"pwd\";\n };\n\n.os.l.rm:{\n :\"rm -v \",x;\n };\n\n.os.l.rmF:{\n :\"rm -vf \",x;\n };\n\n.os.l.pidCheck:{\n :\"kill -n 0 \",x,\" 2>/dev/null; echo $?\";\n };\n\n.os.l.sigint:{\n :\"kill -s INT \",x;\n };\n\n.os.l.sigterm:{\n :\"kill -s TERM \",x;\n };\n\n.os.l.sigkill:{\n :\"kill -s KILL \",x;\n };\n\n.os.l.sleep:{\n :\"sleep \",x;\n };\n\n/ ln requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the target, 2nd argument should be the source\n.os.l.ln:{\n args:\"|\" vs x;\n :\"ln -s \",args[1],\" \",args 0;\n };\n\n/ mv requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the source, 2nd argument should be the target\n.os.l.mv:{\n args:\"|\" vs x;\n :\"mv \",args[0],\" \",args 1;\n };\n\n/ cp requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the source, 2nd argument should be the target\n.os.l.cp:{\n args:\"|\" vs x;\n :\"cp \",args[0],\" \",args 1;\n };\n\n.os.l.rmFolder:{\n :\"rm -rvf \",x;\n };\n\n.os.l.tail:{\n :\"tail -n 30 \",x;\n };\n\n.os.l.safeRmFolder:{\n :\"rmdir \",x;\n };\n\n.os.l.procCount:{\n :\"getconf _NPROCESSORS_ONLN\";\n };\n\n.os.l.which:{\n :\"which \",x;\n };\n\n.os.l.ver:{\n :\"cat /etc/system-release\";\n };\n\n.os.l.cpuAssign:{\n :\"taskset -cp \",x;\n };\n\n/ cp requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the source, 2nd argument should be the target\n.os.l.cpFolder:{\n args:\"|\" vs x;\n :\"cp -rv \",args[0],\" \",args 1;\n };\n\n.os.l.terminalSize:{\n :\"stty size\";\n };\n\n/ 'tty' exits 9 if there is a TTY attached, 1 otherwise\n.os.l.isInteractive:{\n :\"tty --quiet; echo $?\";\n };\n\n.os.l.shell:{\n :\"bash -c \\\"\",x,\"\\\"\";\n };\n\n.os.l.readlink:{\n :\"readlink -f \",x;\n };\n\n\n// Mac OSX Implementation\n.os.m.mkdir:{\n :\"mkdir -p \",x;\n };\n\n.os.m.rmdir:{\n :\"rmdir \",x;\n };\n\n.os.m.pwd:{\n :\"pwd\";\n };\n\n.os.m.rm:{\n :\"rm -v \",x;\n };\n\n.os.m.rmF:{\n :\"rm -vf \",x;\n };\n\n.os.m.pidCheck:{\n :\"kill -n 0 \",x,\" 2>/dev/null; echo $?\";\n };\n\n.os.m.sigint:{\n :\"kill -s INT \",x;\n };\n\n.os.m.sigterm:{\n :\"kill -s TERM \",x;\n };\n\n.os.m.sigkill:{\n :\"kill -s KILL \",x;\n };\n\n.os.m.sleep:{\n :\"sleep \",x;\n };\n\n/ ln requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the target, 2nd argument should be the source\n.os.m.ln:{\n args:\"|\" vs x;\n :\"ln -s \",args[1],\" \",args 0;\n };\n\n/ mv requires 2 arguments so pass 
string separated by \"|\"\n/ First argument should be the source, 2nd argument should be the target\n.os.m.mv:{\n args:\"|\" vs x;\n :\"mv \",args[0],\" \",args 1;\n };\n\n/ cp requires 2 arguments so pass string separated by \"|\"\n/ First argument should be the source, 2nd argument should be the target\n.os.m.cp:{\n args:\"|\" vs x;\n :\"cp \",args[0],\" \",args 1;\n };\n\n.os.m.rmFolder:{\n :\"rm -rvf \",x;\n };"}}},{"rowIdx":76,"cells":{"text":{"kind":"string","value":"// @private\n// @kind function\n// @category nlpTimeUtility\n// @desc Seperate YearMonth formats to year and month\n// i.e \"ymd\" -> \"y\",\"m\",\"d\"\n// @params ymd {string[]} The format for each date objecct\n// @returns {string} Formats of YearMonthDays objects seperated\ntm.i.formatYMD:{[ymd]\n @[ymd;i unq;:;\"ymd\" unq:where 1=count each i:where each \"ymd\" in/:\\:ymd]\n }\n\n// @private\n// @kind function\n// @category nlpTimeUtility\n// @desc Fill in the blanks in a date format string\n// @param format {string} A date format, as some permutation of \n// \"d\", \"m\", and \"y\"\n// @returns {string} The date format with any blanks filled with their most\n// plausible value\ntm.i.resolveFormat:{[format]\n $[0=n:sum\" \"=format;\n ;\n 1=n;\n ssr[;\" \";first\"ymd\"except format];\n 2=n;\n tm.i.dateFormats;\n {\"dmy\"}\n ]format\n }\n\n// @private\n// @kind dictionary\n// @category nlpTimeUtility\n// @desc The format to use, given a single known position\n// @type dictionary\ntm.i.dateFormats:(!). flip(\n (\"d \";\"dmy\"); // 10th 02 99\n (\"m \";\"mdy\"); // Feb 10 99\n (\"y \";\"ymd\"); // 1999 02 10\n (\" d \";\"mdy\"); // 02 10th 99\n (\" m \";\"dmy\"); // 10 Feb 99\n (\" y \";\"dym\"); // 10 1999 02 This is never conventionally used\n (\" d\";\"ymd\"); // 99 02 10th\n (\" m\";\"ydm\"); // 99 10 Feb This is never conventionally used\n (\" y\";\"dmy\")) // 10 02 1999 //mdy is the american option\n\n// @private\n// @kind function\n// @category nlpTimeUtility\n// @desc Turns a regex time string into a q timestamp\n// i.e \"131030\" -> 13:10:30.000\n// \"1pm\" -> 13:00:00.000\n// @param text {string} A time string\n// @returns {timestamp} The q time parsed from an\n// appropriate string\ntm.i.parseTime:{[text]\n numText:vs[\" \";text][0]in\"1234567890:.\";\n time:\"T\"$text where numText; \n amPM:regex.i.check[;text]each regex.objects`am`pm;\n time+$[amPM[0]&12=`hh$time;-1;amPM[1]&12>`hh$time;1;0]*12:00\n }\n\n// @private\n// @kind function\n// @category nlpTimeUtility\n// @desc Remove any null values\n// @array {number[][]} Array of values\n// returns {number[][]} Array with nulls removed\ntm.i.rmNull:{[array]\n array where not null array[;0]\n }\n\n\n================================================================================\nFILE: ml_nlp_code_email.q\nSIZE: 7,598 characters\n================================================================================\n\n// code/email.q - Nlp email utilities\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Utilities for handling emails\n\n\\d .nlp\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Rich Text Format (RTF) parsing function imported from python\nemail.i.striprtf:.p.get[`striprtf;<]\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract information from various message text types\n// @params textTyp {string} The format of the message text \n// @param msg {string|dictionary} An email message, or email subtree\n// @returns {boolean} Whether or not msg fits the text type criteria \nemail.i.findMime:{[textTyp;msg]\n 
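  / a payload part is selected only if it is a dictionary, declares the
  / requested content type, and is not flagged as an attachment;
  / all three conditions must hold for each part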
msgDict:99=type each msg`payload;\n contentTyp:textTyp~/:msg`contentType;\n attachment:0b~'msg[`payload]@'`attachment;\n all(msgDict;contentTyp;attachment)\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Use beautiful soup to extract text from a html file\n// @param msg {string} The message payload\n// @returns {string} The text from the html\nemail.i.html2text:{[msg]\n cstring email.i.bs[pydstr msg;`html.parser][`:get_text;pydstr \"\\\\n\"]`\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Given an email, extract the text of the email\n// @param msg {string|dictionary} An email message, or email subtree\n// @returns {string} The text of the email, or email subtree\nemail.i.extractText:{[msg]\n // String is actual text, bytes attachment or non text mime type like inline \n // image, dict look at content element\n msgType:type msg;\n if[10=msgType;:msg];\n if[4=msgType;:\"\"];\n if[99=msgType;:.z.s msg`content];\n findMime:email.i.findMime[;msg];\n text:$[count i:where findMime[\"text/plain\"];\n {x[y][`payload]`content}[msg]each i;\n count i:where findMime[\"text/html\"];\n {email.i.html2text x[y][`payload]`content}[msg]each i;\n count i:where findMime[\"application/rtf\"];\n // Use python script to extract text from rtf\n {email.i.striprtf x[y][`payload]`content}[msg]each i;\n .z.s each msg`payload\n ];\n \"\\n\\n\"sv text\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Get all the to/from pairs from an email\n// @param msg {dictionary} An email message, or subtree thereof\n// @returns {any[]} To/from pairings of an email\nemail.i.getToFrom:{[msg]\n payload:msg`payload;\n payload:$[98=type payload;raze .z.s each payload;()];\n edges:(msg[`sender;0;1];)each msg[`to;;1];\n edges,payload\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract the sender information from an email\n// @param emails {<} The email as an embedPy object\n// @returns {string[]} Sender name and email\nemail.i.getSender:{[emails]\n fromInfo:raze emails[`:get_all;<]each pydstr each (\"from\";\"resent-from\");\n cstring email.i.getAddr fromInfo where not(::)~'fromInfo\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract the receiver information from an email\n// @param emails {<} The email as an embedPy object\n// @returns {string[]} Reciever name and email\nemail.i.getTo:{[emails]\n toInfo:raze emails[`:get_all;<]each\n pydstr each (\"to\";\"cc\";\"resent-to\";\"resent-cc\");\n cstring email.i.getAddr toInfo where not any(::;\"\")~/:\\:toInfo\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract the date information from an email\n// @param emails {<} The email as an embedPy object\n// @returns {timestamp} Date email was sent\nemail.i.getDate:{[emails]\n dates:string 6#email.i.parseDate emails[@;`date];\n \"P\"$\"D\"sv\".:\"sv'3 cut{$[1=count x;\"0\";\"\"],x}each dates\n }\n \n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract the subject information from an email\n// @param emails {<} The email as an embedPy object\n// @returns {string} Subject of the email\nemail.i.getSubject:{[emails]\n subject:emails[@;`subject];\n $[(::)~subject`;\n \"\";\n cstring email.i.makeHdr[email.i.decodeHdr subject][`:__str__][]`\n ]\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract the content type of an email\n// @param emails {<} The email as an embedPy object\n// @returns {string} Content 
type of an email \nemail.i.getContentType:{[emails]\n cstring emails[`:get_content_type][]`\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract the payload information from an email\n// @param emails {<} The email as an embedPy object\n// @returns {dictionary|table} Dictionary of `attachment`content or a table \n// of payloads\n// Content is byte[] for binary data, char[] for text\nemail.i.getPayload:{[emails]\n if[emails[`:is_multipart][]`;\n :email.i.parseMbox1 each emails[`:get_payload][]`\n ];\n // Raw bytes decoded from base64 encoding, wrapped embedPy\n raw:emails[`:get_payload;`decode pykw 1]; \n rtf:\"application/rtf\"~cstring email.i.getContentType emails;\n attachment:\"attachment\"~cstring emails[`:get_content_disposition][]`;\n payload:`attachment`content!(0b;raw`);\n if[all(rtf;attachment);:payload];\n if[attachment;\n payload,`attachment`filename!(1b;cstring email[`:get_filename][]`);\n ];\n content:cstring email.i.getContentType emails;\n if[not any content~/:(\"text/html\";\"text/plain\";\"message/rfc822\");:payload];\n charset:cstring emails[`:get_content_charset][]`;\n content:cstring i.str[raw;pydstr $[(::)~charset;\"us-ascii\";charset];`ignore]`;\n `attachment`content!(0b;content)\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract meta information from an email\n// @params filepath {string} The path to the mbox\n// @returns {dictionary} Meta information from the email\nemail.i.parseMbox:{[filepath]\n mbox:email.i.mbox pydstr filepath;\n email.i.parseMbox1 each flip[mbox[`:items;<][]]1\n }\n\n// @private\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract meta information from an email\n// @params mbox {<} Emails in mbox format\n// @returns {dictionary} Meta information from the email\nemail.i.parseMbox1:{[mbox]\n columns:`sender`to`date`subject`contentType`payload;\n msgInfo:`getSender`getTo`getDate`getSubject`getContentType`getPayload;\n columns!email.i[msgInfo]@\\:.p.wrap mbox\n }\n\n// Python imports\nemail.i.bs:.p.import[`bs4]`:BeautifulSoup\nemail.i.getAddr:.p.import[`email.utils;`:getaddresses;<]\nemail.i.parseDate:.p.import[`email.utils;`:parsedate;<]\nemail.i.decodeHdr:.p.import[`email.header;`:decode_header]\nemail.i.makeHdr:.p.import[`email.header;`:make_header]\nemail.i.msgFromString:.p.import[`email]`:message_from_string\nemail.i.mbox:.p.import[`mailbox]`:mbox\n\n\n// @kind function\n// @category nlpEmail\n// @desc Convert an mbox file to a table of parsed metadata\n// @param filepath {string} The path to the mbox file\n// @returns {table} Parsed metadata and content of the mbox file\nemail.loadEmails:{[filepath]\n parseMbox:email.i.parseMbox filepath;\n update text:.nlp.email.i.extractText each payload from parseMbox\n }\n\n// @kind function\n// @category nlpEmail\n// @desc Get the graph of who emailed who, including the number of\n// times they emailed\n// @param emails {table} The result of .nlp.loadEmails\n// @returns {table} Defines to-from pairings of emails\nemail.getGraph:{[emails]\n getToFrom:flip csym raze email.i.getToFrom each emails;\n getToFromTab:flip`sender`to!getToFrom;\n 0!`volume xdesc select volume:count i by sender,to from getToFromTab\n }\n\n// @kind function\n// @category nlpEmailUtility\n// @desc Extract meta information from an email\n// @params content {string} Email content as string\n// @returns {dictionary} Meta information from the email\nemail.parseMail:{[content]\n email.i.parseMbox1 email.i.msgFromString[pydstr content]`.\n 
}\n\n\n================================================================================\nFILE: ml_nlp_code_nlpCode.q\nSIZE: 16,644 characters\n================================================================================\n\n// code/nlpCode.q - NLP code\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Main NLP code base\n\n\\d .nlp\n\n// Date-Time\n\n// @kind function\n// @category nlp\n// @desc Find any times in a string\n// @param text {string} A text, potentially containing many times\n// @returns {any[]} A list of tuples for each time containing\n// (q-time; timeText; startIndex; 1+endIndex)\nfindTimes:{[text]\n timeText:regex.matchAll[regex.objects.time;text];\n parseTime:tm.i.parseTime each timeText[;0];\n time:parseTime,'timeText;\n time where time[;0]<24:01\n }\n\n// @kind function\n// @category nlp\n// @desc Find all the dates in a document\n// @param text {string} A text, potentially containing many dates\n// @returns {any[]} A list of tuples for each time containing \n// (startDate; endDate; dateText; startIndex; 1+endIndex)\nfindDates:{[text]\n ym:regex.matchAll[regex.objects.yearMonth;text];\n ymd:regex.matchAll[regex.objects.yearMonthDay;text];\n convYMD:tm.i.convYearMonthDay each ymd[;0];\n dates:tm.i.rmNull convYMD,'ymd;\n if[count dates;ym@:where not any ym[;1] within/: dates[; 3 4]];\n convYM:tm.i.convYearMonth each ym[;0];\n dates,:tm.i.rmNull convYM,'ym;\n dates iasc dates[;3]\n }\n\n// Parsing function"}}},{"rowIdx":77,"cells":{"text":{"kind":"string","value":"Running kdb+ as a service on Windows¶\nWindows 7+ provides a task scheduler tool which can be used to run kdb+ processes as services.\nTo schedule a kdb+ process to start on startup:\n-\nPress the +R keys to open Run.\n-\nType\ntaskschd.msc\nand press Enter. -\nUnder Actions select Create Task.\n-\nEnter a name and description for your task. Select the user under which the task should be run, and if the user should be logged on for the task to run. If you would like the process to be hidden, select the hidden option.\n-\nAdd a trigger to start the task on system startup:\n-\nFirst click New under the triggers tab.\n-\nSelect At startup under Begin the task.\n-\n-\nSelect the actions the task should take before:\n-\nClick New under the Actions tab.\n-\nSelect\nq.exe\nwith any command-line arguments and the folder which the process should start in.\n-\n-\nSet any required conditions.\n-\nConfigure any required settings.\n-\nWhen the task is complete, press OK.\nOutput redirect¶\nIf you need to redirect output you must modify the action using the following configuration:\nThe arguments should be\n/c C:\\\\q\\\\w64\\\\q.exe -p 5010 -q >C:\\\\q\\\\logs\\\\q.5010.log\nNote the directory for logs must exist.\nMultiple processes¶\nIf you want to set up several instances, the steps required are slightly\ndifferent. 
Rather than configure the task to run q.exe\n, instead\ncreate a .bat\nfile to start multiple kdb+ processes in the background\nand run this from the task.\nExample:\nstart \"q5010\" /B cmd.exe /c C:\\\\q\\\\w64\\\\q.exe -p 5010 -q >C:\\\\q\\\\logs\\\\q.5010.log\nstart \"q5011\" /B q testScript.q -q\nIn this way you can set up on Windows a complete kdb+ process that starts all the required processes in the correct order."}}},{"rowIdx":78,"cells":{"text":{"kind":"string","value":"/ https://lintool.github.io/Cloud9/docs/exercises/pagerank.html\nnode:asc distinct raze cloud9.l\nl:node?cloud9.l\nshow S:(1 2#1+max over l), .ml.prepend[1f] l\nshow node[i]!r i:idesc r:.ml.pageranks[d;S] over r:n#1f%n:S[0;0]\n-1 \"into a full matrix\";\nshow A:.ml.full S\nshow node[i]!r i:idesc r:.ml.pageranka[d;A]\nshow node[i]!r i:idesc r:.ml.pageranki[d;A] over r:n#1f%n:count A\nshow node[i]!r i:idesc r:$[;.ml.google[d;A]] over r:n#1f%n:count A\n\nnode:asc distinct raze berkstan.l\nl:node?berkstan.l\nshow S:(1 2#1+max over l), .ml.prepend[1f] l\n-1\"not enough memory to convert sparse -> full matrix\";\n-1\"just perform a few sparse iterations\";\nshow node[i]!r i:idesc r:10 .ml.pageranks[d;S]/ r:n#1f%n:S[0;0]\n\n\n================================================================================\nFILE: funq_pandp.q\nSIZE: 288 characters\n================================================================================\n\npandp.f:\"1342-0.txt\"\npandp.b:\"https://www.gutenberg.org/files/1342/\"\n-1\"[down]loading pride and prejudice text\";\n.ut.download[pandp.b;;\"\";\"\"] pandp.f;\npandp.txt:read0 `$pandp.f\npandp.chapters:1_\"\\nChapter \" vs \"\\n\" sv 35_-373_ pandp.txt\npandp.s:{first[x ss\"\\n\\n\"]_x} each pandp.chapters\n\n\n================================================================================\nFILE: funq_pendigits.q\nSIZE: 433 characters\n================================================================================\n\npendigits.f:(\"pendigits.tra\";\"pendigits.tes\")\npendigits.b:\"http://archive.ics.uci.edu/ml/machine-learning-databases/\"\npendigits.b,:\"pendigits/\"\n-1\"download the pendigits training and test data set\";\n.ut.download[pendigits.b;;\"\";\"\"] each pendigits.f;\npendigits.y:last pendigits.X:(17#\"h\";\",\") 0: `$pendigits.f 0\npendigits.X:-1_pendigits.X\n\npendigits.yt:last pendigits.Xt:(17#\"h\";\",\") 0: `$pendigits.f 1\npendigits.Xt:-1_pendigits.Xt\n\n\n================================================================================\nFILE: funq_persuasion.q\nSIZE: 339 characters\n================================================================================\n\n/ persuasion\npersuasion.f:\"105.txt\"\npersuasion.b:\"https://www.gutenberg.org/files/105/\"\n-1\"[down]loading persuasion text\";\n.ut.download[persuasion.b;;\"\";\"\"] persuasion.f;\npersuasion.txt:read0 `$persuasion.f\npersuasion.chapters:1_\"Chapter\" vs \"\\n\" sv 44_-373_persuasion.txt\npersuasion.s:{(3+first x ss\"\\n\\n\\n\")_x} each persuasion.chapters\n\n\n================================================================================\nFILE: funq_pima.q\nSIZE: 400 characters\n================================================================================\n\npima.f:\"pima-indians-diabetes.data\"\npima.b:\"http://archive.ics.uci.edu/ml/machine-learning-databases/\"\npima.b,:\"pima-indians-diabetes/\"\n-1\"[down]loading pima-indians-diabetes data set\";\n.ut.download[pima.b;;\"\";\"\"] pima.f;\npima.XY:(\"EEEEEEEEB\";\",\")0:`$pima.f\npima.X:-1_pima.XY\npima.y:first 
pima.Y:-1#pima.XY\npima.c:`preg`plas`pres`skin`test`mass`pedi`age`class\npima.t:`class xcols flip pima.c!pima.XY\n\n\n================================================================================\nFILE: funq_plot.q\nSIZE: 1,405 characters\n================================================================================\n\n\\c 20 100\n\\l funq.q\n\\l dji.q\n\n/ define a plotting function using 10 characters of gradation\nplt:.ut.plot[w:40;h:20;c:.ut.c10;sum]\n\n-1\"plotting 1-dimensional dataset (sin x): x\";\n-1 value plt X:sin .01*til 1000;\n\n-1\"plotting 2-dimensional dataset (uniform variates): (x;y)\";\n-1 value plt X:10000?/:2#1f;\n\n-1\"plotting 2-dimensional dataset (normal variates): (x;y)\";\n-1 value plt (.ml.bm 10000?) each 2#1f;\n\n-1\"plotting 3-dimensional dataset: (x;y;z)\";\n-1 value plt {(x;{x*x*x}x-.5;x:til[x]%x)} 1000;\n\n-1\"plotting 3-dimensional grid as a heatmap: X (matrix)\";\n-1 value plt .ut.hmap {x*/:(x:til x)*(x;x)#1f} 1000;\n\nb:1b / use binary encoding for portable (bit|pix)map\n\n-1\"plotting black/white Mandelbrot series\";\nc:.ut.tcross . (.ut.nseq .) each flip (-1+w:1000;-2 -1.25;.5 1.25)\nx:w cut .ml.mbrotp 20 .ml.mbrotf[c]/0f\n-1 value plt .ut.hmap x;\n-1\"saving PBM image\";\n`mandel.pbm 0: .ut.pbm[b] x\n\n-1\"plotting gray scale Mandelbrot series\";\nx:w cut last 20 .ml.mbrota[c]// (0f;0)\n-1 value plt .ut.hmap x;\n-1\"saving PGM image\";\n`mandel.pgm 0: .ut.pgm[b;20] x\n\n-1\"saving PPM image\";\n`mandel.ppm 0: .ut.ppm[b;20] flip[(rand 1+20;til 1+20;rand 1+20)] x\n\n-1\"plotting sparkline of the dow jones index components\";\nexec -1 ((4$string first stock),\": \",.ut.spark close) by stock from dji.t;\n\n/ tests\n.ut.assert[1b] last[x]<last .ut.heckbert[4] . x:.47 .56\n.ut.assert[1b] last[x]<last .ut.heckbert[10] . x:32064 64978f\n\n\n================================================================================\nFILE: funq_porter.q\nSIZE: 4,492 characters\n================================================================================\n\n/ this is the porter stemmer algorithm ported to q. it follows the\n/ algorithm presented in:\n\n/ Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,\n/ no. 3, pp 130-137\n\n/ https://tartarus.org/martin/PorterStemmer/def.txt\n\n/ this implementation includes the three points of departure from the\n/ original paper introduced here:\n\n/ https://www.tartarus.org/~martin/PorterStemmer\n\n/ note that this implementation stems single words - not full text.\n/ this obviates global variables and .porter.stem, therefore, can be\n/ 'peach'ed. instead of run-time computations and function calls,\n/ hard-coded offsets and $[;;] operators are used for performance.\n/ implementation accuracy can be verified by running the trailing code\n\n/ nick psaris\n/ release 1: august 2018\n\n\\d .porter\n\n/ are the letters in x vowels\nvowel:{\n v:x in \"aeiou\"; / aeiou are vowels\n / y is a vowel if the preceding letter is a consonant\n v[i where not (1b,v) i:where x=\"y\"]:1b;\n v}\n\n/ are the letters in x consonants\ncons:not vowel::\n\n/ returns true if x contains a vowel\nhasvowel:any vowel::\n\n/ returns true if x ends in a double consonant\ndoublec:{$[2>count x;0b;(=) . -2#x;last cons x;0b]}\n\n/ return true if last three letters are consonant - vowel -\n/ consonant and last letter is not in \"wxy\"\ncvc:{$[3>count x;0b;101b~-3#cons x;not last[x] in \"wxy\";0b]}\n\n/ if a<m replace n characters with (r)eplacement suffix\nr:{[a;n;r;x]$[a<m n:n _ x;n,r;x]}\n\n/ compute m where m in c?(vc){m}v? 
and c and v are consecutive lists\n/ of consonants and vowels\nm:{sum[x] - first x:x where differ x:cons x}\n\n/ remove plurals and -ed or -ing\nstep1ab:{\n x:$[not x like \"*s\";x;x like \"*sses\";-2_x;\n x like \"*ies\";-2_x;x like \"*ss\";x;-1_x];\n if[x like \"*eed\";:$[0<m -3_x;-1_x;x]];\n if[not x like o:\"*ed\";if[not x like o:\"*ing\";:x]];\n if[not hasvowel n:(1+neg count o)_x;:x];x:n;\n if[x like \"*at\";:x,\"e\"];\n if[x like \"*bl\";:x,\"e\"];\n if[x like \"*iz\";:x,\"e\"];\n if[doublec x;:$[last[x] in \"lsz\";x;-1_x]];\n if[1=m x;if[cvc x;:x,\"e\"]];\n x}\n\n/ replace y with i when there exist other vowels\nstep1c:{if[x like \"*y\";if[hasvowel -1_x;x[-1+count x]:\"i\"]];x}\n\n/ map double suffices to single ones\nstep2:{\n c:x -2+count x;\n if[c=\"a\";:$[x like \"*ational\";r[0;-7;\"ate\";x];\n x like \"*tional\";r[0;-6;\"tion\";x];x]];\n if[c=\"c\";:$[x like \"*enci\";r[0;-4;\"ence\";x];\n x like \"*anci\";r[0;-4;\"ance\";x];x]];\n if[c=\"e\";:$[x like \"*izer\";r[0;-4;\"ize\";x];x]];\n if[c=\"l\";:$[x like \"*bli\";r[0;-3;\"ble\";x];x like \"*alli\";r[0;-4;\"al\";x];\n x like \"*entli\";r[0;-5;\"ent\";x];x like \"*eli\";r[0;-3;\"e\";x];\n x like \"*ousli\";r[0;-5;\"ous\";x];x]];\n if[c=\"o\";:$[x like \"*ization\";r[0;-7;\"ize\";x];\n x like \"*ation\";r[0;-5;\"ate\";x];x like \"*ator\";r[0;-4;\"ate\";x];x]];\n if[c=\"s\";:$[x like \"*alism\";r[0;-5;\"al\";x];\n x like \"*iveness\";r[0;-7;\"ive\";x];x like \"*fulness\";r[0;-7;\"ful\";x];\n x like \"*ousness\";r[0;-7;\"ous\";x];x]];\n if[c=\"t\";:$[x like \"*aliti\";r[0;-5;\"al\";x];x like \"*iviti\";r[0;-5;\"ive\";x];\n x like \"*biliti\";r[0;-6;\"ble\";x];x]];\n if[c=\"g\";:$[x like \"*logi\";r[0;-4;\"log\";x];x]];\n x}\n\n/ handle -ic-, -full, -ness etc\nstep3:{\n c:x -1+count x;\n if[c=\"e\";:$[x like \"*icate\";r[0;-5;\"ic\";x];x like \"*ative\";r[0;-5;\"\";x];\n x like \"*alize\";r[0;-5;\"al\";x];x]];\n if[c=\"i\";:$[x like \"*iciti\";r[0;-5;\"ic\";x];x]];\n if[c=\"l\";:$[x like \"*ical\";r[0;-4;\"ic\";x];x like \"*ful\";r[0;-3;\"\";x];x]];\n if[c=\"s\";:$[x like \"*ness\";r[0;-4;\"\";x];x]];\n x}\n\n/ remove -ant, -ence etc, in context <c>vcvc<v>\nstep4:{\n c:x -2+count x;\n if[c=\"a\";:$[x like \"*al\";r[1;-2;\"\";x];x]];\n if[c=\"c\";:$[x like \"*ance\";r[1;-4;\"\";x];x like \"*ence\";r[1;-4;\"\";x];x]];\n if[c=\"e\";:$[x like \"*er\";r[1;-2;\"\";x];x]];\n if[c=\"i\";:$[x like \"*ic\";r[1;-2;\"\";x];x]];\n if[c=\"l\";:$[x like \"*able\";r[1;-4;\"\";x];x like \"*ible\";r[1;-4;\"\";x];x]];\n if[c=\"n\";:$[x like \"*ant\";r[1;-3;\"\";x];x like \"*ement\";r[1;-5;\"\";x];\n x like \"*ment\";r[1;-4;\"\";x];x like \"*ent\";r[1;-3;\"\";x];x]];\n if[c=\"o\";:$[x like \"*ion\";$[x[-4+count x] in \"st\";r[1;-3;\"\";x];x];\n x like \"*ou\";r[1;-2;\"\";x];x]];\n if[c=\"s\";:$[x like \"*ism\";r[1;-3;\"\";x];x]];\n if[c=\"t\";:$[x like \"*ate\";r[1;-3;\"\";x];x like \"*iti\";r[1;-3;\"\";x];x]];\n if[c=\"u\";:$[x like \"*ous\";r[1;-3;\"\";x];x]];\n if[c=\"v\";:$[x like \"*ive\";r[1;-3;\"\";x];x]];\n if[c=\"z\";:$[x like \"*ize\";r[1;-3;\"\";x];x]];\n x}\n\n/ remove final e if m>1, change -ll to -l if m>1\nstep5:{\n if[\"e\"=last x;x:$[0=a:m x;x;1<a;-1_x;not cvc -1_x;-1_x;x]];\n if[\"l\"=last x;if[doublec x;if[1<m x;:-1_x]]];\n x}\n\nstem:{\n if[3>count x;:x];\n x:step1ab x;\n x:step1c x;\n x:step2 x;\n x:step3 x;\n x:step4 x;\n x:step5 x;\n x}\n\n\n================================================================================\nFILE: funq_qmlmm.q\nSIZE: 195 
characters\n================================================================================"}}},{"rowIdx":79,"cells":{"text":{"kind":"string","value":"Basic programs¶\nFrom GeeksforGeeks Python Programming Examples\nMany of the published examples have been rewritten to use Python techniques more likely to illuminate the q solution. Where practical, the solutions are shown as expressions evaluated in the REPL, which better allows for experimenting.\nFollow links to the originals for more details on the problem and Python solutions.\nFactorial of a number¶\n>>> def factorial(n): return 1 if (n==1 or n==0) else n * factorial(n - 1)\n...\n>>> factorial(5)\n120\nq)factorial:{$[x<2;1;x*.z.s x-1]}\nq)factorial 5\n120\nAbove .z.s\nrefers to the running function; it can be assigned any name.\nFactorial 5 is defined non-recursively as the product of the integers 1-5.\n>>> math.prod(map(1 ._add_, range(5))) # in Python 3.8\n>>> 120\nq)prd 1+til 5\n120\nSimple interest¶\n>>> p=10000 # principal\n>>> r=5 # rate\n>>> t=5 # time periods\n>>> (p*r*t)/100 # simple interest\n2500.0\nq)p:10000 / principal\nq)r:5 / rate\nq)t:5 / time periods\nq)(p*r*t)%100 / simple interest\n2500f\nQ programs tend to prefer vectors.\nq)(prd 10000 5 5)%100\n2500f\nIteration is implicit in most q operators. Here we have three principals and corresponding time periods. The rate is the same for all three.\nq)p:1000 1500 1750 / principals\nq)r:3 / rate\nq)t:5 6 7 / time periods\nq)(p*r*t)%100 / simple interest\n150 270 367.5\nCompound interest¶\n>>> p = 1200 # principal\n>>> r = 5.4 # rate\n>>> t = 2 # time periods\n>>> p*(pow((1+r/100),t)) # compound interest\n1333.0992\nq)p:1200 / principal\nq)r:5.4 / rate\nq)t:2 / time periods\nq)p*(1+r%100)xexp t / compound interest\n1333.099\nAgain, iteration through lists is implicit.\nq)p:1200 1500 1800 / principals\nq)r:5.4 / rate\nq)t:2 2 3 / time periods\nq)p*(1+r%100)xexp t / compound interest\n1333.099 1666.374 2107.63\nWhether an Armstrong number¶\nimport numpy as np\ndef is_armstrong(x):\ns = 0\nt = x\nwhile t:\ns += (t % 10) ** len(str(x))\nt //= 10\nreturn s == x\n>>> [is_armstrong(x) for x in (153, 120, 1253, 1634)]\n[True, False, False, True]\nisArmstrong:{x=sum{x xexp count x}10 vs x}\nq)isArmstrong each 153 120 1253 1634\n1001b\nThe steps of isArmstrong\nexplain themselves.\nq)10 vs 153 / decode base-10 integer\n1 5 3\nq)1 5 3 xexp 3 / raise to 3rd power\n1 125 27f\nq)sum 1 5 3 xexp 3\n153f\nArea of a circle¶\nThe area of a circle of radius \\(r\\) is \\(\\pi r^2\\), where \\(\\pi\\) is the arc-cosine of -1.\n>>> import numpy as np\n>>> np.arccos(-1)*5*5 # area of circle of radius 5\n78.53981633974483\nq)(acos -1)*5*5 / area of circle of radius 5\n78.53982\nPrime numbers in an interval¶\n>>> from sympy import sieve\n>>> list(sieve.primerange(11, 25))\n[11, 13, 17, 19, 23]\nrange:{x+til y-x-1}\nsieve_primerange:{\nc:range[x;y]; / candidates\nlmt:\"j\"$sqrt last c; / highest divisor to test\nc where all 0<c mod/:range[2;lmt] }\nq)sieve_primerange[11;25]\n11 13 17 19 23\nNo q primitive for this, but range is a useful starting point.\nq)show c:range[11;25] / candidates\n11 12 13 14 15 16 17 18 19 20 21 22 23 24 25\nq)\"j\"$sqrt last c / need test modulo only to here\n5\nq)range[2;]\"j\"$sqrt last c\n2 3 4 5\nq)c mod/:2 3 4 5 / modulo each c against all of them\n1 0 1 0 1 0 1 0 1 0 1 0 1 0 1\n2 0 1 2 0 1 2 0 1 2 0 1 2 0 1\n3 0 1 2 3 0 1 2 3 0 1 2 3 0 1\n1 2 3 4 0 1 2 3 4 0 1 2 3 4 0\nq)show f:0<c mod/:2 3 4 5 / flag 
remainders\n101010101010101b\n101101101101101b\n101110111011101b\n111101111011110b\nq)all f / AND the flag vectors\n101000101000100b\nq)where all f / index the hits\n0 2 6 8 12\nq)c where all f / select from range\n11 13 17 19 23\nWhether a number is prime¶\n>>> from sympy import isprime\n>>> [isprime(x) for x in (11, 15, 1)]\n[True, False, False]\nNo q primitive for this either.\nrange:{x+til y-x-1}\nisPrime:{(x>1)and all 0<x mod range[2;\"j\"$sqrt x]}\nq)isPrime each 11 15 1\n100b\nNth Fibonacci number¶\n# next Fibonacci pair\ndef nfp(x): return [x[1], sum(x)]\n# Nth Fibonacci pair\ndef fibp(n):\nif n<2: return [0, 1]\nreturn nfp(fibp(n-1))\ndef fib(n): return fibp(n)[0]\n>>> fib(9)\n21\nnfp:{(x 1),sum x}\nfib:{first(x-1)nfp/0 1}\nq)fib 9\n21\nAbove we see nfp\napplied with the Do iterator /\n.\nThe Python solution recurses down from n\nto reach the initial state of [0, 1]\n, while the q solution iterates n-1\ntimes on an initial state of 0 1\n.\nWhether a Fibonacci number¶\nimport math\ndef is_fibonacci(n):\nphi = 0.5 + 0.5 * math.sqrt(5.0)\na = phi * n\nreturn n == 0 or abs(round(a) - a) < 1.0 / n\n>>> [is_fibonacci(x) for x in (8, 34, 41)]\n[True, True, False]\n\\(x\\) is a Fibonacci number if either of \\(5x^{2}\\pm 4\\) is a perfect square.\nis_ps:{x={x*x}\"j\"$sqrt x} / is perfect square?\nis_fibonacci:{.[or]is_ps flip 4 -4+/:5*x*x}\nq)is_fibonacci 8 34 41\n110b\nThe iteration implicit in q’s operators means that is_fibonacci\nalso iterates implicitly.\nSum of squares of first N natural numbers¶\ndef squaresum(n): return (n * (n + 1) / 2) * (2 * n + 1) / 3\n>>> [squaresum(x) for x in (4,5)]\n[30.0, 55.0]\nsquaresum:{(x*(x+1)%2)*(1+x*2)%3}\nq)squaresum 4 5\n30 55f\nThe q solution mirrors the Python, but the primitives iterate implicitly.\nCube sum of first N natural numbers¶\ndef sum_cubes(x): return (x * (x + 1) // 2) ** 2\n>>> [sum_cubes(x) for x in (5, 7)]\n[225, 784]\nsum_cubes:{(x*(x+1)div 2)xexp 2}\nq)sum_cubes 5 7\n225 784f\nOnce again, the q operators iterate implicitly.\n\nArray programs¶\nFrom GeeksforGeeks Python Programming Examples\nFollow links to the originals for more details on the problem and Python solutions.\nSum of an array¶\n>>> sum([1, 2, 3])\n6\n>>> sum([15, 12, 13, 10])\n50\nq)sum each (1 2 3; 15 12 13 10)\n6 50\nLargest item in an array¶\n>>> max([10, 20, 4])\n20\n>>> max([20, 10, 20, 4, 100])\n100\nq)max each (10 20 4; 20 10 20 4 100)\n20 100\nRotate an array¶\n>>> import numpy as np\n>>> np.roll([1, 2, 3, 4, 5, 6, 7],-2)\narray([3, 4, 5, 6, 7, 1, 2])\nQ has a primitive for rotating lists.\nq)2 rotate 1 2 3 4 5 6 7\n3 4 5 6 7 1 2\nRemainder of array multiplication divided by n¶\ndef findremainder(arr, n):\nlens, mul = len(arr), 1\nfor i in range(lens):\nmul = (mul * (arr[i] % n)) % n\nreturn mul % n\n>>> findremainder([ 100, 10, 5, 25, 35, 14 ], 11)\n9\nfindRemainder:{(x*y) mod 11} over\nq)findRemainder 100 10 5 25 35 14\n9\nThe binary lambda {(x*y)mod 11}\nreturns the modulo-11 of the product of two numbers.\nover\napplies it to reduce the argument list.\nThe naïve solution\nq)(prd 100 10 5 25 35 14) mod 11\n9\noverflows for a long list.\nReconstruct array, replacing arr[i]\nwith (arr[i-1]+1)%M\n¶\ndef construct(m, a):\nind, n = 0, len(a)\n# Finding the index which is not -1\nfor i in range(n):\nif (a[i]!=-1):\nind = i\nbreak\n# Calculating the values of the indexes ind-1 to 0\nfor i in range(ind-1, -1, -1):\nif (a[i]==-1):\na[i]=(a[i + 1]-1 + m)% m\n# Calculating the values of the indexes ind + 1 to n\nfor i in range(ind + 1, 
n):\nif(a[i]==-1):\na[i]=(a[i-1]+1)% m\nprint(*a)\n>>> construct(7, [5, -1, -1, 1, 2, 3])\n5 6 0 1 2 3\n>>> construct(10, [5, -1, 7, -1, 9, 0])\n5 6 7 8 9 0\nconstruct:{[v;M] {$[y=-1;(x+1)mod z;y]}[;;M]\\[v]}\nq)construct[5 -1 -1 1 2 3;7]\n5 6 0 1 2 3\nq)construct[5 -1 7 -1 9 0;10]\n5 6 7 8 9 0\nThe q solution applies a binary lambda\n{$[y=-1;(x+1)mod z;y]}[;;M]\nto successive pairs of items of argument vector v\n.\nThe lambda is defined with three arguments and projected on M\n– constant for each iteration – so becoming a binary that can be iterated through the vector.\nSuccessive application relies on the Scan iterator.\nIs array monotonic?¶\ndef isMonotonic(A):\nreturn (all(A[i] <= A[i + 1] for i in range(len(A) - 1)) or\nall(A[i] >= A[i + 1] for i in range(len(A) - 1)))\n>>> isMonotonic([6, 5, 4, 4])\nTrue\nisMonotonic:{asc[x]in(x;reverse x)}\nq)isMonotonic 6 5 4 4\n1b\nq)isMonotonic 6 5 3 4\n0b\nBoth these solutions overcompute. The Python program traverses the entire list twice. The q program sorts the entire list. Native sort in q is very fast, but if the list is long and likely to fail we might prefer to iterate and stop as soon as we find the list is not monotonic.\nMonotony can rise or fall, so we test the first pair for both cases. The first several items in the list may match, so we continue testing with both ≤ and ≥ until we eliminate one or both. So, a two-item initial state.\n(1;(<=;>=))\n1\nis the next (first) index to test; the operators (<=;>=)\nare the tests to apply. (They would be (<;>)\nfor strict monotony.) Our function will try the tests, returning those that pass, and the next index, as the next state. We shall apply it with the While iterator, so we need it be unary, i.e. to take one argument. We also want it to refer to the list, so we project a binary lambda on the list to bind the list to the lambda as a constant value for its y\nargument.\nq)v:5 5 5 5 6 6 7 8 9 11 / list to test\nq)it:(1;(<=;>=)) / initial state: index and tests\nq)try:{[x;y] i:x 0; f:x 1; (i+1; f where f .\\:y i-1 0) }[;v]\nq){count x 1} try\\it\n1 (~>;~<)\n2 (~>;~<)\n3 (~>;~<)\n4 (~>;~<)\n5 ,~>\n6 ,~>\n7 ,~>\n8 ,~>\n9 ,~>\n10 ,~>\n11 ()\nAbove we see the ≤ test eliminated after item 4.\nOur test function for the iterator {count x 1}\nstops iteration when the list of functions is empty.\nThe last result will be (n;())\n, with n\nthe next index that would have been tested.\nWe can improve this, using the Converge iterator.\nisMt:{[v] / is monotonic?\ntry:{[x;y] / apply tests x[1] between y x[0]-1 0\ni:x 0; f:x 1; / index; tests\ngo:i<count y; / end of list?\nf:$[go;f where f .\\:y i-1 0;f]; / tests passed\ngo&:0<count f; / keep testing?\n(i+go;f)\n}[;v]; / project onto v\nit:(1;(<=;>=)); / initial state\ncount[v]=first try/[it] } / reached end of v?\nThe first item of the final result of try/[it]\nis the last index for which at least one of the tests ≤ and ≥ held true. We compare it to count[v]\nto see if try\ngot to the end of the list.\nThe second item of the final result is the list of tests that held true.\nInstead of testing the final index, we could count that list to see if either ≤ or ≥ held true throughout v\n. 
The result of isMt\nwould then be {0<count x 1}try/[it]\nq)isMt 6 5 4 4\n1b\nq)isMt 6 5 3 4\n0b\nThe above approach can be generalized.\nThe list of functions could be of any length, contain any binary functions.\nThe initial index could be anywhere in the list, and try\nadapted to stop iteration before the end of the list."}}},{"rowIdx":80,"cells":{"text":{"kind":"string","value":"/ if reQ not loaded, define necessary components here\nif[not `req in key `;\n .url.parse0:{[q;x]\n if[x~hsym`$255#\"a\";'\"hsym too long - consider using a string\"]; //error if URL~`: .. too long\n x:.url.sturl x; //ensure string URL\n p:x til pn:3+first ss[x;\"://\"]; //protocol\n uf:(\"@\"in x)&first[ss[x;\"@\"]]<first ss[pn _ x;\"/\"]; //user flag - true if username present\n un:pn; //default to no user:pass\n u:-1_$[uf;(pn _ x) til (un:1+first ss[x;\"@\"])-pn;\"\"]; //user:pass\n d:x til dn:count[x]^first ss[x:un _ x;\"[/?]\"]; //domain\n a:$[(dn=count x)|\"?\"=x[dn];\"/\",;] dn _ x; //absolute path (add leading slash if necessary)\n o:`protocol`auth`host`path!(p;u;d;a); //create URL object\n :$[q;@[o;`path`query;:;query o`path];o]; //split path into path & query if flag set, return\n };\n .url.sturl:{(\":\"=first x)_x:$[-11=type x;string;]x};\n .url.hsurl:{`$\":\",.url.sturl x};\n .req.query:`method`url`hsym`path`headers`body`bodytype!();\n .req.proxy:{[u]\n p:(^/)`$getenv`$(floor\\)(\"HTTP\";\"NO\"),\\:\"_PROXY\"; //check HTTP_PROXY & NO_PROXY env vars, upper & lower case - fill so p[0] is http_, p[1] is no_\n t:max(first \":\"vs u[`url]`host)like/:{((\".\"=first x)#\"*\"),x}each\",\" vs string p 1; //check if host is in NO_PROXY env var\n t:not null[first p]|t; //check if HTTP_PROXY is defined & host isn't in NO_PROXY\n :$[t;@[;`proxy;:;p 0];]u; //add proxy to URL object if required\n };\n .req.enchd:{[d]\n k:2_@[k;where 10<>type each k:(\" \";`),key d;string]; //convert non-string keys to strings\n v:2_@[v;where 10<>type each v:(\" \";`),value d;string]; //convert non-string values to strings\n :(\"\\r\\n\" sv \": \" sv/:flip (k;v)),\"\\r\\n\\r\\n\"; //encode headers dict to HTTP headers\n };\n .req.buildquery:{[q]\n r:string[q`method],\" \",q[`url;`path],\" HTTP/1.1\\r\\n\", //method & endpoint TODO: fix q[`path] for proxy use case\n \"Host: \",q[`url;`host],$[count q`headers;\"\\r\\n\";\"\"], //add host string\n .req.enchd[q`headers], //add headers\n $[count q`body;q`body;\"\"]; //add payload if present\n :r; //return complete query string\n };\n .cookies.addcookies:{[q]q}; // without reQ loaded, don't do anything with cookies\n ];\n\nVERBOSE:@[value;`.ws.VERBOSE;$[count .z.x;\"-verbose\" in .z.x;0b]]; //default to non-verbose output\n\nw:([h:`int$()] hostname:`$();callback:`$()) //table for recording open websockets\n\n.ws.onmessage.server:{value[w[.z.w]`callback]x} //pass messages to relevant handler\n\nopen0:{[x;y;v]\n q:@[.req.query;`method`url;:;(`GET;.url.parse0[0]x)]; //create reQ query object\n q:.req.proxy q; //handle proxy if needed\n hs:.url.hsurl`$raze q ./:enlist[`url`protocol],$[`proxy in key q;1#`proxy;enlist`url`host]; //get hostname as handle\n q[`headers]:(enlist\"Origin\")!enlist q[`url;`host]; //use Origin header\n q:.cookies.addcookies[q]; //if reQ is loaded, cookies can be added\n s:first r:hs d:.req.buildquery[q]; //build query & send\n if[v;-1\"-- REQUEST --\\n\",string[hs],\" \",d]; //if verbose, log request\n if[v;-1\"-- RESPONSE --\\n\",last r]; //if verbose, log response\n servers,:(s;hs); //record handle & callback in table\n w,:(s;hs;y); //record handle & callback in 
table\n :r; //return response\n }\n\nopen:{neg first open0[x;y;.ws.VERBOSE]} //return neg handle for messaging\n\n.ws.close:{[h]\n h:abs h;\n if[all(h in key .ws.w;h in key .z.W);hclose h]; //close handle if h is found both in .ws.w and .z.W (all opened handles)\n .ws.w:.ws.w _ h; //remove h from .ws.w\n .z.wc h; //remove h from .ws.servers\n }\n\n.ws.closea:{.ws.close each (0!.ws.w)[`h]} //close all opened websockets\n\n\\d .\n\n\n================================================================================\nFILE: ws.q_ws-handler_ws-handler.q\nSIZE: 939 characters\n================================================================================\n\n/ WebSockets handler module; create & receive WebSocket connections in a managable way\n\\d .ws\n\n/ Set up client/server tables & handlers without overwriting, so this script\n/ can be loaded multiple times without issue\nclients:@[value;`.ws.clients;([h:`int$()] hostname:`$())]; //clients = incoming connections over ws\nservers:@[value;`.ws.servers;([h:`int$()] hostname:`$())]; //servers = outgoing connections over ws\n\nonmessage.client:@[value;`.ws.onmessage.client;{{x}}]; //default echo\nonmessage.server:@[value;`.ws.onmessage.server;{{x}}]; //default echo\n\n.z.ws:{.ws.onmessage[$[.z.w in key servers;`server;`client]]x} //pass messages to relevant handler func\n.z.wo:{clients,:(.z.w;.z.h)} //log incoming connections\n.z.wc:{{delete from y where h=x}[.z.w] each `.ws.clients`.ws.servers} //clean up closed connections\n\n\\d .\n\n\n================================================================================\nFILE: ws.q_ws-server_wsu.q\nSIZE: 920 characters\n================================================================================\n\n/wsu.q\n/websocket pubsub functionality\n/based off kx u.q\n\n\\d .wsu\ninit:{w::t!(count t::tables`.)#()}\n\ndel:{w[x]_:w[x;;0]?y};.z.wc:{del[;x]each t};\n\nsel:{$[`~y;x;select from x where sym in y]}\n\npub:{[t;x]{[t;x;w]if[count x:sel[x]w 1;(neg first w).j.j(t;x)]}[t;x]each w t}\n\nadd:{[h;x;y]$[(count w x)>i:w[x;;0]?h;.[`.wsu.w;(x;i;1);union;y];w[x],:enlist(h;y)];(x;$[99=type v:value x;sel[v]y;0#v])}\n\nsub:{[h;x;y]if[x~`;:sub[h;;y]each t];if[not x in t;'x];del[x]h;add[h;x;y]}\n\nend:{(neg union/[w[;;0]])@\\:(`.u.end;x)}"}}},{"rowIdx":81,"cells":{"text":{"kind":"string","value":"/ General job addition function. Adds a job to the cron system for execution\n/ @param func (Symbol) Symbol reference to the function to execute\n/ @param args () Any arguments that are required to execute the function. Pass generic null (::) for no arguments\n/ @param runType (Symbol) The type of cron job to add. See .cron.runners\n/ @param startTime (Timestamp) The first time the job will be run. NOTE: Timestamp will be rounded to the nearest millisecond\n/ @param endTime (Timestamp) The time to finish a repeating job executing. Pass null (0Np) to repeat forever or for one time jobs. NOTE: Timestamp will be rounded to the nearest millisecond\n/ @param interval (Timespan) The interval at which repeating jobs should recur. 
Pass null (0Nn) for one time jobs\n/ @returns (Long) The ID of the new cron job\n/ @throws InvalidCronJobIntervalException If the interval specified is smaller than the cron interval\n/ @throws FunctionDoesNotExistFunction If the function for the cron job does not exist\n/ @throws ReferenceIsNotAFunctionException If the symbol reference for the function is not actually a function\n/ @throws InvalidCronRunTypeException If the run type specified is not present in .cron.runners\n/ @throws InvalidCronJobTimeException If the start time specified is before the current time or the end time is before the start time\n.cron.add:{[func;args;runType;startTime;endTime;interval]\n if[not .ns.isSet func;\n .log.if.error \"Function to add to cron does not exist [ Function: \",string[func],\" ]\";\n '\"FunctionDoesNotExistFunction\";\n ];\n\n if[not .type.isFunction get func;\n .log.if.error \"Symbol reference for cron job is not a function [ Reference: \",string[func],\" ]\";\n '\"ReferenceIsNotAFunctionException\";\n ];\n\n if[not runType in key .cron.runners;\n .log.if.error \"Invalid cron run type. Expecting one of: \",.convert.listToString key .cron.runners;\n '\"InvalidCronRunTypeException\";\n ];\n\n if[not all .type.isTimestamp each (startTime; endTime);\n .log.if.error \"Invalid start time or end time. Must be a timestamp\";\n '\"InvalidCronJobTimeException\";\n ];\n\n startTime:.time.roundTimestampToMs startTime;\n endTime:.time.roundTimestampToMs endTime;\n now:.time.today[] + `second$.time.nowAsTime[];\n\n if[startTime < now;\n if[`disallowed = .cron.cfg.historicalStartTimes;\n .log.if.error (\"Cron job start time is in the past. Cannot add job [ Start Time: {} ] [ Now: {} ]\"; startTime; now);\n '\"InvalidCronJobTimeException\";\n ];\n\n if[`allowed = .cron.cfg.historicalStartTimes;\n .log.if.debug (\"Allowing start time in the past as configured [ Start Time: {} ] [ Now: {} ]\"; startTime; now);\n ];\n\n if[`setAsNow = .cron.cfg.historicalStartTimes;\n .log.if.debug (\"Overwriting start time in the past to now as configured [ Start Time: {} ] [ Now: {} ]\"; startTime; now);\n startTime:now;\n ];\n ];\n\n if[not[.util.isEmpty endTime] & endTime < startTime;\n .log.if.error (\"Cron job end time specified is before the start time. Cannot add job [ Start Time: {} ] [ End Time: {} ]\"; startTime; endTime)\n '\"InvalidCronJobTimeException\";\n ];\n\n if[(`ticking = .cron.cfg.mode) & not[.util.isEmpty interval] & .cron.cfg.timerInterval > .convert.timespanToMs interval;\n .log.if.error \"Cron job repeat interval is shorter than the cron timer interval (ticking). Cannot add job\";\n '\"InvalidCronJobIntervalException\";\n ];\n\n jobId:.cron.jobId;\n .cron.jobId+:1;\n\n `.cron.jobs upsert (jobId;func;args;runType;startTime;endTime;interval;startTime);\n\n if[`tickless = .cron.cfg.mode;\n .cron.i.setNextTick[];\n ];\n\n :jobId;\n };\n\n/ Shortcut function to add a job that will only execute once\n/ @param func (Symbol) Symbol reference to the function to execute\n/ @param args () Any arguments that are required to execute the function. Pass generic null (::) for no arguments\n/ @param startTime (Timestamp) The first time the job will be run\n/ @see .cron.add\n.cron.addRunOnceJob:{[func;args;startTime]\n :.cron.add[func;args;`once;startTime;0Np;0Nn];\n };\n\n/ Shortcut function to add a job that repeats forever\n/ @param func (Symbol) Symbol reference to the function to execute\n/ @param args () Any arguments that are required to execute the function. 
Pass generic null (::) for no arguments\n/ @param startTime (Timestamp) The first time the job will be run\n/ @param interval (Timespan) The interval at which repeating jobs should recur. Pass null (0Nn) for one time jobs\n/ @see .cron.add\n.cron.addRepeatForeverJob:{[func;args;startTime;interval]\n :.cron.add[func;args;`repeat;startTime;0Wp;interval];\n };\n\n/ Schedules a job that repeats forever but only if there isn't an active job with the same function and arguments\n/ @param uFunc (Symbol) Symbol reference to the function to execute\n/ @param uArgs () Any arguments that are required to execute the function. Pass generic null (::) for no arguments\n/ @param startTime (Timestamp) The first time the job will be run\n/ @param interval (Timespan) The interval at which repeating jobs should recur. Pass null (0Nn) for one time jobs\n/ @returns (Long) The job ID either of the existing job, or the newly scheduled job\n/ @see .cron.addRepeatForeverJob\n.cron.addUniqueRepeatForeverJob:{[uFunc;uArgs;startTime;interval]\n match:exec from .cron.jobs where func = uFunc, args ~\\: uArgs, not 0Wp = nextRunTime;\n\n if[not null match`id;\n .log.if.info (\"Cron job with matching function and arguments is active. Not adding job [ Function: {} ] [ Arguments: {} ]\"; uFunc; uArgs);\n :match`id;\n ];\n\n :.cron.addRepeatForeverJob[uFunc; uArgs; startTime; interval];\n };\n\n/ Shortcut function to add a job that repeats until a specified time\n/ @param func (Symbol) Symbol reference to the function to execute\n/ @param args () Any arguments that are required to execute the function. Pass generic null (::) for no arguments\n/ @param startTime (Timestamp) The first time the job will be run\n/ @param endTime (Timestamp) The time to finish a repeating job executing. Pass null (0Np) to repeat forever or for one time jobs\n/ @param interval (Timespan) The interval at which repeating jobs should recur. Pass null (0Nn) for one time jobs\n/ @see .cron.add\n.cron.addRepeatUntilTimeJob:{[func;args;startTime;endTime;interval]\n :.cron.add[func;args;`repeat;startTime;endTime;interval];\n };\n\n/ Schedules a job that repeats until the specified time but only if there isn't an active job with the same function and arguments\n/ @param uFunc (Symbol) Symbol reference to the function to execute\n/ @param uArgs () Any arguments that are required to execute the function. Pass generic null (::) for no arguments\n/ @param startTime (Timestamp) The first time the job will be run\n/ @param endTime (Timestamp) The time to finish a repeating job executing. Pass null (0Np) to repeat forever or for one time jobs\n/ @param interval (Timespan) The interval at which repeating jobs should recur. Pass null (0Nn) for one time jobs\n/ @see .cron.addRepeatUntilTimeJob\n.cron.addUniqueRepeatUntilTimeJob:{[uFunc;uArgs;startTime;endTime;interval]\n match:exec from .cron.jobs where func = uFunc, args ~\\: uArgs, not 0Wp = nextRunTime;\n\n if[not null match`id;\n .log.if.info (\"Cron job with matching function and arguments is active. Not adding job [ Function: {} ] [ Arguments: {} ]\"; uFunc; uArgs);\n :match`id;\n ];\n\n :.cron.addRepeatUntilTimeJob[uFunc; uArgs; startTime; endTime; interval];\n };\n\n/ Cancels the specified job from running. 
Run once jobs will never run and repeating jobs will no longer run\n/ @param jobId (Long) The ID of the job to cancel\n/ @throws InvalidCronJobException If the ID of the job does not exist\n.cron.cancelJob:{[jobId]\n if[not jobId in key .cron.jobs;\n '\"InvalidCronJobException\";\n ];\n\n update nextRunTime:0Wp from `.cron.jobs where id = jobId;\n\n if[`tickless = .cron.cfg.mode;\n .cron.i.setNextTick[];\n ];\n };\n\n/ Removes all entries from .cron.status and all jobs that will not run again. By default this is run at\n/ midnight every day\n/ @see .cron.status\n.cron.cleanStatus:{\n delete from `.cron.jobs where nextRunTime = 0Wp;\n delete from `.cron.status where not null id;\n };\n\n/ The main cron function that is bound to .z.ts as part of the initialisation\n.cron.ts:{\n toRun:0!select id, runType from .cron.jobs where nextRunTime <= .time.now[];\n .cron.runners[toRun`runType] @' toRun`id;\n\n if[`tickless = .cron.cfg.mode;\n .cron.i.setNextTick[];\n ];\n };\n\n/ Execution function for jobs that only run once\n/ @returns (Boolean) If the job executed successfully or not\n/ @see .cron.i.run\n.cron.i.runOnce:{[jobId]\n status:.cron.i.run jobId;\n\n .cron.cancelJob jobId;\n\n :status;\n };\n\n/ Execution function for jobs that repeat\n/ @returns (Boolean) If the job exeucted successfully or not\n/ @see .cron.i.run\n.cron.i.runRepeat:{[jobId]\n status:.cron.i.run jobId;\n\n jobDetails:.cron.jobs jobId;\n\n if[.type.isInfinite jobDetails`nextRunTime;\n .log.if.debug \"Job has been self-cancelled. Will not reschedule [ Job: \",string[jobId],\" ]\";\n :status;\n ];\n\n newNextRunTime:(+). jobDetails`nextRunTime`interval;\n\n if[newNextRunTime > jobDetails`endTime;\n .log.if.info \"Job has reached 'end time'. Will not schedule again [ Job: \",string[jobId],\" ]\";\n newNextRunTime:0Wp;\n ];\n\n update nextRunTime:newNextRunTime from `.cron.jobs where id = jobId;\n\n :status;\n };\n\n/ Executes the specified cron job\n/ @param jobId (Long) The cron job to run now\n/ @returns (Boolean) If the job executed successfully or not\n/ @see .ns.protectedExecute\n.cron.i.run:{[jobId]\n jobDetails:.cron.jobs jobId;\n\n startTimer:.time.now[];\n\n result:.ns.protectedExecute . jobDetails`func`args;\n\n endTimer:.time.now[];\n\n status:not .ns.const.pExecFailure ~ first result;\n\n if[not status;\n $[.cron.cfg.printBacktraceOnFailure;\n .log.if.error (\"Cron job failed to execute [ Job ID: {} ]. Error - {}\\n{}\"; jobId; last result; result`backtrace);\n / else\n .log.if.error (\"Cron job failed to execute [ Job ID: {} ]. Error - {}\"; jobId; last result)\n ];\n\n result:(`errorMsg`backtrace inter key result)#result;\n ];\n\n / Cron job failures will always be logged\n if[.cron.cfg.logStatus | not status;\n `.cron.status upsert jobId,jobDetails[`func`nextRunTime],(startTimer;endTimer - startTimer;status;result);\n ];\n\n :status;\n };\n\n/ Updates the 'tickless' timer tick based on the next run time. If no more cron jobs are scheduled to run, the timer will be disabled\n/ until a new job is added\n/ @see .cron.jobs\n/ @see .cron.oneMsAsTimespan\n/ @see .cron.maxTimerAsTimespan\n.cron.i.setNextTick:{\n nextRun:exec min nextRunTime from .cron.jobs;\n\n if[.type.isInfinite nextRun;\n .log.if.trace \"No active cron jobs scheduled. 
Disabling system timer\";\n system \"t 0\";\n :(::);\n ];\n\n / Always make sure the next timer tick:\n / * Is not 0 (so accidentally disabled)\n / * Is not greater than max integer - 1\n timer:.cron.maxTimerAsTimespan & .cron.oneMsAsTimespan | nextRun - .time.roundTimestampToMs .time.now[];\n timerMs:.convert.timespanToMs timer;\n\n if[timerMs = system \"t\";\n :(::);\n ];\n\n system \"t \",string timerMs;\n\n .log.if.trace \"Tickless cron timer updated [ Next Run: \",string[timer],\" (\",string[timerMs],\" ms) ]\";\n };"}}},{"rowIdx":82,"cells":{"text":{"kind":"string","value":"kdb+ in astronomy¶\nThe field of observational astronomy has always been data-driven, but like many other fields, technological advances are causing something of a paradigm shift, and – according to experts – a bit of a headache! Currently under construction are new infrastructures that have the potential to record volumes of data that have not been seen before in the field. The Large Synoptic Survey Telescope (LSST) and Square Kilometer Array (SKA) are set to record such huge amounts of data that experts are concerned about their ability to make sense of this data, purely due to its sheer size.\nThe LSST is expected to be fully operational and start recording data in 2021, producing 15TB every night. It will take an image of half the sky every three nights, and so the same objects will be photographed again and again. This gives the data from the LSST a time dimension – how an object moves over time is something that will be studied from this data. In the 2020s, the SKA will produce 160TB of data per second. This is in the region of exabytes per night, and zettabytes per year. If this data were processed in real time, any large differences in objects (e.g. brightness/position) could then be investigated immediately. Use cases of the data on a non-real-time basis include discovering the formation and structure of our solar system, investigating distant galaxies, and the evolution of the universe.\nThere is no single choice of programming language in astronomy, but C is used for many astronomical applications. C is one of the most popular and commonly-used programming languages in the world with a wide range of uses varying from powering operating systems to building application software. kdb+ has the ability to extend its functionality through dynamically-loaded C/C++ modules, so we have the ability both to make use of existing utilities, and to create our own.\nWe believe that due to the amount of data collected, its time-series nature, and the potential need for both real-time and historical-based analysis, kdb+ would be very well suited to the data collected in the astronomy industry, and would be an ideal fit for many future astronomy projects.\nThis paper serves to provide an example of how this data collected in astronomy can be processed using the power and speed of kdb+. With future projects collecting data at increasing scales, this is an ideal time for the power of kdb+ to be applied to the field of astronomy. To demonstrate, we have loaded some raw astronomical data in to kdb+. In addition, we make use of kdb+’s versatility in extending with C to show how it takes a relatively small amount of q code to load some raw astronomical data in to kdb+ and run some simple analysis on the dataset.\nThe European Space Agency’s (ESA) Gaia project is a mission to chart a three-dimensional map of the Milky Way galaxy. 
Gaia is a billion-pixel camera that will take images of the galaxy to eventually record over one billion stars across the next five years, with also the anticipation of making other discoveries along the way. It recently released its first set of data to the public in the form of FITS files. This can be downloaded from ESA’s website.\nSloan Digital Sky Survey (SDSS)¶\nThe Sloan Digital Sky Survey (SDSS) – based in New Mexico – began surveying the sky in 2000. It covered roughly one third of the sky, and observed around 500 million objects. It has produced more than 100TB of data, of which all is available free to the public through its website. This has resulted in the SDSS often being described as the project that genuinely brought astronomy in to Big Data territory, and so its significance in this field cannot be underestimated. The data can be accessed through SQL queries, or through the raw FITS files that are also publicly available. We describe FITS files below.\nFITS files¶\nFlexible Image Transport System (FITS) is a digital file format used for storing scientific data. It is the format most widely used by astronomers and astrophysicists for transporting, analyzing and archiving scientific data since the early 1980s. The data in the SDSS database is archived in FITS files. These files are primarily designed to store images in the form of multidimensional numerical arrays or data in the form of tables. One FITS file can contain multiple tables and images.\nFITS files consist of segments called Header Data Units (HDUs). Every FITS file has a primary HDU which is usually an image and optional extension HDUs which may contain images or tables. Each HDU contains a Header Unit and a Data Unit. The Header Unit contains metadata pertaining to the information stored in the Data Unit and the Data Unit is the image or table referenced by the metadata.\nProcessing the data – linking FITS and kdb+ using C¶\nKX provides a header file, k.h\n, for interacting with C from kdb+. It provides the link between kdb+ and C by converting the different data types and structures between the two languages. Using this header file, we created a shared object that could natively parse a FITS file and load the data into a kdb+ database. This C extension can read metadata in HDUs and extract columns from binary tables, converting the information into a format usable by kdb+. The C functions are loaded into kdb+ from the shared object by using the Dynamic Load operator 2:\n, which is described in more detail later.\nInterfaces: C client for kdb+\n“C API for kdb+”\nKxSystems/kdb\nJames Neill’s repository for kdb+ in astronomy:\njpneill/fitsToKdb\nThe shared object¶\nThere are five functions for extracting metadata about the FITS file and the tables. 
Each of these takes in one or more K objects from a kdb+ process and returns a K object to that process.\n// Function which prints a list of the number of HDUs and their types\n// x – q symbol that is the FITS file `$”example.fits”\nK listHDUs(K x)\n// Function which returns number of rows in a table from a FITS file\n// x - q symbol that is the FITS file `$\"example.fits\"\n// y – q int or long representing the HDU of the binary table\nK num_rows(K x, K y)\n// Function which returns number of columns in a table from a FITS file\n// x - q symbol that is the FITS file `$\"example.fits\"\n// y – q int or long representing the HDU of the binary table\nK num_cols(K x, K y)\n// Function which returns a list of columns in a binary table in a FITS file\n// x - q symbol that is the FITS file `$\"example.fits\"\n// y - q int or long representing the HDU we're looking for\nK cols(K x, K y)\n// Function which returns a column type from a binary table in a FITS file\n// x - q symbol for name of FITS file\n// y - q symbol for column name\n// z - q int or long representing the HDU we're looking at\nK getColType(K x, K y, K z)\nThere are four functions for extracting columns of different types:\n// All of the following take in the same parameters:\n// x - q symbol for FITS file name\n// y - q symbol for column name\n// z - q long for number of rows to extract\n// h - q int or long representing the number of the HDU we're looking at\nK readLongCol(K x,K y,K z,K h)\nK readIntCol(K x,K y,K z,K h)\nK readDoubleCol(K x,K y,K z,K h)\nK readSymCol(K x,K y,K z,K h)\nImporting table data to kdb+¶\nIn kdb+, the Dynamic Load 2:\noperator is a binary function used to dynamically load C functions from a shared object. The left argument is the library from which the function is loaded (of type symbol), and the right argument is a list containing the function name (type symbol), and the number of arguments that function takes (type integer). Dynamically loaded functions have the datatype value 112h\n. Below is an example of how the listHDUs\nfunction is loaded:\nq).astro.listHDUs:`fitsToKdb 2:(`listHDUs;1)\nOnce the function is loaded into the kdb+ process it can be called in the same way as any q function:\nq)file:`$\"specObj-dr12.fits\"\nq).astro.listHDUs[file]\nNumber of HDUs: 2\nHDU 1: IMAGE_HDU\nHDU 2: BINARY_TBL\nThe other functions can then be loaded in in the same manner.\n.astro.getFitsRowCount:`fitsToKdb 2:(`num_rows;2)\n.astro.getFitsColCount:`fitsToKdb 2:(`num_cols;2)\n.astro.getFitsColNames:`fitsToKdb 2:(`cols;2)\n.astro.getFitsColType: `fitsToKdb 2:(`getColType;3)\n.astro.readLongCol: `fitsToKdb 2:(`readLongCol;4)\n.astro.readDoubleCol: `fitsToKdb 2:(`readDoubleCol;4)\n.astro.readIntCol: `fitsToKdb 2:(`readIntCol;4)\n.astro.readSymCol: `fitsToKdb 2:(`readSymCol;4)\nThe source file for these example analytics is the specObj-dr12.fits\nfile from the SDSS database.\nwww.sdss.org/dr12/spectro/spectro_access\nThis is a 2.9GB file containing the redshifts and classifications of all 4 million+ objects observed, including galaxy, quasar, and stellar spectra. 
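As an aside, since every wrapper above comes from the same shared object, the Dynamic Load operator can also be projected onto the library name once and reused. The sketch below is ours rather than part of the original paper (the helper name load is invented); the function names, argument counts and the 112h datatype are as listed earlier.
q)load:2:[`fitsToKdb;]                    / project Dynamic Load onto the library
q).astro.readLongCol:load(`readLongCol;4)
q)type .astro.readLongCol                 / dynamically loaded functions have type 112h
112h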
We are able to use the functions defined above to create a kdb+ database from this FITS format file.\nq).astro.getFitsRowCount[file;2]\n4355200i\nq)c:.astro.getFitsColNames[file;2] // columns in table\nq)t:.astro.getFitsColType[file;;2]each c // types of columns\nq)n:1000000 // number of rows to extract from the table\nq)icols:c where t=`I // get only the int columns\n// build a dictionary containing the int columns\nq)icols:lower[icols]!.astro.readIntCol[file;;n;2]each icols\n// repeat for each of the long, float and sym cols\nInterfaces: Using foreign functions with kdb+\nBuilding tables in kdb+¶\nOnce the data is in dictionary form, combine and build the table:\nq)specObj:flip raze(icols;jcols;fcols;scols)\nq)specObj\nnspecobs spectrographid bluefiber nturnoff boss_specobj_id ..\n--------------------------------------------------------------\n2 1 -1 -1 0 -1\n2 1 -1 -1 0 -1\n2 1 -1 -1 0 -1\n2 1 -1 -1 0 -1\n2 1 -1 -1 0 -1\n2 1 -1 -1 0 -1\n2 1 -1 -1 0 -1\n2 1 -1 -1 0 -1\n3 1 -1 -1 0 -1\n..\nq)count specObj\n1000000\nAt this point, we have successfully loaded the data from a FITS file in to an in-memory kdb+ table. From here we can run some sample queries to take a look at the data and what it contains.\nObtaining a breakdown by class of the data:\nq)select count i by class from specObj\nclass | x\n------| ------\nGALAXY| 678098\nQSO | 112701\nSTAR | 209201\n// Time(ms) and memory(bytes) taken to compute across 1m rows\nq)\\ts select count i by class from specObj\n18 16777872\nCalculating recessional velocity¶\nCalculating the recessional velocity (recessional velocity is the rate at which an object is moving away from Earth) of each object using the observed redshift (z\ncolumn):\nq)rv:{[z](z-1)%1+z*:z+:1}\nq)rv 1.6\n0.742268\nq)select class,subclass,plug_ra,plug_dec,z,recVel:rv z from specObj\nclass subclass plug_ra plug_dec z recVel\n-------------------------------------------------------------\nGALAXY 146.7142 -1.041304 0.02127545 0.02104918\nGALAXY 146.9195 -0.9904918 0.2139246 0.1914661\nQSO BROADLINE 146.9023 -0.9849133 0.6521814 0.4637643\nGALAXY 146.8598 -0.8089017 0.1265536 0.1186022\n..\n// Time(ms) and memory(bytes) taken to compute above query\nq)\\ts select class,subclass,plug_ra,plug_dec,z,recVel:rv z from specObj\n30 33555104\n// breakdown of average recessional velocity by class\nq)select recVel:avg rv z by class from specObj\nclass | recVel\n------| -------------\nGALAXY| 0.1374245\nQSO | 0.6371058\nSTAR | -6.876509e-05\nq)\\ts select recVel:avg rv z by class from specObj\n53 53478144\nThe kdb+ function fby\naggregates values from one list based on groups in another list. It is commonly used to extend the functionality of the Where clause in a select statement. Placing fby\nin a Where clause allows an aggregate function (e.g. sum\n, max\n, avg\n) to be used to select individual rows across groupings. The left argument is a list containing two items – the first being the aggregation function, the second being the data vector (list/column) – and the right argument is the ‘group by’ vector. 
Below we make use of the fby\nfunction to obtain a breakdown by class of the objects with above average recessional velocity.\nq)select count i by class from (select class, recVel:rv z from specObj)\nwhere recVel>(avg;recVel) fby class\nclass | x\n------| ------\nGALAXY| 242519\nQSO | 68750\nSTAR | 116152\nq)\\ts select count i by class from (select class, recVel:rv z from specObj)\nwhere recVel>(avg;recVel) fby class\n84 36701520\nWe can infer from these results that quasi-stellar objects (QSOs) have the greatest recessional velocities of the three classes in this data set on average, whereas stars’ negative recessional velocity suggests they are moving towards us on average. Only roughly one third (242,519 of 678,098) of the galaxies in this data have an above-average recessional velocity by class, while over half of stars, and closer to two thirds of QSOs are above average in their respective classes.\nAs previously mentioned, the positive impact of having the SDSS data available to the public has been massive. Since it was released, over 3,000 papers have been written on a range of different topics in the field, based on data from the SDSS.\nThe SDSS is just one example of several projects, but it shows the benefit of making the data accessible to all interested parties, both professional and amateur. This impact throughout the wider astronomical community has pushed leaders in the field to continue this development – to further promote sky surveys by building bigger, more powerful telescopes. However, this doesn’t come without its problems, with data storage and computational processing power being pushed to the limits. kdb+ has the ability to scale to these extremes. Sean Keevey’s “A natural query interface for distributed systems” discusses how data distributed over several processes and machines can be seamlessly accessed from a single starting point by the end user.\nWe previously mentioned how astronomy data can often come with a time domain, which would be well suited to kdb+ and how the data would be stored on disk. The data set in this example does not have a time domain, as it just provides information on given objects recorded once, but this certainly does not mean that it is not suited to kdb+.\nWe could apply an attribute to this data for optimization, such as the sorted attribute to the class column in the example data set. The benefits of this would become more apparent as more files were being loaded in. Ciaran Gorman’s “Columnar database and query optimization” gives an in-depth explanation as to how they can be applied.\nFurther reading¶\n- The Atlantic: “How big data is changing astronomy (again)”\n- Cloud Computing and the Square Kilometre Array\nAuthors¶\nAndrew Magowan is a kdb+ consultant who has developed data and analytic systems for some of the world's largest financial institutions. Andrew is currently based in New York where he maintains a global tick capture application across a range of asset classes at a major investment bank.\nJames Neill works as a kdb+ consultant for one of the world’s largest investment banks developing a range of applications. 
James has also been involved in the design of training courses in data science and machine learning as part of the First Derivatives Capital Markets Training Programme."}}},{"rowIdx":83,"cells":{"text":{"kind":"string","value":"// @kind function\n// @category models\n// @desc Fit a vanilla torch model to data\n// @param data {dictionary} Containing training and testing data according to\n// keys `xtrn`ytrn`xtst`ytst\n// @param model {<} Model object being passed through the system (compiled)\n// @return {<} A vanilla fitted torch model\nmodels.torch.NN.fit:{[data;model]\n optimArg:enlist[`lr]!enlist 0.9;\n optimizer:models.i.Adam[model[`:parameters][];pykwargs optimArg];\n criterion:models.i.neuralNet[`:BCEWithLogitsLoss][];\n dataX:models.i.numpy[models.i.npArray[data`xtrain]][`:float][];\n dataY:models.i.numpy[models.i.npArray[data`ytrain]][`:float][];\n tensorXY:models.i.tensorData[dataX;dataY];\n modelArgs:`batch_size`shuffle`num_workers!(count first data`xtrain;1b;0);\n if[.pykx.loaded;modelArgs:.pykx.topy each modelArgs];\n dataLoader:models.i.dataLoader[tensorXY;pykwargs modelArgs];\n nEpochs:10|`int$(count[data`xtrain]%1000);\n models.torch.torchFit[model;optimizer;criterion;dataLoader;nEpochs]\n }\n\n\n// @kind function\n// @category models\n// @desc Compile a keras model for binary problems\n// @param data {dictionary} Containing training and testing data according to \n// keys `xtrn`ytrn`xtst`ytst\n// @param seed {int} Seed used for initialising the same model\n// @return {<} The compiled torch models\nmodels.torch.NN.model:{[data;seed]\n models.torch.torchModel[count first data`xtrain;200]\n }\n\n\n// @kind function\n// @category models\n// @desc Predict test data values using a compiled model\n// for binary problem types\n// @param data {dictionary} Containing training and testing data according to \n// keys `xtrn`ytrn`xtst`ytst\n// @param model {<} Model object being passed through the system (fitted)\n// @return {boolean} The predicted values for a given model\nmodels.torch.NN.predict:{[data;model] \n dataX:models.i.numpy[models.i.npArray[data`xtest]][`:float][];\n torchMax:.p.wrap last models.i.torch[`:max][model[dataX];1]`;\n if[.pykx.loaded;torchMax:torchMax[`:values]];\n torchMax[`:detach][][`:numpy][][`:squeeze][]`\n }\n\n\n// Load required python modules\nmodels.i.torch:.p.import[`torch]\nmodels.i.npArray:.p.import[`numpy]`:array;\nmodels.i.Adam:.p.import[`torch.optim]`:Adam\nmodels.i.numpy:.p.import[`torch]`:from_numpy\nmodels.i.tensorData:.p.import[`torch.utils.data]`:TensorDataset\nmodels.i.dataLoader:.p.import[`torch.utils.data]`:DataLoader\nmodels.i.neuralNet:.p.import[`torch.nn]\n\nmodels.torch.torchFit:.p.get[`runmodel];\nmodels.torch.torchModel:.p.get[`classifier];\n\n\n================================================================================\nFILE: ml_automl_code_tests_utils.q\nSIZE: 2,239 characters\n================================================================================\n\n// code/tests/utils.q - Testing utilities\n// Copyright (c) 2021 Kx Systems Inc\n//\n// The following utilities are used to test that a function is returning the \n// expected error message or data. 
These functions will likely be provided in\n// some form within the test.q script provided as standard for the testing of\n// q and embedPy code.\n\n// @kind function\n// @category tests\n// @desc Ensure that a test that is expected to fail, does so with an \n// appropriate message\n// @param function {(<;proj)} The function or projection to be tested\n// @param data {any} Data to be applied to the function as an individual item\n// for unary functions or a list of variables for multivariant functions\n// @param applyType {boolean} Is function to be applied unary/multivariant\n// (1b/0b)\n// @param expectedError {string} Expected error message on failure of the \n// function\n// @return {boolean} Function errored with appropriate message (1b), function \n// failed inappropriately or passed (0b)\nfailingTest:{[function;data;applyType;expectedError]\n // Is function to be applied unary or multivariant\n applyType:$[applyType;@;.];\n failureFunction:{[err;ret](`TestFailing;ret;err~ret)}expectedError;\n functionReturn:applyType[function;data;failureFunction];\n $[`TestFailing~first functionReturn;last functionReturn;0b]\n }\n\n// @kind function\n// @category tests\n// @desc Ensure that a test that is expected to pass, \n// does so with an appropriate return\n// @param function {(<;proj)} The function or projection to be tested\n// @param data {any} Data to be applied to the function as an individual item\n// for unary functions or a list of variables for multivariant functions\n// @param applyType {boolean} Is function to be applied unary/multivariant \n// (1b/0b)\n// @param expectedReturn {string} The data expected to be returned on execution\n// of the function with the supplied data\n// @return {boolean} Function returned the appropriate output (1b), function\n// failed or executed with incorrect output (0b)\npassingTest:{[function;data;applyType;expectedReturn]\n // Is function to be applied unary or multivariant\n applyType:$[applyType;@;.];\n functionReturn:applyType[function;data];\n expectedReturn~functionReturn\n }\n\n\n================================================================================\nFILE: ml_automl_code_utils.q\nSIZE: 24,710 characters\n================================================================================\n\n// code/utils.q - General utility functions\n// Copyright (c) 2021 Kx Systems Inc\n//\n// The purpose of this file is to house utilities that are useful across more\n// than one node or as part of the AutoML fit functionality and graph.\n\n\\d .automl\n\n// @kind data\n// @category utility\n// @desc List of models to exclude\n// @type symbol[]\nutils.excludeList:`GaussianNB`LinearRegression\n\n// @kind function\n// @category utility\n// @desc Defaulted fitting and prediction functions for AutoML cross\n// validation and hyperparameter search. 
Both models fit on a training set\n// and return the predicted scores based on supplied scoring function.\n// @param func {<} Scoring function that takes parameters and data as input, \n// returns appropriate score\n// @param hyperParam {dictionary} Hyperparameters to be searched\n// @param data {float[]} Data split into training and testing sets of format\n// ((xtrn;ytrn);(xval;yval))\n// @return {boolean[]|float[]} Predicted and true validation values\nutils.fitPredict:{[func;hyperParam;data]\n predicts:$[0h~type hyperParam;\n func[data;hyperParam 0;hyperParam 1];\n @[.[func[][hyperParam]`:fit;data 0]`:predict;data[1]0]`\n ];\n (predicts;data[1]1)\n }\n\n// @kind function\n// @category utility\n// @desc Load function from q. If function not found, try Python.\n// @param funcName {symbol} Name of function to retrieve\n// @return {<} Loaded function\nutils.qpyFuncSearch:{[funcName]\n func:@[get;funcName;()];\n $[()~func;.p.get[funcName;<];func]\n }\n\n// @kind function\n// @category utility\n// @desc Load NLP library if requirements met\n// This function takes no arguments and returns nothing. Its purpose is to load\n// the NLP library if requirements are met. If not, a statement printed to \n// terminal.\nutils.loadNLP:{\n notSatisfied:\"Requirements for NLP models are not satisfied. gensim must be\",\n \" installed. NLP module will not be available.\";\n $[(0~checkimport 3)&(::)~@[{system\"l \",x};\"nlp/nlp.q\";{0b}];\n .nlp.loadfile`:init.q;\n -1 notSatisfied;\n ];\n }\n\n// @kind function\n// @category utility\n// @desc Used throughout the library to convert linux/mac file names to\n// windows equivalent\n// @param path {string} Linux style path\n// @return {string} Path modified to be suitable for windows systems\nutils.ssrWindows:{[path]\n $[.z.o like \"w*\";ssr[;\"/\";\"\\\\\"];]path\n }\n\n// Python plot functionality\nutils.plt:.p.import`matplotlib.pyplot;\n\n// @kind function\n// @category utility\n// @desc Split data into training and testing sets without shuffling\n// @param features {table} Unkeyed tabular feature data\n// @param target {number[]} Numerical target vector\n// @param size {float} Percentage of data in testing set\n// @return {dictionary} Data separated into training and testing sets\nutils.ttsNonShuff:{[features;target;size]\n `xtrain`ytrain`xtest`ytest!\n raze(features;target)@\\:/:(0,floor n*1-size)_til n:count features\n }\n\n// @kind function\n// @category utility\n// @desc Return column value based on best model\n// @param modelTab {table} Models to apply to feature data\n// @param modelName {symbol} Name of current model\n// @param col {symbol} Column to search\n// @return {symbol} Column value\nutils.bestModelDef:{[modelTab;modelName;col]\n first?[modelTab;enlist(=;`model;enlist modelName);();col]\n }\n\n// @kind function\n// @category automl\n// @desc Retrieve feature and target data using information contained\n// in user-defined JSON file\n// @param method {dictionary} Retrieval methods for command line data. 
i.e.\n// `featureData`targetData!(\"csv\";\"ipc\")\n// @return {dictionary} Feature and target data retrieved based on user \n// instructions\nutils.getCommandLineData:{[method]\n methodSpecification:cli.input`retrievalMethods;\n dict:key[method]!methodSpecification'[value method;key method];\n if[count idx:where`ipc=method;dict[idx]:(\"J\";\"c\";\"c\")$/:3#'dict idx];\n dict:dict,'([]typ:value method);\n featureData:.ml.i.loadDataset dict`featureData;\n featurePath:dict[`featureData]utils.dataType method`featureData;\n targetPath:dict[`targetData]utils.dataType method`targetData;\n targetName:`$dict[`targetData]`targetColumn;\n // If data retrieval methods are the same for both feature and target data, \n // only load data once and retrieve the target from the table. Otherwise,\n // retrieve target data using .ml.i.loadDataset\n data:$[featurePath~targetPath;\n (flip targetName _ flip featureData;featureData targetName);\n (featureData;.ml.i.loadDataset[dict`targetData]$[`~targetName;::;\n targetName])\n ];\n `features`target!data\n }\n\n// @kind function\n// @category utility\n// @desc Create a prediction function to be used when applying a \n// previously fit model to new data. The function calls the predict method\n// of the defined model and passes in new feature data to make predictions.\n// @param config {dictionary} Information about a previous run of AutoML \n// including the feature extraction procedure used and the best model \n// produced\n// @param features {table} Tabular feature data to make predictions on\n// @returns {number[]} Predictions\nutils.generatePredict:{[config;features]\n original_print:utils.printing;\n utils.printing:0b;\n bestModel:config`bestModel;\n features:utils.featureCreation[config;features];\n modelLibrary:config`modelLib;\n utils.printing:original_print;\n $[`sklearn~modelLibrary;\n bestModel[`:predict;<]features;\n modelLibrary in`keras`torch`theano;\n [features:enlist[`xtest]!enlist features;\n customName:\".\" sv string config`modelLib`modelFunc;\n get[\".automl.models.\",customName,\".predict\"][features;bestModel]\n\t ];\n '\"NotYetImplemented\"\n\t]\n }\n\n// @kind function\n// @category utility\n// @desc Apply feature extraction/creation and selection on provided \n// data based on a previous run\n// @param config {dictionary} Information about a previous run of AutoML \n// including the feature extraction procedure used and the best model \n// produced\n// @param features {table} Tabular feature data to make predictions on\n// @returns {table} Features produced using config feature extraction \n// procedures\nutils.featureCreation:{[config;features]\n sigFeats:config`sigFeats;\n extractType:config`featureExtractionType;\n if[`nlp~extractType;config[`savedWord2Vec]:1b];\n if[`fresh~extractType;\n relevantFuncs:raze`$distinct{(\"_\" vs string x)1}each sigFeats;\n appropriateFuncs:1!select from 0!.ml.fresh.params where f in relevantFuncs;\n config[`functions]:appropriateFuncs\n\t];\n features:dataPreprocessing.node.function[config;features;config`symEncode];\n features:featureCreation.node.function[config;features]`features;\n if[not all newFeats:sigFeats in cols features;\n n:count newColumns:sigFeats where not newFeats;\n features:flip flip[features],newColumns!((n;count features)#0f),()];\n flip value flip sigFeats#\"f\"$0^features\n }"}}},{"rowIdx":84,"cells":{"text":{"kind":"string","value":"// @kind function\n// @category rs\n// @desc Cross validated parameter random search applied to data with\n// ascending split indices\n// @param k 
{int} Number of folds\n// @param n {int} Number of repetitions\n// @param features {any[][]} Matrix of features\n// @param target {any[]} Vector of targets\n// @param function {fn} Function that takes parameters and data as input\n// and returns a score\n// @param p {dictionary} Dictionary of hyperparameters to be searched with \n// format `typ`randomState`n`p where typ is the type of search \n// (random/sobol), randomState is the seed, n is the number of \n// hyperparameter sets and p is a dictionary of parameters - see \n// documentation for more info.\n// @param tstTyp {float} Size of the holdout set used in a fitted grid \n// search, where the best model is fit to the holdout set. If 0 the function \n// will return scores for each fold for the given hyperparameters. If \n// negative the data will be shuffled prior to designation of the holdout set\n// @return {table|list} Scores for hyperparameter sets on each of\n// the k folds for all values of h and additionally returns the best \n// hyperparameters and score on the holdout set for 0 < h <=1.\nrs.kfSplit:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.kfSplit]\n\n// @kind function\n// @category rs\n// @desc Cross validated parameter random search applied to data with \n// shuffled split indices\n// @param k {int} Number of folds\n// @param n {int} Number of repetitions\n// @param features {any[][]} Matrix of features\n// @param target {any[]} Vector of targets\n// @param function {fn} Function that takes parameters and data as input\n// and returns a score\n// @param p {dictionary} Dictionary of hyperparameters to be searched with \n// format `typ`randomState`n`p where typ is the type of search \n// (random/sobol), randomState is the seed, n is the number of \n// hyperparameter sets and p is a dictionary of parameters - see \n// documentation for more info.\n// @param tstTyp {float} Size of the holdout set used in a fitted grid \n// search, where the best model is fit to the holdout set. If 0 the function \n// will return scores for each fold for the given hyperparameters. If \n// negative the data will be shuffled prior to designation of the holdout set\n// @return {table|list} Scores for hyperparameter sets on each of\n// the k folds for all values of h and additionally returns the best \n// hyperparameters and score on the holdout set for 0 < h <=1.\nrs.kfShuff:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.kfShuff]\n\n// @kind function\n// @category rs\n// @desc Cross validated parameter random search applied to data with \n// an equi-distributions of targets per fold\n// @param k {int} Number of folds\n// @param n {int} Number of repetitions\n// @param features {any[][]} Matrix of features\n// @param target {any[]} Vector of targets\n// @param function {fn} Function that takes parameters and data as input\n// and returns a score\n// @param p {dictionary} Dictionary of hyperparameters to be searched with \n// format `typ`randomState`n`p where typ is the type of search \n// (random/sobol), randomState is the seed, n is the number of \n// hyperparameter sets and p is a dictionary of parameters - see \n// documentation for more info.\n// @param tstTyp {float} Size of the holdout set used in a fitted grid \n// search, where the best model is fit to the holdout set. If 0 the function \n// will return scores for each fold for the given hyperparameters. 
If \n// negative the data will be shuffled prior to designation of the holdout set\n// @return {table|list} Scores for hyperparameter sets on each of\n// the k folds for all values of h and additionally returns the best \n// hyperparameters and score on the holdout set for 0 < h <=1.\nrs.kfStrat:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.kfStrat]\n\n// @kind function\n// @category rs\n// @desc Cross validated parameter random search applied to roll \n// forward time-series sets\n// @param k {int} Number of folds\n// @param n {int} Number of repetitions\n// @param features {any[][]} Matrix of features\n// @param target {any[]} Vector of targets\n// @param function {fn} Function that takes parameters and data as input\n// and returns a score\n// @param p {dictionary} Dictionary of hyperparameters to be searched with \n// format `typ`randomState`n`p where typ is the type of search \n// (random/sobol), randomState is the seed, n is the number of \n// hyperparameter sets and p is a dictionary of parameters - see \n// documentation for more info.\n// @param tstTyp {float} Size of the holdout set used in a fitted grid \n// search, where the best model is fit to the holdout set. If 0 the function \n// will return scores for each fold for the given hyperparameters. If \n// negative the data will be shuffled prior to designation of the holdout set\n// @return {table|list} Scores for hyperparameter sets on each of\n// the k folds for all values of h and additionally returns the best \n// hyperparameters and score on the holdout set for 0 < h <=1.\nrs.tsRolls:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.tsRolls]\n\n// @kind function\n// @category rs\n// @desc Cross validated parameter random search applied to chain \n// forward time-series sets\n// @param k {int} Number of folds\n// @param n {int} Number of repetitions\n// @param features {any[][]} Matrix of features\n// @param target {any[]} Vector of targets\n// @param function {fn} Function that takes parameters and data as input\n// and returns a score\n// @param p {dictionary} Dictionary of hyperparameters to be searched with \n// format `typ`randomState`n`p where typ is the type of search \n// (random/sobol), randomState is the seed, n is the number of \n// hyperparameter sets and p is a dictionary of parameters - see \n// documentation for more info.\n// @param tstTyp {float} Size of the holdout set used in a fitted grid \n// search, where the best model is fit to the holdout set. If 0 the function \n// will return scores for each fold for the given hyperparameters. 
If \n// negative the data will be shuffled prior to designation of the holdout set\n// @return {table|list} Scores for hyperparameter sets on each of\n// the k folds for all values of h and additionally returns the best \n// hyperparameters and score on the holdout set for 0 < h <=1.\nrs.tsChain:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.tsChain]\n\n// @kind function\n// @category rs\n// @desc Cross validated parameter random search applied to percentage \n// split dataset\n// @param pc {float} (0-1) representing percentage of validation data\n// @param n {int} Number of repetitions\n// @param features {any[][]} Matrix of features\n// @param target {any[]} Vector of targets\n// @param function {fn} Function that takes parameters and data as input\n// and returns a score\n// @param p {dictionary} Dictionary of hyperparameters to be searched with \n// format `typ`randomState`n`p where typ is the type of search \n// (random/sobol), randomState is the seed, n is the number of \n// hyperparameter sets and p is a dictionary of parameters - see \n// documentation for more info.\n// @param tstTyp {float} Size of the holdout set used in a fitted grid \n// search, where the best model is fit to the holdout set. If 0 the function \n// will return scores for each fold for the given hyperparameters. If \n// negative the data will be shuffled prior to designation of the holdout set\n// @return {table|list} Scores for hyperparameter sets on each of\n// the k folds for all values of h and additionally returns the best \n// hyperparameters and score on the holdout set for 0 < h <=1.\nrs.pcSplit:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.pcSplit]\n\n// @kind function\n// @category rs\n// @desc Cross validated parameter random search applied to randomly \n// shuffled data and validated on a percentage holdout set\n// @param pc {float} (0-1) representing percentage of validation data\n// @param k {int} Number of folds\n// @param n {int} Number of repetitions\n// @param features {any[][]} Matrix of features\n// @param target {any[]} Vector of targets\n// @param function {fn} Function that takes parameters and data as input\n// and returns a score\n// @param p {dictionary} Dictionary of hyperparameters to be searched with \n// format `typ`randomState`n`p where typ is the type of search \n// (random/sobol), randomState is the seed, n is the number of \n// hyperparameter sets and p is a dictionary of parameters - see \n// documentation for more info.\n// @param tstTyp {float} Size of the holdout set used in a fitted grid \n// search, where the best model is fit to the holdout set. If 0 the function \n// will return scores for each fold for the given hyperparameters. 
If \n// negative the data will be shuffled prior to designation of the holdout set\n// @return {table|list} Scores for hyperparameter sets on each of\n// the k folds for all values of h and additionally returns the best \n// hyperparameters and score on the holdout set for 0 < h <=1.\nrs.mcSplit:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.mcSplit]\n\n// Multi-processing functionality\n\n// Load multi-processing modules\nloadfile`:util/mproc.q\nloadfile`:util/pickle.q\n\n// If multiple processes are available, multi-process cross validation library\nif[0>system\"s\";\n multiProc.init[abs system\"s\"]enlist\".ml.loadfile`:util/pickle.q\"\n ];\nxv.picklewrap:{picklewrap[(0>system\"s\")&.p.i.isw x]x}\n\n\n================================================================================\nFILE: ml_nlp_code_cluster.q\nSIZE: 12,956 characters\n================================================================================\n\n// code/cluster.q - Nlp clustering utilities\n// Copyright (c) 2021 Kx Systems Inc\n// \n// Clustering utilites for textual data \n\n\\d .nlp\n\n// @private\n// @kind function\n// @category nlpClusteringUtility\n// @desc Extract the keywords from a list of documents or keyword\n// dictionary\n// @param parsedTab {table} A parsed document containing keywords and their\n// associated significance scores\n// @returns {dictionary[]} Keyword dictionaries\ncluster.i.asKeywords:{[parsedTab]\n keyWords:$[-9=type parsedTab[0]`keywords;parsedTab;parsedTab`keywords];\n i.fillEmptyDocs keyWords\n }\n\n// @private\n// @kind function\n// @category nlpClusteringUtility\n// @desc Split the document into clusters using kmeans\n// @param iters {long} The number of times to iterate the refining step\n// @param parsedTab {table} A parsed document containing keywords and their\n// associated significance scores\n// @param clusters {long} Cluster indices\n// @returns {long[][]} The documents' indices, grouped into clusters\ncluster.i.bisect:{[iters;parsedTab;clusters]\n idx:i.minIndex cluster.MSE each parsedTab clusters;\n cluster:clusters idx;\n (clusters _ idx),cluster@/:cluster.kmeans[parsedTab cluster;2;iters]\n }\n\n// @private\n// @kind function\n// @category nlpClusteringUtility\n// @desc Apply k-means clustering to a document\n// @param parsedTab {table} A parsed document containing keywords and their\n// associated significance scores\n// @param clusters {long[]} Cluster indices\n// @returns {long[][]} The documents' indices, grouped into clusters\ncluster.i.kmeans:{[parsedTab;clusters]\n centroids:(i.takeTop[3]i.fastSum@)each parsedTab clusters;\n value group i.maxIndex each centroids compareDocs\\:/:parsedTab\n }\n\n// @private\n// @kind function\n// @category nlpClusteringUtility\n// @desc Find nearest neighbor of document\n// @param centroids {dictionary[]} Centroids as keyword dictionaries\n// @param parsedTab {table} A parsed document containing keywords and their\n// associated significance scores\n// @returns {long[][]} Document indices \ncluster.i.findNearestNeighbor:{[centroids;doc]\n similarities:compareDocs[doc] each centroids;\n m:max similarities;\n $[m>0f;similarities?m;0n]\n }\n\n// @private\n// @kind function\n// @category nlpClusteringUtility\n// @desc Merge any clusters with significant overlap into a single \n// cluster\n// @param clusters {any[][]} Cluster indices\n// @returns {any[][]} Appropriate clusters merged together\ncluster.i.mergeOverlappingClusters:{[clusters]\n counts:count each clusters;\n similar:cluster.i.similarClusters[clusters;counts]each til count 
clusters;\n // Merge any cluster that has at least one similar cluster\n // A boolean vector of which clusters will be getting merged\n merge:1<count each similar;\n // Filter out clusters of 1, and remove duplicates\n similarClusters:distinct desc each similar where merge;\n // Do the actual merging of the similar clusters\n newClusters:(distinct raze@)each clusters similarClusters;\n // Clusters not involved in any merge\n // This can't just be (not merge), as that only drops the larger cluster,\n // not the smaller one, in each merge\n untouchedClusters:(til count clusters)except raze similarClusters;\n clusters[untouchedClusters],newClusters\n }\n\n// @private\n// @kind function\n// @category nlpClusteringUtility\n// @desc Group together clusters that share over 50% of their elements\n// @param clusters {any[][]} Cluster indices\n// @param counts {long} Count of each cluster\n// @param idx {long} Index of cluster\n// @return {any[][]} Clusters grouped together\ncluster.i.similarClusters:{[clusters;counts;idx]\n superset:counts=sum each clusters[idx]in/:clusters;\n similar:.5<=avg each clusters[idx]in/:clusters;\n notSmaller:(count clusters idx)>=count each clusters;\n where superset or(similar & notSmaller)\n }\n\n// @private\n// @kind function\n// @category nlpClusteringUtility\n// @desc Normalize the columns of a matrix so they sum to 1\n// @param matrix {float[][]} Numeric matrix of values \n// @returns {float[][]} The normalized columns\ncluster.i.columnNormalize:{[matrix]\n 0f^matrix%\\:sum matrix\n }"}}},{"rowIdx":85,"cells":{"text":{"kind":"string","value":"// Create FIFO and unzip file into it, return FIFO name\nreadintofifo:{[filename]\n fifo:\"/tmp/logfifo\",string .z.i;\n fifostr:\"mkfifo \",fifo,\";gunzip -cd \",filename,\" > \",fifo,\" &\";\n @[system;fifostr;{.lg.e[`replay;\"Failed to read log into named pipe\"]}];\n fifo\n };\n\n// upd functions down here\nrealupd:{[f;t;x] \n\t// increment the tablecounts\n tablecounts[t]+::count first x;\n\t// run the supplied function in the error trap\n\t.[f;(t;x);{[t;x;e] errorcounts[t]+::count first x}[t;x]];\n\t}[.replay.upd]\n\n// amend the upd function to filter based on the table list\nif[(not tablelist~enlist `all) and not segmentedmode; realupd:{[f;t;x] if[t in .replay.tablestoreplay; f[t;x]]}[realupd]]\n\n// amend to do chunked saves\nif[messagechunks < 0W; realupd:{[f;t;x] f[t;x]; checkcount[hdbdir;replaydate;1;tempdir]}[realupd]]\n\ninitialupd:{[t;x] \n\t // spin through the first X messages\n\t $[msgcount < (firstmessage - 1);\n\tmsgcount+::1;\n\t// Once we reach the correct message, reset the upd function\n\t@[`.;`upd;:;.replay.realupd]]\n\t}\n\n// extract user defined row counts\nmergemaxrows:{[tabname] mergenumrows^mergenumtab[tabname]};\n\n// post replay function for merge replay, invoked after all the tables have been written down for a given log file\npostreplaymerge:{[td;p;h]\n .os.md[.os.pth[string .Q.par[td;p;`]]]; // ensures directory exists before removed\n mergelimits:(tabsincountorder[.replay.tablestoreplay],())!$[.merge.mergebybytelimit;(count tabsincountorder[.replay.tablestoreplay])#mergenumbytes;({[x] mergenumrows^mergemaxrows[x]}tabsincountorder[.replay.tablestoreplay])],();\t\n // merge the tables from each partition in the tempdir together\n merge[td;p;;mergelimits;h] each tabsincountorder[.replay.tablestoreplay];\n .os.deldir .os.pth[string .Q.par[td;p;`]]; // delete the contents of tempdir after merge completion\n }\n\n// function to upsert to specified 
directory\nupserttopartition:{[h;dir;tablename;tabdata;pt;expttype;expt]\n dirpar:.Q.par[dir;pt;`$string first expt];\n directory:` sv dirpar,tablename,`;\n // make directories for tables if they don't exist\n if[count tabpar:tabsincountorder[.replay.tablestoreplay] except key dirpar;\n .lg.o[`dir;\"creating directories under \",1_string dirpar];\n tabpar:tabpar except `heartbeat`logmsg;\n .[{[d;h;t](` sv d,t,`) set .Q.en[h;0#value t]};] each dirpar,'h,'tabpar];\n .lg.o[`save;\"saving \",(string tablename),\" data to partition \",string directory];\n .[\n upsert;\n (directory;r:update `sym!sym from ?[tabdata;{(x;y;(),z)}[in;;]'[expttype;expt];0b;()]);\n {[e] .lg.e[`savetablesbypart;\"Failed to save table to disk : \",e];'e}];\n /-key in partsizes are directory to partition, need to drop training slash in directory key\n .merge.partsizes[first ` vs directory]+:(count r;-22!r);\n };\n\nsavetablesbypart:{[dir;pt;tablename;h]\n arows: count value tablename;\t\n .lg.o[`rowcheck;\"the \",(string tablename),\" table consists of \", (string arows), \" rows\"];\t\t\n // get additional partition(s) defined by parted attribute in sort.csv\t\t\n extrapartitiontype:.merge.getextrapartitiontype[tablename];\n\t\n // check each partition type actually is a column in the selected table\n .merge.checkpartitiontype[tablename;extrapartitiontype];\t\t\n // enumerate data to be upserted\n enumdata:update (`. `sym)?sym from .Q.en[h;value tablename];\n // get list of distinct combiniations for partition directories\n extrapartitions:(`. `sym)?.merge.getextrapartitions[tablename;extrapartitiontype];\n\n .lg.o[`save;\"enumerated \",(string tablename),\" table\"];\t\t\n // upsert data to specific partition directory \n upserttopartition[h;dir;tablename;enumdata;pt;extrapartitiontype] each extrapartitions;\t\t\t\t\n // empty the table\n .lg.o[`delete;\"deleting \",(string tablename),\" data from in-memory table\"];\n @[`.;tablename;0#];\n // run a garbage collection (if enabled)\n if[gc;.gc.run[]];\n };\n\n\nmerge:{[dir;pt;tablename;mergelimits;h]\n // get int partitions\n intpars:asc key ` sv dir,`$string pt; // list of enumerated partitions 0 1 2 3...\n k:key each intdir:.Q.par[hsym dir;pt] each intpars; // list of table names\n if[0=count raze k inter\\: tablename; :()]; \n // get list of partition directories containing specified table\n partdirs:` sv' (intdir,'parts) where not ()~/:parts:k inter\\: tablename; // get each of the directories that hold the table\n // permanent storage destination, where data being merged too\n dest:.Q.par[h;pt;tablename];\n // exit function if no subdirectories are found\n if[0=count partdirs; :()];\n // if no table data set empty table. 
If data to merge, merge with correct merge function \n $[0 = count partdirs inter exec ptdir from .merge.partsizes;\n [.lg.w[`merge;\"no records for \", string[tablename]];\n (` sv dest,`) set @[.Q.en[h;value tablename];.merge.getextrapartitiontype[tablename];`p#];\n ];\n [$[mergemethod~`part;\n [dest:` sv .Q.par[h;pt;tablename],`; // provides path to where to move data to\t\n /-get chunks to partitions to merge in batch\n partchunks:.merge.getpartchunks[partdirs;mergelimits[tablename]];\n .merge.mergebypart[tablename;dest]'[partchunks];\n ];\n mergemethod~`col;\n [.merge.mergebycol[(tablename;value tablename);dest]'[partdirs];\n /- merging data by column does not create .d file - set it here after merge\n .lg.o[`merge;\"setting .d file\"];\n (` sv dest,`.d) set cols value tablename;\n ];\n .merge.mergehybrid[(tablename;value tablename);dest;partdirs;mergelimits[tablename]]\n ]\n ]\n ];\n .lg.o[`merge;\"deleting \", string[tablename], \" from temp storage\"]; \n .os.deldir each .os.pth each string partdirs;\n // set the attributes\n .lg.o[`merge;\"setting attributes\"];\n @[dest;;`p#] each .merge.getextrapartitiontype[tablename]; \n .lg.o[`merge;\"merge complete\"];\n // run a garbage collection (if enabled)\n if[gc;.gc.run[]];\n };\n\n// Return log file if it exists and not in segmented mode\ngetlogfile:{\n if[.replay.segmentedmode;.lg.e[`getlogfile;m:\"Segmented mode requires tplogdir.\"];'m];\n if[()~key hsym f:.replay.tplogfile;.lg.e[`getlogfile;m:\"Specified tplogfile \",string[f],\" does not exist\"];'m];\n enlist hsym f\n };\n\n// Return contents of log directory if it exists\ngetlogdir:{\n if[()~key hsym d:.replay.tplogdir;.lg.e[`getlogdir;m:\"Specified log directory \",string[d],\" does not exist\"];'m];\n if[d like \"*.gz\";.lg.e[`getlogdir;m:\"Zipped log directories not supported.\"];'m];\n $[.replay.segmentedmode;.replay.getstplogs[d];.Q.dd[logdir;] each key logdir:hsym d]\n };\n\n// Use STP meta table and tplogdir to build log names\ngetstplogs:{[logdir]\n // If trying to replay zipped files on Windows, error out\n winzip:(.z.o like \"w*\") and z:`stpmeta.gz in key d:hsym logdir;\n if[winzip;.lg.e[`replaylog;m:\"Zipped log files cannot be directly replayed on Windows\"];'m];\n\n // If meta table is zipped, assume all other logs are zipped as well and build log names accordingly\n if[z;system \"gunzip \",1_string .Q.dd[d;`stpmeta.gz]];\n metatable:@[get;.Q.dd[d;`stpmeta];{.lg.e[`getstpmeta;m:\"Log directory must contain valid STP meta table\"];'m}];\n if[z;system \"gzip \",1_string .Q.dd[d;`stpmeta]];\n names:exec distinct logname from metatable where any each tbls in .replay.tablestoreplay;\n .Q.dd[d;] each $[z;.Q.dd[;`gz];::] each last each ` vs' names\n };\n\n// Set up log replay list and clean HDB if necessary, kick off replay\ninitandrun:{\n if[all not null .replay[`tplogfile`tplogdir];.lg.e[`getlogs;m:\"Can't pass in log file and directory.\"];'m];\n\n .lg.o[`initandrun;\"Initialising replay settings.\"];\n .replay.tablestoreplay:$[`all~first .replay.tablelist;tables[];.replay.tablelist,()];\n .replay.logstoreplay:$[not null .replay.tplogfile;.replay.getlogfile[];.replay.getlogdir[]];\n if[not count r:.replay.logstoreplay;.lg.e[`initandrun;m:\"No log files found\"];'m];\n\n // If in segmented mode, get replay date and clean HDB once\n if[.replay.segmentedmode;\n // Pull out the date from the STP log file name - *_YYYYMMDDhhmmss (+ .gz if zipped)\n .replay.replaydate:first l:\"D\"$$[first[r] like \"*.gz\";-9_-17#;-6_-14#] each string r;\n if[not 1=count distinct 
l;.lg.e[`replay;m:\"Cannot replay logs from different dates in segmented mode!\"];'m];\n if[.replay.clean;.replay.cleanhdb .replay.replaydate]\n ];\n\n\n // Replay all logs and exit\n .lg.o[`initandrun;\"Replaying the following log(s): \",csv sv 1_'string .replay.logstoreplay];\n .replay.pathlist:()!();\n .replay.replaylog each .replay.logstoreplay;\n if[sortafterreplay;applysortandattr[.replay.pathlist]];\n if[partandmerge;postreplaymerge[tempdir;.replay.replaydate;hdbdir]]; \n .lg.o[`replay;\"replay complete\"];\n if[.replay.exitwhencomplete;exit 0];\n };\n\n\\d .\n\n// Load the sort csv and kick off replay if auto-running\n.sort.getsortcsv[.replay.sortcsv]\nif[.replay.autoreplay;.replay.initandrun[]];\n\n================================================================================\nFILE: TorQ_code_processes_tickerplant.q\nSIZE: 3,457 characters\n================================================================================\n\n\n/ q tick.q sym . -p 5001 </dev/null >foo 2>&1 &\n/2014.03.12 remove license check\n/2013.09.05 warn on corrupt log\n/2013.08.14 allow <endofday> when -u is set\n/2012.11.09 use timestamp type rather than time. -19h/\"t\"/.z.Z -> -16h/\"n\"/.z.P\n/2011.02.10 i->i,j to avoid duplicate data if subscription whilst data in buffer\n/2009.07.30 ts day (and \"d\"$a instead of floor a)\n/2008.09.09 .k -> .q, 2.4\n/2008.02.03 tick/r.k allow no log\n/2007.09.03 check one day flip\n/2006.10.18 check type?\n/2006.07.24 pub then log\n/2006.02.09 fix(2005.11.28) .z.ts end-of-day\n/2006.01.05 @[;`sym;`g#] in tick.k load\n/2005.12.21 tick/r.k reset `g#sym\n/2005.12.11 feed can send .u.endofday\n/2005.11.28 zero-end-of-day\n/2005.10.28 allow`time on incoming\n/2005.10.10 zero latency\n\"kdb+tick 2.8 2014.03.12\"\n\n/q tick.q SRC [DST] [-p 5010] [-o h]\n\n/load schema from params, default to \"sym.q\"\n.proc.loadf[(src:$[`schemafile in key .proc.params;raze .proc.params`schemafile;\"sym\"]),\".q\"];\n\n.proc.loadf[getenv[`KDBCODE],\"/common/u.q\"];\n.proc.loadf[getenv[`KDBCODE],\"/common/timezone.q\"];\n.proc.loadf[getenv[`KDBCODE],\"/common/eodtime.q\"];\n.proc.loadf[getenv[`KDBCODE],\"/common/datadog.q\"];\n\n\\d .\nupd:{[tab;x] .u.icounts[tab]+::count first x;}"}}},{"rowIdx":86,"cells":{"text":{"kind":"string","value":"// @kind function\n// @category runModels\n// @desc Seeded cross validation function, designed to ensure that \n// models will be consistent from run to run in order to accurately assess\n// the benefit of updates to parameters.\n// @param tts {dictionary} Feature and target data split into training/testing\n// sets\n// @param config {dictionary} Information relating to the current run of AutoML\n// @param modelTab {table} Models to be applied to feature data\n// @return {boolean[]|float[]} Predictions and associated actual values for\n// each cross validation fold\nrunModels.xValSeed:{[tts;config;modelTab]\n xTrain:tts`xtrain;\n yTrain:tts`ytrain;\n numReps:1;\n scoreFunc:get[config`predictionFunction]modelTab`minit;\n seedModel:`seed~modelTab`seed;\n isSklearn:`sklearn~modelTab`lib;\n // Seed handled differently for sklearn and keras \n seed:$[not seedModel;\n ::;\n isSklearn;\n enlist[`random_state]!enlist config`seed;\n (config`seed;modelTab`fnc)\n ];\n $[seedModel&isSklearn;\n // Grid search required to incorporate the random state definition\n [gsFunc:utils.qpyFuncSearch config`gridSearchFunction;\n numFolds:config`gridSearchArgument;\n val:enlist[`val]!enlist 0;\n first value gsFunc[numFolds;numReps;xTrain;yTrain;scoreFunc;seed;val]\n ];\n // Otherwise 
a vanilla cross validation is performed\n [xvFunc:utils.qpyFuncSearch config`crossValidationFunction;\n numFolds:config`crossValidationArgument;\n xvFunc[numFolds;numReps;xTrain;yTrain;scoreFunc seed]\n ]\n ]\n }\n \n// @kind function\n// @category runModels\n// @desc Extract the scoring function to be applied for model selection\n// @param config {dictionary} Information relating to the current run of AutoML\n// @param modelTab {table} Models to be applied to feature data\n// @return {<} Scoring function appropriate to the problem being solved\nrunModels.scoringFunc:{[config;modelTab]\n problemType:$[`reg in distinct modelTab`typ;\"Regression\";\"Classification\"];\n scoreFunc:config`$\"scoringFunction\",problemType;\n printScore:utils.printDict[`scoreFunc],string scoreFunc;\n config[`logFunc]printScore;\n scoreFunc\n }\n\n// @kind function\n// @category runModels\n// @desc Order average predictions returned by models\n// @param modelTab {table} Models to be applied to feature data\n// @param scoreFunc {<} Scoring function applied to predictions\n// @param orderFunc {<} Ordering function applied to scores\n// @param predictions {boolean[]|float[]} Predictions made by model\n// @return {dictionary} Scores returned by each model in appropriate order \nrunModels.orderModels:{[modelTab;scoreFunc;orderFunc;predicts]\n avgScore:avg each scoreFunc .''predicts;\n scoreDict:modelTab[`model]!avgScore;\n orderFunc scoreDict\n }\n\n// @kind function\n// @category runModels\n// @desc Fit best model on holdout set and score predictions\n// @param scores {dictionary} Scores returned by each model\n// @param tts {dictionary} Feature and target data split into training/testing\n// sets\n// @param modelTab {table} Models to be applied to feature data\n// @param scoreFunc {<} Scoring function applied to predictions\n// @param config {dictionary} Information related to the current run of AutoML\n// @return {dictionary} Fitted model and scores along with time taken \nrunModels.bestModelFit:{[scores;tts;modelTab;scoreFunc;config]\n config[`logFunc]scores;\n holdoutTimeStart:.z.T;\n bestModel:first key scores;\n printModel:utils.printDict[`bestModel],string bestModel;\n config[`logFunc]printModel;\n modelLib:first exec lib from modelTab where model=bestModel;\n fitScore:$[modelLib in key models;\n runModels.i.customModel[bestModel;tts;modelTab;scoreFunc;config];\n runModels.i.sklModel[bestModel;tts;modelTab;scoreFunc]\n ];\n holdoutTime:.z.T-holdoutTimeStart;\n returnDict:`holdoutTime`bestModel!holdoutTime,bestModel;\n fitScore,returnDict\n }\n\n// @kind function\n// @category runModels\n// @desc Create dictionary of meta data used\n// @param holdoutRun {dictionary} Information from fitting/scoring on the \n// holdout set\n// @param scores {dictionary} Scores returned by each model\n// @param scoreFunc {<} Scoring function applied to predictions\n// @param xValTime {time} Time taken to apply xval functions to data\n// @param modelTab {table} Models to be applied to feature data\n// @param modelName {string} Name of best model\n// @return {dictionary} Metadata to be contained within the end reports\nrunModels.createMeta:{[holdoutRun;scores;scoreFunc;xValTime;modelTab;modelName]\n modelLib:first exec lib from modelTab where model=modelName;\n modelFunc:first exec fnc from modelTab where model=modelName;\n holdScore:holdoutRun`score;\n holdTime:holdoutRun`holdoutTime;\n `holdoutScore`modelScores`metric`xValTime`holdoutTime`modelLib`modelFunc!\n (holdScore;scores;scoreFunc;xValTime;holdTime;modelLib;modelFunc)\n 
}\n\n\n================================================================================\nFILE: ml_automl_code_nodes_runModels_init.q\nSIZE: 260 characters\n================================================================================\n\n// code/nodes/runModels/init.q - Load runModels node\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Load code for runModels node \n\n\\d .automl\n\nloadfile`:code/nodes/runModels/utils.q\nloadfile`:code/nodes/runModels/funcs.q\nloadfile`:code/nodes/runModels/runModels.q\n\n\n================================================================================\nFILE: ml_automl_code_nodes_runModels_runModels.q\nSIZE: 1,783 characters\n================================================================================\n\n// code/nodes/runModels/runModels.q - Run models\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Select the most promising model from the list of provided models for the \n// user defined problem. This is done in a cross validated manner, with the\n// best model selected based on how well it generalizes to new data prior to\n// the application of grid/pseduo-random/sobol-random search optimization.\n\n\n\\d .automl\n\n// @kind function\n// @category node\n// @desc Runs models from modelTable returns best one\n// @param config {dictionary} Location and method by which to retrieve the data\n// @param tts {dictionary} Feature and target data split into training/testing\n// sets\n// @param modelTab {table} Potential models to be applied to feature data\n// @return {dictionary} Best model returned along with name of model\nrunModels.node.function:{[config;tts;modelTab]\n runModels.setSeed config;\n holdoutSet:runModels.holdoutSplit[config;tts];\n startTime:.z.T;\n predictions:runModels.xValSeed[holdoutSet;config]each modelTab;\n scoreFunc:runModels.scoringFunc[config;modelTab];\n orderFunc:runModels.jsonParse scoreFunc;\n scores:runModels.orderModels[modelTab;scoreFunc;orderFunc;predictions];\n totalTime:.z.T-startTime;\n holdoutRun:runModels.bestModelFit[scores;holdoutSet;modelTab;scoreFunc;\n config];\n metaData:runModels.createMeta[holdoutRun;scores;scoreFunc;totalTime;modelTab;\n holdoutRun`bestModel];\n returnKeys:`orderFunc`bestModel`bestScoringName`modelMetaData;\n returnVals:(orderFunc;holdoutRun`model;holdoutRun`bestModel;metaData);\n returnKeys!returnVals\n }\n\n// Input information\nrunModels.node.inputs:`config`ttsObject`models!\"!!+\"\n\n// Output information\nrunModels.i.k:`orderFunc`bestModel`bestScoringName`modelMetaData;\nrunModels.node.outputs:runModels.i.k!\"<<s!\"\n\n\n================================================================================\nFILE: ml_automl_code_nodes_runModels_utils.q\nSIZE: 2,712 characters\n================================================================================\n\n// code/nodes/runModels/utils.q - Utilities for the runModels node\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Utility functions specific the the runModels node implementation\n\n\\d .automl\n\n// @kind function\n// @category runModelsUtility\n// @desc Extraction of data from a file\n// @param filePath {string} File path from which to extract the data from \n// @return {dictionary} parsed from file\nrunModels.i.readFile:{[filePath]\n key(!).(\"S=;\")0:filePath\n }\n\n// @kind function\n// @category runModelsUtility\n// @desc Fit and score custom model to holdout set\n// @param bestModel {symbol} The best scorinng model from xval\n// @param tts {dictionary} Feature and target data split into training \n// and testing set\n// @param 
modelTab {table} Models to be applied to feature data\n// @param scoreFunc {<} Scoring metric applied to evaluate the model\n// @param cfg {dictionary} Configuration information assigned by the \n// user and related to the current run\n// @return {dictionary} The fitted model along with the predictions\nrunModels.i.customModel:{[bestModel;tts;modelTab;scoreFunc;cfg]\n modelLib:first exec lib from modelTab where model=bestModel;\n modelType:first exec typ from modelTab where model=bestModel;\n if[(`keras~modelLib)&`multi~modelType;\n tts[`ytrain]:runModels.i.prepMultiTarget tts\n ];\n modelDef:utils.bestModelDef[modelTab;bestModel]each`lib`fnc;\n customStr:\".automl.models.\",sv[\".\";string modelDef],\".\";\n model:get[customStr,\"model\"][tts;cfg`seed];\n modelFit:get[customStr,\"fit\"][tts;model];\n modelPred:get[customStr,\"predict\"][tts;modelFit];\n score:scoreFunc[modelPred;tts`ytest];\n `model`score!(modelFit;score)\n }\n\n// @kind function\n// @category runModelsUtility\n// @desc One hot encodes target values and converts to Numpy array\n// @param tts {dictionary} Feature and target data split into training\n// and testing set\n// @return {dictionary} Preprocessed target values\nrunModels.i.prepMultiTarget:{[tts]\n models.i.npArray flip value .ml.i.oneHot tts`ytrain\n }\n\n\n// @category runModelsUtility\n// @desc Fit and score sklearn model to holdout set\n// @param bestModel {symbol} The best scorinng model from xval\n// @param tts {dictionary} Feature and target data split into training\n// and testing set\n// @param modelTab {table} Models to be applied to feature data\n// @param scoreFunc {<} Scoring metric applied to evaluate the model\n// @return {dictionary} The fitted model along with the predictions\nrunModels.i.sklModel:{[bestModel;tts;modelTab;scoreFunc]\n model:utils.bestModelDef[modelTab;bestModel;`minit][][];\n model[`:fit]. 
tts`xtrain`ytrain;\n modelPred:model[`:predict][tts`xtest]`;\n score:scoreFunc[modelPred;tts`ytest];\n `model`score!(model;score)\n }\n\n\n================================================================================\nFILE: ml_automl_code_nodes_saveGraph_funcs.q\nSIZE: 3,181 characters\n================================================================================\n\n// code/nodes/saveGraph/funcs.q - Functions called in saveGraph node\n// Copyright (c) 2021 Kx Systems Inc\n//\n// Definitions of the main callable functions used in the application of\n// .automl.saveGraph\n\n\\d .automl\n\n// @kind function\n// @category saveGraph\n// @desc Save down target distribution plot\n// @param params {dictionary} All data generated during the process\n// @param savePath {string} Path where images are to be saved\n// return {::} Save target distribution plot to appropriate location\nsaveGraph.targetPlot:{[params;savePath]\n problemTyp:string params[`config;`problemType];\n plotFunc:\".automl.saveGraph.i.\",problemTyp,\"TargetPlot\";\n get[plotFunc][params;savePath];\n }\n\n// @kind function\n// @category saveGraph\n// @desc Save down result plot depending on problem type\n// @param params {dictionary} All data generated during the process\n// @param savePath {string} Path where images are to be saved\n// return {::} Save confusion matrix or residual plot to appropriate location\nsaveGraph.resultPlot:{[params;savePath]\n problemTyp:params[`config;`problemType];\n $[`class~problemTyp;\n saveGraph.confusionMatrix;\n saveGraph.residualPlot\n ][params;savePath]\n }\n\n// @kind function\n// @category saveGraph\n// @desc Save down confusion matrix\n// @param params {dictionary} All data generated during the process\n// @param savePath {string} Path where images are to be saved\n// return {::} Save confusion matrix to appropriate location\nsaveGraph.confusionMatrix:{[params;savePath]\n confMatrix:params[`analyzeModel;`confMatrix];\n modelName:params`modelName;\n classes:`$string key confMatrix;\n saveGraph.i.displayConfMatrix[value confMatrix;classes;modelName;savePath]\n }\n\n// @kind function\n// @category saveGraph\n// @desc Save down residual plot\n// @param params {dictionary} All data generated during the process\n// @param savePath {string} Path where images are to be saved\n// return {::} Save residual plot to appropriate location\nsaveGraph.residualPlot:{[params;savePath]\n residuals:params[`analyzeModel;`residuals];\n modelName:params`modelName;\n tts:params`ttsObject;\n saveGraph.i.plotResiduals[residuals;tts;modelName;savePath]\n }\n\n// @kind function\n// @category saveGraph\n// @desc Save down impact plot\n// @param params {dictionary} All data generated during the process\n// @param savePath {string} Path where images are to be saved\n// return {::} Save impact plot to appropriate location\nsaveGraph.impactPlot:{[params;savePath]\n modelName:params`modelName;\n sigFeats:params`sigFeats;\n impact:params[`analyzeModel;`impact];\n // Update impact dictionary to include column names instead of just indices\n updKeys:sigFeats key impact;\n updImpact:updKeys!value impact;\n saveGraph.i.plotImpact[updImpact;modelName;savePath];\n }"}}},{"rowIdx":87,"cells":{"text":{"kind":"string","value":"Using modified .z\nfunctions to trace, monitor and control execution¶\nEvery client interaction with a kdb+ server is handled by one of the p\n? functions you’ll find in the system namespace .z\n. These functions have reasonable, simple defaults that work fine right out of the box. 
What we’re doing here is taking advantage of the fact that they’re just functions, allowing you to overwrite them with your own custom code to show or modify what’s happening.\nThe utility scripts in github.com/simongarland/dotz are examples of how to do this, and these files are described in detail below.\nIn all of the examples the code to wrap up existing definitions looks complicated. The reason is that these are general scripts and so a combination of them could be loaded into applications with pre-existing custom .z.p\n? definitions. The wrapping code protects these definitions, but in a particular application you can probably simply replace or extend existing definitions rather than wrapping them.\nTake it for a spin¶\nThe simplest way to get a feeling for what’s going on is to try it out. Start up two kdb+ sessions, load traceusage.q\ninto one of them – and then talk to it from the other server. Watch the output in the traceusage\nsession.\nVanilla server session:\nq)h:hopen 5001\nq)h\n3\nq)h\"2+2 3\"\n4 5\nq)h\"2 3 4+2 3\"\n'length\nq)hclose h\ntraceusage\nserver session:\n$ q traceusage.q -p 5001\n…\n2008.05.09 12:38:10.367 ms:0.002003799 m+:0K pw a:localhost u: w:4 (`simon;\"***\")\n2008.05.09 12:38:10.370 ms:0.003025343 m+:0K po a:localhost u:simon w:4 4\n2008.05.09 12:38:17.151 ms:0.02098095 m+:0K pg a:localhost u:simon w:4 2+2 3\n*2008.05.09 12:38:25.438 (error:length) pg a:localhost u:simon w:4 2 3 4+2 3\n2008.05.09 12:38:33.246 ms:0.002986053 m+:0K pc a:192.168.1.34 u:simon w:0 4\n(On non-Windows OSs the error line will be in glorious 1980s style color.)\nThe gory details¶\nWhat the individual files do, and how to use them.\nThe toolkit¶\nThe “tools” you have to work with are the p\n? functions from .z\n: .z.po\n, .z.pc\n, .z.pw\n, .z.pg\n, .z.ps\n, .z.ph\n, .z.pp\nand .z.exit\n. Combined with the .z\nvariables .z.a\n, .z.u\nand .z.w\nwhich are always set to the values of the client during execution of the .z.p\n? function. Depending on how the function is called, additional information may be provided as arguments to the .z.p\n? functions (user ID and password for .z.pw\n, browser environment for .z.ph\nand .z.pp\n).\nBy default, execution is done using value\nso strings or symbol argument lists can be tested in a console.\nQ for Mortals: §11.6 Interprocess Communication\nsaveorig.q\n¶\nThis script just saves original values of things like .z.pg\nso you can revert to original definitions without having to restart the task. This is made a little more complicated by the way some of the default definitions aren’t materialized in the user workspace. For example by default .z.pg\nis just {value x}\nbut that’s run in the kdb+ executable. The default values are created explicitly if need be.\n.dotz.exit.ORIG:.z.exit:@[.:;`.z.exit;{;}];\n.dotz.pg.ORIG:.z.pg:@[.:;`.z.pg;{.:}];\n.dotz.ps.ORIG:.z.ps:@[.:;`.z.ps;{.:}];\n…\nOther functions and variables shared between multiple scripts (such as debug output level .debug.LEVEL\n, or the .z.a\nIP address <-> hostname cache .dotz.IPA\n) are defined here too. 
Although it would be simpler to embed this setup code in each script, allowing them to be standalone, one tires of the cut'n'paste forays required by every tiny change.
Note
After the various state variables have been defined the script saveorig.custom.q is loaded, if found, allowing you to customise the setup without needing to have a modified version of the saveorig script.
Again, for production use you should rip out the unneeded definitions.
Tracing execution¶
Tracing execution of the various callbacks is the simplest application. It can be as simple as just sprinkling 0N! statements around the functions, or as complicated as logging to an external file. As these are samples they also track the use of .z.pi – but that can get tiresome if you're debugging from a console. In that case just zap the custom .z.pi definition with:
q)\x .z.pi
The variable .usage.LEVEL can be set to control how much is output. By default (2) it displays all messages, a value of 1 will display only errors and a value of 0 will temporarily disable logging. In the examples below, the sample session is a simple hopen, get "2+2", get "2 3+3 4 5" then hclose.
dumpusage.q¶
The simplest file of all, it just puts in 0N! to display the input and the results. It's fine for debugging a simple conversation with a single client – but not informative enough for more complex setups.
dotz$ q dumpusage.q -p 5001
…
q)"***"
`simon
1b
4
4
"2+2"
4
"2 3+3 4 5"
4
4
traceusage.q¶
Dumps formatted output to the console, and on non-Windows consoles it will color errors and expensive calls. The definition of what's expensive can be set by .usage.EXPENSIVE (in milliseconds).
dotz$ q traceusage.q -p 5001
…
q)
2008.05.13 11:42:56.290 ms:0.002003799 m+:0K pw a:localhost u: w:4 (`simon;"***")
2008.05.13 11:42:56.319 ms:0.003968307 m+:0K po a:localhost u:simon w:4 4
2008.05.13 11:43:00.866 ms:0.0159911 m+:0K pg a:localhost u:simon w:4 2+2
*2008.05.13 11:43:08.818 (error:length) pg a:localhost u:simon w:4 2 3+3 4 5
2008.05.13 11:43:13.986 ms:0.004007597 m+:0K pc a:192.168.1.34 u:simon w:0 4
lastusage.q¶
When debugging it's sometimes more helpful to be able to grab the last request that came in rather than just looking at a trace of what happened. This set of custom callbacks stores the last calls in namespace .last, allowing you to fetch the data and retry the request directly in your session.
dotz$ q lastusage.q -p 5001
…
q).last
| ::
pw | ``when`u`w`a`x`y`z`r!(::;2008.05.13T11:44:36.655;`;4;2130706433;{[x;y]1b};`simon;"";1b)
zcmd| `pc
po | ``when`u`w`a`x`y`r!(::;2008.05.13T11:44:36.655;`simon;4;2130706433;::;4;4)
pg | ``when`u`w`a`x`y`r!(::;2008.05.13T11:44:40.951;`simon;4;2130706433;.:;"2+2";4)
pc | ``when`u`w`a`x`y`r!(::;2008.05.13T11:44:52.111;`simon;0;-1062731486;::;4;4)
q).last.pg
| ::
when| 2008.05.13T11:44:40.951
u | `simon
w | 4
a | 2130706433
x | .:
y | "2+2"
r | 4
q)value .last.pg.y
4
monitorusage.q¶
If the monitoring is to be left running for a long time scrolling back through the console is not a sensible way to look for problems. This script logs all requests to a local table USAGE, allowing you to analyse the data.
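Once a few requests have been captured, analysing them is just ordinary qSQL over USAGE. For example (the column names are the ones visible in the session output below):
q)select calls:count i,totalms:sum ms by u,zcmd from USAGE
q)select from USAGE where not ok
These two queries are only illustrative; anything you would run against a normal in-memory table works here.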
As the data is stored in an in-memory table it's of course lost when you exit unless you choose to do something with .z.exit.
dotz$ q monitorusage.q -p 5001
…
q)USAGE
date time ms mdelta zcmd ipa u w cmd ok error
---------------------------------------------
q)USAGE
date time ms mdelta zcmd ipa u w cmd ok error
-------------------------------------------------------------------------------------------------
2008.05.13 11:48:19.360 0.2459958 0 pi 192.168.1.34 simon 0 "USAGE" 1
2008.05.13 11:48:31.728 0.002003799 0 pw localhost 4 "(`simon;\"***\")" 1
2008.05.13 11:48:31.729 0 0 po localhost simon 4 ,"4" 1
2008.05.13 11:48:36.360 0.0159911 0 pg localhost simon 4 "2+2" 1
2008.05.13 11:48:41.920 0 0 pg localhost simon 4 "2 3+3 4 5" 0 length
2008.05.13 11:48:46.512 0.003025343 0 pc 192.168.1.34 simon 0 ,"4" 1
q)
logusage.q and loadusage.q¶
Finally, the all-singing all-dancing version. This script logs all requests directly to an external logfile – using the same log mechanism as kdb+tick. This allows logging to be left running for days without having to worry about tables growing – and will ensure that the logging data is safe even if the session terminates unexpectedly. Use loadusage.q to load the logged data into a session as a table (same schema as that from monitorusage.q except hostname added).
dotz$ q logusage.q -p 5001
…
q)'type
q)\\ / nothing to see here..
dotz$ q loadusage.q
…
q)USAGE
date time ms mdelta zcmd ipa host u w cmd ok error
--------------------------------------------------------------------------------------------------------
2008.05.13 13:01:42.694 0.002003799 0 pw 127.0.0.1 localhost 5 (`simon;"***") 1
2008.05.13 13:01:42.695 0.0009822543 0 po 127.0.0.1 localhost simon 5 5 1
2008.05.13 13:01:46.198 0.0159911 0 pg 127.0.0.1 localhost simon 5 2+2 1
2008.05.13 13:01:57.350 0 0 pg 127.0.0.1 localhost simon 5 2 3+3 4 5 0 length
2008.05.13 13:02:00.901 0.004007597 0 pc 192.168.1.34 simon 0 5 1
q)select from USAGE where not ok
date time ms mdelta zcmd ipa host u w cmd ok error
--------------------------------------------------------------------------------------
2008.05.13 11:50:55.988 0 0 pg 127.0.0.1 localhost simon 5 2 3+3 4 5 0 length
q)
Slamming the doors¶
Another important use for modified .z.p? callbacks is to control access to a q session. Q contains some very coarse access controls settable by command-line options – particularly -u or -U to enforce password-controlled access (with MD5 passwords), -b to enforce read-only access and -T to set a maximum CPU time per single client call.
The password control in -u and -U is all done in the kdb+ executable, so completely outside user control. However as soon as the initial check (if any) has been done control is passed to .z.pw, which can say if a connection is to be allowed. This can be via some session internal table or function, or can go outside to something like a central single-signon server.
blockusage.q¶
Use this script simply to block all client interaction: it just sets .z.pw to always return false, i.e.
no connection is allowed for supplied user ID and password.
controlaccess.q and loadinvalidaccess.q¶
There are so many ways to control access that this script is way too complicated for immediate use as it stands – just pick an interesting subset.
It shows how to control access via
- a user table – splitting users into superusers (who can do anything), powerusers (who can run ad-hoc queries, but can't do things like shutdown the session) and defaultusers (who can only use a specific list of pre-defined commands).
- the client's server – with name matching a set of wildcards
- a list of valid commands
- a command validator which parses input
Invalid access attempts are logged, and the logfile can be loaded into a table and queried with loadinvalidaccess.q
Tracking clients and servers¶
Q provides a list of handles in use with the keys of .z.W. These scripts provide more background information about what's “behind” the handles by extending .z.po and .z.pc.
trackclients.q¶
Tracking of clients can be automated using this script: .z.po and .z.pc maintain the list automatically.
By default, the table of clients just uses information provided by .z.po like .z.u and .z.w, but if .clients.INTRUSIVE is set, the server will ask the clients for more details like their q versions, number of secondary processes etc.
Output from a session where a client did three hopen 5001s, and one hclose.
dotz$ q trackclients.q -p 5001
…
q)CLIENTS
w| ipa u a poz pcz
-| --------------------------------------------------------------------------
4| localhost simon 2130706433 2008.05.13T13:37:14.176
5| localhost simon 2130706433 2008.05.13T13:37:15.007
| localhost simon 2130706433 2008.05.13T13:37:15.735 2008.05.13T13:37:26.359
q)
trackservers.q¶
Giving client applications the ability to track servers simplifies application design – no need to hardcode server+port settings, or to handle servers becoming unavailable.
Unlike trackclients.q you do have to add server records manually, although .z.pc handles them going away.
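The mechanism behind both scripts is the same pair of callbacks. A stripped-down illustration (not the actual trackclients.q code, which records rather more detail and wraps any existing definitions as described earlier) might look like this:
/ bare-bones sketch of a connection table maintained from .z.po/.z.pc
CLIENTS:([w:`int$()] ipa:`symbol$(); u:`symbol$(); poz:`timestamp$(); pcz:`timestamp$())
.z.po:{[h] `CLIENTS upsert (h;`$"." sv string "i"$0x0 vs .z.a;.z.u;.z.p;0Np);}   / record open
.z.pc:{[h] update pcz:.z.p from `CLIENTS where w=h;}                             / stamp close
For the servers table the flow is reversed: you register the connections yourself and only the cleanup on disconnect is automatic.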
You can either add a new server using a function like .servers.addnh\n(nh\nis name, hpup) or add a server record for an existing open handle using .servers.addw\n.\nServers can be public or private – a private server is not handed on to other users who request a list of current servers (the simplest way of setting up a new session, no central “handle server” to be maintained).\nBy default servers that disappear are retried regularly.\n$ q trackservers.q\nq).servers.addw hopen`:welly3:2018\n3\nq).servers.addw hopen`:welly3:2017\n4\nq)SERVERS\nname hpup w hits private lastz\n---------------------------------------------------------------\nservers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337\ntaq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761\nlava2006 :welly3:2017 4 0 0 2008.05.13T13:45:05.400\nq)SERVERS\nname hpup w hits private lastz\n---------------------------------------------------------------\nservers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337\ntaq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761\nlava2006 :welly3:2017 4 0 0 2008.05.13T13:45:05.400\nq).servers.handlefor`lava2006\n4\nq).servers.handlefor`lava2007\n'lava2007.not.available\nq)SERVERS\nname hpup w hits private lastz\n---------------------------------------------------------------\nservers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337\ntaq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761\nlava2006 :welly3:2017 4 1 0 2008.05.13T13:45:44.824\nq)\nAdding servers with user supplied name with .servers.addnh\n:\nq).servers.addnh[`taq;`::5001]\n3\nq).servers.addnh[`taq;`::5001]\n5\nq).servers.addnh[`taq;`::5001]\n6\nq).servers.addnh[`taq;`::5001]\n7\nq)SERVERS\nname hpup w hits private lastz\n---------------------------------------------------------------\nservers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337\ntaq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761\nlava2006 :welly3:2017 4 1 0 2008.05.13T13:45:44.824\ntaq ::5001 3 0 0 2008.05.13T13:50:03.777\ntaq ::5001 5 0 0 2008.05.13T13:50:05.513\ntaq ::5001 6 0 0 2008.05.13T13:50:06.848\ntaq ::5001 7 0 0 2008.05.13T13:50:07.640\nq).servers.handlefor`taq\n3\nq).servers.handlefor`taq\n5\nq).servers.handlefor`taq\n6\nq).servers.handlefor`taq\n7\nq).servers.handlefor`taq\n3\nRunning on other servers¶\nThe default Q IPC allows you to easily submit synchronous or asynchronous requests. Combined with a list of all available servers from trackservers.q\nabove you can deal with most simple requests.\nremotetasks.q\n¶\nThis script provides an extra way of dealing with a lot of data requests. It allows you to submit synchronous or asynchronous requests, locally or remotely – and collects all the results in a local table TASKS\n. So, for example, if you had to run a few hundred queries to be able to build a report and you had 10 server sessions available to query you’d simply submit all 100 queries and either pick up results as they drift in, or wait until all are complete.\nYou can additionally allocate requests to a request group to make it easy to check when a complete group has completed.\nHere’s an example session using two servers on 5001 and 5002. 
First create the server table entries.\nq).servers.addnh[`hh;`::5001]\n5\nq).servers.addnh[`hh;`::5002]\n6\nq).servers.addnh[`hh;`::5002]\n7\nq)SERVERS\nname hpup w hits private lastz\n--------------------------------------------------------------\nservers :192.168.1.34:0 0 0 1 2008.05.13T18:37:41.087\nhh ::5001 5 0 0 2008.05.13T18:38:59.455\nhh ::5002 6 0 0 2008.05.13T18:39:03.886\nhh ::5002 7 0 0 2008.05.13T18:39:05.390\nSubmit a few tasks:\nq).tasks.rxa[.servers.handlefor`hh;\"max til 10\"]\n10001\nq).tasks.rxa[.servers.handlefor`hh;\"max til 10\"]\n10002\nq).tasks.rxa[.servers.handlefor`hh;\"max til 10\"]\n10003\nq)TASKS\nnr | grp startz endz w ipa status expr result\n-----| ----------------------------------------------------------------------------------------------\n10001| 20001 2008.05.13T18:41:43.057 2008.05.13T18:41:43.058 5 localhost complete \"max til 10\" 9\n10002| 20002 2008.05.13T18:41:46.138 2008.05.13T18:41:46.138 6 localhost complete \"max til 10\" 9\n10003| 20003 2008.05.13T18:41:47.009 2008.05.13T18:41:47.010 7 localhost complete \"max til 10\" 9\nq).tasks.results 10002\n10002| 9\nq).tasks.results .tasks.completed[]\n10001| 9\n10002| 9\n10003| 9\nand one invalid task:\nq).tasks.rxa[.servers.handlefor`hh;\"17+`this\"]\n10004\nq).tasks.failed[]\n,10004\nq).tasks.results 10004\nq).tasks.status 10004\n10004| fail\nq)TASKS\nnr | grp startz endz w ipa status expr result\n-----| ----------------------------------------------------------------------------------------------\n10001| 20001 2008.05.13T18:41:43.057 2008.05.13T18:41:43.058 5 localhost complete \"max til 10\" 9\n10002| 20002 2008.05.13T18:41:46.138 2008.05.13T18:41:46.138 6 localhost complete \"max til 10\" 9\n10003| 20003 2008.05.13T18:41:47.009 2008.05.13T18:41:47.010 7 localhost complete \"max til 10\" 9\n10004| 20004 2008.05.13T18:44:05.229 2008.05.13T18:44:05.229 5 localhost fail \"17+`this\" \"type\"\nq)\nUtilities¶\nhutil.q\n¶\nProduction usage\nAll these utility files should be treated as examples. For any particular case they probably have too many options and should be cut down to do just what you want. The access control script is the most obvious case - it probably has far too many options/checks going on."}}},{"rowIdx":88,"cells":{"text":{"kind":"string","value":"generatehdb:{[x]\n .lg.o[`mockdata;\"generating mock hdb\"]; \n x:updatehdbdir x;\n setondisk[x].'exec .getrange[partitiontype]'[til n]from x;\n loadhdb x`hdbdir;\n };\n\nupdatehdbdir:{[x]update hdbdir:` sv(testpath;hdbname)from x};\nloadhdb:{[hdbdir]system \"l \",1_string hdbdir};\n\ngeneraterdb:{\n .lg.o[`mockdata;\"generating mock hdb\"];\n [x]setinmemory[x]. 
exec .getrange[partitiontype][n]from x;\n };\n\n.getrange.date:{[n]0D+2000.01.01+0 1+n};\n.getrange.month:{[n]0D+.Q.addmonths[2000.01.01;0 1+n]};\n.getrange.year:{[n]0D+.Q.addmonths[2000.01.01;12*0 1+n]};\n\ngeneratedata:{[x;start;end]\n end:end-1;\n difference:(end-start)%x`nrecord;\n timestamp:start+til[x`nrecord]*difference;\n syms:`AUDUSD`EURUSD`USDCHF;\n sym:`p#syms where 3#x`nrecord;\n source:(`$\"source\",/:string til nsyms:count syms)where 3#x`nrecord;\n id:\"x\",/:string til count source;\n offset:til[nsyms]*difference%nsyms;\n time:raze timestamp+/:offset;\n sourcetime:raze timestamp+/:2*offset;\n price:raze 100+x[`nrecord]?/:10*1+til nsyms;\n size:raze 1000+x[`nrecord]?/:100*1+til nsyms;\n :([]sym;source;id;`timestamp$time;`timestamp$sourcetime;bidprice:0.9*price;bidsize:0.9*size;askprice:1.1*price;asksize:1.1*size);\n };\n\nsetondisk:{[x;start;end]\n data:generatedata[x;start;end];\n x:update target:.Q.par[hdbdir;partitiontype$first start;tablename]from x;\n exec .Q.dd[target;`]set .Q.en[hdbdir;data]from x;\n :x;\n };\n\nsetinmemory:{[x;start;end]\n x[`tablename]set generatedata[x;start;end];\n };\n\nrun:{[]\n x:params .proc.proctype;\n :x[`func]x;\n };\n\nrun[];\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_queryorder_hdb_dbmaint.q\nSIZE: 6,148 characters\n================================================================================\n\n/ kdb+ partitioned database maintenance\n\\d .os\nWIN:.z.o in`w32`w64\npth:{p:$[10h=type x;x;string x];if[WIN;p[where\"/\"=p]:\"\\\\\"];(\":\"=first p)_ p}\ncpy:{system$[WIN;\"copy /v /z \";\"cp \"],pth[x],\" \",pth y}\ndel:{system$[WIN;\"del \";\"rm \"],pth x}\nren:{system$[WIN;\"move \";\"mv \"],pth[x],\" \",pth y}\nhere:{hsym`$system$[WIN;\"cd\";\"pwd\"]}\n\\d .\n\nadd1col:{[tabledir;colname;defaultvalue]\n if[not colname in ac:allcols tabledir;\n stdout\"adding column \",(string colname),\" (type \",(string type defaultvalue),\") to `\",string tabledir;\n num:count get(`)sv tabledir,first ac;\n .[(`)sv tabledir,colname;();:;num#defaultvalue];\n @[tabledir;`.d;,;colname]]}\n\nallcols:{[tabledir]get tabledir,`.d}\n\nallpaths:{[dbdir;table]\n files:key dbdir;\n if[any files like\"par.txt\";:raze allpaths[;table]each hsym each`$read0(`)sv dbdir,`par.txt];\n files@:where files like\"[0-9]*\";(`)sv'dbdir,'files,'table}\n\ncopy1col:{[tabledir;oldcol;newcol]\n if[(oldcol in ac)and not newcol in ac:allcols tabledir;\n stdout\"copying \",(string oldcol),\" to \",(string newcol),\" in `\",string tabledir;\n .os.cpy[(`)sv tabledir,oldcol;(`)sv tabledir,newcol];@[tabledir;`.d;,;newcol]]}\n\ndelete1col:{[tabledir;col]\n if[col in ac:allcols tabledir;\n stdout\"deleting column \",(string col),\" from `\",string tabledir;\n .os.del[(`)sv tabledir,col];@[tabledir;`.d;:;ac except col]]}\n\n/\nenum:{[tabledir;val]\n if[not 11=abs type val;:val];\n .[p;();,;u@:iasc u@:where not(u:distinct enlist val)in v:$[type key p:(`)sv tabledir,`sym;get p;0#`]];`sym!(v,u)?val}\n\\\n\nenum:{[tabledir;val]if[not 11=abs type val;:val];.Q.dd[tabledir;`sym]?val}\n\n\nfind1col:{[tabledir;col]\n $[col in allcols tabledir;\n stdout\"column \",string[col],\" (type \",(string first\"i\"$read1((`)sv tabledir,col;8;1)),\") in `\",string tabledir;\n stdout\"column \",string[col],\" *NOT*FOUND* in `\",string tabledir]}\n\nfix1table:{[tabledir;goodpartition;goodpartitioncols]\n if[count missing:goodpartitioncols except allcols tabledir;\n stdout\"fixing table `\",string tabledir;{add1col[x;z;0#get 
y,z]}[tabledir;goodpartition]each missing]}\n\nfn1col:{[tabledir;col;fn]\n if[col in allcols tabledir;\n oldattr:-2!oldvalue:get p:tabledir,col;\n newattr:-2!newvalue:fn oldvalue;\t\t\n if[$[not oldattr~newattr;1b;not oldvalue~newvalue];\n stdout\"resaving column \",(string col),\" (type \",(string type newvalue),\") in `\",string tabledir;\n oldvalue:0;.[(`)sv p;();:;newvalue]]]}\n\nreordercols0:{[tabledir;neworder]\n if[not((count ac)=count neworder)or all neworder in ac:allcols tabledir;'`order];\n stdout\"reordering columns in `\",string tabledir;\n @[tabledir;`.d;:;neworder]}\n\nrename1col:{[tabledir;oldname;newname]\n if[(oldname in ac)and not newname in ac:allcols tabledir;\n stdout\"renaming \",(string oldname),\" to \",(string newname),\" in `\",string tabledir;\n .os.ren[` sv tabledir,oldname;` sv tabledir,newname];@[tabledir;`.d;:;.[ac;where ac=oldname;:;newname]]]}\n\nren1table:{[old;new]stdout\"renaming \",(string old),\" to \",string new;.os.ren[old;new];}\n\nadd1table:{[dbdir;tablename;table]\n stdout\"adding \",string tablename;\n @[tablename;`;:;.Q.en[dbdir]0#table];}\n\nstdout:{-1 raze[\" \"sv string`date`second$.z.P],\" \",x;}\nvalidcolname:{(not x in `i,.Q.res,key`.q)and x = .Q.id x}\n\n//////////////////////////////////////////////////////////////////////////////////////////////////////////\n// * public\n\nthisdb:`:. / if functions are to be run within the database instance then use <thisdb> (`:.) as dbdir\n\naddcol:{[dbdir;table;colname;defaultvalue] / addcol[`:/data/taq;`trade;`noo;0h]\n if[not validcolname colname;'(`)sv colname,`invalid.colname];\n add1col[;colname;enum[dbdir;defaultvalue]]each allpaths[dbdir;table];}\n\ncastcol:{[dbdir;table;col;newtype] / castcol[thisdb;`trade;`size;`short]\n fncol[dbdir;table;col;newtype$]}\n\nclearattrcol:{[dbdir;table;col] / clearattr[thisdb;`trade;`sym]\n setattrcol[dbdir;table;col;(`)]}\n\ncopycol:{[dbdir;table;oldcol;newcol] / copycol[`:/k4/data/taq;`trade;`size;`size2]\n if[not validcolname newcol;'(`)sv newcol,`invalid.newname];\n copy1col[;oldcol;newcol]each allpaths[dbdir;table];}\n\ndeletecol:{[dbdir;table;col] / deletecol[`:/k4/data/taq;`trade;`iz]\n delete1col[;col]each allpaths[dbdir;table];}\n\nfindcol:{[dbdir;table;col] / findcol[`:/k4/data/taq;`trade;`iz]\n find1col[;col]each allpaths[dbdir;table];}\n\n/ adds missing columns, but DOESN'T delete extra columns - do that manually\nfixtable:{[dbdir;table;goodpartition] / fixtable[`:/k4/data/taq;`trade;`:/data/taq/2005.02.19]\n fix1table[;goodpartition;allcols goodpartition]each allpaths[dbdir;table]except goodpartition;}\n\nfncol:{[dbdir;table;col;fn] / fncol[thisdb;`trade;`price;2*]\n fn1col[;col;fn]each allpaths[dbdir;table];}\n\nlistcols:{[dbdir;table] / listcols[`:/k4/data/taq;`trade]\n allcols first allpaths[dbdir;table]}\n\nrenamecol:{[dbdir;table;oldname;newname] / renamecol[`:/k4/data/taq;`trade;`woz;`iz]\n if[not validcolname newname;'` sv newname,`invalid.newname];\n rename1col[;oldname;newname]each allpaths[dbdir;table];}\n\nreordercols:{[dbdir;table;neworder] / reordercols[`:/k4/data/taq;`trade;reverse cols trade]\n reordercols0[;neworder]each allpaths[dbdir;table];}\n\nsetattrcol:{[dbdir;table;col;newattr] / setattr[thisdb;`trade;`sym;`g] / `s `p `u\n fncol[dbdir;table;col;newattr#]}\n\naddtable:{[dbdir;tablename;table] / addtable[`:.;`trade;([]price...)]\n add1table[dbdir;;table]each allpaths[dbdir;tablename];}\n\nrentable:{[dbdir;old;new] / rentable[`:.;`trade;`transactions]\n ren1table'[allpaths[dbdir;old];allpaths[dbdir;new]];}\n\n\\\ntest with 
https://github.com/KxSystems/kdb/blob/master/tq.q (sample taq database)\n\nif making changes to current database you need to reload (\\l .) to make modifications visible\n\nif the database you've been modifying is a tick database don't forget to adjust the schema (tick/???.q) to reflect your changes to the data\n\n\naddcol[`:.;`trade;`num;10]\naddcol[`:.;`trade;`F;`test]\ndelete1col[`:./2000.10.02/trade;`F]\nfixtable[`:.;`trade;`:./2000.10.03/trade]\nreordercols[`:.;`quote;except[2 rotate cols quote;`date]]\nclearattrcol[`:.;`trade;`sym]\nsetattrcol[`:.;`trade;`sym;`p]\ncastcol[`:.;`trade;`time;`second]\nrenamecol[`:.;`trade;`price;`PRICE]\npxcols:{(y,())renamecol[`:.;x]'z,()]\n`PRICE`size renamecol[`:.;`trade]'`p`s\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_queryorder_settings.q\nSIZE: 900 characters\n================================================================================\n\ninputpath:hsym`$getenv[`KDBTESTS],\"/dataaccess/queryorder/inputs\";\noutputpath:hsym`$getenv[`KDBTESTS],\"/dataaccess/queryorder/outputs\";\nprocesscsv:hsym`$getenv[`KDBTESTS],\"/dataaccess/queryorder/`config`process.csv\";\n\n//- code to pass in a test name\n//- extract the input dictionary from {testname}.csv\n//- extract the respone from .queryorder.orderquery\n//- compare output with expected one\n\n\ngetinputparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv inputpath,`$string[test],\".csv\";\"s*\"]};\n\ngetoutputparams:{[test]T:exec parameter!get each parametervalue from .checkinputs.readcsv[` sv outputpath,`$string[test],\".csv\";\"i*\"];:(T[til 4])};\n\n\ntestfunction:{[testquery] getoutputparams[testquery]~(raze .queryorder.orderquery[getinputparams[testquery]])[1+til 4]};\n\ntestfunction1:{[testquery;expectedoutput] (getdata getinputparams[testquery])~value expectedoutput};\n\n\n================================================================================\nFILE: TorQ_tests_dataaccess_settings.q\nSIZE: 422 characters\n================================================================================\n\n.servers.USERPASS:`$\"admin:admin\";\n\n//- some custom functionality for tests\n.dataaccess.testfuncrollover:{[]2000.01.05D}; //- function to determine rollover to split the time ranges destined for the rdb and hdb.\n.dataaccess.testfuncpartitionrange:{[timecolumn;primarytimecolumn;partitionfield;hdbtimerange]@[partitionfield$hdbtimerange;1;+;not timecolumn~primarytimecolumn]}; //- offset times for non-primary time columns\n\n\n================================================================================\nFILE: TorQ_tests_helperfunctions.q\nSIZE: 970 characters\n================================================================================\n\nstartorstopproc:{[startorstop;procname;processcsv] \n\t.proc.sys getenv[`TORQHOME],\"/torq.sh \",startorstop,\" \",procname,\" -csv \",processcsv\n\t};\n\ndeadproccheck:{[proctype;procname]\n\t/ pairs are of the form \"<PID> ssh\" for 'PID TTY...' 
part of pgrep, and \"<PID> q\" if process is running\n\tpidnamepairs:.proc.sys \"pgrep -lf \\\"stackid \",getenv[`KDBBASEPORT],\" -proctype \",proctype,\" -procname \",procname,\"\\\"\";\n\tnot \"q\" in last each pidnamepairs\n\t};\n\n// Kill process dead with -9\nkill9proc:{[proc] a:\"q\" in' b:@[system;\"pgrep -lf \",proc,\" -u $USER\";\" \"];system \"kill -9 \",first \" \" vs first b where a};\n\n// Returns boolean true if process is alive\nisalive:{[proc] any \"q\" in' @[system;\"pgrep -lf \",proc,\" -u $USER\";\" \"]};"}}},{"rowIdx":89,"cells":{"text":{"kind":"string","value":"cov\n, scov\n¶\nCovariance\ncov\n¶\nx cov y cov[x;y]\nWhere x\nand y\nare conforming numeric lists returns their covariance as a floating-point number. Applies to all numeric data types and signals an error with temporal types, char and sym.\nq)2 3 5 7 cov 3 3 5 9\n4.5\nq)2 3 5 7 cov 4 3 0 2\n-1.8125\nq)select price cov size by sym from trade\ncov\nis an aggregate function.\nThe function cov\nis equivalent to {avg[x*y]-avg[x]*avg y}\n.\nDomain and range:\nb g x h i j e f c s p m d z n u v t\n----------------------------------------\nb | f . f f f f f f f . f f f f f f f f\ng | . . . . . . . . . . . . . . . . . .\nx | f . f f f f f f f . f f f f f f f f\nh | f . f f f f f f f . f f f f f f f f\ni | f . f f f f f f f . f f f f f f f f\nj | f . f f f f f f f . f f f f f f f f\ne | f . f f f f f f f . f f f f f f f f\nf | f . f f f f f f f . f f f f f f f f\nc | f . f f f f f f f . f f f f f f f f\ns | . . . . . . . . . . . . . . . . . .\np | f . f f f f f f f . f f f f f f f f\nm | f . f f f f f f f . f f f f f f f f\nd | f . f f f f f f f . f f f f f f f f\nz | f . f f f f f f f . f f f f f f f f\nn | f . f f f f f f f . f f f f f f f f\nu | f . f f f f f f f . f f f f f f f f\nv | f . f f f f f f f . f f f f f f f f\nt | f . f f f f f f f . f f f f f f f f\nRange: f\ncov\nis a multithreaded primitive.\nscov\n¶\nSample covariance\nx scov y scov[x;y]\nWhere x\nand y\nare conforming numeric lists returns their sample covariance as a float atom.\n\\[scov(x,y)=\\frac{n}{n-1} cov(x,y)\\]\nApplies to all numeric data types and signals an error with temporal types, char and sym.\nq)2 3 5 7 scov 3 3 5 9\n6f\nq)2 3 5 7 scov 4 3 0 2\n-2.416667\nq)select price scov size by sym from trade\nscov\nis an aggregate function.\nThe function scov\nis equivalent to {cov[x;y]*count[x]%-1+count x}\n.\nDomain and range:\nb g x h i j e f c s p m d z n u v t\n----------------------------------------\nb | f . f f f f f f . . f f f f f f f f\ng | . . . . . . . . . . . . . . . . . .\nx | f . f f f f f f . . f f f f f f f f\nh | f . f f f f f f . . f f f f f f f f\ni | f . f f f f f f . . f f f f f f f f\nj | f . f f f f f f . . f f f f f f f f\ne | f . f f f f f f . . f f f f f f f f\nf | f . f f f f f f f . f f f f f f f f\nc | . . . . . . . f . . f f f f f f f f\ns | . . . . . . . . . . . . . . . . . .\np | f . f f f f f f f . f . . . f f f f\nm | f . f f f f f f f . . f . . f f f f\nd | f . f f f f f f f . . . f . f f f f\nz | f . f f f f f f f . . . . f f f f f\nn | f . f f f f f f f . f f f f f f f f\nu | f . f f f f f f f . f f f f f f f f\nv | f . f f f f f f f . f f f f f f f f\nt | f . f f f f f f f . f f f f f f f f\nRange: f\nscov\nis a multithreaded primitive.\n\ncross\n¶\nx cross y cross[x;y]\nReturns the cross-product (i.e. 
all possible combinations) of x\nand y\n.\nq)1 2 3 cross 10 20\n1 10\n1 20\n2 10\n2 20\n3 10\n3 20\nq)(cross/)(2 3;10;\"abc\")\n2 10 \"a\"\n2 10 \"b\"\n2 10 \"c\"\n3 10 \"a\"\n3 10 \"b\"\n3 10 \"c\"\ncross\ncan work on tables and dictionaries.\nq)s:`IBM`MSFT`AAPL\nq)v:1 2\nq)([]s:s)cross([]v:v)\ns v\n------\nIBM 1\nIBM 2\nMSFT 1\nMSFT 2\nAAPL 1\nAAPL 2\nThe function cross\nis equivalent to {raze x,/:\\:y}\n.\n\ncsv\n¶\nCSV delimiter\ncsv\nA synonym for \",\"\nfor use in preparing text for CSV files, or reading them.\nPrepare Text,\n.h.cd\n(csv from data),\n.h.td\n(tsv from data)\nFile system\ncsv\n¶CSV delimiter\ncsv\nA synonym for \",\"\nfor use in preparing text for CSV files, or reading them.\nPrepare Text,\n.h.cd\n(csv from data),\n.h.td\n(tsv from data)\nFile system\n\n_\nCut, cut\n¶\n_ (cut operator)¶\nCut a list or table into sub-arrays\nx _ y _[x;y]\nWhere\nx\nis a non-decreasing list of integers in the domaintil count y\ny\nis a list or table\nreturns y\ncut at the indexes given in x\n. The result is a list with the same count as x\n.\nExamples using cut on lists:\nq)2 4 9 _ til 10 /first result item starts at index 2\n2 3\n4 5 6 7 8\n,9\nq)\nq)2 4 4 9 _ til 10 /cuts are empty for duplicate indexes\n2 3\n`long$()\n4 5 6 7 8\n,9\nq)2 5 7 _ til 12\n2 3 4\n5 6\n7 8 9 10 11\nExample using cut on table sp\ncreated using sp.q\nq)\\l sp.q\nq)count sp\n12\nq){}show each 2 5 7_sp / `show` returns the generic null ::\ns p qty\n---------\ns1 p3 400\ns1 p4 200\ns4 p5 100\ns p qty\n---------\ns1 p6 100\ns2 p1 300\ns p qty\n---------\ns2 p2 400\ns3 p2 200\ns4 p2 200\ns4 p4 300\ns1 p5 400\n_\n(cut) is a multithreaded primitive.\nAvoid confusion with underscores in names: separate the Cut operator with spaces.\ncut\n(keyword)¶\nCut a list or table into a matrix of x\ncolumns\nx cut y cut[x;y]\nWhere\nx\nis an integer atomy\nis a list\nreturns y\nsplits into a list of lists, all (except perhaps the last) of count x\n.\nq)4 cut til 10\n0 1 2 3\n4 5 6 7\n8 9\nOtherwise cut\nbehaves as _\nCut.\n\n?\nRoll, Deal, Permute¶\nRandom lists, with or without duplicates\nRoll and Deal¶\nSelect items randomly, generate random values\nx?y ?[x;y] / Roll\nneg[x]?y ?[neg[x];y] / Deal\nSelect¶\nWhere\nx\nis an integer atomy\nis a list\nreturns abs[x]\nrandomly selected items of y\n.\nWhere x\nis\n- positive items are selected independently (Roll)\n- negative and\nx>=neg count y\n, items are selected from different indexes ofy\n(Deal)\nq)5?`Arthur`Steve`Dennis\n`Arthur`Arthur`Steve`Dennis`Arthur\nq)2?(\"a\";0101b;`abc;`the`quick;2012.06m)\n`abc\n2012.06m\nq)-3?`the`quick`brown`fox\n`brown`quick`fox\nDuplicate items in y\nIf y\ncontains duplicate items, so may the result of Deal.\nq)-2?`bye`bye`blackbird\n`bye`bye\nGenerate¶\nWhere\nx\nis an int atomy\nis an atom > 0\nreturns a list of abs[x]\nitems of the same type as y\n, generated as follows\nright domain (y) range operator\n----------------------------------------------------------------\ninteger >0 til y Roll, Deal\n0Ng GUIDs Roll, Deal\nfloat, temporal ≥0 0 to y Roll\n0i ints Roll\n0 longs Roll, Deal\n0b 01b Roll\n\" \" .Q.a Roll\n0x0 bytes Roll\nnumeric symbol `n symbols, each of n chars (n≤8) Roll, Deal\nfrom abcdefghijklmnop\nWhere x\nis negative (Deal), y\nmust have a positive long or null GUID\nq)10?5 / roll 10 (5-sided dice)\n4 2 1 1 3 2 0 0 2 2\nq)-5?20 / deal 5\n13 11 8 12 19\nq)-10?10 / first 10 ints in random order\n9 3 5 7 2 0 6 1 4 8\nq)(asc -10?10)~asc -10?10\n1b\nq)-1?0Ng / deal 1 GUID\n,fd2db048-decb-0008-0176-01714e5eeced\nq)count distinct 
-1000?0Ng / deal 1000 GUIDs\n1000\nq)5?4.5 / roll floats\n3.13239 1.699364 2.898484 1.334554 3.085937\nq)4?2012.09m / roll months\n2006.02 2007.07 2007.07 2008.06m\nq)30?\" \"\n\"tusrgoufcetphltnkegcflrunpornt\"\nq)16?0x0 / roll 16 bytes\n0x8c6b8b64681560840a3e178401251b68\nq)20?0b / roll booleans\n00000110010101000100b\nq)10?`3 / roll short symbols\n`bon`dec`nei`jem`pgm`kei`lpn`bjh`flj`npo\nq)rand `6\n`nemoad\nRoll and Deal return list results\nFor an atom result, instead of first 1?x\n, use rand\n.\nDeal of GUID atom¶\nDeal of GUID uses a mix of process ID, current time and IP address to generate the GUID, and successive calls may not allow enough time for the current time reading to change.\nq)count distinct {-1?0ng}each til 10 / Deal one GUID ten times\n5\nThe range of GUIDs is large enough that Roll and Deal often return the same result.\nq)count distinct 1000000000?0Ng / Roll a billion GUIDs\n1000000000\nFor a set of distinct GUIDs, use Deal to generate them in one operation\nPermute¶\n0N?x\nWhere x\nis\n- a non-negative int atom, returns the items of\ntil x\nin random order - a list, returns the items of\nx\nin random order\n(Since V3.3.)\nq)0N?10 / permute til 10\n8 2 4 1 6 0 5 3 7 9\nq)0N?5 4 2 / permute items\n4 5 2\nq)0N?\"abc\" / permute items\n\"bac\"\nq)0N?(\"the\";1 2 4;`ibm`goog) / permute items\n`ibm`goog\n1 2 4\n\"the\"\nSeed¶\nDeal, Roll, Permute and rand\nuse a constant seed on kdb+ startup: scripts using them can be repeated with the same results. You can see and set the value of the seed with system command \\S\n.)\nTo use GUIDs as identifiers, use Deal, not Roll\n$ q\n..\nq)1?0Ng / roll 1 GUID\n,8c6b8b64-6815-6084-0a3e-178401251b68\nq)\\\\\n$ q\n..\nq)1?0Ng / roll 1 GUID\n,8c6b8b64-6815-6084-0a3e-178401251b68\nq)\\\\\n$ q\n..\nq)-1?0Ng / deal 1 GUID\n,2afe0040-2a1b-bfce-ef3e-7160260cf992\nq)\\\\\n$ q\n..\nq)-1?0Ng / deal 1 GUID\n,753a8739-aa6b-3cb4-2e31-0fcdf20fd2f0\nRoll uses the current seed (\\S 0N\n). Deal uses a seed based on process properties and the current time. 
This means -10?0Ng\nis different from {first -1?0Ng}each til 10\n.\nErrors¶\n| error | cause |\n|---|---|\n| length | neg x exceeds count y |\n| type | x is negative (Roll only) |\n\ndelete\n¶\nDelete rows or columns from a table, entries from a dictionary, or objects from a namespace\ndelete from x\ndelete from x where pw\ndelete ps from x\ndelete\nis a qSQL query template and varies from regular q syntax\nFor the Delete operator !\n, see\nFunctional SQL\nTable rows¶\ndelete from x\ndelete from x where pw\nWhere\nx\nis a tablepw\nis a condition\ndeletes from x\nrows matching pw\n, or all rows if where pw\nnot specified.\nq)show table: ([] a: `a`b`c; n: 1 2 3)\na n\n---\na 1\nb 2\nc 3\nq)show delete from table where a = `c\na n\n---\na 1\nb 2\nAttributes may or may not be dropped: reapply or remove as needed\nTable columns¶\ndelete from x\ndelete ps from x\nWhere\nx\nis a tableps\na list of column names\ndeletes from x\ncolumns ps\nor all columns if ps\nnot specified.\nq)show delete n from table\na\n-\na\nb\nc\nDictionary entries¶\ndelete from x\ndelete ps from x\nWhere\nx\nis a dictionaryps\na list of keys to it\ndeletes from x\nentries for ps\n.\nq)show d:`a`b`c!til 3\na| 0\nb| 1\nc| 2\nq)delete b from `d\n`d\nq)d\na| 0\nc| 2\nCond is not supported inside q-SQL expressions\nEnclose in a lambda or use Vector Conditional instead.\nNamespace objects¶\ndelete from x\ndelete ps from x\nWhere\nx\nis a namespaceps\na symbol atom or vector of name/s defined in it\ndeletes the named objects from the namespace.\nq)a:1\nq)\\v\n,`a\nq)delete a from `.\n`.\nq)\\v\n`symbol$()"}}},{"rowIdx":90,"cells":{"text":{"kind":"string","value":"Functional qSQL¶\nThe functional forms of delete\n, exec\n, select\nand update\nare particularly useful for programmatically-generated queries, such as when column names are dynamically produced.\nFunctional form is an alternative to using a qSQL template to construct a query. For example, the following are equivalent:\nq)select n from t\nq)?[t;();0b;(enlist `n)!enlist `n]\nPerformance\nThe q interpreter parses delete\n, exec\n, select\n, and update\ninto their equivalent functional forms, so there is no performance difference.\nThe functional forms are\n![t;c;b;a] /update and delete\n?[t;i;p] /simple exec\n?[t;c;b;a] /select or exec\n?[t;c;b;a;n] /select up to n records\n?[t;c;b;a;n;(g;cn)] /select up to n records sorted by g on cn\nwhere:\nt\nis a table, or the name of a table as a symbol atom.c\nis the Where phrase, a list of constraints.\nEvery constraint inc\nis a parse tree representing an expression to be evaluated; the result of each being a boolean vector. The parse tree consists of a function followed by a list of its arguments, each an expression containing column names and other variables. Represented by symbols, it distinguishes actual symbol constants by enlisting them. The function is applied to the arguments, producing a boolean vector that selects the rows. The selection is performed in the order of the items inc\n, from left to right: only rows selected by one constraint are evaluated by the next.\nb\nis the By phrase.\nThe domain of dictionaryb\nis a list of symbols that are the key names for the grouping. Its range is a list of column expressions (parse trees) whose results are used to construct the groups. 
The grouping is ordered by the domain items, from major to minor.b\nis one of:- the general empty list\n()\n- boolean atom:\n0b\nfor no grouping;1b\nfor distinct - a symbol atom or list naming table column/s\n- a dictionary of group-by specifications\n- the general empty list\na\nis the Select phrase. The domain of dictionarya\nis a list of symbols containing the names of the produced columns. QSQL query templates assign default column names in the result, but here each result column must be named explicitly.\nEach item of its range is an evaluation list consisting of a function and its argument(s), each of which is a column name or another such result list. For each evaluation list, the function is applied to the specified value(s) for each row and the result is returned. The evaluation lists are resolved recursively when operations are nested.\na\nis one of- the general empty list\n()\n- a symbol atom: the name of a table column\n- a parse tree\n- a dictionary of select specifications (aggregations)\n- the general empty list\ni\nis a list of indexesp\nis a parse treen\nis a non-negative integer or infinity, indicating the maximum number of records to be returnedg\nis a unary grade function\nCall by name¶\nColumns in a\n, b\nand c\nappear as symbols.\nTo distinguish symbol atoms and vectors from columns, enlist them.\nq)t:([] c1:`a`b`a`c`a`b`c; c2:10*1+til 7; c3:1.1*1+til 7)\nq)select from t where c2>35,c1 in `b`c\nc1 c2 c3\n---------\nc 40 4.4\nb 60 6.6\nc 70 7.7\nq)?[t; ((>;`c2;35);(in;`c1;enlist[`b`c])); 0b; ()]\nc1 c2 c3\n---------\nc 40 4.4\nb 60 6.6\nc 70 7.7\nNote above that\n- the columns\nc1\nandc2\nappear as symbol atoms - the symbol vector\n`b`c\nappears asenlist[`b`c]\nUse enlist\nto create singletons to ensure appropriate entities are lists.\nDifferent types of a\nand b\nreturn different types of result for Select and Exec.\n| b\na | bool () sym/s dict\n-----------|----------------------------------------\n() | table dict - keyed table\nsym | - vector dict dict\nparse tree | - vector dict dict\ndict | table vector/s table table\n?\nSelect¶\n?[t;c;b;a]\nWhere t\n, c\n, b\n, and a\nare as above, returns a table.\nq)show t:([]n:`x`y`x`z`z`y;p:0 15 12 20 25 14)\nn p\n----\nx 0\ny 15\nx 12\nz 20\nz 25\ny 14\nq)select m:max p,s:sum p by name:n from t where p>0,n in `x`y\nname| m s\n----| -----\nx | 12 12\ny | 15 29\nFollowing is the equivalent functional form. 
Note the use of enlist\nto create singletons, ensuring that appropriate entities are lists.\nq)c: ((>;`p;0);(in;`n;enlist `x`y))\nq)b: (enlist `name)!enlist `n\nq)a: `m`s!((max;`p);(sum;`p))\nq)?[t;c;b;a]\nname| m s\n----| -----\nx | 12 12\ny | 15 29\nDegenerate cases\n- For no constraints, make\nc\nthe empty list - For no grouping make\nb\na boolean0b\n- For distinct rows make\nb\na boolean1b\n- To produce all columns of\nt\nin the result, makea\nthe empty list()\nselect from t\nis equivalent to functional form ?[t;();0b;()]\n.\nSelect distinct¶\nFor special case select distinct\nspecify b\nas 1b\n.\nq)t:([] c1:`a`b`a`c`b`c; c2:1 1 1 2 2 2; c3:10 20 30 40 50 60)\nq)?[t;(); 1b; `c1`c2!`c1`c2] / select distinct c1,c2 from t\nc1 c2\n-----\na 1\nb 1\nc 2\nb 2\nRank 5¶\nLimit result rows\n?[t;c;b;a;n]\nReturns as for rank 4, but where n\nis\n- an integer or infinity, only the first\nn\nrows, or the last ifn\nis negative - a pair of non-negative integers, up to\nn[1]\nrows starting with rown[0]\nq)show t:([] c1:`a`b`c`a; c2:10 20 30 40)\nc1 c2\n-----\na 10\nb 20\nc 30\na 40\nq)?[t;();0b;();-2] / select[-2] from t\nc1 c2\n-----\nc 30\na 40\nq)?[t;();0b;();1 2] / select[1 2] from t\nc1 c2\n-----\nb 20\nc 30\nRank 6¶\nLimit result rows and sort by a column\n?[t;c;b;a;n;(g;cn)]\nReturns as for rank 5, but where\ng\nis a unary grading functioncn\nis a column name as a symbol atom\nsorted by g\non column cn\n.\nq)?[t; (); 0b; `c1`c2!`c1`c2; 0W; (idesc;`c1)]\nc1 c2\n-----\nc 30\nb 20\na 10\na 40\nQ for Mortals §9.12.1 Functional select\n?\nExec¶\nA simplified form of Select that returns a list or dictionary rather than a table.\n?[t;c;b;a]\nThe constraint specification c\n(Where) is as for Select.\nq)show t:([] c1:`a`b`c`c`a`a; c2:10 20 30 30 40 40;\nc3: 1.1 2.2 3.3 3.3 4.4 3.14159; c4:`cow`sheep`cat`dog`cow`dog)\nc1 c2 c3 c4\n-------------------\na 10 1.1 cow\nb 20 2.2 sheep\nc 30 3.3 cat\nc 30 3.3 dog\na 40 4.4 cow\na 40 3.14159 dog\nNo grouping¶\nb\nis the general empty list.\nb a result\n--------------------------------------------------------------\n() () the last row of t as a dictionary\n() sym the value of that column\n() dict a dictionary with keys and values as specified by a\nq)?[t; (); (); ()] / exec last c1,last c2,last c3 from t\nc1| `a\nc2| 40\nc3| 3.14159\nc4| `dog\nq)?[t; (); (); `c1] / exec c1 from t\n`a`b`c`c`a`a\nq)?[t; (); (); `one`two!`c1`c2] / exec one:c1,two:c2 from t\none| a b c c a a\ntwo| 10 20 30 30 40 40\nq)?[t; (); (); `one`two!(`c1;(sum;`c2))] / exec one:c1,two:sum c2 from t\none| `a`b`c`c`a`a\ntwo| 170\nGroup by column¶\nb\nis a column name. 
The result is a dictionary.\nWhere a\nis a column name, in the result\n- the keys are distinct values of the column named in\nb\n- the values are lists of corresponding values from the column named in\na\nq)?[t; (); `c1; `c2] / exec c2 by c1 from t\na| 10 40 40\nb| ,20\nc| 30 30\nWhere a\nis a dictionary, in the result\n- the key is a table with a single anonymous column containing distinct values of the column named in\nb\n- the value is a table with columns as defined in\na\nq)?[t; (); `c1; enlist[`c2]!enlist`c2] / exec c2:c2 by c1 from t\n| c2\n-| --------\na| 10 40 40\nb| ,20\nc| 30 30\nq)?[t; (); `c1; `two`three!`c2`c3] / exec two:c2,three:c3 by c1 from t\n| two three\n-| ------------------------\na| 10 40 40 1.1 4.4 3.14159\nb| ,20 ,2.2\nc| 30 30 3.3 3.3\nq)?[t;();`c1;`m2`s3!((max;`c2);(sum;`c3))] / exec m2:max c2,s3:sum c3 by c1 from t\n| m2 s3\n-| -----------\na| 40 8.64159\nb| 20 2.2\nc| 30 6.6\nGroup by columns¶\nb\nis a list of column names.\nWhere a\nis a column name, returns a dictionary in which\n- the key is the empty symbol\n- the value is the value of the column/s specified in\na\nq)?[t; (); `c1`c2; `c3]\n| 1.1 2.2 3.3 3.3 4.4 3.14159\nq)?[t; (); `c1`c2; `c3`c4!((max;`c3);(last;`c4))]\n| c3 c4\n| -------\n| 4.4 dog\nGroup by a dictionary¶\nb\nis a dictionary. Result is a dictionary in which the key is a table with columns as specified by b\nand\nb a result value\n-----------------------------------------------------\ndict () last records of table that match each key\ndict sym corresponding values from the column in a\ndict dict values as defined in a\nq)?[t; (); `one`two!`c1`c2; ()]\none two| c1 c2 c3 c4\n-------| -------------------\na 10 | a 10 1.1 cow\na 40 | a 40 3.14159 dog\nb 20 | b 20 2.2 sheep\nc 30 | c 30 3.3 dog\nq)/ exec last c1,last c2,last c3,last c4 by one:c1,two:c2 from t\nq)?[t; (); enlist[`one]!enlist(string;`c1); ()]\none | c1 c2 c3 c4\n----| -------------------\n,\"a\"| a 40 3.14159 dog\n,\"b\"| b 20 2.2 sheep\n,\"c\"| c 30 3.3 dog\nq)/ exec last c1,last c2,last c3,last c4 by one:string c1 from t\nq)?[t; (); enlist[`one]!enlist `c1; `c2] / exec c2 by one:c1 from t\none|\n---| --------\na | 10 40 40\nb | ,20\nc | 30 30\nq)?[t; (); `one`four!`c1`c4; `m2`s3!((max;`c2);(sum;`c3))]\none four | m2 s3\n---------| ----------\na cow | 40 5.5\na dog | 40 3.14159\nb sheep| 20 2.2\nc cat | 30 3.3\nc dog | 30 3.3\nQ for Mortals §9.12.2 Functional exec\n?\nSimple Exec¶\n?[t;i;p]\nWhere t\nis not partitioned, another form of Exec.\nq)show t:([]a:1 2 3;b:4 5 6;c:7 9 0)\na b c\n-----\n1 4 7\n2 5 9\n3 6 0\nq)?[t;0 1 2;`a]\n1 2 3\nq)?[t;0 1 2;`b]\n4 5 6\nq)?[t;0 1 2;(last;`a)]\n3\nq)?[t;0 1;(last;`a)]\n2\nq)?[t;0 1 2;(*;(min;`a);(avg;`c))]\n5.333333\n!\nUpdate¶\n![t;c;b;a]\nArguments t\n, c\n, b\n, and a\nare as for Select.\nq)show t:([]n:`x`y`x`z`z`y;p:0 15 12 20 25 14)\nn p\n----\nx 0\ny 15\nx 12\nz 20\nz 25\ny 14\nq)select m:max p,s:sum p by name:n from t where p>0,n in `x`y\nname| m s\n----| -----\nx | 12 12\ny | 15 29\nq)update p:max p by n from t where p>0\nn p\n----\nx 0\ny 15\nx 12\nz 25\nz 25\ny 15\nq)c: enlist (>;`p;0)\nq)b: (enlist `n)!enlist `n\nq)a: (enlist `p)!enlist (max;`p)\nq)![t;c;b;a]\nn p\n----\nx 0\ny 15\nx 12\nz 25\nz 25\ny 15\nThe degenerate cases are the same as in Select.\nQ for Mortals §9.12.3 Functional update\n!\nDelete¶\nA simplified form of Update\n![t;c;0b;a]\nOne of c\nor a\nmust be empty, the other not. c\nselects which rows will be removed. 
a\nis a symbol vector with the names of columns to be removed.\nq)t:([]c1:`a`b`c;c2:`x`y`z)\nq)/following is: delete c2 from t\nq)![t;();0b;enlist `c2]\nc1\n--\na\nb\nc\nq)/following is: delete from t where c2 = `y\nq)![t;enlist (=;`c2; enlist `y);0b;`symbol$()]\nc1 c2\n-----\na x\nc z\nQ for Mortals §9.12.4 Functional delete\nConversion using parse¶\nApplying parse to a qSQL statement written as a string will return the internal representation of the functional form. With some manipulation this can then be used to piece together the functional form in q. This generally becomes more difficult as the query becomes more complex and requires a deep understanding of what kdb+ is doing when it parses qSQL form.\nAn example of using parse to convert qSQL to its corresponding functional form is as follows:\nq)t:([]c1:`a`b`c; c2:10 20 30)\nq)parse \"select c2:2*c2 from t where c1=`c\"\n?\n`t\n,,(=;`c1;,`c)\n0b\n(,`c2)!,(*;2;`c2)\nq)?[`t; enlist (=;`c1;enlist `c); 0b; (enlist `c2)!enlist (*;2;`c2)]\nc2\n--\n60\nIssues converting to functional form¶\nTo convert a select\nquery to a functional form one may attempt to\napply the parse\nfunction to the query string:\nq)parse \"select sym,price,size from trade where price>50\"\n?\n`trade\n,,(>;`price;50)\n0b\n`sym`price`size!`sym`price`size\nAs we know, parse\nproduces a parse tree and since some of the elements may themselves be parse trees we can’t immediately take the output of parse and plug it into the form ?[t;c;b;a]\n. After a little playing around with the result of parse\nyou might eventually figure out that the correct functional form is as follows.\nq)funcQry:?[`trade;enlist(>;`price;50);0b;`sym`price`size! `sym`price`size]\nq)strQry:select sym,price,size from trade where price>50 q)\nq)funcQry~strQry\n1b\nThis, however, becomes more difficult as the query statements become more complex:\nq)parse \"select count i from trade where 140>(count;i) fby sym\"\n?\n`trade\n,,(>;140;(k){@[(#y)#x[0]0#x\n1;g;:;x[0]'x[1]g:.=y]};(enlist;#:;`i);`sym))\n0b\n(,`x)!,(#:;`i)\nIn this case, it is not obvious what the functional form of the above query should be, even after applying parse\n.\nThere are three issues with this parse-and-“by eye” method to convert to the equivalent functional form. We will cover these in the next three subsections.\nParse trees and eval¶\nThe first issue with passing a select\nquery to parse\nis that each returned item is in unevaluated form. As discussed here, simply applying value\nto a parse tree does not work. However, if we evaluate each one of the arguments fully, then there would be no nested parse trees. We could then apply value\nto the result:\nq)eval each parse \"select count i from trade where 140>(count;i) fby sym\"\n?\n+`sym`time`price`size!(`VOD`IBM`BP`VOD`IBM`IBM`HSBC`VOD`MS..\n,(>;140;(k){@[(#y)#x[0]0#x\n1;g;:;x[0]'x[1]g:.=y]};(enlist;#:;`i);`sym))\n0b\n(,`x)!,(#:;`i)\nThe equivalence below holds for a general qSQL query provided as a string:\nq)value[str]~value eval each parse str\n1b\nIn particular:\nq)str:\"select count i from trade where 140>(count;i) fby sym\"\nq)value[str]~value eval each parse str\n1b\nIn fact, since within the functional form we can refer to the table by name we can make this even clearer. Also, the first item in the result of parse\napplied to a select\nquery will always be ?\n(or !\nfor a delete\nor update\nquery) which cannot be evaluated any further. 
So we don’t need to apply eval\nto it.\nq)pTree:parse str:\"select count i from trade where 140>(count;i) fby sym\"\nq)@[pTree;2 3 4;eval]\n?\n`trade\n,(>;140;(k){@[(#y)#x[0]0#x\n1;g;:;x[0]'x[1]g:.=y]};(enlist;#:;`i);`sym))\n0b\n(,`x)!,(#:;`i)\nq)value[str] ~ value @[pTree;2 3 4;eval]\n1b\nVariable representation in parse trees¶\nRecall that in a parse tree a variable is represented by a symbol containing its name. So to represent a symbol or a list of symbols, you must use enlist\non that expression. In k, enlist\nis the unary form of the comma operator in k:\nq)parse\"3#`a`b`c`d`e`f\"\n#\n3\n,`a`b`c`d`e`f\nq)(#;3;enlist `a`b`c`d`e`f)~parse\"3#`a`b`c`d`e`f\"\n1b\nThis causes a difficulty as q has no unary syntax for operators.\nWhich means the following isn’t a valid q expression and so returns an error.\nq)(#;3;,`a`b`c`d`e`f)\n',\nIn the parse tree we receive we need to somehow distinguish between k’s unary ,\n(which we want to replace with enlist\n) and the binary Join operator, which we want to leave as it is.\nExplicit definitions in .q\nare shown in full¶\nThe fby\nin the select\nquery above is represented by its full k\ndefinition.\nq)parse \"fby\"\nk){@[(#y)#x[0]0#x 1;g;:;x[0]'x[1]g:.=y]}\nWhile using the k form isn’t generally a problem from a functionality perspective, it does however make the resulting functional statement difficult to read.\nThe solution¶\nWe will write a function to automate the process of converting a select\nquery into its equivalent functional form.\nThis function, buildQuery\n, will return the functional form as a string.\nq)buildQuery \"select count i from trade where 140>(count;i) fby sym\"\n\"?[trade;enlist(>;140;(fby;(enlist;count;`i);`sym));0b;\n(enlist`x)! enlist (count;`i)]\"\nWhen executed it will always return the same result as the select\nquery from which it is derived:\nq)str:\"select count i from trade where 140>(count;i) fby sym\"\nq)value[str]~value buildQuery str\n1b\nAnd since the same logic applies to exec\n, update\nand delete\nit will be able to convert to their corresponding functional forms also.\nTo write this function we will solve the three issues outlined above:\n- parse-tree items may be parse trees\n- parse trees use k’s unary syntax for operators\n- q keywords from\n.q.\nare replaced by their k definitions\nThe first issue, where some items returned by parse\nmay themselves be parse trees is easily resolved by applying eval\nto the individual items.\nThe second issue is with k’s unary syntax for ,\n. We want to replace it with the q keyword enlist\n. To do this we define a function that traverses the parse tree and detects if any element is an enlisted list of symbols or an enlisted single symbol. If it finds one we replace it with a string representation of enlist\ninstead of ,\n.\nereptest:{ //returns a boolean\n(1=count x) and ((0=type x) and 11=type first x) or 11=type x}\nereplace:{\"enlist\",.Q.s1 first x}\nfuncEn:{$[ereptest x;ereplace x;0=type x;.z.s each x;x]}\nBefore we replace the item we first need to check it has the correct form. We need to test if it is one of:\n- An enlisted list of syms. It will have type\n0h\n, count 1 and the type of its first item will be11h\nif and only if it is an enlisted list of syms. - An enlisted single sym. 
It will have type\n11h\nand count 1 if and only if it is an enlisted single symbol.\nThe ereptest\nfunction above performs this check, with ereplace\nperforming the replacement.\nConsole size\n.Q.s1\nis dependent on the size of the console so make it larger if necessary.\nSince we are going to be checking a parse tree which may contain parse trees nested to arbitrary depth, we need a way to check all the elements down to the base level.\nWe observe that a parse tree is a general list, and therefore of type 0h\n. This knowledge combined with the use of .z.s\nallows us to scan a parse tree recursively. The logic goes: if what you have passed into funcEn\nis a parse tree then reapply the function to each element.\nTo illustrate we examine the following select\nquery.\nq)show pTree:parse \"select from trade where sym like \\\"F*\\\",not sym=`FD\"\n?\n`trade\n,((like;`sym;\"F*\");(~:;(=;`sym;,`FD))) 0b\n()\nq)x:eval pTree 2 //apply eval to Where clause\nConsider the Where clause in isolation.\nq)x //a 2-list of Where clauses\n(like;`sym;\"F*\")\n(~:;(=;`sym;,`FD))\nq)funcEn x\n(like;`sym;\"F*\")\n(~:;(=;`sym;\"enlist`FD\"))\nSimilarly we create a function which will replace k functions with their q equivalents in string form, thus addressing the third issue above.\nq)kreplace:{[x] $[`=qval:.q?x;x;string qval]}\nq)funcK:{$[0=t:type x;.z.s each x;t<100h;x;kreplace x]}\nRunning these functions against our Where clause, we see the k representations being converted to q.\nq)x\n(like;`sym;\"F*\")\n(~:;(=;`sym;,`FD))\nq)funcK x //replaces ~: with “not”\n(like;`sym;\"F*\")\n(\"not\";(=;`sym;,`FD))\nNext, we make a slight change to kreplace\nand ereplace\nand combine them.\nkreplace:{[x] $[`=qval:.q?x;x;\"~~\",string[qval],\"~~\"]}\nereplace:{\"~~enlist\",(.Q.s1 first x),\"~~\"}\nq)funcEn funcK x\n(like;`sym;\"F*\") (\"~~not~~\";(=;`sym;\"~~enlist`FD~~\"))\nThe double tilde here is going to act as a tag to allow us to differentiate from actual string elements in the parse tree. This allows us to drop the embedded quotation marks at a later stage inside the buildQuery\nfunction:\nq)ssr/[;(\"\\\"~~\";\"~~\\\"\");(\"\";\"\")] .Q.s1 funcEn funcK x\n\"((like;`sym;\\\"F*\\\");(not;(=;`sym;enlist`FD)))\"\nthus giving us the correct format for the Where clause in a functional select. By applying the same logic to the rest of the parse tree we can write the buildQuery\nfunction.\nq)buildQuery \"select from trade where sym like \\\"F*\\\",not sym=`FD\"\n\"?[trade;((like;`sym;\\\"F*\\\");(not;(=;`sym;enlist`FD)));0b;()]\"\nOne thing to take note of is that since we use reverse lookup on the .q\nnamespace and only want one result we occasionally get the wrong keyword back.\nq)buildQuery \"update tstamp:ltime tstamp from z\"\n\"![z;();0b;(enlist`tstamp)!enlist (reciprocal;`tstamp)]\"\nq).q`ltime\n%:\nq).q`reciprocal\n%:\nThese instances are rare and a developer should be able to spot when they occur. Of course, the functional form will still work as expected but could confuse readers of the code.\nFifth and sixth arguments¶\nFunctional select also has ranks 5 and 6; i.e. fifth and sixth arguments.\nQ for Mortals: §9.12.1 Functional queries\nWe also cover these with the buildQuery\nfunction.\nq)buildQuery \"select[10 20] from trade\"\n\"?[trade;();0b;();10 20]\"\nq)//5th parameter included\nThe 6th argument is a column and a direction to order the results by. 
Use <\nfor ascending and >\nfor descending.\nq)parse\"select[10;<price] from trade\"\n?\n`trade\n()\n0b\n()\n10\n,(<:;`price)\nq).q?(<:;>:)\n`hopen`hclose\nq)qfind each (\"<:\";\">:\") //qfind defined above\nhopen\nhclose\nWe see that the k function for the 6th argument of the functional form is <:\n(ascending) or >:\n(descending). At first glance this appears to be hopen\nor hclose\n. In fact in earlier versions of q, iasc\nand hopen\nwere equivalent (as were idesc\nand hclose\n). The definitions of iasc\nand idesc\nwere later altered to signal a rank error if not applied to a list.\nq)iasc\nk){$[0h>@x;'`rank;<x]}\nq)idesc\nk){$[0h>@x;'`rank;>x]}\nq)iasc 7\n'rank\nSince the columns of a table are lists, it is irrelevant whether the functional form uses the old or new version of iasc\nor idesc\n.\nThe buildQuery\nfunction handles the 6th argument as a special case so will produce iasc\nor idesc\nas appropriate.\nq)buildQuery \"select[10 20;>price] from trade\"\n\"?[trade;();0b;();10 20;(idesc;`price)]\"\nThe full buildQuery\nfunction code is as follows:\n\\c 30 200\ntidy:{ssr/[;(\"\\\"~~\";\"~~\\\"\");(\"\";\"\")] $[\",\"=first x;1_x;x]}\nstrBrk:{y,(\";\" sv x),z}\n//replace k representation with equivalent q keyword\nkreplace:{[x] $[`=qval:.q?x;x;\"~~\",string[qval],\"~~\"]}\nfuncK:{$[0=t:type x;.z.s each x;t<100h;x;kreplace x]}\n//replace eg ,`FD`ABC`DEF with \"enlist`FD`ABC`DEF\"\nereplace:{\"~~enlist\",(.Q.s1 first x),\"~~\"}\nereptest:{(1=count x) and ((0=type x) and 11=type first x) or 11=type x}\nfuncEn:{$[ereptest x;ereplace x;0=type x;.z.s each x;x]}\nbasic:{tidy .Q.s1 funcK funcEn x}\naddbraks:{\"(\",x,\")\"}\n//Where clause needs to be a list of Where clauses,\n//so if only one Where clause, need to enlist.\nstringify:{$[(0=type x) and 1=count x;\"enlist \";\"\"],basic x}\n//if a dictionary, apply to both keys and values\nab:{\n$[(0=count x) or -1=type x; .Q.s1 x;\n99=type x; (addbraks stringify key x ),\"!\",stringify value x;\nstringify x] }\ninner:{[x]\nidxs:2 3 4 5 6 inter ainds:til count x;\nx:@[x;idxs;'[ab;eval]];\nif[6 in idxs;x[6]:ssr/[;(\"hopen\";\"hclose\");(\"iasc\";\"idesc\")] x[6]];\n//for select statements within select statements\nx[1]:$[-11=type x 1;x 1;[idxs,:1;.z.s x 1]];\nx:@[x;ainds except idxs;string];\nx[0],strBrk[1_x;\"[\";\"]\"] }\nbuildQuery:{inner parse x}\nqSQL\nQ for Mortals\n§9.12 Functional Forms\nFunctional Query Functions"}}},{"rowIdx":91,"cells":{"text":{"kind":"string","value":"Performance tips¶\nHow do I execute functions in parallel?¶\nIn the expression f each xs\n, f\nis applied to each element of xs\nin sequence. In a multi-CPU setting, applications of f\ncan be done in parallel by using peach\ninstead of each\n. Typically this is worth it if f\nis computationally expensive.\nEvaluating a hardware configuration¶\nThe scripts throughput.q\nand io.q\nare a useful starting point for users wanting to measure the performance of the systems where kdb+ will be deployed. 
The results of these (somewhat rough) tests can be used to stress-test different CPU, disk and network configurations running kdb+.\nThroughput¶\nThis test measures the time to insert a million rows into a table, one at a time, and also as bulk inserts of 10, 100, 1000, and 10000 rows.\nTo run the test, simply load throughput.q\ninto a q session:\n$ q throughput.q\nOn an AMD Opteron box with 4 GB of RAM, we get\n0.672 million inserts per second (single insert)\n6.944 million inserts per second (bulk insert 10)\n20.408 million inserts per second (bulk insert 100)\n24.39 million inserts per second (bulk insert 1000)\n25 million inserts per second (bulk insert 10000)\nOn an AMD Turion64 laptop with 0.5 GB of RAM\n0.928 million inserts per second (single insert)\n8.065 million inserts per second (bulk insert 10)\n16.129 million inserts per second (bulk insert 100)\n16.129 million inserts per second (bulk insert 1000)\n16.129 million inserts per second (bulk insert 10000)\nOn a 12-core Mac mini with 64 GB of RAM\nKDB+ 4.1t 2022.01.14 Copyright (C) 1993-2022 Kx Systems\nm64/ 12()core 65536MB ..\n2.639 million inserts per second (single insert)\n25 million inserts per second (bulk insert 10)\n166.667 million inserts per second (bulk insert 100)\n333.333 million inserts per second (bulk insert 1000)\n333.333 million inserts per second (bulk insert 10000)\nthroughput.q\n:\nSTDOUT: -1\nSYMS: -1000?`3\nEXCHANGES: 10#.Q.A\ngetRandomTrades: {[N] ([]sym: N?SYMS; time: N?.z.t; price: N?100e; size: N?1000i; stop:N?0b; cond:N?.Q.A; ex:N?EXCHANGES)}\nt1: getRandomTrades 1\nt10: getRandomTrades 10\nt100: getRandomTrades 100\nt1000: getRandomTrades 1000\nt10000: getRandomTrades 10000\ntradeNew: 0#t1;\ntmp:value\"\\\\t do[1000000;tradeNew,:t1]\" / prepare space\ntradeNew:0#t1\nms:value\"\\\\t do[1000000;tradeNew,:t1]\"\ntmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms),\" million inserts per second (single insert)\"\ntradeNew:0#t1\nms:value\"\\\\t do[100000;tradeNew,:t10]\"\ntmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms),\" million inserts per second (bulk insert 10)\"\ntradeNew:0#t1\nms:value\"\\\\t do[10000;tradeNew,:t100]\"\ntmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms),\" million inserts per second (bulk insert 100)\"\ntradeNew:0#t1\nms:value\"\\\\t do[1000;tradeNew,:t1000]\"\ntmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms),\" million inserts per second (bulk insert 1000)\"\ntradeNew:0#t1\nms:value\"\\\\t do[100;tradeNew,:t10000]\"\ntmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms),\" million inserts per second (bulk insert 10000)\"\nexit 0\nDisk input/output¶\nThis test measures the cost of disk access from kdb+. 
Things that are measured include: open and close of a file; read files (cold and in the cache); write files; appends; getting the size of a file; etc.\nThe benchmark first creates the test files, and then does something else for a while to get them out of the cache.\n$ q io.q -prepare\nKDB+ 2.4t 2006.09.29 Copyright (C) 1993-2006 Kx Systems\nl64/ 4cpu 3943MB ...\nstart local q server with: q -p 5555\ntmpfiles created\nNext we need to start a second kdb+ process.\n$ q -p 5555\nNow we can run the benchmark.\n$ q io.q -flush 32 -run\nOn an AMD Opteron box with 4 GB of RAM, we get:\nmemory flushed (32GB)\n* local file\nhclose hopen`:read.test 0.0094 ms\nread `:read.test - 270 MB/sec\nread `:read.test - 392 MB/sec (cached)\nwrite `:write.test - 157 MB/sec\n* local fileops\n.[`:file.test;();,;2 3] 0.017 ms\n.[`:file.test;();:;2 3] 0.093 ms\nappend (2 3) to handle 0.00883 ms\nhcount`:file.test 0.0053 ms\nread1`:file.test 2.1732 ms\nvalue`:file.test 0.0251 ms\n* local comm\nhclose hopen`:127.0.0.1:5555 0.135 ms\nsync (key rand 100) 0.06277 ms\nasync (string 23);collect 0.00773 ms\nsync (string 23) 0.05514 ms\nFinally, we can clean up the temporary files.\n$ q io.q -cleanup\ntmpfiles deleted\nCommand-line arguments¶\nq io.q [-run] [-prepare] [-cleanup] [-flush memsizeingb] [-rl remotelocation] [-rh remotehost] / hardware timings\neg: q io.q -prepare -rl /mnt/foo\nq io.q -flush 32 -run -rl /mnt/foo -rh server19:5005\nq io.q -cleanup -rl /mnt/foo\nIf remote host/location aren’t supplied only local tests will be run.\nThe local and remote q servers must be started manually.\nPerformance of different versions of insert¶\nThere are several syntactic forms to insert rows into tables, with different costs. We demonstrate the differences.\nIn the examples, we use a non-keyed table.\nq)trade\ndate open high low close volume sym\n------------------------------------------------\n2006.10.03 24.5 24.51 23.79 24.13 19087300 AMD\n2006.10.03 27.37 27.48 27.21 27.37 39386200 MSFT\nq)row: first trade\nIn our first test, we use insert\n.\nq)load `:trade\nq)row: first trade\nq)\\t do[1000000; insert[`trade; row]]\n1968\nNext we test the version that uses the dot notation.\nq)load `:trade\nq)row: first trade\nq)\\t do[1000000; .[`trade; (); ,; row]]\n1890\nThose two can take the table as a parameter. If the table is known, we can also use the Amend operator, which is faster:\nq)load `:trade\nq)row: first trade\nq)\\t do[1000000; trade,: row]\n1718\nDifferences between versions\nThe result of this comparison might vary between different versions of kdb+. The tests shown above are for V2.4t.\nFinally, remember that bulk insert is faster than repeated inserts of single rows:\nq)load `:trade\nq)row: first trade\nq)rows: 1000000 # enlist row\nq)\\t insert[`trade; rows]\n109\nq)load `:trade\nq)\\t .[`trade; (); ,; rows]\n78\nq)load `:trade\nq)\\t trade,: rows\n78\nUsing the `g#\nattribute¶\nThis recipe demonstrates the use of the `g#\nattribute to improve performance of queries. 
The test is as follows: given 10 million trades and 10 million quotes, how long does it take to snapshot price, bid, ask, mid for the SP500 at some prior time?\nThe tables can be set up like this:\nq)n:10000000\nq)s:`$read0`:tick/sp500.txt\nq)S:s,-7500?`4 / 8000 symbols\nq)t:{09:30:00.0+floor 23400000%x%til x} / milliseconds from 9:30 to 16:00\nq)trade:([]sym:n?S;time:t n;price:n?100.0;ox:n?2) / 10 million trades\nq)quote:([]sym:n?S;time:t n;bid:n?100.0;ask:n?100.0) / 10 million quotes\nq)r:first s / sample ric\nq)t:12:00:00.0 / sample time\nThe test queries and their running times are as follows:\nq)\\t select last sym,last price from trade where sym=r,ox=1,time<=t\n84\nq)\\t select from trade where sym=r,ox=1,time=time time bin t\n84\nNow, let’s apply the attribute to the sym\ncolumn:\nq)update `g#sym from `trade\nq)update `g#sym from `quote\nThe queries now run faster.\nq)\\t select last sym,last price from trade where sym=r,ox=1,time<=t\n0\nq)\\t select from trade where sym=r,ox=1,time=time time bin t\n0\nIn fact, we need to run them many times to get a measurable time:\nq)n:1000\nq)\\t do[n;select last sym,last price from trade where sym=r,ox=1,time<=t]\n78\nq)\\t select from trade where sym=r,ox=1,time=time time bin t\n83\nSTAC-M3 benchmark¶\nSTAC-M3 is an independent benchmark for testing solutions (such as kdb+) that manage large timeseries datasets (tick databases). This has been run using kdb+ on several platforms. The results are available to registered STAC users.\nThese benchmarks are run on a year of daily NYSE TAQ-like data, approximately 5 TB in total. They use a series of up to 20 complex queries that were defined by financial institutions to reflect real business requirements. The benchmarks enable users and vendors to compare the performance of their database solutions against audited, third-party measurements.\n\nPivot tables¶\nSome notes on the theory and practice of pivoting tables.\nSimple pivot example¶\nGiven a source table\nq)t:([]k:1 2 3 2 3;p:`xx`yy`zz`xx`yy;v:10 20 30 40 50)\nwe want to obtain\nq)pvt:([k:1 2 3]xx:10 40 0N;yy:0N 20 50;zz:0N 0N 30)\nAs originally suggested by Jeff Borror, we begin by getting the distinct pivot values – these will become our column names in addition to the key column k\n. Note that p\nmust be a column of symbols for this to work.\nq)P:asc exec distinct p from t;\nAnd then create the pivot table!\nq)pvt:exec P#(p!v) by k:k from t;\nwhich can be read as: for each key k\n, create a dictionary of the present columns p\nand their values v\n, take the full list of columns from that dict, and finally collapse the list of dicts to a table.\nAnother variation on creating the pivot table\nq)pvt:exec P!(p!v)P by k:k from t;\nExplanation¶\nA key point to remember is that a table is a list of dictionaries and that is key to how we build the resulting pivot table. 
A list of conforming dictionaries (same symbol keys, value types) collapses to a table.\nq)pvt:((`k`xx`yy`zz!1 10 0N 0N);(`k`xx`yy`zz!2 40 20 0N);(`k`xx`yy`zz!3 0N 50 30))\nIt’s helpful to play around with these constructs at the q prompt.\nq)exec p!v from t\n`xx`yy`zz`xx`yy!10 20 30 40 50\nExtract key/value pairs for p\nand v\ngrouped by k\nq)exec p!v by k from t\n1 2 3!(enlist `xx!enlist 10;`yy`xx!20 40;`zz`yy!30 50)\nCreate a list of dictionaries\nq)exec p!v by k:k from t\n(flip (enlist `k)!enlist 1 2 3)!(enlist `xx!enlist 10;`yy`xx!20 40;`zz`yy!30 50)\nIn the dictionaries create nulls for missing values to allow them to conform with common column names and collapse to a table\nq)exec P#(p!v) by k:k from t\n(+(,`k)!,1 2 3)!+`s#`xx`yy`zz!(10 40 0N;0N 20 50;0N 0N 30)\nA very general pivot function, and an example¶\nCredit\nThe following is derived from a thread on the k4 listbox between Aaron Davies, Attila Vrabecz and Andrey Zholos.\nCreate sample data set of level-2 data at 4 quotes a minute, two sides, five levels, NSYE day\nq)qpd:5*2*4*\"i\"$16:00-09:30\nq)date:raze(100*qpd)#'2009.01.05+til 5\nq)sym:(raze/)5#enlist qpd#'100?`4\nq)sym:(neg count sym)?sym\nq)time:\"t\"$(raze/) 500#enlist 10#'09:30:00+15*til (qpd div 5*2)\nq)time+:(count time)?1000\nq)side:raze 500#enlist raze(qpd div 2)#enlist\"BA\"\nq)level:raze 500#enlist raze(qpd div 5)#enlist 0 1 2 3 4\nq)level:(neg count level)?level\nq)price:(500*qpd)?100f\nq)size:(500*qpd)?100\nq)quote:([]date;sym;time;side;level;price;size)\n/ pivot t, keyed by k, on p, exposing v\n/ f, a function of v and pivot values, names the columns\n/ g, a function of k, pivot values, and the return of f, orders the columns\n/ either can be defaulted with (::)\n/ conceptually, this is\n/ exec f\\[v;P\\]!raze((flip(p0;p1;.))!/:(v0;v1;..))\\[;P\\]by k0,k1,.. from t\n/ where P~exec distinct flip(p0;p1;..)from t\n/ followed by reordering the columns and rekeying\npiv:{[t;k;p;v;f;g]\nv:(),v;\nG:group flip k!(t:.Q.v t)k;\nF:group flip p!t p;\ncount[k]!g[k;P;C]xcols 0!key[G]!flip(C:f[v]P:flip value flip key F)!raze\n{[i;j;k;x;y]\na:count[x]#x 0N;\na[y]:x y;\nb:count[x]#0b;\nb[y]:1b;\nc:a i;\nc[k]:first'[a[j]@'where'[b j]];\nc}[I[;0];I J;J:where 1<>count'[I:value G]]/:\\:[t v;value F]}\nq)f:{[v;P]`$raze each string raze P[;0],'/:v,/:\\:P[;1]}\nq)g:{[k;P;c]k,(raze/)flip flip each 5 cut'10 cut raze reverse 10 cut asc c}\n/ `Bpricei`Bsizei`Apricei`Asizei for levels i\nUse a small subset for testing\nq)q:select from quote where sym=first sym\nq)book:piv[`q;`date`sym`time;`side`level;`price`size;f;g]\nq)![`book;();`date`sym!`date`sym;{x!fills,'x}cols get book];\nq)book\nOne user reports:\nThis is able to pivot a whole day of real quote data, about 25 million quotes over about 4000 syms and an average of 5 levels per sym, in a little over four minutes."}}},{"rowIdx":92,"cells":{"text":{"kind":"string","value":"upsert\n¶\nOverwrite or append records to a table\nx upsert y upsert[x;y]\nWhere\nx\nis a table, or the name of a table as a symbol atom, or the name of a splayed table as a directory handley\nis zero or more records\nthe records are upserted into the table.\nThe record/s y\nmay be either\n- lists with types that match\ntype each x cols x\n- a table with columns that are members of\ncols x\nand have corresponding types\nIf x\nis the name of a table, it is updated in place. 
Otherwise the updated table is returned.\nIf x\nis the name of a table as a symbol atom (or the name of a splayed table as a directory handle) that does not exist in the file system, it is written to file.\nSimple table¶\nIf the table is simple, new records are appended. If the records are in a table, it must be simple.\nq)t:([]name:`tom`dick`harry;age:28 29 30;sex:`M)\nq)t upsert (`dick;49;`M)\nname age sex\n-------------\ntom 28 M\ndick 29 M\nharry 30 M\ndick 49 M\nq)t upsert((`dick;49;`M);(`jane;23;`F))\nname age sex\n-------------\ntom 28 M\ndick 29 M\nharry 30 M\ndick 49 M\njane 23 F\nq)`t upsert ([]age:49 23;name:`dick`jane)\n`t\nq)t\nname age sex\n-------------\ntom 28 M\ndick 29 M\nharry 30 M\ndick 49\njane 23\nKeyed table¶\nIf the table is keyed, any new records that match on key are updated. Otherwise, new records are inserted.\nIf the right argument is a table it may be keyed or unkeyed.\nq)a upsert (`e;30;70) / single record\ns| r u\n-| -----\nq| 1 5\nw| 2 6\ne| 30 70\nq)a upsert ((`e;30;70);(`r;40;80)) / multiple records\ns| r u\n-| -----\nq| 1 5\nw| 2 6\ne| 30 70\nr| 40 80\nq)show a:([]s:`q`w`e;r:1 2 3;u:5 6 7) / simple table\ns| r u\n-| ---\nq| 1 5\nw| 2 6\ne| 3 7\nq)/update `q and `e, insert new `r; return new table\nq)a upsert ([s:`e`r`q]r:30 4 10;u:70 8 50) / keyed table\ns| r u\n-| -----\nq| 10 50\nw| 2 6\ne| 30 70\nr| 4 8\nq)`a upsert ([s:`e`r`q]r:30 4 10;u:70 8 50) / same but update table in place\n`a\nSerialized table¶\nq)`:data/tser set ([] c1:`a`b; c2:1.1 2.2)\n`:data/tser\nq)`:data/tser upsert (`c; 3.3)\n`:data/tser\nq)get `:data/tser\nc1 c2\n------\na 1.1\nb 2.2\nc 3.3\nUpserting to a serialized table reads the table into memory, updates it, and writes it back to file.\nSplayed table¶\nq)`:data/tsplay/ set ([] c1:`sym?`a`b; c2:1.1 2.2)\n`:data/tsplay/\nq)`:data/tsplay upsert (`sym?`c; 3.3)\n`:data/tsplay\nq)select from `:data/tsplay\nc1 c2\n------\na 1.1\nb 2.2\nc 3.3\nUpserting to a splayed table appends new values to the column files.\nUpserting to a serialized or splayed table removes any attributes set.\nCond is not supported inside q-SQL expressions\nEnclose in a lambda or use Vector Conditional instead.\n\nvalue\n¶\nRecurse the interpreter\nvalue x value[x]\nReturns the value of x\n:\ndictionary value of the dictionary\nsymbol atom value of the variable it names\nenumeration corresponding symbol vector\nstring result of evaluating it in current context\nlist result of calling or indexing\nthe first element\nwith the remaining elements\n(if the first element is a string or symbol,\nit is evaluated first)\nnote that this is different from a parse tree\nthat is handled by eval\n.\nprojection list: function followed by argument/s composition list of composed values derived function argument of the iterator operator internal code\nview list of metadata lambda structure\nfile symbol content of datafile\nExamples:\nq)value `q`w`e!(1 2;3 4;5 6) / dictionary\n1 2\n3 4\n5 6\nq)a:1 2 3\nq)value `a / symbol\n1 2 3\nq)e:`a`b`c\nq)x:`e$`a`a`c`b\nq)x\n`e$`a`a`c`b\nq)value x / enumeration\n`a`a`c`b\nq)value \"enlist a:til 5\" / string\n0 1 2 3 4\nq)value \"{x*x}\"\n{x*x}\nq)value \"iasc 2 7 3 1\"\n3 0 2 1\nq)\\d .a\nq.a)value\"b:2\"\n2\nq.a)b\n2\nq.a)\\d .\nq)b\n'b\nq).a.b\n2\nq)value(+;1;2) / list - apply a function or index a list\n3\nq)/ if the first item is a string or symbol, it is evaluated first\nq)value(`.q.neg;2)\n-2\nq)value(\"{x+y}\";1;2)\n3\nq)value +[2] / projection\n+\n2\nq)value differ / composition\n~:\n~':\nq)f:,/:\\: / derived function\nq)value 
f\n,/:\nq)value each (::;+;-;*;%) / operator\n0 1 2 3 4\nThe string form can be useful as a kind of ‘prepared statement’ from the Java client API since the Java serializer doesn’t support lambdas and keywords.\nView¶\nreturns a list of metadata:\n- cached value\n- parse tree\n- dependencies\n- definition\nWhen the view is pending, the cached value is ::\n.\nq)a:1\nq)b::a+1\nq)get`. `b\n::\n(+;`a;1)\n,`a\n\"a+1\"\nq)b\n2\nq)get`. `b\n2\n(+;`a;1)\n,`a\n\"a+1\"\nq)\nLambda¶\nThe structure of the result of value\non a lambda is subject to change between versions.\nAs of V3.5 the structure is:\n(bytecode;parameters;locals;(namespace,globals);constants[0];…;constants[n];m;n;f;l;s)\nwhere\n| this | is |\n|---|---|\nm |\nbytecode to source position map; -1 if position unknown |\nn |\nfully qualified (with namespace) function name as a string, set on first global assignment, with @ appended for inner lambdas; () if not applicable |\nf |\nfull path to the file where the function originated from; \"\" if not applicable |\nl |\nline number in said file; -1 if n/a |\ns |\nsource code |\nq)f:{[a;b]d::neg c:a*b+5;c+e}\nq)value f\n0xa0624161430309220b048100028269410004\n`a`b\n,`c\n``d`e\n5\n21 19 20 17 18 0 16 11 0 9 0 9 0 25 23 24 2\n\"..f\"\n\"\"\n-1\n\"{[a;b]d::neg c:a*b+5;c+e}\"\nq)/Now define in .test context – globals refer to current context of test\nq)\\d .test\nq.test)f:{[a;b]d::neg c:a*b+5;c+e}\nq.test)value f\n0xa0624161430309220b048100028269410004\n`a`b\n,`c\n`test`d`e\n5\n21 19 20 17 18 0 16 11 0 9 0 9 0 25 23 24 2\n\".test.f\"\n\"\"\n-1\n\"{[a;b]d::neg c:a*b+5;c+e}\"\nLocal values in suspended functions¶\nSee changes since V3.5 that support debugging.\nget\n¶\nThe function value\nis the same as get\nBy convention get\nis used for file I/O but the two are interchangeable.\nq)get \"2+3\" / same as value\n5\nq)value each (get;value) / same internal code\n19 19\n\nvar\n, svar\n¶\nVariance, sample variance\nvar\n¶\nVariance\nvar x var[x]\nWhere x\nis a numeric list, returns its variance as a float atom. Nulls are ignored.\nq)var 2 3 5 7\n3.6875\nq)var 2 3 5 0n 7\n3.6875\nq)select var price by sym from trade where date=2010.10.10,sym in`IBM`MSFT\nvar\nis an aggregate function, equivalent, where sqr:{x*x}\nto\n{avg[sqr x]-sqr[avg x]}\nSince 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists.\nq)M:get`:m77 set m:(2 3;4 0N;1 7)\nq)var m\n1.555556 4\nq)var M\n1.555556 4\nq)T:get`:tab/ set t:flip`a`b!flip m\nq)var t\na| 1.555556\nb| 4\nq)var T\na| 1.555556\nb| 4\nvar\nis a multithreaded primitive.\nsvar\n¶\nSample variance\nsvar x svar[x]\nWhere x\nis a numeric list, returns its sample variance as a float atom.\n\\[svar(x)=\\frac{n}{n-1}var(x)\\]\nq)var 2 3 5 7\n3.6875\nq)svar 2 3 5 7\n4.916667\nq)select svar price by sym from trade where date=2010.10.10,sym in`IBM`MSFT\nsvar\nis an aggregate function, equivalent to {var[x]*count[x]%-1+count x}\n.\nSince 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists.\nq)M:get`:m77 set m:(2 3;4 0N;1 7)\nq)svar m\n2.333333 8\nq)svar M\n2.333333 8\nq)T:get`:tab/ set t:flip`a`b!flip m\nq)svar t\na| 2.333333\nb| 8\nq)svar T\na| 2.333333\nb| 8\nsvar\nis a multithreaded primitive.\nDomain and range¶\ndomain: b g x h i j e f c s p m d z n u v t\nrange: f . f f f f f f f . 
f f f f f f f f\n\n?\nVector Conditional¶\nReplace selected items of one list with corresponding items of another\n?[x;y;z]\nWhere\nx\nis a boolean vectory\nandz\nare lists of the same typex\n,y\n, andz\nconform\nreturns a new list by replacing elements of y\nwith the elements of z\nwhen x\nis false.\nAll three arguments are evaluated.\nq)?[11001b;1 2 3 4 5;10 20 30 40 50]\n1 2 30 40 5\nIf x\n, y\n, or z\nare atomic, they are repeated.\nq)?[11001b;1;10 20 30 40 50]\n1 1 30 40 1\nq)?[11001b;1 2 3 4 5;99]\n1 2 99 99 5\nSince V2.7 2010.10.07 ?[x;y;z]\nworks for atoms too.\nVector Conditional can be used in qSQL queries, which do not support Cond.\nFor multiple cases – more than just true/false – see Controlling evaluation.\n?\nQuery,\nCond,\nif\nControlling evaluation\nQ for Mortals\n§10.1.3 Vector Conditional Evaluation\n\nview\n, views\n¶\nview\n¶\nExpression defining a view\nview x view[x]\nWhere x\nis a view (by reference), returns the expression defining x\n.\nq)v::2+a*3 / define dependency v\nq)a:5\nq)v\n17\nq)view `v / view the dependency expression\n\"2+a*3\"\nviews\n¶\nList views defined in the default namespace\nviews[]\nReturns a sorted list of the views currently defined in the default namespace.\nq)w::b*10\nq)v::2+a*3\nq)views[]\n`s#`v`w\nMetadata\nViews\nQ for Mortals\n§4.11 Alias"}}},{"rowIdx":93,"cells":{"text":{"kind":"string","value":"u.q¶\nu.q\nis available from KxSystems/kdb-tick\nOverview¶\nContains functions to allow clients to subscribe to all or subsets of available data, publishing to interested clients and alerting clients to events, for example, end-of-day. Tracks client subscription interest and removes client subscription details on their disconnection.\nThis script is loaded by other processes, for example a tickerplant.\nUsage¶\nTo allow the ability to publish data to any process, do the following:\n- load\nu.q\n- declare the tables to be published in the top level namespace. Each table must contain a column called\nsym\n, which acts as the single key field to which subscribers subscribe - initialize by calling\n.u.init[]\n- publish data by calling\n.u.pub[table name; table data]\nThe list of tables that can be published and the processes currently subscribed are held in .u.w\n.\nSubscriber processes must open a connection to the publisher and call .u.sub[tablename;list_of_symbols_to_subscribe_to]\n.\nIf a subscriber calls .u.sub\nagain, the current subscription is overwritten either for all tables (if a wildcard is used) or the specified table.\nTo add to a subscription, for example, add more syms\nto a current subscription, the subscriber can call .u.add\n).\nClients should define a upd\nfunction to receive updates, and .u.end\nfunction for end-of-day events.\nVariables¶\n| Name | Description |\n|---|---|\n| .u.w | Dictionary of registered client interest in data being processed (for example, tables->(handle;syms) |\n| .u.t | Table names |\nFunctions¶\nFunctions are open source and open to customisation.\n.u.init¶\nInitialise variables used to track registered clients.\n.u.init[]\nInitialises variables by retrieving all tables defined in the root namespace. Used to track client interest in data being published.\n.u.del¶\nDelete subscriber from dictionary of known subscribers (.u.w\n) for given table\n.u.del[x;y]\nWhere\nx\nis a table namey\nis the connection handle\n.u.sel¶\nSelect from table, given optional sym filter. 
Used to filter tables to clients who may not want everything from the table.\n.u.sel[x;y]\nWhere\nx\nis a tabley\nis a list of syms (can be empty list)\nreturns the table x\n, which can be filtered by y\n.\n.u.pub¶\nPublish updates to subscribers.\n.u.pub[x;y]\nWhere\nx\nis table name (sym type)y\nis new data for tablex\n(table type)\nActions performed:\n- find interested client handles for table\nx\nand any filter they may have (using.u.w\n) - for each client\n- filter\ny\nusing.u.sel\n(if client specified a filter at subscription time) - publish asynchronously to client, calling their\nupd\nfunction with parameters table name and table data.\n- filter\n.u.add¶\nAdd client subscription interest in table with optional filter.\n.u.add[x;y]\nWhere\nx\nis a table name (sym)y\nis list of syms used to filter table data, with empty sym representing for all table data\nActions performed:\n- uses\n.z.w\nto get current client handle. - find any existing subscriptions to table\nx\nfor client (using.u.w\n)- if existing, update filter with union on\ny\n- else a new entry is added to\n.u.w\nwith client handle,x\nandy\n.\n- if existing, update filter with union on\nReturns 2 element list. The first element is the table name. The second element depends on whether x\nrefers to a keyed table.\n- If\nx\nis a keyed table,.u.sel\nis used to select from the keyed table the required syms - otherwise returns an empty table\nx\n(schema definition of table), with the grouped attribute applied to the sym column.\n.u.sub¶\nUsed by clients to register subscription interest.\n.u.sub[x;y]\nWhere\nx\nis a table name (sym)y\nis list of syms used to filter table data, with empty sym representing for all table data\nIf x\nis empty symbol, client is subscribed to all known tables using y\ncriteria. This is achieved by calling .u.sub for each table in .u.t\n.\nFor the subscribing client, any previous registered in the given tables are removed prior to reinstating new criteria provided i.e. calls .u.del\n.\nCalls .u.add\nto record the client subscription.\nReturns\n- a two item list if x is an indivial table name. First item is the table name subscribed to as a symbol. Second item is an empty table (table schema).\n- a list of two item lists as described above for each individual table, if x is an empty symbol (i.e. subscribe to all tables)\n- an error if the table does not exist.\n.u.end¶\nInform all registered clients that end-of-day has occurred.\n.u.end[x]\nWhere x\nis a date, representing the day that is ending.\nIterates over all client handles via .u.w\nand asynchronously calls their .u.end\nfunction passing x\n.\n.z.pc¶\nImplementation of .z.pc\ncallback for connection close.\nCalled when a client disconnects. The client handle provided is used to call .u.del\nfor all tables. This ensures all subscriptions are removed for that client.\nExample¶\ntick.q\nis an example of a tickerplant that uses u.q\nfor pub/sub.\nIn addition, the example scripts below demonstrate pub/sub in a standalone publisher and subscriber. They can be downloaded from KxSystems/cookbook/pubsub. Each script should be run from the OS command prompt as shown in the following example.\n$ q publisher.q\n$ q subscriber.q\nThe publisher generates some random data and publishes it periodically on a timer.\nThe subscriber receives data from the publisher and is displayed on the screen. You can modify the subscription request and the upd\nfunction of the subscriber as required. 
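For reference, a minimal subscriber along the lines described above might look like the sketch below. The port 5010 and the table name trade are assumptions — use whatever your publisher actually listens on and publishes.

/ subscriber.q - minimal sketch; port and table name are assumptions
h:hopen `::5010                        / connect to the publisher
upd:{[t;x] show t; show x}             / called by .u.pub with (table name; new rows)
.u.end:{[d] -1"end of day ",string d;} / called by the publisher's .u.end
h(".u.sub";`trade;`)                   / subscribe to `trade for all syms (` means no filter)

To receive only selected symbols, pass them in place of the empty symbol, for example h(".u.sub";`trade;`IBM`MSFT).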
You can run multiple subscribers at once."}}},{"rowIdx":94,"cells":{"text":{"kind":"string","value":"Implicit iteration¶\nBefore you specify iteration, see whether what you need is already implicit in the operators and keywords\nThis tutorial as a video presentation\nLists and dictionaries are first-class entities in q, and most operators and keywords iterate through them. This article is about when to leave it to q.\nThat is, when not to specify iteration.\nRecall:\n- Map iteration\n-\nevaluates an expression once on each item in a list or dictionary.\n- Accumulator iteration\n-\nevaluates an expression successively: the result of one evaluation becomes an argument of the next.\nImplicit map iterations¶\nThe simplest and most common implicit map iteration is pairwise: between corresponding list items.\nq)10 100 1000 * (1 2 3;4 5 6;7 8)\n10 20 30\n400 500 600\n7000 8000\nOf course, this requires the lists to have the same number of items.\nq)10 100 1000 * (1 2 3;4 5 6)\n'length\n[0] 10 100 1000 * (1 2 3;4 5 6)\n^\nScalar extension¶\nUnless! If one of the operands is an atom, scalar extension pairs it with every list item.\nq)5 < 1 2 3 4 5 6 7 8\n00000111b\nq)\"f\" < (\"abc\";\"def\";\"gh\")\n000b\n000b\n11b\nAtomic iteration¶\nMany operators have atomic iteration: they iterate recursively, pairwise and with scalar extension, until they find the atoms in a list.\nq)1 4 7 < (1 2 3;4 5 6;7 8)\n011b\n011b\n01b\nq)(1;2 3 4; 7) < (1 2 3;4 5 6;7 8)\n011b\n111b\n01b\nq)(1;2 3 4;(5 6 7;8)) < (1 2 3;4 5 6;7 8)\n011b\n111b\n(110b;0b)\nSimilarly, some unary keywords implicitly apply to each item of a list argument – and recurse to atoms.\nq)cos (1 2 3; 4 5 6)\n0.5403023 -0.4161468 -0.9899925\n-0.6536436 0.2836622 0.9601703\nq)lower(\"THE\";(\"Quick\";\"Brown\");\"FOX\")\n\"the\"\n(\"quick\";\"brown\")\n\"fox\"\nAtomic operators are atomic in both their left and right domains.\n4 < (1;2 3 4;(5 6 7;8))\n0b\n000b\n(111b;1b)\nSome binary keywords are atomic in only one domain.\nFor example, the right argument of within\nis an ascending pair of sortable type.\nBut in its left domain, within\nis atomic.\nq)2 3 4 within 3 6\n011b\nq)(2 3 4;(5; 6 7;8)) within 3 6\n0 1 1\n1b 10b 0b\nList iteration¶\nList iteration is through list items only – not atomic.\nThe like\nkeyword has list iteration in its left domain.\nq)`quick like \"qu?ck\"\n1b\nq)`quick`quack`quark like \"qu?ck\" / list iteration\n110b\nq)(`quick;`quack`quark) like \"qu?ck\" / but not atomic\n'type\n[0] (`quick;`quack`quark) like \"qu?ck\"\n^\nList iteration stops after the first level: it does not recurse.\nSimple visualizations¶\nEven a simple visual display can be useful. 
Here are sines of the first twenty positive integers, tested to see which of them is greater than 0.5.\nq).5 < sin 1 + til 20\n11000011000001100001b\nWe can take that boolean vector and use it to index a short string, getting us a simple visual display.\nAnd, as you probably know, Index At @\ncan be elided and replaced with prefix notation.\nq)\".#\" @ .5 < sin 1 + til 20\n\"##....##.....##....#\"\nq)\".#\" .5 < sin 1 + til 20\n\"##....##.....##....#\"\nIndex At is atomic in its right domain; that is, right-atomic.\nHere we’ll index a string with an integer vector and we’ll get a string result.\nq)\" -|+\" @ 0 3 1 1 1 3 0\n\" +---+ \"\nIf we index it with a 2-row matrix – two integer vectors – we’ll get a character matrix back.\nq)\" -|+\" @ (0 3 1 1 1 3 0;0 2 0 0 0 2 0)\n\" +---+ \"\n\" | | \"\nAnd if we take that 2-row matrix and index it – to make selections from it – the result is a numeric matrix.\nq)(0 3 1 1 1 3 0;0 2 0 0 0 2 0) @ 0 1 1 1 0\n0 3 1 1 1 3 0\n0 2 0 0 0 2 0\n0 2 0 0 0 2 0\n0 2 0 0 0 2 0\n0 3 1 1 1 3 0\nAnd because Index At is right-atomic we can use the numeric matrix to index the string.\nq)\" -|+\" @(0 3 1 1 1 3 0;0 2 0 0 0 2 0) @ 0 1 1 1 0\n\" +---+ \"\n\" | | \"\n\" | | \"\n\" | | \"\n\" +---+ \"\nIndex At is right-atomic, but in its left domain it has list iteration: list items need not be atoms.\nIn this example, the list items are themselves strings.\nIf we index that list of strings with an integer matrix, we get back a matrix of strings.\nq)show L:(\"the\";\"quick\";\"brown\";\"fox\")\n\"the\"\n\"quick\"\n\"brown\"\n\"fox\"\nq)(1 3;2 0)\n1 3\n2 0\nq)L@(1 3;2 0)\n\"quick\" \"fox\"\n\"brown\" \"the\"\nq)show q:4 5#.Q.a\n\"abcde\"\n\"fghij\"\n\"klmno\"\n\"pqrst\"\nq)q @ (1 2;3 1) / Index At: right-atomic\n\"fghij\" \"klmno\"\n\"pqrst\" \"fghij\"\nq)q . (1 2;3 1) / Index: list iteration on the right\n\"ig\"\n\"nl\"\nSome keywords evaluate a binary expression between adjacent items in a list.\nq)deltas 1 5 0 9 5 2\n1 4 -5 9 -4 -3\nq)ratios 2 3 4 5\n2 1.5 1.333333 1.25\nThese are map iterations: the evaluations are independent and can be performed in parallel.\nExercise 1¶\nsensors.txt\ncontains (24) hourly sensor readings over a 12-day period.\nSensor readings are in the range 0-9.\n$ wget https://code.kx.com/download/learn/iteration/sensors.txt\n--2022-01-03 11:27:18-- https://code.kx.com/download/learn/iteration/sensors.txt\nResolving code.kx.com (code.kx.com)... 74.50.49.235\nConnecting to code.kx.com (code.kx.com)|74.50.49.235|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 300 [text/plain]\nSaving to: ‘sensors.txt’\nsensors.txt 100%[===================>] 300 --.-KB/s in 0s\n2022-01-03 11:27:19 (143 MB/s) - ‘sensors.txt’ saved [300/300]\nq)show s:read0`:sensors.txt\n\"030557246251157265736086\"\n\"757251109999993270188377\"\n\"776439448625126896347568\"\n\"116491158137137589031187\"\n\"855938799541699262946623\"\n\"104948806186867057936025\"\n\"328964479858696484945053\"\n\"861596102999933729145653\"\n\"623589072102430497578780\"\n\"240663439999997746246672\"\n\"311551572414272384005263\"\n\"850884046457214232200714\"\na. 
For each of the 24 hours, on how many days of the period did the sensor reading for that hour fall to zero?\nConverting the sensor readings to numbers is not necessary: they can be compared directly to \"0\"\n.\nq)s=\"0\"\n101000000000000000000100b\n000000010000000001000000b\n000000000000000000000000b\n000000000000000000100000b\n000000000000000000000000b\n010000010000000100000100b\n000000000000000000000100b\n000000010000000000000000b\n000000100010001000000001b\n001000000000000000000000b\n000000000000000000110000b\n001000100000000000011000b\nThe Equals operator has implicit atomic iteration. Here it iterates across the items (rows) of the list s\n. Each item (row) is a character list (string) and Equals continues iterating through the items.\nThe result of s=\"0\"\nis a boolean matrix of the same shape as s\n.\nSumming it simply adds the rows together.\nq)sum s=\"0\"\n1 1 3 0 0 0 2 3 0 0 1 0 0 0 1 1 0 1 2 2 1 3 0 1i\nYour maintenance manager gets automated reports printed, but the last report got damaged. She needs your help.\nb. On which days did the sensor readings begin (8, 6, 1, 5, …) and (1, 1, 6, 4, …)?\nWe can search the first four columns of s\nfor these sequences.\nq)s[;til 4]\n\"0305\"\n\"7572\"\n\"7764\"\n\"1164\"\n\"8559\"\n\"1049\"\n\"3289\"\n\"8615\"\n\"6235\"\n\"2406\"\n\"3115\"\n\"8508\"\nThe Find operator has list iteration in both left and right domains.\nq)s[;til 4]?(\"8615\";\"1164\")\n7 3\nVisualizations help us find patterns in datasets. Even simple visualizations can be valuable.\nNormal operating levels are in the range (2,7).\nc. Display a simple plot showing when the sensors reported levels outside that range.\nThe within\nkeyword take as right argument a 2-item vector of sortable type.\nIt has atomic iteration in its left domain.\nKeyword not\nis atomic.\nq)not s within \"27\"\n101000000001100000000110b\n000001111111110001111000b\n000001001000100110000001b\n110011101100100011101110b\n100101011001011000100000b\n110101110110100100100100b\n001100001101010010100100b\n101010110111100001100000b\n000011100110001010001011b\n001000001111110000000000b\n011001000010000010110000b\n101110100000010000011010b\nBecause Index At is right-atomic we can use the boolean matrix to index a string.\n\".#\"not s within \"27\"\n\"#.#........##........##.\"\n\".....#########...####...\"\n\".....#..#...#..##......#\"\n\"##..###.##..#...###.###.\"\n\"#..#.#.##..#.##...#.....\"\n\"##.#.###.##.#..#..#..#..\"\n\"..##....##.#.#..#.#..#..\"\n\"#.#.#.##.####....##.....\"\n\"....###..##...#.#...#.##\"\n\"..#.....######..........\"\n\".##..#....#.....#.##....\"\n\"#.###.#......#.....##.#.\"\nAt level 9 productivity is highest.\nd. 
Plot when in the period this occurred.\n\".#\"s=\"9\"\n\"........................\"\n\"........######..........\"\n\".....#..........#.......\"\n\"....#............#......\"\n\"...#...##....##...#.....\"\n\"...#..............#.....\"\n\"...#....#....#....#.....\"\n\"....#....####....#......\"\n\".....#..........#.......\"\n\"........######..........\"\n\"........................\"\n\"........................\"\nImplicit accumulator iterations¶\nAccumulator iterations evaluate some expression successively: the result of one evaluation becomes the argument of the next.\nWe have already used the sum\nkeyword, which implicitly evaluates Add between successive items of a list.\nq)((2+3)+4)+5\n14\nq)sum 2 3 4 5\n14\nq)a:`cats`dogs!2 3; b:`cows`sheep!3 4; c:`dogs`sheep!5 6\nq)sum (a;b;c)\ncats | 2\ndogs | 8\ncows | 3\nsheep| 10\nsum\nis an aggregator: it returns the result of its last evaluation.\nsums\nalso iterates successively, but returns the results of all the evaluations.\nq)(2;2+3;2+3+4;2+3+4+5)\n2 5 9 14\nq)sums 2 3 4 5\n2 5 9 14\nNotice that the result has the same length as the argument: sums\nis a uniform function.\nNotice also that the index of the result corresponds to the number of evaluations: (sums 2 3 4 5)[3]\nis the result of three additions and (sums 2 3 4 5)[0]\nis the result of no additions.\nKeywords such as mavg\nand msum\ncombine map iterations (e.g. evaluate on each group of three successive items) with an aggregator which might employ accumulator iteration, e.g. sum\n.\nq)3 msum 1 5 0 9 5 2 2 4 0 5 3 0\n1 6 6 14 14 16 9 8 6 9 8 8\nExercise 2¶\nFactory productivity is thought to be most affected by the machinery’s fuddling level. An automated process adjusts the fuddling level every 20 minutes to keep it stable; the level resets to zero each midnight.\nWe have in fudadj.csv\na log of the adjustments.\nq)\\wget -q https://code.kx.com/download/learn/iteration/fudadj.csv\nq)read0 `:fudadj.csv / fuddling adjustments\n\"-1,-1,3,3,2,3,3,3,1,-1,3,0,2,1,2,1,0,-1,3,0,3,1,1,1,3,0,-1,3,-1,2,0,2,1,3,0,0,0,..\n\"0,1,-1,-1,3,-1,-1,3,1,1,2,1,-1,1,3,2,2,3,2,2,2,3,3,3,2,3,0,3,3,1,2,1,-1,-1,-1,0,..\n\"1,0,-1,2,3,-1,1,-1,-1,-1,2,3,2,0,0,3,3,2,2,-1,2,-1,2,0,1,2,2,0,0,-1,1,3,-1,1,-1,..\n\"3,2,2,1,3,-1,-1,-1,1,-1,1,1,0,-1,0,3,-1,0,2,0,2,0,1,2,3,2,1,3,-1,2,-1,1,2,1,-1,3..\n\"1,0,3,-1,2,3,3,1,1,2,-1,1,1,3,-1,2,2,2,2,2,0,3,-1,1,2,-1,3,0,0,1,2,3,3,0,-1,0,-1..\n\"2,-1,3,2,1,2,3,3,1,2,-1,-1,1,-1,0,-1,3,2,-1,-1,-1,1,1,2,2,3,0,2,1,0,1,2,3,3,2,-1..\n\"3,1,-1,2,1,3,-1,1,0,1,2,2,1,3,1,1,1,3,2,-1,-1,1,0,3,3,0,0,2,1,0,2,3,2,2,2,0,-1,-..\n\"-1,2,-1,-1,1,2,-1,0,2,3,0,2,0,1,2,-1,3,3,1,2,-1,-1,-1,3,3,0,1,1,1,3,2,1,-1,1,2,2..\n\"-1,3,-1,2,0,0,1,1,1,3,0,2,2,2,2,-1,-1,-1,-1,1,1,3,0,3,-1,1,2,3,0,-1,2,2,2,2,0,2,..\n\"3,2,-1,-1,0,-1,3,2,0,3,1,0,0,2,3,2,1,1,-1,2,3,-1,3,3,3,-1,1,3,2,1,1,1,2,3,2,1,1,..\n\"0,2,0,1,-1,3,0,2,-1,2,-1,2,0,0,-1,3,0,3,1,0,2,2,3,-1,2,0,1,1,2,0,2,2,0,0,0,-1,1,..\n\"2,-1,2,-1,3,0,1,1,0,-1,2,2,3,3,0,0,-1,1,3,-1,1,2,2,3,2,-1,0,2,0,3,0,1,1,0,3,3,-1..\nWhat were the fuddling levels corresponding to the sensor readings in Exercise 1?\nThe file has no column headers, so Load CSV returns not a table but a list of columns.\nq)show fa:(prd[24 3]#\"J\";csv)0: read0 `:fudadj.csv / fuddling adjustments\n-1 0 1 3 1 2 3 -1 -1 3 0 2\n-1 1 0 2 0 -1 1 2 3 2 2 -1\n3 -1 -1 2 3 3 -1 -1 -1 -1 0 2\n3 -1 2 1 -1 2 2 -1 2 -1 1 -1\n2 3 3 3 2 1 1 1 0 0 -1 3\n..\nThat suits us. The 72 rows correspond to 20-minute intervals. We take cumulative sums across the intervals, and select every third sum to get the hourly levels. 
Transposing the result gives us 12×24 fuddling levels.\nq)flip sums[fa]@2+3*til 24\n1 9 16 18 23 23 29 32 34 38 41 44 44 50 52 54 59 62 65 72 72 76 78 80\n0 1 4 8 11 18 24 33 38 45 47 45 50 51 54 55 58 58 57 61 64 68 69 66\n0 4 3 7 9 17 20 21 26 25 28 27 28 33 34 34 36 42 46 49 51 55 55 61\n7 10 9 10 9 11 15 18 24 28 30 33 35 41 41 43 43 49 51 56 57 57 62 63\n4 8 13 15 18 24 28 31 35 36 44 43 45 47 49 55 58 60 64 62 65 67 68 70\n4 9 16 16 16 20 17 21 26 29 35 39 42 49 57 59 65 69 73 73 75 74 74 77\n3 9 9 14 19 24 24 28 31 34 41 45 46 49 55 57 59 62 68 71 75 77 79 79\n0 2 3 8 11 16 18 19 23 28 30 35 36 37 41 41 42 47 54 59 58 59 63 67\n1 3 6 11 17 14 15 21 23 25 31 35 41 46 47 51 54 59 63 67 73 77 81 86\n4 2 7 11 16 20 24 29 32 38 42 48 51 56 60 67 64 72 75 78 79 82 85 91\n2 5 6 9 8 14 17 21 24 27 31 30 32 36 38 39 41 44 42 47 50 50 51 53\n3 5 7 10 16 16 19 26 27 32 34 40 39 40 43 51 51 53 57 62 65 63 65 71\nIt is clear that the automatic adjustments are not keeping the fuddling levels stable.\nYet another way q is weird?\nIf in other languages you are used to specifying iterations, you may at first experience this as an annoying distraction. Besides solving your problem, you also have to learn and keep in mind q’s implicit iterations. You already know how to write iterations. Why now learn this?\nThe reward is that, as implicit iteration becomes familiar to you, you stop thinking about most of the iterations in your code, which leaves you more mental space for problem solving. (Only when we put on noise-cancelling headphones do we discover how much annoying background noise we had been filtering out.)\nAs a bonus, many algorithms are startlingly simple to write in q. It’s way cool.\nConclusion¶\nThat’s it. The big takeaway is that there is a lot of iteration built into the q primitives. It will almost always give you your shortest, fastest code – and the most readable."}}},{"rowIdx":95,"cells":{"text":{"kind":"string","value":"An introduction to neural networks with kdb+¶\nDue to the desire to understand the brain and mimic the way it works by creating machines that learn, neural networks have been studied with great interest for many decades. A simple mathematical model for the neuron was first presented to the world by Warren McCulloch and Walter Pitts in 1943.\nWith modern advances in technology and the computational power available, this field of research has had massive implications on how computers can be used to ‘think’ and learn to solve problems given an appropriate algorithm. A few interesting examples of this research in action include:\n-\nInterpreting art and painting images [Mordvintsev et al.] [Gatys et al.]\nA number of different algorithms have been developed around this field of research, and this paper is going to focus on the implementation of a feedforward neural network in kdb+. A feedforward network (also known as a multi-layer perceptron) is a type of supervised machine-learning algorithm which uses a series of nonlinear functions layered together with an output. It can be used for classification or regression purposes and has been shown to be a universal approximator – an algorithm that can model any smooth function given enough hidden units.\nReference\nSee Kurt Hornik, “Approximation Capabilities of Multilayer Feedforward Networks”, Neural Networks, Vol. 4, pp. 251-257, 1991\nThis design of feedforward networks can be represented through operations on matrices and vectors. 
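For instance, evaluating a single layer reduces to a matrix product followed by an element-wise threshold function. A minimal sketch, with purely illustrative names and random weights (the paper builds this construction up properly in the sections below):\nq)sigmoid:{1%1+exp neg x} / threshold function\nq)x:(0 0 1f;0 1 1f) / two input samples, three inputs each (including a bias input)\nq)w:3 4#12?1f / random weights: 3 inputs to 4 hidden neurons, so results will vary\nq)sigmoid x mmu w / hidden-layer activations, one row per input sample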
Array-programming languages such as q are well suited to computational implementations in this format due to the vectorized operations on lists.\nAll tests were run using kdb+ version 3.2 (2015.05.07)\nFeedforward networks¶\nAs mentioned in the introduction, a feedforward neural network is a type of supervised machine-learning algorithm. This means that it is trained on datasets for which the output for given inputs is already known.\nA neural network consists of a set of neurons connected together in some order. Each neuron receives inputs from other neurons, performs some action on these inputs and outputs a signal that becomes input for another connected neuron.\nThe perceptron¶\nA single neuron by itself is often referred to as a perceptron (Figure 1). The idea is to construct a predictor based on a linear combination of the inputs and a set of weights which control the impact an input has on the system. This linear combination (the dot product of the inputs and the weights) is then used as the input to a chosen threshold function which produces the desired output.\nFigure 1: A perceptron\nThreshold function¶\nA threshold function represents the activation of the perceptron based on its inputs. A common choice for this function is the sigmoid function.\nq)sigmoid:{1%1+exp neg x}\nq)output:sigmoid[inputs mmu weights]\nThis function provides a smooth curve bounded asymptotically by 0 and 1 on the vertical axis (Figure 2).\nFigure 2: A plot of the sigmoid function\nPerceptrons as linear predictors¶\nPerceptrons can only solve linearly-separable problems. A linearly-separable problem is one where two sets in a plane can be cleanly divided by a straight line. We can demonstrate this by looking at plots of truth table outputs.\nLet us say that a red dot represents true and a blue dot represents false. If we take the standard truth table inputs of 1 or 0 and add some random noise to them so that they are either slightly smaller than 1 or slightly larger than 0, then we get the plots shown in Figure 3.\nFigure 3: Truth Table Plots\nNotice how we can easily separate the true results from false in the AND and OR truth tables with a straight line. Perceptrons are good predictors for these problems. However, if the data is not so easily separable, as with the XOR plot, we need a way to be able to make nonlinear predictions. Solving the XOR problem will form our motivation for connecting many perceptrons into a network.\nA neural network¶\nA network consists of neurons connected together in layers as shown in Figure 4. Each neuron in one layer is connected to each neuron in the next layer and each connection has an associated weight. The first layer of neurons is the input layer. The neurons in this layer represent input values from a data sample.\nFollowing the input layer are the hidden layers. The number of neurons in a hidden layer is determined through a combination of background understanding of the problem and trial and error.\nFinally there is the output layer. The number of neurons in this layer is determined by the type of problem to be solved. For example one might wish to classify an input data sample into one of two categories (e.g. true or false as in the XOR problem). This type of problem is referred to as a binary classification problem. 
For this type of problem we only need one output neuron because the sigmoid function will return values close to 1 or 0 after the network is trained.\nHowever, the function which acts on the linear combination of inputs and weights at the output layer is not always the same as the threshold function used throughout the rest of the network. This is because we do not always desire a value between 0 and 1. Output functions for addressing different types of problems will be discussed in detail in Output functions for regression and multi-class classification.\nFigure 4: A feedforward network\nBias neurons¶\nA bias neuron is a neuron external to the main network. One is added to the input layer and one to each of the hidden layers. The value it passes to neurons in the next layer is always 1 and it receives no inputs from previous layers (see Figure 4). The purpose of bias neurons is analogous to the intercept parameter of a simple linear model – commonly written as\nThe absence of \\(\\beta_{0}x_{0}\\) in the simple linear model results in the predicted line always passing through (0, 0) and the model will perform poorly when attempting to predict unknown values. Hence we always set \\(x_{0}\\) to 1 and alter \\(\\beta_{0}\\) as we find the line of best fit. In the neural network we represent the network’s version of the \\(\\beta_{0}x_{0}\\) term as a bias neuron and associated weights. For more information on bias terms in statistical modelling see Chapter 3 in [2] and Chapter 7 in [1].\nBias neurons are added to the input layer by adding a 1 to each of the input samples in the data set used to train the network.\n// Inputs and expected target values for XOR problem\nq)input:((0 0f);(0 1f);(1 0f);(1 1f))\n// Add a bias neuron to each input\nq)input:input,’1.0\nq)target:0 1 1 0f\nWeight initialization¶\nThe initialization process begins by assigning values to the weights present in the network. The weights are randomly assigned such that the values of the weights between the input nodes and a receiving node on the next layer are in the range (-1, 1) with mean 0.\nSince there will be multiple neurons in the next layer we can represent all the weights between two layers as a matrix, where the number of rows represents the number of inputs and the number of columns represents the number of neurons in the next layer. An example of how this weight matrix and the input matrix interact is shown in Figure 5.\nwInit:{\n// If only one input neuron is detected exit\n// This is most likely due to a missing bias neuron\nif[1=x;:\"Number of input neurons must be greater than 1.\"];\nflip flip[r]-avg r:{[x;y]x?1.0}[y]each til x\n}\nInitialize weights between three inputs and four outputs. The first column represents the weights (connections) between the three inputs and the first neuron in the next layer. The second column is the weights leading to the second neuron in the next layer and so on.\nq)wInit[3;4]\n-0.3586151 0.09051553 -0.2815408 -0.05282783\n0.02154042 0.4219367 0.2320934 -0.05853578\n0.3370747 -0.5124522 0.04944742 0.1113636\nFigure 5: Diagram showing 2 input neurons (green and blue neurons)\nconnecting to 2 hidden neurons. The colors in the matrices correspond\nto the area of the network those values are found during execution\nof a forward pass.\nThe feedforward network in kdb+¶\nOnce we have prepared the input data and the weights they can be applied to the network to provide output. 
We will use the network to predict the outputs of the XOR function.\n// weights between input layer and hidden layer (2 inputs + 1 bias neuron)\nq)w:wInit[3;4]\n// weights between hidden layer and output layer (4 hidden neurons + 1 bias neuron)\nq)v:wInit[5;1]\nq)ffn:{[input;w;v]\n// Apply inputs and their weights to the hidden layer\nz:sigmoid[input mmu w];\n// Use output from hidden layer to generate an output\nsigmoid[z mmu v]\n}\nq)ffn[input;w;v]\n0.5028818\n0.5136649\n0.4891303\n0.5\nThe network has produced an output, but these values are not close to the target values. This is understandable as the weights have been randomly initialized. In order to produce the desired output the network must learn a more appropriate set of weights.\nTraining the network¶\nTraining a network to produce accurate results involves determining the weights which minimize the errors of the output nodes. The problem of minimizing the error by adjusting the weights is solved by a technique called back-propagation – a form of gradient descent.\nGradient descent begins by calculating the derivative of the error function with respect to the weights. This derivative gives information about the direction needed to move along the surface described by the error function in order to arrive at a minimum. The weights are gradually adjusted using this information until an acceptable minimum in error has been reached.\nBack-propagation¶\nBack-propagation trains the network by propagating information about the error at the output back through the network and adjusting all the weights of the connecting neurons. For an output node that applies the sigmoid function the error function is the cross-entropy error function defined as:\nThis gives us the following update rule for adjusting the weights between the output node and the hidden layer:\nwhere:\n- \\(z^{t}_h\\)\n-\nthe output after evaluating the hidden neuron \\(h\\) for input sample \\(t\\)\n- \\(v_h\\)\n-\nthe weight between the output neuron and hidden neuron \\(h\\)\n- \\(y_t\\)\n-\nthe target for sample \\(t\\)\n- \\(\\widehat{y}^t\\)\n-\nthe calculated output for sample \\(t\\)\n- \\(\\alpha\\)\n-\nthe rate at which we adjust the weights (usually < 0.1)\nUpdate rules\nThe derivation of the update rules for back-propagation is beyond the scope of this paper. 
See Chapter 11 in [3] and Chapter 11 in [2].\nOnce the change in the above weights has been calculated we propagate the error to the hidden layer and generate the update rule for the weights between the input layer and the hidden layer:\nwhere:\n- \\(w_{hj}\\)\n-\nthe weight between hidden neuron \\(h\\) and input neuron \\(j\\)\n- \\(x^t_j\\)\n-\nthe input from neuron \\(j\\) for some sample \\(t\\)\nUsing these formulas we can update our feedforward network function to implement back-propagation and allow training:\n// inputs - the input data set with bias node\n// targets – known outputs corresponding to inputs\n// lr – learning rate ‘alpha’\n// d – dictionary with 3 items: output\n// weights between input and hidden layers\n// weights between hidden and output layers\nffn:{[inputs;targets;lr;d]\nz:1.0,/:sigmoid[inputs mmu d`w];\no:sigmoid[z mmu d`v];\n// Error of output neurons\ndeltaO:(targets-o);\n// Error of hidden neurons\ndeltaZ:1_/:$[deltaO;flip d`v]*z*1-z;\n`o`v`w!(o;d[`v]+lr*flip[z] mmu deltaO;\nd[`w]+lr*flip[inputs] mmu deltaZ)\n}\n// Example – the XOR problem\nq)inputs\n0 0 1\n0 1 1\n1 0 1\n1 1 1\nq)targets\n0 1 1 0f\nq)w\n0.3257579 0.348099 -0.4320058 0.3356597\n-0.07237444 -0.3028193 0.3088185 -0.3069554\n-0.2533834 -0.04527963 0.1231874 -0.02870423\nq)v\n-0.0133154\n0.04739764\n0.2894549\n-0.3235371\n// Two training passes show little change\nq)finalResult:(ffn[inputs;targets;0.1]/)[2;`o`w`v!(0,();w;v)]\nq)finalResult`o\n0.5025557\n0.5133001\n0.4888265\n0.4996545\n// 10000 training passes shows significant improvement\nq)finalResult:(ffn[inputs;targets;0.1]/)[10000;`o`w`v!(0,();w;v)]\nq)finalResult`o\n0.009305227\n0.9890354\n0.9890087\n0.01142469\nq)\\ts finalResult:(ffn[inputs;targets;0.1]/)[10000;`o`w`v!(0,();w;v)]\n164 2992\nNow that the network has been trained it can be applied to a random\npermutation of XOR inputs to see how it performs on a single pass.\n// generate a random permutation of list 0 to 99\nq)rp:-100?til 100\n// use rp to shuffle inputs and targets\n// we generate rp first so that the shuffled indices of\n// the inputs and targets match up\nq)inputs:(100#inputs)rp\nq)targets:(100#targets)rp\n// using weights of trained network solve test inputs\nq)rw:finalResult`w; rv:finalResult`v\nq)res: (ffn[inputs;targets;0.1]/)[1;`o`w`v!(0;rw;rv)]\n// Are all the predictions correct?\nq)all raze[`int$res`o]=targets\n1b\nOutput functions for regression and multi-class classification¶\nThere are three types of output we are looking for when using a network of this type. The first we have already discussed in A neural network above – binary classification. We have also worked through an example of this problem when applying the network to predict XOR outputs. The other two problems we will discuss are multiclass outputs and nonlinear regression outputs.\nMulticlass outputs¶\nFor multiclass outputs the goal is to determine the correct classification of an input into three or more possible classifications. Unlike the binary classification situation, in the output layer there will be one neuron for each possible classification. The target values are transformed using one-hot encoding. This gives us a unique list of 0s and 1s for each possible classification that is the same length as the number of possible classifications. For example, if there are classifications A, B and C the transformations are 0 0 1, 0 1 0 and 1 0 0 – giving the output layer target patterns to match for training and testing. 
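The encoding itself is brief in q. A minimal sketch (this definition is illustrative; the oneHot helper used in the Iris example later is assumed to behave like this, but its exact definition is not reproduced in this excerpt):\nq)oneHot:{x!\"f\"$x=/:x:distinct x}\nq)oneHot `A`B`C\nA| 1 0 0\nB| 0 1 0\nC| 0 0 1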
The output function used is the softmax function:\nwhere\n- \\(\\widehat{y}^t_i\\)\n-\nthe output from neuron \\(i\\) for sample \\(t\\)\n- \\(S^t_i\\)\n-\nthe linear combination of outputs from the hidden layer and the weights connecting the hidden layer to output neuron \\(i\\) for sample \\(t\\)\n- \\(S^t_k\\)\n-\nthe linear combination of outputs from the hidden layer and the weights connecting the hidden layer to output neuron \\(k\\) for sample \\(t\\)\nBy using the softmax function we ensure that the sum of the outputs from each of the neurons in the output layer is 1. That allows us to pick the neuron with the highest output value as the most likely to be the classification we expect for the given input; the ‘winning’ neuron will be assigned a value of 1 and the other neurons a value of 0 resulting in a match to one of the one-hot encoded classifications. The cross-entropy error function in this case is:\nwhere \\(\\widehat{y}^t_i\\) is the target value for output neuron \\(i\\) with sample \\(t\\).\nThe update rules are:\nwhere \\(v_{ih}\\) is the weight between output neuron \\(i\\) and hidden neuron \\(h\\).\nAn example implementation of the softmax output is shown below in Classification for 3+ classes.\nNonlinear regression outputs¶\nIf the goal is to predict a real value, not necessarily constrained within the boundaries imposed by a threshold function, the output function is just the linear combination of the outputs from the hidden layer.\nwhere\n- \\(\\textbf{v}\\)\n-\nthe vector of weights between the hidden layer and the output layer\n- \\(\\textbf{z}^t\\)\n-\nthe vector of outputs from the hidden layer\nIn this case we change the error function from cross-entropy to the sum-of-squared errors:\nThe update rules for a regression output are:\nq)lin:{x}\nq)linErr:{0.5*sum sum a*a:x-y}\nIt’s useful now to put the different functions for error and output in\ndictionary format as this will allow us to use the same ffn\nfunction\nfor all 3 types of classification:\n// x is linear combination of hidden outputs and weights\noutputFuncs:`sig`smax`lin!\n({1%1+exp neg x};{exp[x]%sum flip exp x};{x})\n// x is target value, y is calculated\nerrFuncs:`sig`smax`lin!\n({neg sum sum flip(x*log y)+(1-x)*log 1-y};\n{neg sum sum flip x*log y};\n{sum sum a*a:x-y})\nffn:{[inputs;targets;lr;of;d]\n// Calculate the outputs of the hidden layer\n// and add bias node\nz:1.0,/:sigmoid[inputs mmu d`w];\no:outputFuncs[of][z mmu d`v];\n// Error of output neurons\ndeltaO:(targets-o);\n// Error of hidden neurons\n// Hidden bias node is not connected to any\n// input layer nodes so we drop it\ndeltaZ:1_/:$[deltaO;flip d`v]*z*1-z;\n`o`v`w`err!\n(o;\nd[`v]+lr*flip[z] mmu deltaO;\nd[`w]+lr*flip[inputs] mmu deltaZ;\nerrFuncs[of][targets;o])\n}\nClassification for 3+ classes¶\nAs an example, we will study a set of Iris flower data which was originally introduced into research by Ronald Fisher in 1936. It contains samples from three different species of the Iris flower and has become a standard test case for many classification techniques within machine learning. By taking measurements of certain metrics (eg. length and width of sepals) the plants can be classified and computationally distinguished from each other. The data and a description of the data can be found in the links at archive.ics.uci.edu\nWe one-hot encode the different possible species of Iris, resulting in a neural network with 5 inputs (including the bias neuron), 7 hidden neurons (including the bias neuron) and 3 outputs. 
The data set is randomly shuffled to reduce the likelihood of a biased output. From this randomized selection of the data a random selection of 20 samples is taken as the test set and the other 130 samples are used in training.\n// one-hot encoding of classes\nq)IrisOneH:oneHot[distinct Iris.species]\nq)IrisOneH\nIris-setosa | 1 0 0\nIris-versicolor| 0 1 0\nIris-virginica | 0 0 1\nq)Iris1h\nslength swidth plength pwidth species onehot\n------------------------------------------------\n5.1 3.5 1.4 0.2 Iris-setosa 1 0 0\n4.9 3 1.4 0.2 Iris-setosa 1 0 0\n4.7 3.2 1.3 0.2 Iris-setosa 1 0 0\n..\n// Random permutation of the dataset\nq)IrisRP:Iris1h{neg[x]?til x}count Iris1h\n// Pick a test set – samples from data not used in training\nq)IrisTest:IrisRP[-20?count IrisRP]\nq)IrisTrain:IrisRP except IrisTest\n// Init weights, input and output variables\nq)w:wInit[5;6]\nq)v:wInit[7;3]\nq)input:1.0,'flip flip[IrisTrain]`slength`swidth`plength`pwidth\nq)output:IrisTrain.onehot\n// Train network\nq)resIris:(ffn[input;output;0.01;`smax]/)[800;`o`w`v`err!(0;w;v;1f)]\n// After 800 iterations (or epochs) how well does it perform?\nq)all(IrisOneH?\"f\"$\"j\"$resIris`o)=IrisOneH?output\n0b\nq)100*sum[(IrisOneH?\"f\"$\"j\"$resIris`o)=IrisOneH?output]%count output\n96.89922\n// Init variables for test data\nq)tinput:1.0,'flip flip[IrisTest]`slength`swidth`plength`pwidth\nq)toutput:IrisTest.onehot\n// Run test data through network without training\nq)resIrisT:(ffn[tinput;toutput;0.01;`smax]/)[1;`o`w`v`err!(0;resIris`w;resIris`v;1f)]\nq)all (IrisOneH?\"f\"$\"j\"$resIrisT`o)=IrisOneH?toutput\n1b\nPlot of error vs training epoch is given in Figure 6. We see that the error settles into an oscillation pattern around training epoch 700. While these oscillations are slowly converging it is likely that overfitting begins to take place shortly after the 700th iteration of training.\nFigure 6: Error while training network on Iris dataset\nStochastic gradient descent¶\nOften, in practice, computing the error and gradient for the entire training set can be very slow and in some cases not possible if the data will not fit entirely in memory. Stochastic gradient descent solves this problem by performing the back-propagation training on small batches of the training set chosen at random.\nRandomly chosen batches simulate real-time streaming of data and help to reduce bias in the final trained network (see chapter 7 in [2] for more details on bias and variance in machine learning).\nClassification for 3+ classes using stochastic batch training¶\nThis time we will again use Fisher’s Iris dataset, but the network will be trained using randomly selected small batches from the training data.\nConclusion¶\nIn this white paper we have explored a proof-of-concept implementation of a feedforward network in kdb+. By constructing the model for the network using linear algebra (inputs, outputs and weights represented by matrices) we have shown that an array-processing language is well suited to developing a complex system.\nWe have shown that multiple types of output functions can be easily applied to the network depending on the desired result. 
This generalization demonstrates the adaptability of kdb+ to many problems where numerical data can be arranged into lists, arrays or tables.\nIn the event that there is not enough main memory to carry out the calculations on all the training data in one pass we presented an alternative approach, stochastic gradient descent, which allows the network to be trained on small batches of the data.\nThe network exhibited in this paper forms the groundwork for more complicated networks. Adding additional hidden layers and different options for threshold function will allow more complex convolutional and deep networks to be developed.\nAll tests were run using kdb+ version 3.2 (2015.05.07)\nAuthor¶\nJames Neill works as a kdb+ consultant for one of the world’s largest investment banks, developing a range of applications. James has also been involved in the design of training courses in data science and machine learning as part of the First Derivatives training programme.\nReferences¶\n-\nMurphy, K. P. (2012). Machine Learning: a probabilistic perspective. MIT Press.\n-\nHastie, T., Tibshirani, R. and Friedman, J. The Elements of Statistical Learning. Springer, New York. (Online version)\n-\nAlpaydin, E. Introduction to Machine Learning, Second Edition. MIT Press."}}},{"rowIdx":96,"cells":{"text":{"kind":"string","value":"// @kind function\n// @category main\n// @subcategory new\n//\n// @overview\n// Generates a new named experiment within the specified registry without\n// adding a model on-prem or within a supported cloud providers storage solution\n//\n// @todo\n// It should be possible via configuration to add descriptive information\n// about an experiment.\n//\n// @param folderPath {dict|string|null} Registry location, can be:\n// 1. A dictionary containing the vendor and location as a string, e.g.\n// ```enlist[`local]!enlist\"myReg\"``` or\n// ```enlist[`aws]!enlist\"s3://ml-reg-test\"``` etc;\n// 2. A string indicating the local path;\n// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.\n// @param experimentName {string} The name of the experiment to be located\n// under the namedExperiments folder which can be populated by new models\n// associated with the experiment\n// @param config {dict|null} Any additional configuration needed for\n// initialising the experiment\n//\n// @return {dict} Updated config dictionary containing relevant\n// registry paths\nregistry.new.experiment:{[folderPath;experimentName;config]\n config:registry.util.check.config[folderPath;config];\n if[not`local~storage:config`storage;storage:`cloud];\n experimentName:registry.util.check.experiment experimentName;\n registry[storage;`new;`experiment][experimentName;config]\n }\n\n\n================================================================================\nFILE: ml_ml_registry_q_main_query.q\nSIZE: 1,717 characters\n================================================================================\n\n// query.q - Main callable functions for querying the modelStore\n// Copyright (c) 2021 Kx Systems Inc\n//\n// @overview\n// Querying the modelStore table. Currently, the below features can\n// be referenced by users to query the modelStore table:\n// 1. registrationTime\n// 2. experimentName\n// 3. modelName\n// 4. modelType\n// 5. version\n// 6. 
uniqueID\n//\n// @category Model-Registry\n// @subcategory Functionality\n//\n// @end\n\n\\d .ml\n\n// @kind function\n// @category main\n// @subcategory query\n//\n// @overview\n// Query the modelStore\n//\n// @param folderPath {dict|string|null} Registry location, can be:\n// 1. A dictionary containing the vendor and location as a string, e.g.\n// ```enlist[`local]!enlist\"myReg\"``` or\n// ```enlist[`aws]!enlist\"s3://ml-reg-test\"``` etc;\n// 2. A string indicating the local path;\n// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.\n// @param config {dict} Any additional configuration needed for\n// retrieving the modelStore. Can also be empty dictionary `()!()`.\n//\n// @return {table} Most recent version of the modelStore\nregistry.query.modelStore:{[folderPath;config]\n if[config~(::);config:()!()];\n // Retrieve entire modelStore\n modelStore:registry.get.modelStore[folderPath;config];\n // If no user-defined config return entire modelStore\n k:`modelName`experimentName`modelType`version`registrationTime`uniqueID;\n if[not any k in key config;:modelStore];\n // Generate where clause and query modelStore\n keys2check:(`modelName`experimentName`modelType;enlist`version;`registrationTime`uniqueID);\n whereClause:registry.util.query.checkKey[config]/[();keys2check;(like;{all each x=\\:y};=)];\n ?[modelStore;whereClause;0b;()]\n }\n\n\n================================================================================\nFILE: ml_ml_registry_q_main_set.q\nSIZE: 11,054 characters\n================================================================================\n\n// set.q - Main callable functions for adding information to the model registry\n// Copyright (c) 2021 Kx Systems Inc\n//\n// @overview\n// Setting items within the registry including\n// 1. Models:\n// - q (functions/projections/appropriate dictionaries)\n// - Python (python functions + sklearn/keras specific functionality)\n// 2. Configuration\n// 3. Model information table\n//\n// @category Model-Registry\n// @subcategory Functionality\n//\n// @end\n\n\\d .ml\n\n// @kind function\n// @category main\n// @subcategory set\n//\n// @overview\n// Add a q object, Python function, Keras model or sklearn model\n// to the registry so that it can be retrieved and applied to new data.\n// In the current iteration there is an assumption of complete\n// independence for the q functions/files i.e. q function/workflows\n// explicitly don't use Python to make it easier to store and generate\n// reintroduce models\n//\n// @todo\n// Improve the configuration information that is being persisted\n// presently this contains all information within the config folder\n// however this is not particularly informative and may be confusing\n//\n// @param folderPath {dict|string|null} Registry location, can be:\n// 1. A dictionary containing the vendor and location as a string, e.g.\n// ```enlist[`local]!enlist\"myReg\"``` or\n// ```enlist[`aws]!enlist\"s3://ml-reg-test\"``` etc;\n// 2. A string indicating the local path;\n// 3. 
A generic null to use the current .ml.registry.location pulled from CLI/JSON.\n// @param experimentName {string|null} Name of experiment model belongs to\n// @param model {any} `(<|dict|fn|proj)` Model to be saved to the registry.\n// @param modelName {string} The name to be associated with the model\n// @param modelType {string} The type of model that is being saved, namely\n// \"q\"|\"sklearn\"|\"keras\"|\"python\"\n// @param config {dict} Any additional configuration needed for\n// setting the model\n//\n// @return {null}\nregistry.set.model:{[folderPath;experimentName;model;modelName;modelType;config]\n config:registry.util.check.config[folderPath;config];\n if[not`local~storage:config`storage;storage:`cloud];\n experimentName:$[(any experimentName ~/: (::;\"\"))|10h<>abs type experimentName;\n \"undefined\";\n experimentName\n ];\n c:registry[storage;`set;`model][experimentName;model;modelName;modelType;config];\n first c`uniqueID\n }\n\n// @kind function\n// @category main\n// @subcategory set\n//\n// @overview\n// Add a q object to the registry. This should be a q object in the\n// current process which is either a function/projection/dictionary\n// containing a predict key\n//\n// @param registryPath {string} Full/relative path to the model registry\n// @param model {any} `(dict|fn|proj)` Model to be saved to the registry.\n// @param config {dict} Information relating to the model that is\n// to be saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.set.object:{[typ;registryPath;model;config]\n toSet:$[type[model]in 10 11 -11h;\"File\";\"Model\"];\n registry.util.set[`$typ,toSet][registryPath;model;config]\n }\n\n// @kind function\n// @category main\n// @subcategory set\n//\n// @overview\n// Set the configuration associated with a specified model version such\n// that all relevant information needed to redeploy the model is present\n// with a packaged model\n//\n// @param config {dict} Information relating to the model\n// being saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.set.modelConfig:{[model;modelType;config]\n safeWrite:{[config;path]\n if[not count key hsym `$config[`versionPath],\"/config/\",path,\".json\";\n registry.util.set.json[config;`config;path;enlist config]\n ]};\n $[99h=type model;\n $[not ((\"q\"~modelType)&((`predict in key model)|(`modelInfo in key model)));\n {[safeWrite;config;sym;model]\n safeWrite[config;string[sym],\"/modelInfo\"]\n }[safeWrite;config]'[key model;value model];\n safeWrite[config;\"modelInfo\"]];\n safeWrite[config;\"modelInfo\"]\n ]\n }\n\n// @kind function\n// @category main\n// @subcategory set\n//\n// @overview\n// Set the configuration associated with monitoring a specified model version\n// such that all relevant information needed to monitor the model is present\n// with a packaged model\n//\n// @param model {any} `(<|dict|fn|proj)` Model to be monitored.\n// @param modelType {string} The type of model that is being saved, namely\n// \"q\"|\"sklearn\"|\"keras\"\n// @param data {table} Historical data to understand model behaviour\n// @param config {dict} Information relating to the model\n// being saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.set.monitorConfig:{[model;modelType;data;config]\n func : {[sym;model;modelType;data;config]\n if[not 98h~type data;:(::)];\n $[sym~(::);\n newConfig:.j.k raze read0 hsym `$config[`versionPath],\"/config/modelInfo.json\";\n newConfig:.j.k raze read0 hsym 
`$config[`versionPath],\"/config/\",string[sym],\"/modelInfo.json\"\n ];\n newConfig[`monitoring;`schema;`values]:registry.util.create.schema data;\n newConfig[`monitoring;`schema;`monitor]:1b;\n newConfig[`monitoring;`nulls;`values]:registry.util.create.null data;\n newConfig[`monitoring;`nulls;`monitor]:1b;\n newConfig[`monitoring;`infinity;`values]:registry.util.create.inf data;\n newConfig[`monitoring;`infinity;`monitor]:1b;\n newConfig[`monitoring;`latency;`values]:registry.util.create.latency[model;modelType;data];\n newConfig[`monitoring;`latency;`monitor]:1b;\n newConfig[`monitoring;`csi;`values]:registry.util.create.csi data;\n newConfig[`monitoring;`csi;`monitor]:1b;\n newConfig[`monitoring;`psi;`values]:registry.util.create.psi[model;modelType;data];\n newConfig[`monitoring;`psi;`monitor]:1b;\n params:`maxDepth`indent!(10;\" \");\n $[sym~(::);\n (hsym `$config[`versionPath],\"/config/modelInfo.json\") 0: enlist .j.j newConfig;\n (hsym `$config[`versionPath],\"/config/\",string[sym],\"/modelInfo.json\") 0: enlist .j.j newConfig]\n }[;;modelType;;config];\n $[all 99h=(type[model];type[data]);\n [k:key[model] inter key[data];func'[k;model k;data k]];\n not 99h=type[model];\n func[::;model;data];\n '\"data to fit monitoring statistics is not partitioned on model key\"\n ]\n }\n\n// @kind function\n// @category main\n// @subcategory set\n//\n// @overview\n// Set the configuration associated with supervised monitoring\n//\n// @param config {dict} Information relating to the model\n// being saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.set.superviseConfig:{[model;config]\n func:{[sym;model;config]\n $[sym~(::);\n newConfig:.j.k raze read0 hsym `$config[`versionPath],\"/config/modelInfo.json\";\n newConfig:.j.k raze read0 hsym `$config[`versionPath],\"/config/\",string[sym],\"/modelInfo.json\"\n ];\n newConfig[`monitoring;`supervised;`values]:config `supervise;\n newConfig[`monitoring;`supervised;`monitor]:1b;\n params:`maxDepth`indent!(10;\" \");\n $[sym~(::);\n (hsym `$config[`versionPath],\"/config/modelInfo.json\") 0: enlist .j.j newConfig;\n (hsym `$config[`versionPath],\"/config/\",string[sym],\"/modelInfo.json\") 0: enlist .j.j newConfig\n ];\n }[;;config];\n $[99h~type[model];\n func'[key[model];value[model]];\n func[::;model]]\n }\n\n// @kind function\n// @category main\n// @subcategory set\n//\n// @overview\n// Upsert relevant data from current run to modelStore\n//\n// @param config {dict} Information relating to the model\n// being saved, this includes version, experiment and model names\n//\n// @return {null}\nregistry.set.modelStore:{[config]\n enlistCols:`experimentName`modelName`modelType`version`description;\n regularCols:`registrationTime`uniqueID!config`registrationTime`uniqueID;\n experimentName:config`experimentName;\n experimentName:$[0h=type experimentName;;enlist]experimentName;\n modelName:enlist config`modelName;\n modelType:config`modelType;\n modelType:enlist$[-10h=type modelType;enlist;]modelType;\n description:config`description;\n if[0=count description;description:\"\"];\n description:enlist$[-10h=type description;enlist;]description;\n version:enlist config`version;\n info:regularCols,enlistCols!\n (experimentName;modelName;modelType;version;description);\n // check if model already exists\n whereClause:enlist (&;(&;(~\\:;`version;config[`version]);(~\\:;`modelName;config[`modelName]));\n (~\\:;`experimentName;config[`experimentName]));\n columns:enlist `uniqueID;\n if[not count 
?[config[`modelStorePath];whereClause;0b;columns!columns]`uniqueID;\n config[`modelStorePath]upsert flip info\n ];\n }"}}},{"rowIdx":97,"cells":{"text":{"kind":"string","value":"// @private\n// @kind function\n// @category optimizationUtility\n// @desc Optimize a function until gradient tolerance is reached or\n// maximum number of allowed iterations is met. The following outlines a\n// python equivalent\n// https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/optimize.py#L1131\n// @param func {fn} Function to be minimized\n// @param optimDict {dictionary} Variables to be updated at each iteration of\n// optimization\n// @param args {any} Arguments to the optimization function that do not \n// change per iteration \n// @param params {dictionary} Parameters controlling non default optimization \n// behaviour\n// @return {dictionary} Variables, gradients, matrices and indices at the end \n// of each iteration\ni.BFGSFunction:{[func;optimDict;args;params] \n // Calculate search direction\n pk:neg mmu[optimDict`hess;optimDict`gk];\n // Line search func to be inserted to get alpha\n wolfe:i.wolfeSearch[;;;pk;func;;args;params]. optimDict`fk`fkPrev`gk`xk;\n // Old fk goes to previous val\n optimDict[`fkPrev]:optimDict`fk;\n // Update values based on wolfe line search\n alpha:wolfe 0;\n optimDict[`fk]:wolfe 1;\n gNew:wolfe 2;\n // Redefine the x value at k-1 to the current x value\n optimDict[`xkPrev]:optimDict`xk;\n // Calculate the step distance for moving from x(k-1) -> x(k)\n sk:alpha*pk;\n // Update values of x at the new position k\n optimDict[`xk]:optimDict[`xkPrev]+sk;\n // If null gNew, then get gradient of new x value\n if[any null gNew;gNew:i.grad[func;optimDict`xk;args;params`geps]];\n // Subtract new gradients\n yk:gNew-optimDict`gk;\n optimDict[`gk]:gNew;\n // Get new norm of gradient\n optimDict[`gnorm]:i.vecNorm[optimDict`gk;params`norm];\n // Calculate new hessian matrix for next iteration \n rhok:1%mmu[yk;sk];\n if[0w=rhok;\n rhok:1000f;\n -1\"Division by zero in calculation of rhok, assuming rhok large\";\n ];\n A1:optimDict[`I]-sk*\\:yk*rhok;\n A2:optimDict[`I]-yk*\\:sk*rhok;\n hessMul:mmu[A1;mmu[optimDict`hess;A2]];\n optimDict[`hess]:hessMul+rhok*(sk*/:sk);\n // if x(k) returns infinite value update gnorm and fk\n if[0w in abs optimDict`xk;optimDict[`gnorm`fk]:(0n;0w)];\n optimDict[`idx]+:1;\n if[params`display;show optimDict;-1\"\";];\n optimDict\n }\n\n// @private\n// @kind function\n// @category optimizationUtility\n// @desc Complete a line search across an unconstrained minimization\n// problem making use of wolfe conditions to constrain the search. 
The naming\n// convention for dictionary keys in this implementation is based on the \n// python implementation of the same functionality here\n// https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/linesearch.py#L193\n// @param fk {float} Function return evaluated at position k\n// @param fkPrev {float} Function return evaluated at position k-1\n// @param gk {float} Gradient at position k\n// @param pk {float} Search direction\n// @param func {fn} Function being optimized \n// @param xk {number[]} Parameter values at position k\n// @param args {dictionary|number[]} Function arguments that do not change per \n// iteration\n// @param params {dictionary} Parameters controlling non default optimization\n// behaviour\n// @return {number[]} New alpha, fk and derivative values\ni.wolfeSearch:{[fk;fkPrev;gk;pk;func;xk;args;params]\n phiFunc :i.phi[func;pk;;xk;args];\n derPhiFunc:i.derPhi[func;params`geps;pk;;xk;args];\n // Initial Wolfe conditions\n wolfeKeys:`idx`alpha0`phi0`phia0;\n wolfeVals:(0;0;fk;fk);\n wolfeDict:wolfeKeys!wolfeVals;\n // Calculate the derivative at that phi0\n derPhi0:gk mmu pk;\n wolfeDict[`derPhia0`derPhi0]:2#derPhi0;\n // Calculate step size this should be 0 < x < 1 \n // with min(x;maxstepsize) or 1f otherwise\n alpha:1.01*2*(fk-fkPrev)%derPhi0;\n alphaVal:$[alpha within 0 1f;min(alpha;params`stepSize);1f];\n wolfeDict[`alpha1]:alphaVal;\n // function value at alpha1\n wolfeDict[`phia1]:phiFunc wolfeDict`alpha1;\n // Repeat until wolfe criteria is reached or max iterations have been done\n // to get new alpha, phi and derPhi values\n wolfeDict:i.stopWolfe[;params]\n i.scalarWolfe[derPhiFunc;phiFunc;pk;params]/wolfeDict;\n // if the line search did not converge, use last alpha , phi and derPhi\n $[not any null raze wolfeDict`alphaStar`phiStar`derPhiStar;\n wolfeDict`alphaStar`phiStar`derPhiStar;\n wolfeDict`alpha1`phia1`derPhia0Fin\n ]\n }\n\n// @private\n// @kind function\n// @category optimizationUtility\n// @desc Apply a scalar search to find an alpha value that satisfies\n// strong Wolfe conditions, a python implementation of this is outlined here\n// https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/linesearch.py#L338\n// This functions defines the bounds between which the step function can \n// be found. When the optimal bound is found, the area is zoomed recursively\n// until the optimal value is found\n// @param derPhiFunc {fn} Function to calculate the value of the objective\n// function derivative at alpha\n// @param phiFunc {fn} Function to calculate the value of the objective\n// function at alpha\n// @param pk {float} Search direction\n// @param params {dictionary} Parameters controlling non default optimization\n// behaviour\n// @param wolfeDict {dictionary} All data relevant to the calculation of the \n// optimal alpha values \n// @returns {dictionary} New alpha, fk and derivative values\ni.scalarWolfe:{[derPhiFunc;phiFunc;pk;params;wolfeDict]\n // Set up zoom function constant params\n zoomSetup:i.zoomFunc[derPhiFunc;phiFunc;;;params]. 
wolfeDict`phi0`derPhi0;\n // If criteria 1 is met, zoom and break loop\n if[i.wolfeCriteria1[wolfeDict;params];\n wolfeDict[`idx]:0w;\n wolfeVals:wolfeDict`alpha0`alpha1`phia0`phia1`derPhia0;\n updZoom:zoomSetup wolfeVals;\n wolfeDict[i.zoomReturn]:updZoom;\n :wolfeDict\n ];\n // Calculate the derivative of the function at the new position\n derPhiCalc:derPhiFunc wolfeDict`alpha1;\n // Update the new derivative function\n wolfeDict[`derPhia1]:derPhiCalc`derval;\n $[i.wolfeCriteria2[wolfeDict;params];\n [wolfeDict[`alphaStar]:wolfeDict`alpha1;\n wolfeDict[`phiStar]:wolfeDict`phia1;\n wolfeDict[`derPhiStar]:derPhiCalc`grad;\n wolfeDict[`idx]:0w;\n wolfeDict\n ];\n 0<=wolfeDict`derPhia1;\n [wolfeDict[`idx]:0w;\n updZoom:zoomSetup wolfeDict`alpha1`alpha0`phia1`phia0`derPhia1;\n wolfeDict[i.zoomReturn]:updZoom \n ];\n // Update dictionary and repeat process until criteria is met\n [wolfeDict[`alpha0]:wolfeDict`alpha1;\n wolfeDict[`alpha1]:2*wolfeDict`alpha1;\n wolfeDict[`phia0]:wolfeDict`phia1;\n wolfeDict[`phia1]:phiFunc wolfeDict`alpha1;\n wolfeDict[`derPhia0]:wolfeDict`derPhia1;\n wolfeDict[`derPhia0Fin]:derPhiCalc`grad;\n wolfeDict[`idx]+:1\n ]\n ];\n wolfeDict\n }\n\n// @private\n// @kind function\n// @category optimizeUtility\n// @desc Function to apply 'zoom' iteratively during linesearch to find\n// optimal alpha value satisfying strong Wolfe conditions\n// @param derPhiFunc {fn} Function to calculate the value of the objective\n// function derivative at alpha\n// @param phiFunc {fn} Function to calculate the value of the objective\n// function at alpha\n// @param phi0 {float} Value of function evaluation at x(k-1)\n// @param derPhi0 {float} Value of objective function derivative at x(k-1)\n// @param params {dictionary} Parameters controlling non default optimization \n// behaviour\n// @param cond {number[]} Bounding conditions for alpha, phi and derPhi used in \n// zoom algorithm\n// @returns {number[]} New alpha, fk and derivative values\ni.zoomFunc:{[derPhiFunc;phiFunc;phi0;derPhi0;params;cond]\n zoomDict:i.zoomKeys!cond,phi0;\n zoomDict[`idx`aRec]:2#0f;\n zoomDict:i.stopZoom[;params]\n i.zoom[derPhiFunc;phiFunc;phi0;derPhi0;params]/zoomDict;\n // If zoom did not converge, set to null\n $[count star:zoomDict[i.zoomReturn];star;3#0N]\n }\n\n// @private\n// @kind function\n// @category optimizeUtility\n// @desc Function to apply an individual step in 'zoom' during \n// linesearch to find optimal alpha value satisfying strong Wolfe conditions.\n// An outline of the python implementation of this section of the algorithm \n// can be found here\n// https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/linesearch.py#L556\n// @param derPhiFunc {fn} Function to calculate the value of the objective \n// function derivative at alpha\n// @param phiFunc {fn} Function to calculate the value of the objective \n// function at alpha\n// @param phi0 {float} Value of function evaluation at x(k-1)\n// @param derPhi0 {float} Value of objective function derivative at x(k-1)\n// @param params {dictionary} Parameters controlling non default optimization \n// behaviour\n// @param zoomDict {dictionary} Parameters to be updated as 'zoom' procedure is\n// applied to find the optimal value of alpha\n// @returns {dictionary} Parameters calculated for an individual step in line\n// search procedure to find optimal alpha value satisfying strong Wolfe \n// conditions\ni.zoom:{[derPhiFunc;phiFunc;phi0;derPhi0;params;zoomDict]\n alphaDiff:zoomDict[`aHi]-zoomDict`aLo;\n // define high and low values\n 
highLowVal:$[alphaDiff>0;zoomDict`aHi`aLo;zoomDict`aLo`aHi];\n highLow:`high`low!highLowVal;\n if[\"i\"$zoomDict`idx;\n cubicCheck:alphaDiff*0.2;\n findMin:i.cubicMin . zoomDict`aLo`phiLo`derPhiLo`aHi`phiHi`aRec`phiRec\n ];\n if[i.quadCriteria[findMin;highLow;cubicCheck;zoomDict];\n quadCheck:0.1*alphaDiff;\n findMin:i.quadMin . zoomDict`aLo`phiLo`derPhiLo`aHi`phiHi;\n lowerCheck:findMin<highLow[`high]+quadCheck;\n upperCheck:findMin>highLow[`low]-quadCheck;\n if[upperCheck|lowerCheck;\n findMin:zoomDict[`aLo]+0.5*alphaDiff\n ]\n ];\n // Update new values depending on findMin\n phiMin:phiFunc[findMin];\n // First condition, update and continue loop\n if[i.zoomCriteria1[phi0;derPhi0;phiMin;findMin;zoomDict;params];\n zoomDict[`idx]+:1;\n zoomDict[i.zoomKeys1]:zoomDict[`phiHi`aHi],findMin,phiMin;\n :zoomDict\n ];\n // Calculate the derivative at the cubic minimum\n derPhiMin:derPhiFunc findMin;\n // Second scenario, create new features and end the loop\n $[i.zoomCriteria2[derPhi0;derPhiMin;params];\n [zoomDict[`idx]:0w;\n zoomDict:zoomDict,i.zoomReturn!findMin,phiMin,enlist derPhiMin`grad\n ];\n i.zoomCriteria3[derPhiMin;alphaDiff];\n [zoomDict[`idx]+:1;\n zoomDict[i.zoomKeys1,i.zoomKeys2]:zoomDict[`phiHi`aHi`aLo`phiLo],\n findMin,phiMin,derPhiMin`derval\n ];\n [zoomDict[`idx]+:1;\n zoomDict[i.zoomKeys3,i.zoomKeys2]:zoomDict[`phiLo`aLo],\n findMin,phiMin,derPhiMin`derval\n ]\n ];\n zoomDict\n }\n\n// Vector norm calculation"}}},{"rowIdx":98,"cells":{"text":{"kind":"string","value":"// Find offset of central directory signature in a zip vector.\n// Assumes last match is valid; more sophisticated algos are possible,\n// but they can be implemented as needed.\n// @param x bytes\n// @return long\n.finos.unzip.priv.ovcds:{\n last(\"c\"$x)ss\"c\"$0x504b0506}\n\n// Find offset of central directory signature in a zip file.\n// Implemented via sliding four-byte read starting at end of file.\n// Assumes last match is valid; more sophisticated algos are possible,\n// but they can be implemented as needed.\n// @param x hsym\n// @return long\n.finos.unzip.priv.ofcds:{\n c:hcount x;\n r:{(not 0x504b0506~y 0)&x>=y 1}[c]{(read1(x;y-z 1;4);1+z 1)}[x;c]/(0x00000000;0);\n $[0x504b0506~r 0;1+c-r 1;0N]}\n\n// Find offset of zip64 end of central directory locator signature in a zip vector.\n// Assumes last match is valid; more sophisticated algos are possible,\n// but they can be implemented as needed.\n// @param x bytes\n// @return long\n.finos.unzip.priv.ovecls64:{\n last(\"c\"$y)ss\"c\"$0x504b0607}\n\n// Find offset of zip64 end of central directory locator signature in a zip file.\n// Implemented via sliding four-byte read starting at end of file.\n// Assumes last match is valid; more sophisticated algos are possible,\n// but they can be implemented as needed.\n// @param x hsym\n// @return long\n.finos.unzip.priv.ofecls64:{\n c:hcount x;\n r:{(not 0x504b0607~y 0)&x>y 1}[c]{(read1(x;y-z 1;4);1+z 1)}[x;c]/(0x00000000;0);\n $[0x504b0607~r 0;1+c-r 1;0N]}\n\n// Extract one file from an archive using unzip(1).\n// @param x hsym\n// @param y sym\n// @return character vector\n.finos.unzip.priv.unzip_system:{\n f:hsym`$first system\"mktemp\";\n system\"(unzip -p \\\"\",(1_string x),\"\\\" \\\"\",(string y),\"\\\" >\",(1_string f),\")\";\n r:\"c\"$read1 f;\n hdel f;\n r}\n\n// Perform various zip-related operations.\n// Possible values for x, and expected z arg in each case:\n// `list: List files in an archive.\n// z: ignored\n// `unzip: Extract (specific file(s) from) an archive.\n// z: sym, sym vector, or (::) to 
unzip all files
// See https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT,
// https://users.cs.jmu.edu/buchhofp/forensics/formats/pkzip.html,
// https://fossies.org/linux/zip/proginfo/extrafld.txt, etc.
// @param x sym
// @param y hsym, character vector, or byte vector
// @param z see above
// @return dictionary of filenames and character vectors
.finos.unzip.priv.unzip:{
  if[not x in`list`unzip;
    '`domain;
    ];

  / accept chars
  if[10h=type y;
    y:"x"$y;
    ];

  / accept hsym and bytes
  if[$[-11h=t:type y;not":"=first string y;4h<>t];
    '`type;
    ];

  if[`unzip=x;
    if[not(11h=abs type z)|z~(::);
      '`domain;
      ];
    ];

  .finos.log.info"processing ",$[-11h=t;1_string y;"archive"];

  / get byte count
  c:.finos.unzip.priv.bcount y;

  / look for central directory signature
  cds:$[4h=t;.finos.unzip.priv.ovcds;.finos.unzip.priv.ofcds]y;
  if[null cds;
    '"no central directory signature";
    ];

  / parse end-of-central-directory record
  ecd:.finos.unzip.priv.pecd .finos.unzip.priv.bytes[y;cds;c-cds];

  / punt on multi-disk archives
  if[0<>ecd`dnu;'`nyi];
  if[0<>ecd`dcd;'`nyi];

  / bytes of central directory record
  cd:exec .finos.unzip.priv.bytes[y;cof;csz]from
    $[
      -1=ecd`cof; / zip64
      [
        / look for zip64 end of central directory locator signature
        ecls64:$[4h=t;.finos.unzip.priv.ovecls64;.finos.unzip.priv.ofecls64]y;
        if[null ecls64;
          '"no end of central directory locator";
          ];

        / parse zip64 end-of-central-directory locator record
        ecl64:.finos.unzip.priv.pecl64 .finos.unzip.priv.bytes[y;ecls64;c-ecls64];

        / parse zip64 end-of-central-directory record
        ecd64:.finos.unzip.priv.pecd64 .finos.unzip.priv.bytes[y;ecl64`cof;12+.finos.unzip.priv.parseNum .finos.unzip.priv.bytes[y;4+ecl64`cof;8]]];
      ecd];

  / check for empty zip
  if[not count cd;
    :$[
      `list=x;
      ([name:0#`]size:0#0Ni;timestamp:0#0Np);
      `unzip=x;
      $[
        -11h=type z;
        [
          .finos.log.error(string z),": file not found in archive";
          'z;
          ];
        11h=type z;
        [
          {.finos.log.error(string x),": file not found in archive"}each z;
          'first z;
          ];
        z~(::);
        ((0#`)!())];
      '`domain];
    ];

  / start of central directory
  scd:$[-1=ecd`cof;ecd64;ecd]`cof;

  / parse central directory
  .finos.log.debug"parsing central directory";
  cd:.finos.unzip.priv.parse[(.finos.unzip.priv.pcd;.finos.unzip.priv.wcd);cd;count cd];
  .finos.log.debug"done parsing central directory";

  / apply extra field
  cd:.finos.unzip.priv.axfd[(enlist`context)!enlist`cd]each cd;

  r:$[
    `list=x;
    [
      1!select name:fnm,size:usz,timestamp:mdt+mtm from cd];
    `unzip=x;
    [
      / calculate next offsets
      cd:update nof:scd^next lof from cd;

      / apply file filter, if any
      if[not z~(::);
        cd:select from cd where fnm in z;
        if[count e:exec(raze z)except fnm from cd;
          {.finos.log.error(string x),": file not found in archive"}each e;
          'first e;
          ];
        ];

      / parse file data
      fd:$[
        .finos.unzip.filescan;
        [
          / read file if neccesary
          if[-11h=type y;
            y:read1 y;
            ];

          / trim any leading garbage
          y:(exec min lof from cd)_y;

          / extract all files
          .finos.unzip.priv.parse[(.finos.unzip.priv.pfd;.finos.unzip.priv.wfd;z);y;scd-exec min lof from cd]];
        [
          / extract each file mentioned in the central directory
          f:{[w;x;y;z]
            h:.finos.unzip.priv.split[w;0].finos.unzip.priv.bytes[x;y`lof;sum w];
            first .finos.unzip.priv.pfd[(.finos.unzip.priv.bytes[x;y`lof;z-y`lof];::);sum w;h]};

          / assume the end of the last file is the beginning of the central directory
          / might be wrong if archive decryption header and/or archive extra data record are present?
          cd f[.finos.unzip.priv.wfd;y]'exec nof from cd]];

      r:exec fnm!fdu from fd;

      r:$[
        11h=type z;
        z#r;
        -11h=type z;
        r z;
        r];

      if[.finos.unzip.verify&-11h=type y;
        .finos.log.info"verifying";
        v:r~$[
          -11h=type z;
          .finos.unzip.priv.unzip_system[y]z;
          {y!x y}[y .finos.unzip.priv.unzip_system/:]key r];
        if[not v;
          break;
          '`parse;
          ];
        .finos.log.info"verified";
        ];
      r];
    '`domain];

  r}


// Public API

// Set to true to verify extraction against unzip(1).
// N.B. will not work if .finos.unzip.unzip is called from a thread.
// N.B. will not work in file scan mode.
.finos.unzip.verify:0b

// Set to true to extract files via file scan, rather than by using the
// central directory.
// N.B. currently, will likely fail for data-descriptor-based archives
.finos.unzip.filescan:0b

// List files in an archive.
// @param x hsym, character vector, or byte vector
// @return table of filenames and file metadata
.finos.unzip.list:{.finos.unzip.priv.unzip[`list;x;::]}

// Unzip an archive.
// @param x hsym, character vector, or byte vector
// @return dictionary of filenames and character vectors
.finos.unzip.unzip:{.finos.unzip.priv.unzip[`unzip;x;::]}

// Unzip specific files from an archive.
// @param x hsym, character vector, or byte vector
// @param y sym vector
// @return dictionary of filenames and character vectors
.finos.unzip.unzip2:{.finos.unzip.priv.unzip[`unzip;x;y]}


================================================================================
FILE: kdb_q_util_util.q
SIZE: 3,389 characters
================================================================================

// General-purpose utility functions.

///
// read0, but compatible with non-seekable files (fifos, /proc, etc.).
// @param x file symbol
// @return A list of strings containing the contents of the file.
// @see read0
.finos.util.read0f:{r:{y,read0 x}[h:hopen`$":fifo://",1_string x]over();hclose h;r}

///
// read1, but compatible with non-seekable files (fifos, /proc, etc.).
// @param x file symbol
// @return A byte vector containing the contents of the file.
// @see read1
.finos.util.read1f:{r:{y,read1 x}[h:hopen`$":fifo://",1_string x]over();hclose h;r}

.finos.util.compose:('[;])/

// create a list. e.g. list(`a;1) -> (`a;1)
// allows a trailing delimiter, e.g.
// list(
//   `a;
//   1;
//   )
.finos.util.list:{$[104h=type x;1_-1_get x;x]}

// create a dictionary. e.g. dict (1;2;3;4) -> 1 3!2 4
.finos.util.dict:{(!) . flip 2 cut .finos.util.list x}

// create a table. e.g. table[`x`y;(1;2;3;4)] -> ([]x:1 3;y:2 4)
.finos.util.table:{flip x!flip(count x)cut .finos.util.list y}


\d .subcut
enabled:1b                      // switch on subscribercutoff

\d .servers
CONNECTIONS,:`segmentedtickerplant
CONNECTIONSFROMDISCOVERY:1b


================================================================================
FILE: TorQ_config_settings_segmentedtickerplant.q
SIZE: 1,446 characters
================================================================================

// Segmented TP config

\d .stplg

multilog:`tabperiod;            // [tabperiod|none|periodic|tabular|custom]
multilogperiod:0D01;            // Length of period for STP periodic logging modes
errmode:1b;                     // Enable error mode for STP
batchmode:`defaultbatch;        // [memorybatch|defaultbatch|immediate]
replayperiod:`day               // [period|day|prior]
customcsv:hsym first .proc.getconfigfile["stpcustom.csv"];  // Location for custom logging mode csv
kdbtplog:`$getenv`KDBTPLOG;

\d .proc
loadcommoncode:0b               // do not load common code
loadprocesscode:1b              // load process code
logroll:0b                      // do not roll logs

// Configuration used by the usage functions - logging of client interaction
\d .usage
enabled:0b                      // switch off the usage logging

// Client tracking configuration
// This is the only thing we want to do
// and only for connections being opened and closed
\d .clients
enabled:1b                      // whether client tracking is enabled
opencloseonly:1b                // only log open and closing of connections

// Server connection details
\d .servers
enabled:0b                      // disable server tracking

\d .timer
enabled:0b                      // disable the timer

\d .hb
enabled:0b                      // disable heartbeating

\d .zpsignore
enabled:0b                      // disable zpsignore - zps should be empty


================================================================================
FILE: TorQ_config_settings_sort.q
SIZE: 3,968 characters
================================================================================

// Bespoke SORT config

\d .wdb
ignorelist:`heartbeat`logmsg    // list of tables to ignore
hdbtypes:`hdb                   // list of hdb types to look for and call in hdb reload
rdbtypes:`rdb                   // list of rdb types to look for and call in rdb reload
tickerplanttypes:`tickerplant   // list of tickerplant types to try and make a connection to
wdbtypes:`wdb                   // list of wdb types to look for and call in wdb init tables
subtabs:`                       // list of tables to subscribe for (` for all)
subsyms:`                       // list of syms to subscribe for (` for all)
savedir:hsym`$getenv[`TORQHOME],"/wdbhdb"  // location to save wdb data
numrows:100000                  // default number of rows
numtab:`quote`trade!10000 50000 // specify number of rows per table
mode:`sort                      // the wdb process can operate in three modes
                                // 1. saveandsort: the process will subscribe for data,
                                //    periodically write data to disk and at EOD it will flush
                                //    remaining data to disk before sorting it and informing
                                //    GWs, RDBs and HDBs etc...
                                // 2. save: the process will subscribe for data,
                                //    periodically write data to disk and at EOD it will flush
                                //    remaining data to disk. It will then inform it's respective
                                //    sort mode process to sort the data
                                // 3. sort: the process will wait to get a trigger from it's respective
                                //    save mode process. When this is triggered it will sort the
                                //    data on disk, apply attributes and the trigger a reload on the
                                //    rdb and hdb processes

mergenumrows:100000             // default number of rows for merge process
mergenumtab:`quote`trade!10000 50000  // specify number of rows per table

tpconnsleepintv:10              // number of seconds between attempts to connect to the tp
upd:insert                      // value of the upd function
replay:1b                       // replay the tickerplant log file
schema:1b                       // retrieve schema from tickerplant
settimer:0D00:00:10             // timer to check if data needs written to disk
partitiontype:`date             // set type of partition (defaults to `date, can be `date, `month or `year)
getpartition:{@[value;
  `.wdb.currentpartition;
  (`date^partitiontype)$.proc.cd[]]}  // function to determine the partition value
reloadorder:`hdb`rdb            // order to reload hdbs and rdbs
hdbdir:`:hdb                    // move wdb database to different location
sortcsv:hsym first .proc.getconfigfile["sort.csv"]  // location of csv file
permitreload:1b                 // enable reload of hdbs/rdbs
compression:()                  // specify the compress level, empty list if no required
gc:1b                           // garbage collect at appropriate points (after each table save and after sorting data)
eodwaittime:0D00:00:10.000      // time to wait for async calls to complete at eod

// Server connection details
\d .servers
CONNECTIONS:`wdb`hdb`tickerplant`rdb`gateway  // list of connections to make at start up
STARTUP:1b                      // create connections


================================================================================
FILE: TorQ_config_settings_sortworker.q
SIZE: 2,368 characters
================================================================================

// Sort Worker config

\d .wdb
savedir:hsym`$getenv[`TORQHOME],"/wdbhdb"  // location to save wdb data
mode:`sort                      // the wdb process can operate in three modes
                                // 1. saveandsort: the process will subscribe for data,
                                //    periodically write data to disk and at EOD it will flush
                                //    remaining data to disk before sorting it and informing
                                //    GWs, RDBs and HDBs etc...
                                // 2. save: the process will subscribe for data,
                                //    periodically write data to disk and at EOD it will flush
                                //    remaining data to disk. It will then inform it's respective
                                //    sort mode process to sort the data
                                // 3. sort: the process will wait to get a trigger from it's respective
                                //    save mode process. When this is triggered it will sort the
                                //    data on disk, apply attributes and the trigger a reload on the
                                //    rdb and hdb processes

mergenumrows:100000             // default number of rows for merge process
mergenumtab:`quote`trade!10000 50000  // specify number of rows per table
hdbdir:`:hdb                    // move wdb database to different location
sortcsv:hsym first .proc.getconfigfile["sort.csv"]  // location of csv file
gc:1b                           // garbage collect at appropriate points (after each table save and after sorting data)
tickerplanttypes:rdbtypes:hdbtypes:gatewaytypes:sorttypes:sortworkertypes:()  // sortworkers don't need these connections
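
// As a hedged illustration (not part of TorQ itself): the per-table dictionaries
// above combine with their scalar defaults via the fill operator ^, which falls
// back to the default when a table has no entry. A minimal sketch:
/ illustrative only: per-table merge threshold with fallback to the default
.wdb.mergemaxrows:{[t].wdb.mergenumrows^.wdb.mergenumtab t}
.wdb.mergemaxrows`quote   / 10000, from mergenumtab
.wdb.mergemaxrows`depth   / 100000, the mergenumrows default (no numtab entry)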
dark:fill-gray-500/80" rx="2" x="105.6" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">1.46k</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">56.1k</div> </div></div></div></div> </th></tr></thead> <tbody class="h-16 overflow-scroll"><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="0"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">.ml.mm:.qml.mm .ml.mmt:.qml.mmx[`rflip] .ml.mtm:.qml.mmx[`lflip] .ml.minv:.qml.minv .ml.mlsq:{.qml.mlsqx[`flip;y;x]} .ml.dot:.qml.dot .ml.mdet:.qml.mdet .ml.mchol:.qml.mchol .fmincg.dot:.qml.dot ================================================================================ FILE: funq_randomforest.q SIZE: 1,085 characters ================================================================================ \c 20 100 \l funq.q \l wdbc.q \l winequality.q -1"applying random forest to the wdbc data set"; k:20 d:.ut.part[`train`test!3 1;0N?] wdbc.t -1"bagging grows B decision trees with random sampling (with replacement)"; m:.ml.bag[k;.ml.q45[();::]] d`train avg d.test.diagnosis=.ml.pbag[k;m] d`test -1"a random forest grows B decision trees with random sampling (with replacement)"; -1"and a sub-selection of sqrt (for classification) of the features at each split"; m:.ml.bag[k;.ml.q45[(1#`maxff)!1#sqrt;::]] d`train avg d.test.diagnosis=.ml.pbag[k;m] d`test -1"applying random forest to the winequality data set"; d:.ut.part[`train`test!1 1;0N?] 
winequality.red.t -1"bagging grows B decision trees with random sampling (with replacement)"; m:.ml.bag[k;.ml.rt[();::]] d`train .ml.rms d.test.quality-.ml.pbag[k;m] d`test -1"a random forest grows B decision trees with random sampling (with replacement)"; -1"and a sub-selection of one third (for regression) of the features at each split"; m:.ml.bag[k;.ml.rt[(1#`maxff)!1#%[;3];::]] d`train .ml.rms d.test.quality-.ml.pbag[k;m] d`test ================================================================================ FILE: funq_recommend.q SIZE: 8,769 characters ================================================================================ \c 22 100 \l funq.q \l mlens.q -1"reference mlens data from global namespace"; `rating`movie set' mlens`rating`movie / personal ratings -1"we now build a dataset to hold our own ratings/preferences"; r:1!select `mlens.movie$movieId,rating:0n from movie r,:([]movieId:173 208 260 435 1197 2005 1968i;rating:.5 .5 4 .5 4 4 4f) r,:([]movieId:2918 4006 53996 69526 87520 112370i;rating:5 5 4 4 5 5f) show select movieId,rating,movieId.title from r where not null rating / http://files.grouplens.org/papers/FnT%20CF%20Recsys%20Survey.pdf / content based filtering -1"content based filtering does not use ratings from other people."; -1"it uses our own preferences mixed with each movie's genre"; Y:value[r]1#`rating -1"we build the X matrix based on each movie's genres"; show X:"f"$flip genre in/: value[movie]`genres -1"we then initialize the THETA matrix"; theta:raze 0N!THETA:(1;1+count X)#0f -1"since we don't use other user's preferences, this is quick optimization"; rf:.ml.l2[.1] / l2 regularization theta:first .fmincg.fmincg[20;.ml.lincostgrad[rf;Y;X];theta] / learn -1"confirm lincostgrad handled the null Y values"; .ut.assert[2.4 0.2 0.4 -0.2 0.4] .ut.rnd[.1] 5#theta -1"view our deduced genre preferences"; show {(5#x),-5#x}desc genre!1_theta -1"how closely do the computed scores match our preferences"; THETA:(count[Y];0N)#theta r:update score:first .ml.plin[X;THETA] from r show select[>score] rating,score,movieId.title from r where not null rating -1"and finally, show the recommendations"; show select[10;>score] movieId,score,movieId.title from r -1"'Mars Needs Moms' was my top recommendation because it had so many genres"; select genres from movie where movieId = 85261 / ratings data summary / http://webdam.inria.fr/Jorge/html/wdmch19.html -1"we begin be reporting summary statistics about the ratings dataset"; -1"support"; -1"reporting the number of users, movies and ratings"; (count distinct@) each exec nu:userId, nm:movieId, nr:i from rating -1"distribution:"; -1"we can see that only users with >20 ratings are included"; t:select nr:count rating by userId from rating show select nu:count userId by 10 xbar nr from t -1"we can also see that a large majority of movies have less than 10 ratings"; t:select nr:count rating by movieId from rating show select nm:count movieId by 10 xbar nr from t -1"quality:"; -1"we can see that there is a positive bias to the ratings"; show `min`med`avg`mode`max!(min;med;avg;.ml.mode;max)@\:rating`rating /rating:select from rating where 19<(count;i) fby userId,9<(count;i) fby movieId -1"the average rating per user (and movie) is distributed around 3.5"; t:select avg rating by movieId from rating t:select nm:count i by .5 xbar rating from t s:select avg rating by userId from rating show t lj select nu:count i by .5 xbar rating from s -1"movies with a small number of ratings can distort the rankings"; -1"the top rankings are dominated by 
movies with a single rating"; show select[10;>rating] avg rating, n:count i by movieId.title from rating -1"while the most rated movies have averages centered around 4"; show select[10;>n] avg rating, n:count i by movieId.title from rating -1"we will therefore demean the ratings before performing our analysis"; -1""; -1"by using a syntax that is similar to pivoting,"; -1"we can generate the user/movie matrix"; / https://grouplens.org/blog/similarity-functions-for-user-user-collaborative-filtering/ -1"to ensure the ratings matrix only contains movies with relevant movies,"; -1"we generate a list of unique movie ids that meet our threshold."; n:20 show m:exec distinct movieId from rating where n<(count;i) fby movieId show R:value exec (movieId!rating) m by userId from rating where movieId in m -1"then add our own ratings"; R,:r[([]movieId:m);`rating] -1"demean each user"; U:R-au:avg each R k:30 / user-user collaborative filtering -1"user-user collaborative filtering fills missing ratings"; -1"with averaged values from users who's ratings are most similar to ours"; -1"average top ",string[k], " users based on correlation"; p:last[au]+.ml.fknn[1f-;.ml.cordist\:;k;U;0f^U] 0f^last U show `score xdesc update score:p,movieId.title from ([]movieId:m)#r -1"average top ",string[k], " users based on spearman correlation"; p:last[au]+.ml.fknn[1f-;.ml.scordist\:;k;U;0f^U] 0f^last U show `score xdesc update score:p,movieId.title from ([]movieId:m)#r -1"weighted average top ",string[k], " users based on cosine similarity"; -1"results in the same recommendations as .ml.cordist because the data"; -1"has been centered and filled with 0"; p:last[au]+.ml.fknn[1f-;.ml.cosdist\:;k;U;0f^U] 0f^last U show `score xdesc update score:p,movieId.title from ([]movieId:m)#r / item-item collaborative filtering -1"item-item collaborative filtering fills missing ratings"; -1"with averaged values from movies most similar to movies we've rated"; I-:ai:avg each I:flip R -1"pre-build item-item distance matrix because item similarities are stable"; D:((0^I) .ml.cosdist\:) peach 0^I -1"average top ",string[k], " items based on correlation"; p:ai+.ml.knn[1f-;k;last each I] D show `score xdesc update score:p,movieId.title from ([]movieId:m)#r nf:10; if[2<count key `.qml; -1 .ut.box["**"] ( "singular value decomposition (svd) allows us to compute latent factors (off-line)"; "and perform simple matrix multiplication to make predictions (on-line)"); -1"compute score based on top n svd factors"; / singular value decomposition usv:.qml.msvd 0f^U; -1"predict missing ratings using low rank approximations"; P:ai+{x$z$/:y} . 
.ml.nsvd[nf] usv; show t:`score xdesc update score:last P,movieId.title from ([]movieId:m)#r; -1"compare against existing ratings"; show select from t where not null rating; -1"we can use svd to foldin a new user"; .ml.foldin[.ml.nsvd[500] usv;0b] 0f^U[2]; -1"or even a new movie"; .ml.foldin[.ml.nsvd[500] usv;1b;0f^U[;2]]; -1"what does the first factor look like?"; show each {(5#x;-5#x)}([]movieId:m idesc usv[2][;0])#movie; -1"how much variance does each factor explain?"; show .ut.plot[40;19;.ut.c10;avg] {x%sum x*:x}.qml.mdiag usv 1; ]; / regularized gradient descent -1 .ut.box["**"] ( "regularized gradient descent collaborative filtering"; "doesn't need to be filled with default values"; "and can use regularization"); n:(ni:count U 0;nu:count U) / (n items; n users) -1"randomly initialize X and THETA"; xtheta:2 raze/ XTHETA:(X:-1+ni?/:nf#2f;THETA:-1+nu?/:nf#2f) -1"learn latent factors that best predict existing ratings matrix"; xtheta:first .fmincg.fmincg[100;.ml.cfcostgrad[rf;n;U];xtheta] / learn -1"predict missing ratings"; P:au+.ml.pcf . XTHETA:.ml.cfcut[n] xtheta / predictions show t:`score xdesc update score:last P,movieId.title from ([]movieId:m)#r -1"compare against existing ratings"; show select from t where not null rating -1"check collaborative filtering gradient calculations"; .ut.assert . .ut.rnd[1e-6] .ml.checkcfgrad[1e-4;rf;20 5] / stochastic regularized gradient descent -1"by solving for each rating, one at a time"; -1"we can perform stochastic gradient descent"; -1"randomly initialize X and THETA"; xtheta:2 raze/ XTHETA:(X:-1+ni?/:nf#2f;THETA:-1+nu?/:nf#2f) -1"define cost function"; cf:.ml.cfcost[rf;U] . -1"define minimization function"; mf:.ml.sgdmf[.05;.2;0N?;U;;::] -1"keep running mf until improvement is lower than pct limit"; XTHETA:first .ml.iter[-1;.0001;cf;mf] XTHETA -1"predict missing ratings"; P:au+.ml.pcf . XTHETA / predictions show t:`score xdesc update score:last P,movieId.title from ([]movieId:m)#r -1"compare against existing ratings"; show select from t where not null rating / alternating least squares with weighted regularization / Large-scale Parallel Collaborative Filtering for the Netflix Prize / http://dl.acm.org/citation.cfm?id=1424269 -1"Alterating Least Squares is used to factor the rating matrix"; -1"into a user matrix (X) and movie matrix (THETA)"; -1"by alternating between keeping THETA constant and solving for X"; -1"and vice versa. this changes a non-convex problem"; -1"into a quadratic problem solvable with parallel least squares."; -1"this implementation uses a weighting scheme where"; -1"the weights are equal to the number of ratings per user/movie"; -1"reset X and THETA"; XTHETA:(X:-1+ni?/:nf#1f;THETA:-1+nu?/:nf#2f) -1"keep running mf until improvement is lower than pct limit"; XTHETA:first .ml.iter[1;.0001;.ml.cfcost[();U] .;.ml.alswr[.01;U]] XTHETA -1"predict missing ratings"; P:au+.ml.pcf . 
XTHETA / predictions
show t:`score xdesc update score:last P,movieId.title from ([]movieId:m)#r
-1"compare against existing ratings";
show s:select from t where not null rating
.ut.assert[0f] .ut.rnd[.01] avg exec .ml.mseloss[rating;score] from s

================================================================================
FILE: funq_sands.q
SIZE: 315 characters
================================================================================

/ sense and sensibility
sands.f:"161.txt"
sands.b:"https://www.gutenberg.org/files/161/old/"
-1"[down]loading sense and sensibility text";
.ut.download[sands.b;;"";""] sands.f;
sands.txt:read0 `$sands.f
sands.chapters:1_"CHAPTER" vs "\n" sv 43_-373_sands.txt
sands.s:{(3+first x ss"\n\n\n")_x} each sands.chapters

================================================================================
FILE: funq_seeds.q
SIZE: 429 characters
================================================================================

seeds.f:"seeds_dataset.txt"
seeds.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
seeds.b,:"00236/"
-1"[down]loading seeds data set";
.ut.download[seeds.b;;"";""] seeds.f;
seeds.XY:("FFFFFFFH";"\t") 0: ssr[;"\t\t";"\t"] each read0 `$seeds.f
seeds.X:-1_seeds.XY
seeds.y:first seeds.Y:-1#seeds.XY
seeds.c:`area`perimeter`compactness`length`width`asymmetry`groove`variety
seeds.t:`variety xcols flip seeds.c!seeds.XY

================================================================================
FILE: funq_silhouette.q
SIZE: 1,437 characters
================================================================================


Implementing trend indicators in kdb+¶

The compactness of kdb+ and the terseness of q focus code on a small number of high-performing native built-in functions rather than extensive libraries. kdb+ users often develop libraries of their own domain-specific algorithms and functions, for convenience and to support reuse. In this paper, we show examples of functions commonly used in finance, built on native q functions. Cryptocurrency data for Bitcoin and Ethereum from multiple exchanges are used in the examples. Charts are displayed using the KX Analyst IDE. The code used in this paper can be found at kxcontrib/trend-indicators. It is developed on kdb+ version 3.6 2019.03.07.

Data extraction¶

Data was captured in a process similar to that used in Eduard Silantyev’s blog “Combining high-frequency cryptocurrency venue data using kdb+”. Trade and quote tick data for Ethereum (ETH) and Bitcoin (BTC) denominated in the US dollar (USD) was collected from four exchanges

- Bitfinex
- HitBtc
- Kraken
- Coinbase

spanning May, June and July 2019: just over two months of data. A Python script connected to exchange feeds and extracted the relevant data, which was then published to a kdb+ tickerplant. The tickerplant processed the messages and sent them to a real-time database (RDB). At the end of the day the data was then written to a historical database (HDB) where it could be accessed for analysis. Such details will not be elaborated on, as the focus of this paper is implementing Trend Indicators using kdb+.
For help with tick capture: - kdb+tick profiling for throughput optimization - Disaster-recovery planning for kdb+ tick systems - Query Routing: A kdb+ framework for a scalable, load balanced system To make it easy to follow through this paper and execute the functions/indicators created there is a sample of close data located in the GitHub repository. This is a small binary flat file which can be loaded into kdb+/q. The close data contains the daily high/low/open/close and volume of trades for Bitcoin trading on Kraken and the table is called bitcoinKraken . This table will be used throughout the paper to show how you can apply the functions/indicators to an in-memory kdb+ table. q)bitcoinKraken:get `:bitcoinKraken q)\l cryptoFuncs.q "loading in cryptoFuncs" q)10#bitcoinKraken date sym exch high low open close vol -------------------------------------------------------------- 2019.05.09 BTC_USD KRAKEN 6174 6037.9 6042 6151.4 1808.803 2019.05.10 BTC_USD KRAKEN 6430 6110.1 6151.4 6337.9 9872.36 2019.05.11 BTC_USD KRAKEN 7450 6338 6339.5 7209.9 18569.93 2019.05.12 BTC_USD KRAKEN 7588 6724.1 7207.9 6973.9 18620.15 2019.05.13 BTC_USD KRAKEN 8169.3 6870 6970.1 7816.3 19668.6 2019.05.14 BTC_USD KRAKEN 8339.9 7620 7817.1 7993.7 18118.61 2019.05.15 BTC_USD KRAKEN 8296.9 5414.5 7988.9 8203 11599.71 2019.05.16 BTC_USD KRAKEN 8370 7650 8201.5 7880.7 13419.86 2019.05.17 BTC_USD KRAKEN 7946.2 6636 7883.6 7350 21017.35 2019.05.18 BTC_USD KRAKEN 7494.2 7205 7353.9 7266.8 6258.585 Technical analysis¶ Technical analysis is the process of identifying trading opportunities based on past price movements using different stock charts. Trend/technical traders use a combination of patterns and indicators from price charts to help them make financial decisions. Investors analyze price charts to develop theories about what direction the market is likely to move. Commonly used in technical analysis tools are the Candlestick chart, Moving Average Convergence Divergence and Relative Strength Index. These tools are created using q/kdb+’s in-built functions such as mavg , ema , min , max , and avg . The tools discussed do not predict future prices but provide the investor information to determine their next move. The indicators create buy and sell signals using moving averages, prices, volume, days since previous high or low. The investor can then make his financial decision based on the signals created. Pattern recognition¶ The candlestick chart is used for describing price movements in a particular security. The chart illustrates the open/high/low/close of a security and is used by traders to identify patterns based on past movements. candlestick : { fillscale : .gg.scale.colour.cat 01b!(.gg.colour.Red; .gg.colour.Green); .qp.theme[enlist[`legend_use]!enlist 0b] .qp.stack ( // open/close .qp.interval[x; `date; `open; `close] .qp.s.aes[`fill; `gain] ,.qp.s.scale[`fill; fillscale] ,.qp.s.labels[`x`y!("Date";"Price")] ,.qp.s.geom[`gap`colour!(0; .gg.colour.White)]; // low/high .qp.segment[x; `date; `high; `date; `low] .qp.s.aes[`fill; `gain] ,.qp.s.scale[`fill; fillscale] ,.qp.s.labels[`x`y!("Date";"Price")] ,.qp.s.geom[enlist [`size]!enlist 1]) } .qp.go[700;300] .qp.theme[.gg.theme.clean] .qp.title["Candlestick chart BTC"] candlestick[update gain: close > open from select from wpData where sym=`BTC_USD,exch=`KRAKEN] Figure 1: Bitcoin Candlestick Chart using Kraken data Each candle shows the high/open/close/low and if our security closed higher than the open. This can be useful in predicting short term price movements. 
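
The candlestick inputs are simply daily OHLC/volume bars. As a minimal, hedged sketch (assuming a raw trade table with date, sym, exch, price and size columns, which the paper does not show), such bars can be built with a single grouped select:

/ illustrative only: build daily bars like bitcoinKraken from raw trades
candles:select high:max price,low:min price,open:first price,close:last price,
  vol:sum size by date,sym,exch from trade
update gain:close>open from candles  / gain flag used to colour the candles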
Simple Moving Averages¶ The price of a security can be extremely volatile and large price movements can make it hard to pinpoint the general trend. Moving averages ‘smooth’ price data by creating a single flowing line. The line represents the average price over a period of time. Which moving average the trader decides to use is determined by the time frame in which he or she trades. There are two commonly used moving averages: Simple Moving Average (SMA) and Exponential Moving Average (EMA). EMA gives a larger weighting to more recent prices when calculating the average. In Figure 2 you can see the 10-day moving average and 20-day moving average along with the close price. Traders analyze where the current trade price lies in relation to the moving averages. If the current trade price is above the moving-average (MA) line this would indicate over-bought (decline in price expected), trade price below MA would indicate over-sold (increase in price may be seen). It should be noted that a signal/trend indicator would not determine a trading strategy but would be analyzed in conjunction with other factors. Now using the previously defined bitcoinKraken table we can start to apply our own simple moving averages. In the example below the 2- and 5-day moving averages are calculated on the close price. This can be updated to get the moving average of any of the numeric columns like high price for example or you could alter the number of periods used. In Figure 2 the 10- and 20-day moving averages are used. This can be adjusted depending on your needs. Short-term traders would be interested in relatively short time periods whereas long-term investors who want an overall picture of a security would compare large periods like 100 and 200 days. q)10#update sma2:mavg[2;close],sma5:mavg[5;close] from bitcoinKraken date sym exch high low open close vol sma2 sma5 ------------------------------------------------------------------------------- 2019.05.09 BTC_USD KRAKEN 6174 6037.9 6042 6151.4 1808.803 6151.4 6151.4 2019.05.10 BTC_USD KRAKEN 6430 6110.1 6151.4 6337.9 9872.36 6244.65 6244.65 2019.05.11 BTC_USD KRAKEN 7450 6338 6339.5 7209.9 18569.93 6773.9 6566.4 2019.05.12 BTC_USD KRAKEN 7588 6724.1 7207.9 6973.9 18620.15 7091.9 6668.275 2019.05.13 BTC_USD KRAKEN 8169.3 6870 6970.1 7816.3 19668.6 7395.1 6897.88 2019.05.14 BTC_USD KRAKEN 8339.9 7620 7817.1 7993.7 18118.61 7905 7266.34 2019.05.15 BTC_USD KRAKEN 8296.9 5414.5 7988.9 8203 11599.71 8098.35 7639.36 2019.05.16 BTC_USD KRAKEN 8370 7650 8201.5 7880.7 13419.86 8041.85 7773.52 2019.05.17 BTC_USD KRAKEN 7946.2 6636 7883.6 7350 21017.35 7615.35 7848.74 2019.05.18 BTC_USD KRAKEN 7494.2 7205 7353.9 7266.8 6258.585 7308.4 7738.84 The graph in Figure 2 was created using KX Analyst. A sample for this code can be seen below. All Graphics of Grammar code can be found in the repository for this project. The following is an example. 
sma:{[x] .qp.go[700;300] .qp.title["SMA BTC Kraken"] .qp.theme[.gg.theme.clean] .qp.stack( .qp.line[x; `date; `sma10] .qp.s.geom[enlist[`fill]!enlist .gg.colour.Blue] ,.qp.s.scale [`y; .gg.scale.limits[6000 0N] .gg.scale.linear] ,.qp.s.legend[""; `sma10`sma20`close!(.gg.colour.Blue;.gg.colour.Red;.gg.colour.Green)] ,.qp.s.labels[`x`y!("Date";"Price")]; .qp.line[x; `date; `sma20] .qp.s.geom[enlist[`fill]!enlist .gg.colour.Red] ,.qp.s.scale [`y; .gg.scale.limits[6000 0N] .gg.scale.linear] ,.qp.s.labels[`x`y!("Date";"Price")]; .qp.line[x; `date; `close] .qp.s.geom[enlist[`fill]!enlist .gg.colour.Green] ,.qp.s.scale [`y; .gg.scale.limits[6000 0N] .gg.scale.linear] ,.qp.s.labels[`x`y!("Date";"Price")])} q)sma[update sma10:mavg[10;close], sma20:mavg[20;close] from select from wpData where sym=`BTC_USD,exch=`KRAKEN] Figure 2: 10- and 20-day Simple Moving Averages for Bitcoin Moving Average Convergence Divergence¶ Moving Average Convergence Divergence (MACD) is an important and popular analysis tool. It is a trend indicator that shows the relationship between two moving averages of a securities price. MACD is calculated by subtracting the long-term EMA (26 periods) from the short-term EMA (12 periods). A period is generally defined as a day but shorter/longer timespans can be used. Throughout this paper we will consider a period to be one day. EMAs place greater weight and significance on the more recent data points and react more significantly to price movements than SMA. The 9-day moving average of the MACD is also calculated and plotted. This line is known as the signal line and can be used to identify buy and sell signals. The code for calculating the MACD is very simple and exploits kdb+/q’s built-in function ema . An example of how the code is executed, along with a subset of the output is displayed. /tab-table input /id-ID you want `ETH_USD/BTC_USD /ex-exchange you want /output is a table with the close,ema12,ema26,macd,signal line calculated macd:{[tab;id;ex] macd:{[x] ema[2%13;x]-ema[2%27;x]}; /macd line signal:{ema[2%10;x]}; /signal line res:select sym, date, exch, close, ema12:ema[2%13;close], ema26:ema[2%27;close], macd:macd[close] from tab where sym=id, exch=ex; update signal:signal[macd] from res } q)10#macd[bitcoinKraken;`BTC_USD;`KRAKEN] sym date exch close ema12 ema26 macd signal -------------------------------------------------------------------- BTC_USD 2019.05.09 KRAKEN 6151.4 6151.4 6151.4 0 0 BTC_USD 2019.05.10 KRAKEN 6337.9 6180.092 6165.215 14.87749 2.975499 BTC_USD 2019.05.11 KRAKEN 7209.9 6338.524 6242.599 95.92536 21.56547 BTC_USD 2019.05.12 KRAKEN 6973.9 6436.274 6296.769 139.505 45.15338 BTC_USD 2019.05.13 KRAKEN 7816.3 6648.586 6409.327 239.2588 83.97447 BTC_USD 2019.05.14 KRAKEN 7993.7 6855.527 6526.688 328.8385 132.9473 BTC_USD 2019.05.15 KRAKEN 8203 7062.83 6650.859 411.9708 188.752 BTC_USD 2019.05.16 KRAKEN 7880.7 7188.656 6741.959 446.6977 240.3411 BTC_USD 2019.05.17 KRAKEN 7350 7213.478 6786.999 426.4797 277.5688 BTC_USD 2019.05.18 KRAKEN 7266.8 7221.682 6822.54 399.1421 301.8835 Figure 3 graphs the MACD for ETH_USD using data from HITBTC. Figure 3: Moving Average Convergence Divergence for Ethereum using HITBTC data From the above graph, you can see how the close price interacts with the short and long EMA and how this then impacts the MACD and signal-line relationship. There is a buy signal when the MACD line crosses over the signal line and there is a short signal when the MACD line crosses below the signal line. 
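
As a short, illustrative sketch (not from the original paper) of how those crossings can be turned into columns, compare each day's macd/signal relationship with the previous day's:

/ illustrative only: flag signal-line crossovers from the macd output above
sigs:update above:macd>signal from macd[bitcoinKraken;`BTC_USD;`KRAKEN]
select date,close,macd,signal,buy:above>prev above,sell:above<prev above from sigs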
Relative Strength Index¶ Figure 4: Relative Strength Index for Ethereum using HITBTC data Relative Strength Index (RSI) is a momentum oscillator that measures the speed and change of price movements. It oscillates between 0-100. It is said that a security is overbought when above 70 and oversold when below 30. It is a general trend and momentum indicator. The default period is 14 days. This can be reduced or increased – the shorter the period, the more sensitive it is to price changes. Short-term traders sometimes look at 2-day RSIs for overbought readings above 80 and oversold ratings below 20. The first calculation of the average gain/loss are simple 14-day averages. First Average Gain: sum of Gains over the past 14 days/14 First Average Loss: sum of Losses over the past 14 days/14 The subsequent calculations are based on the prior averages and the current gain/loss. //Relative strength index - RSI //close-close price /n-number of periods relativeStrength:{[num;y] begin:num#0Nf; start:avg((num+1)#y); begin,start,{(y+x*(z-1))%z}\[start;(num+1)_y;num] } rsiMain:{[close;n] diff:-[close;prev close]; rs:relativeStrength[n;diff*diff>0]%relativeStrength[n;abs diff*diff<0]; rsi:100*rs%(1+rs); rsi } q)update rsi:rsiMain[close;14] by sym,exch from wpData It is shrewd to use both RSI and MACD together as both measure momentum in a market, but, because they measure different factors, they sometimes give contrary indications. Using both together can provide a clearer picture of the market. RSI could be showing a reading of greater than 70, this would indicate that the the security is overbought, but the MACD is signaling that the market is continuing in the upward direction. Money Flow Index¶ Figure 5: Money Flow Index for Ethereum where n=14 Money Flow Index (MFI) is a technical oscillator similar to RSI but which instead uses price and volume for identifying overbought and oversold conditions. This indicator weighs in on volume and not just price to give a relative score. A low volume with a large price movement will have less impact on the relative score compared to a high volume move with a lower price move. You see new highs/lows and large price swings but also if there is a price swing whether there is any volume behind the move or if it is just a small trade. The market will generally correct itself. It can be used to spot divergences that warn traders of a change in trend. MFI is known as the volume-weighted RSI. We use the relativeStrength function as in the RSI calculation above. mfiMain:{[h;l;c;n;v] TP:avg(h;l;c); / typical price rmf:TP*v; / real money flow diff:deltas[0n;TP]; / diffs /money-flow leveraging func for RSI mf:relativeStrength[n;rmf*diff*diff>0]%relativeStrength[n;abs rmf*diff*diff<0]; mfi:100*mf%(1+mf); /money flow as a percentage mfi } q)update mfi:mfiMain[high;low;close;14;vol] by sym,exch from wpData Figure 6: MFI versus RSI Analysts use both RSI and MFI together to see whether a price move has volume behind it. Here is another good example to show the output of the update columns after applying the indicators to the in memory table defined above as bitcoinKraken . The table below shows bitcoinKraken updated with the output columns attached on to the end. This shows how easy it is to compare statistical outputs. In Figure 6 the 14-day period RSI and MFI are compared, but below a 6-day period is chosen. 
q)10#update rsi:rsiMain[close;6],mfi:mfiMain[high;low;close;6;vol] from bitcoinKraken date sym exch high low open close vol rsi mfi -------------------------------------------------------------------------------- 2019.05.09 BTC_USD KRAKEN 6174 6037.9 6042 6151.4 1808.803 2019.05.10 BTC_USD KRAKEN 6430 6110.1 6151.4 6337.9 9872.36 2019.05.11 BTC_USD KRAKEN 7450 6338 6339.5 7209.9 18569.93 2019.05.12 BTC_USD KRAKEN 7588 6724.1 7207.9 6973.9 18620.15 2019.05.13 BTC_USD KRAKEN 8169.3 6870 6970.1 7816.3 19668.6 2019.05.14 BTC_USD KRAKEN 8339.9 7620 7817.1 7993.7 18118.61 2019.05.15 BTC_USD KRAKEN 8296.9 5414.5 7988.9 8203 11599.71 90.64828 81.06234 2019.05.16 BTC_USD KRAKEN 8370 7650 8201.5 7880.7 13419.86 78.60196 85.19688 2019.05.17 BTC_USD KRAKEN 7946.2 6636 7883.6 7350 21017.35 62.25494 62.04519 2019.05.18 BTC_USD KRAKEN 7494.2 7205 7353.9 7266.8 6258.585 59.91089 62.10847 Commodity Channel Index¶ The Commodity Channel Index (CCI) is another tool used by technical analysts. Its primary use is for spotting new trends. It measures the current price level relative to an average price level over time. The CCI can be used for any market, not just for commodities. It can also be used to help identify if a security is approaching overbought and oversold levels. This can help traders decide whether to add to a position, exit the position or take no part. When CCI is positive it indicates the price is above the historical average and when it is negative it indicates the price is below the historical average. Moving from negative readings to high positive readings can be used as a signal for a possible uptrend. Similarly, the reverse will signal downtrends. CCI has no upper or lower bound, so typical overbought and oversold levels should be determined for each asset individually by looking at its historical CCI levels. To calculate the Mean Deviation, a helper function called maDev (moving-average deviation) is used. maDev:{[tp;ma;n] ((n-1)#0Nf), {[x;y;z;num] reciprocal[num]*sum abs z _y#x}' [(n-1)_tp-/:ma; n+l; l:til count[tp]-n-1; n] } This is calculated by subtracting the Moving Average from the Typical Price for the last n periods, summing the absolute values of these figures and then dividing by n periods. CCI:{[high;low;close;ndays] TP:avg(high;low;close); sma:mavg[ndays;TP]; mad:maDev[TP;sma;ndays]; reciprocal[0.015*mad]*TP-sma } q)update cci:CCI[high;low;close;14] by sym,exch from wpData Figure 7: Commodity Channel Index and close price for Bitcoin using Kraken data Bollinger Bands¶ Figure 8: Bollinger Bands for Bitcoin using Kraken data and n=20 Bollinger Bands are used in technical analysis for pattern recognition. They are formed by plotting two lines that are two standard deviations from the simple moving-average price, one in the negative direction and one positive. Standard deviation is a measure of volatility in an asset, so when the market becomes more volatile the bands widen. Similarly, less volatility leads to the bands contracting. If the prices move towards the upper band the security is seen to be overbought, and as the prices get close to the lower band the security is considered oversold. This provides traders with information regarding price volatility. 90% of price action occurs between the bands. A breakout from this would be seen as a major event. The breakout is not considered a trading signal. Breakouts provide no clue as to the direction and extent of future price movements.
/tab-input table /n-number of days /ex-exchange /id-id to run for bollB:{[tab;n;ex;id] tab:select from tab where sym=id,exch=ex; tab:update sma:mavg[n;TP],sd:mdev[n;TP] from update TP:avg(high;low;close) from tab; select date,sd,TP,sma,up:sma+2*sd,down:sma-2*sd from tab} q)bollB[wpData;20;`KRAKEN;`BTC_USD] Force Index¶ The Force Index is a technical indicator that measures the amount of power behind a price move. It uses price and volume to assess the force behind a move or a possible turning point. The technical indicator is an unbounded oscillator that oscillates between a negative and positive value. There are three essential elements to stock price movement: direction, extent and volume. The Force Index combines all three in this oscillator. Figure 9: Force Index and Close Price for Bitcoin using Kraken data The above graph shows the 13-day EMA of the Force Index. It can be seen that the Force Index crosses the centre line as the price begins to increase. This would indicate that bullish trading is exerting a greater force. However, this changes towards the end of July where there is a significant change from a high positive force index to a negative one and the price drops dramatically. It suggests the emergence of a bear market. The Force Index calculation subtracts the prior day’s close from today’s close and multiplies it by the daily volume. The next step is to calculate the 13-day EMA of this value. //Force Index Indicator /c-close /v-volume /n-num of periods //ForceIndex1 is the force index for one period forceIndex:{[c;v;n] forceIndex1:1_deltas[0nf;c]*v; (n#0nf),(n-1)_ema[2%1+n;forceIndex1] } q)update ForceIndex:forceIndex[close;vol;13] by sym,exch from wpData Ease of Movement Value¶ Ease of Movement Value (EMV) is another technical indicator that combines momentum and volume information into one value. The idea is to use this value to decide if the prices are able to rise or fall with little resistance in directional movement. 14-period EMV: 14-day simple average of EMV The scale factor is chosen to produce a normal number. This is generally relative to the volume of shares traded. //Ease of movement value -EMV /h-high /l-low /v-volume /s-scale /n-num of periods emv:{[h;l;v;s;n] boxRatio:reciprocal[-[h;l]]*v%s; distMoved:deltas[0n;avg(h;l)]; (n#0nf),n _mavg[n;distMoved%boxRatio] } q)update EMV:emv[high;low;vol;1000000;14] by sym,exch from wpData Figure 10: Ease of Movement, Close and Volume for Ethereum using Kraken Data Rate of Change¶ The Rate of Change (ROC) indicator measures the percentage change in the close price over a specific period of time. //Price Rate of change Indicator (ROC) /c-close /n-number of days prior to compare roc:{[c;n] curP:_[n;c]; prevP:_[neg n;c]; (n#0nf),100*reciprocal[prevP]*curP-prevP } q)update ROC:roc[close;10] from bitcoinKraken A positive move in the ROC indicates that there was a sharp price advance. This can be seen on the graph in Figure 11 between the 8th and 22nd of June. A downward drop indicates a steep decline in the price. This oscillator is prone to whipsaw around the zero line as can be seen in the graph. For the graph below n is set to 9, a value commonly used by short-term traders. Figure 11: Rate of change for Bitcoin using Kraken data Stochastic Oscillator¶ Figure 12: Stochastic Oscillator with smoothing %K=1,%D=3 for Bitcoin using Kraken data The Stochastic Oscillator is a momentum indicator comparing a particular closing price of a security to a range of its prices over a certain period of time.
You can adjust the sensitivity of the indicator by adjusting the time period and by taking the moving average of the result. The indicator has a 0-100 range that can be used to indicate overbought and oversold signals. A security is considered overbought when greater than 80 and oversold when less than 20. For this case, n will be 14 days. \(\%K = 100 \times \frac{C - L(n)}{H(n) - L(n)}\) where C: Current Close L(n): Low across last n days H(n): High over the last n days %K: fast stochastic indicator %D: slow stochastic indicator, the n-day moving average of %K (generally n=3) //null out first 13 days if 14 days moving avg //Stochastic Oscillator /h-high /l-low /n-num of periods /c-close price /o-open stoOscCalc:{[c;h;l;n] lows:mmin[n;l]; highs:mmax[n;h]; (a#0n),(a:n-1)_100*reciprocal[highs-lows]*c-lows } /k-smoothing for %K /for fast stochastic oscillator smoothing is set to one k=1/slow k=3 default /d-smoothing for %D - this is generally set to 3 /general set up n=14,k=1(fast),k=3(slow),d=3 stoOscK:{[c;h;l;n;k] (a#0nf),(a:n+k-2)_mavg[k;stoOscCalc[c;h;l;n]] } stoOscD:{[c;h;l;n;k;d] (a#0n),(a:n+k+d-3)_mavg[d;stoOscK[c;h;l;n;k]] } q)update sC:stoOscCalc[close;high;low;5], sk:stoOscK[close;high;low;5;2], sd:stoOscD[close;high;low;5;2;3] from bitcoinKraken The Commodity Channel Index (CCI) and the Stochastic Oscillator Both these technical indicators are oscillators, but calculated quite differently. One of the main differences is that the Stochastic Oscillator is bound between zero and 100, while the CCI is unbounded. Due to the calculation differences, they will provide different signals at different times, such as overbought and oversold readings. Aroon Oscillator¶ The Aroon Indicator is a technical indicator used to identify trend changes in the price of a security and the strength of that trend, which is used in the Aroon Oscillator. An Aroon Indicator has two parts: \(aroonUp\) and \(aroonDown\), which measure the time since the last high and the last low respectively over a period of time \(n\), generally 25 days. The premise of the indicator is that strong uptrends will regularly see new highs and strong downtrends will regularly see new lows. The range of the indicator is between 0 and 100. Figure 13: Aroon Oscillator and Aroon Indicator //Aroon Indicator aroonFunc:{[c;n;f] m:reverse each a _'(n+1+a:til count[c]-n)#\:c; #[n;0ni],{x? y x}'[m;f] } aroon:{[c;n;f] 100*reciprocal[n]*n-aroonFunc[c;n;f]} /- aroon[tab`high;25;max]-- aroon up /- aroon[tab`low;25;min]-- aroon down aroonOsc:{[h;l;n] aroon[h;n;max] - aroon[l;n;min]} q)update aroonUp:aroon[high;25;max], aroonDown:aroon[low;25;min], aroonOsc:aroonOsc[high;low;25] from bitcoinKraken The Aroon Oscillator subtracts \(aroonDown\) from \(aroonUp\), making the range of the oscillator between -100 and 100. The oscillator moves above the zero line when \(aroonUp\) moves above \(aroonDown\). The oscillator drops below the zero line when \(aroonDown\) moves above \(aroonUp\). Conclusion¶ This paper shows how kdb+/q can be applied to produce common trade analytics which are not available out of the box but which can be efficiently implemented using primitive functions. The functions shown range from moving averages to more complex functions like Relative Strength Index and Moving Average Convergence Divergence, as used by quants and traders building out more powerful analytics solutions. The common trend indicators discussed trigger buy/sell signals, and offer a clearer image of the current market.
This touches the tip of the iceberg of what can be done in analytics and emphasizes the power of kdb+ in a data-analytics solution. Libraries of custom-built analytic functions can be created with ease, and in a short space of time applied to realtime and historical data. This paper also demonstrates KX Analyst, an IDE for creating analytical functions and visualizing their output. The combination of this library of functions and KX Analyst provides the user with faster development and processing times to gain meaningful insights from the data. Author¶ James Galligan is a kdb+ consultant who has designed and developed data-capture and data-analytics platforms for trading and analytics across multiple asset classes in multiple leading financial institutions.

Working with MATLAB¶ Installation¶ Versions As MATLAB/Datafeed Toolbox evolves, features or instructions below are subject to revision. Please refer to the toolbox documentation for the latest version. Users have reported that this works with more recent versions (e.g. R2015b on RHEL 6.8/2016b and 2017a on macOS). See also community-supported native connector dmarienko/kdbml Download and unzip kx_kdbplus.zip. Add the resulting directory to your MATLAB path, for example in MATLAB >> addpath('/Users/Developer/matlabkx') kdb+ support is part of the Datafeed Toolbox for MATLAB, since the R2007a edition. The MATLAB integration depends on the two Java files c.jar and jdbc.jar . KxSystems/kdb/c/c.jar KxSystems/kdb/c/jdbc.jar Add the JAR files to the classpath used by MATLAB. They can be added permanently by editing classpath.txt (type edit classpath.txt at the MATLAB prompt) or for the duration of a particular session using the javaaddpath function, for example >> javaaddpath /home/myusername/jdbc.jar >> javaaddpath /home/myusername/c.jar Installation directory In these examples change /home/myusername to the directory where jdbc.jar and c.jar are installed. Alternatively, this can be achieved in a MATLAB source file (i.e., *.m file) by adding the following two function calls before calling kx functions. javaaddpath('/home/myusername/jdbc.jar') javaaddpath('/home/myusername/c.jar') Confirm they have been added successfully using the javaclasspath function. >> javaclasspath STATIC JAVA PATH ... /opt/matlab/2015b/java/jar/toolbox/stats.jar /opt/matlab/2015b/java/jar/toolbox/symbol.jar DYNAMIC JAVA PATH /home/myusername/jdbc.jar /home/myusername/c.jar >> Connecting to a q process¶ First, we start up a kdb+ process that we wish to communicate with from MATLAB and load some sample data into it.
Save following as tradedata.q file / List of securities seclist:([name:`ACME`ABC`DEF`XYZ] market:`US`UK`JP`US) / Distinct list of securities secs: distinct exec name from seclist n:5000 / Data table trade:([]sec:`seclist$n?secs;price:n?100.0;volume:100*10+n?20;exchange:5+n?2.0;date:2004.01.01+n?499) / Intra day tick data table intraday:([]sec:`seclist$n?secs;price:n?100.0;volume:100*10+n?20;exchange:5+n?2.0;time:08:00:00.0+n?43200000) / Function with one input parameter / Return total trading volume for given security totalvolume:{[stock] select volume from trade where sec = stock} / Function with two input parameters / Return total trading volume for given security with volume greate than / given value totalvolume2:{[stock;minvolume] select sum(volume) from trade where sec = stock, volume > minvolume} Then q tradedata.q -p 5001 q)show trade sec price volume exchange date ---------------------------------------- ACME 89.5897 1300 6.58303 2005.04.26 ABC 4.346879 2000 5.957694 2004.03.08 DEF 2.486644 1000 5.304114 2004.03.18 ACME 42.26209 1600 5.31383 2004.03.14 DEF 67.79352 2500 5.954478 2004.04.21 DEF 85.56155 1300 6.462338 2004.03.15 ACME 52.65432 1800 5.240313 2005.02.05 ABC 22.43142 2700 5.088007 2005.03.13 ABC 58.26731 2100 5.220929 2004.09.10 XYZ 74.14568 2900 5.075229 2004.08.24 DEF 35.67741 1500 6.064387 2004.03.12 DEF 30.37496 1300 5.025874 2004.03.24 ABC 20.30781 1000 6.642873 2005.02.02 DEF 2.984627 1200 6.346634 2004.12.15 ACME 28.80098 2100 5.591732 2004.09.19 DEF 45.20084 2800 5.481197 2004.08.01 DEF 29.25037 1000 6.065474 2005.02.05 XYZ 50.68805 1700 6.901464 2004.11.02 DEF 41.79832 2300 6.016484 2005.05.04 XYZ 13.64856 2900 6.435824 2005.04.03 .. q) We then start a new MATLAB session. From here on, >> represents the MATLAB prompt. We’re now ready to open a connection to the q process: >> q = kx('localhost',5001) q = handle: [1x1 c] ipaddress: 'localhost' port: 5001 Credentials We can also pass a username:password string as the third parameter to the kx function if it is required to log in to the q process. The q value is a normal MATLAB object and we can inspect the listed properties. We’ll use this value in all our communications with the q process. We close a connection using the close function: >> close(q) Installation errors If there is a problem with either the installation of the q integration, or the jar file is not found, we’ll get an error along the lines of: ??? Undefined function or method 'c' for input arguments of type 'char'. Error in ==> kx.kx at 51 w.handle = c(ip,p); Or if the socket is not currently connected then any future communications will result in an error like: ??? Java exception occurred: java.net.SocketException: Socket closed at java.net.SocketOutputStream.socketWrite(Unknown Source) at java.net.SocketOutputStream.write(Unknown Source) at c.w(c.java:99) at c.k(c.java:107) at c.k(c.java:108) Error in ==> kx.fetch at 65 t = c.handle.k(varargin{1}); Using the kdb+ process¶ It is typical to perform basic interactions with a database using the fetch function via a connected handle. For example in a legacy database we might perform this: x = fetch(q,'select * from tablename') We can use this function to perform basic interaction with kdb+, where we expect a value to be returned. This need not be a query but in fact can be general chunks of code. Using q as a calculator, we can compute the average of 0 to 999. >> fetch(q,'avg til 1000') ans = 499.5000 Fetching data from kdb+¶ The fetch function can be used to get q data such as lists, as well as tables. 
Given the list c : q)c:((til 100);(til 100)) q)c 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 .. 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 .. Then we can fetch it: >> hundreds = fetch(q, 'c') hundreds = java.lang.Object[]: [100×1 int64] [100×1 int64] We can use the cell function to strip the Java array wrapper away: >> hundreds_as_cell = cell(hundreds) hundreds_as_cell = 2×1 cell array {100×1 int64} {100×1 int64} Tables are returned as an object with an array property for each column. Taking the first 10 rows of the trade table as an example: q)10#trade sec price volume exchange date ---------------------------------------- ACME 89.5897 1300 6.58303 2005.04.26 ABC 4.346879 2000 5.957694 2004.03.08 DEF 2.486644 1000 5.304114 2004.03.18 ACME 42.26209 1600 5.31383 2004.03.14 DEF 67.79352 2500 5.954478 2004.04.21 DEF 85.56155 1300 6.462338 2004.03.15 ACME 52.65432 1800 5.240313 2005.02.05 ABC 22.43142 2700 5.088007 2005.03.13 ABC 58.26731 2100 5.220929 2004.09.10 XYZ 74.14568 2900 5.075229 2004.08.24 Will be returned in MATLAB: >> ten = fetch(q, '10#trade') ten = sec: {10×1 cell} price: [10×1 double] volume: [10×1 int64] exchange: [10×1 double] date: [10×1 double] With suitable computation in q, we can return data suitable for immediate plotting. Here we compute a 10-item moving average over the `ACME prices: q)mavg[10;exec price from trade where sec=`ACME] 89.5897 65.9259 61.50204 53.32677 54.74408 57.39743 57.15958 62.33525 56.8732.. >> acme = fetch(q,'mavg[10;exec price from trade where sec=`ACME]') Metadata¶ The q integration in MATLAB provides the tables meta function. >> tables(q) ans = 'intraday' 'seclist' 'trade' The experienced q user can use the \v command to see all values in the directory: >> fetch(q,'\v') ans = 'a' 'b' 'c' 'intraday' 'n' 'seclist' 'secs' 'trade' Sending data to q¶ We can use the fetch function to cause side effects in the kdb+ process, such as inserting data into a table. Given a table b : q)b:([] a:1 2; b:1 2) q)b a b --- 1 1 2 2 Then we can add a row like this: >> fetch(q,'b,:(3;3)') ans = [] and, sure enough, on the q side we see the new data: q)show b a b --- 1 1 2 2 3 3 The q integration also provides an insert function: this takes an array of items in the row and may be more convenient for certain purposes. >> insert(q,'b',{4,4}) shows on the q side as: q)show b a b --- 1 1 2 2 3 3 4 4 A more complicated row shows the potential advantage to better effect: >> insert(q,'trade',{'`ACME',100.45,400,.0453,'2005.04.28'}) Be warned though, that errors will not be detected very well. For example the following expression silently fails! >> insert(q,'b',{1,2,3}) whereas the equivalent fetch call provokes an error: >> fetch(q,'b,:(1;2;3)') Error using fetch (line 64) Java exception occurred: kx.c$KException: length at kx.c.k(c.java:110) at kx.c.k(c.java:111) at kx.c.k(c.java:112) Async commands to q¶ The exec function is used for sending asynchronous commands to q; ones we do not expect a response to, and which may be performed in the background while we continue interacting with the MATLAB process. Here we establish a large-ish data structure in the kdb+ process: >> exec(q,'big_data:10000000?100') Then we take the average of the data, delete it from the namespace and close the connection: >> fetch(q,'avg big_data') ans = 49.4976 >> exec(q,'delete big_data from `.') >> close(q) Handling null¶ kdb+ has the ability to set values to null. 
MATLAB doesn’t have a corresponding null type, so if your data contains nulls you may wish to filter or detect them. MATLAB has the ability to call static methods within Java. The NULL method can provide the null values for the different data types. For example NullInt=kx.c.NULL('i') NullLong=kx.c.NULL('j') NullDouble=kx.c.NULL('f') NullDate=kx.c.NULL('d') With this, you can test values for null. The following shows that the comparison will return true when requesting null values from a kdb+ connection named conn: fetch(conn,'0Ni')== NullInt fetch(conn,'0N')== NullLong fetch(conn,'0Nd')== NullDate isequaln(fetch(conn,'0Ni'),NullInt) isequaln(fetch(conn,'0N'), NullLong) isequaln(fetch(conn,'0Nd'), NullDate) isequaln(fetch(conn,'0Nf'), NullDouble) An alternative is to have your query include a filter for nulls (if they are populated), so they aren’t retrieved by MATLAB. Getting more help¶ Start with help kx in your MATLAB session and also see help kx.fetch and so on for further details of the integration. MathWorks provides a functions overview, usage instructions and some examples on the toolbox webpage. Python client for kdb+¶ The PyKX interface exposes q as a domain-specific language (DSL) embedded within Python, and also permits IPC connectivity to kdb+ from Python applications. PyKX supports three principal use cases: - It allows users to store, query, manipulate and use q objects within a Python process. - It allows users to query external q processes via an IPC interface. - It allows users to embed Python functionality within a native q session using its ‘under q’ functionality. It is documented and available to download from https://code.kx.com/pykx. Q client for Bloomberg¶ Marshall Wace has kindly contributed a Linux-based Bloomberg Feed Handler, written by Sufian Al-Qasem & Attila Vrabecz, using the Bloomberg Open API. Design notes¶ Bloomberg uses an event-driven model whereby they push EVENT objects to consumers – SUMMARY, TRADE and QUOTE. The C code in bloomberg.c handles the connectivity to the Bloomberg appliance (hosted on the client’s site) and also does the conversion from an EVENT object to a dictionary (Bloomberg mnemonic <> Value pair) which is then processed on the q main thread via the following: Update:{@[value;x;-1"Update: '",string[x 0]," ",]} The Bloomberg API calls back on a separate thread and copies a pointer to that object onto a lock-free queue; eventfd is then used to create a K struct (a dictionary representation of the EVENT) on the q main thread and process it. A function is defined for every EVENT type (Authorize/SessionStarted/MarketDataEvent/etc …) which carries out the desired behavior in q. Tested with Bloomberg Open API 3.6.2.0 and 3.7.5.1. Uses http://www.liblfds.org

Frequently-asked questions from the k4 listbox¶ If you notice a question that is asked more than once on the k4 list, please feel free to add it here. Where can I find archives of the k4 list?¶ Archives are available to subscribers at the Topicbox. When you follow that link, you will be asked for your e-mail address and the mailing list name. Use k4 for the list name, and the e-mail address that you used to subscribe to the k4 list.
How to post test data on the k4 list?¶ Always post your test data in executable form. For example, q)foo:([]a:5?10;b:5?10;c:5?10) You can generate an executable form of your data using 0N! . q)0N!foo; +`a`b`c!(4 3 7 1 1;6 1 7 9 8;4 7 5 0 9) Note the use of ; to suppress the default display. If you use the latter form, prefix it with k) in your post, so that others can easily cut and paste it in their q session. q)k)+`a`b`c!(4 3 7 1 1;6 1 7 9 8;4 7 5 0 9) a b c ----- 4 6 4 3 1 7 7 7 5 1 9 0 1 8 9 What are the limits on the number of variables in q functions?¶ Reference: Lambdas What does 'error mean?¶ Basics: Errors Why does sg work with :: but not : ? Also why does {x.time} not work?¶ Locals and globals are different: locals don’t have symbols associated with them, so for example .Q.dpft (you would have to pass in the name of the table) or x.time does not work with them. As a workaround for the second issue one can always use `time$x though. How do I query a column of strings for multiple values?¶ If you wish to query a column of strings for a single value, either like or ~ (Match) with an iterator can be used q)e:([]c:("ams";"lon";"amS";"bar")) q)select from e where c ~\:"ams" c ----- "ams" q)select from e where c like "ams" c ----- "ams" q)select from e where c like "am*" c ----- "ams" "amS" To query for multiple strings you need to use another iterator, then aggregate the results into a single boolean value using sum or any . Generally the like form is easier to understand and more efficient. q)select from e where any c like/:("lon";"am*") c ----- "ams" "lon" "amS" How to kill a long/invalid query on a server?¶ You can achieve that by sending SIGINT to the server process. In a *nix shell, try $ kill -INT <pid> You can find the server process ID by examining .z.i . How do I recall and edit keyboard input?¶ Start q under rlwrap to get readline support, e.g. $ rlwrap l64/q -p 5001 This is available in most Linux repositories. An alternative to rlwrap is tecla’s enhance . This is good for vi-mode users who would like more of vi’s key functionality – e.g. d f x will delete everything up to the next x and you can paste it back, too.

Geospatial indexing¶ This demo shows the basics of geospatial indexing with q. A 1-million-point random data set is queried from the HTML map client. Click on the map to see nearby points. Download KxSystems/kdb/e/geo.zip and run: $ make -C s2 $ q q/geo.q $ open html/geo.html This should then open a browser, connect to the kdb+ process and retrieve geo.html . There are five text fields in the top row: - Last-click coordinates - Number of returned results - Min date* - Max date* - Lookup rectangle size* (degrees) Those marked with * are editable filters. When the mouse is clicked on the map, the underlying lat-lon coordinates are sent to the kdb+ process along with the filters over a websocket connection, and the points in the response are then plotted on the map. In addition to coordinates, kdb+ returns a trk column, which the client interprets as point colour. This uses the Google S2 library as a kdb+ shared object.
To create the index, the function ids[lats;lons] maps (lat-lon) coordinates on a sphere to one-dimensional cell IDs. These are stored as 32-bit integers with the `p attribute applied. q)geo time trk lat lon cid ---------------------------------------------------------------- 2016.09.26D00:40:05.783973634 3233 51.79961 0.1946887 1205375107 2016.09.26D01:12:53.469740152 3233 51.80003 0.1923668 1205375107 2016.09.26D01:40:23.427598178 3233 51.79994 0.192314 1205375107 2016.09.26D04:11:52.743414938 3233 51.79958 0.1950875 1205375107 2016.09.26D08:39:32.459766268 3233 51.80044 0.1923126 1205375107 .. q)meta geo c | t f a ----| ----- time| p trk | j lat | f lon | f cid | i p lu , defined in geo.q as {[x;y]select from pl rect . x where all(lat;lon;time)within'(x,enlist y)} retrieves points contained in the given spherical rectangle. lu takes the rectangle coordinates with a time filter, and calculates the coverage (ranges of cells covering the rectangle) with rect[(lat0;lat1);(lon0;lon1)] . The cell ID ranges are looked up with pl , defined in geo.q as {raze{select[x]lat,lon,trk,time from geo}each flip deltas geo.cid binr/:x} The result is then filtered to remove points outside the rectangle (since the covering might exceed the rectangle dimensions) and to constrain by time. The simple HTML interface is implemented with openstreetmap and leaflet. HTTP¶ HTTP server¶ kdb+ has an in-built webserver capable of handling HTTP/HTTPS requests. Listening port¶ When kdb+ is configured to listen on a port, it uses the same port as that serving kdb+ IPC and websocket connections. SSL/TLS¶ HTTPS can be handled once kdb+ has been configured to use SSL/TLS. Authentication / Authorization¶ Client requests can be authenticated/authorized using .z.ac. This allows kdb+ to be customized with a variety of mechanisms for securing HTTP requests e.g. LDAP, OAuth2, OpenID Connect, etc. Request handling¶ HTTP request handling is customized by using the following callbacks: Default .z.ph handling¶ The default implementation of .z.ph displays all variables and views. For example, starting kdb+ listening on port (q -p 8080 ) and visiting http://localhost:8080 from a web browser on the same machine, displays all created variables/views). Providing q code as a GET param causes it to be evaluated eg. http://localhost:8080?1+1 returns 2 . .h.HOME can be set to be the webserver root to serve files contained in the directory e.g. creating an HTML file index.html in directory /webserver/ and setting .h.HOME="/webserver" allows the file to be viewed via `http://localhost:8080/index.html'. An example of customizing the default webserver can be found in simongarland/doth Keep-alive¶ Persistent connections to supported clients can be enabled via .h.ka Compression¶ HTTP server supports gzip compression via Content-Encoding: gzip for responses to form?… -style requests. The response payload must be 2,000+ chars and the client must indicate support via Accept-Encoding: gzip in the HTTP header. (Since V4.0 2020.03.17.) HTTP client¶ Creating HTTP requests¶ kdb+ has helper methods that provide functionality as described in the linked reference material: - .Q.hg for performing a HTTP GET, where a query string can be sent in the URL - .Q.hp for performing a HTTP POST, where data transmitted is sent in the request body e.g. 
q)/ perform http post q).Q.hp["http://httpbin.org/post";.h.ty`txt]"my data" "{\n \"args\": {}, \n \"data\": \"my data\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept-Encoding\": \"gzip\", \n \"Content-Length\": \"7\", \n \"Content-Type\": \"text/plain\", \n \"Host\": \"httpbin.org\", \n \"X-Amzn-Trace-Id\": \"Root=1-665711e1-19e62fef6b6e4d192a9a7096\"\n }, \n \"json\": null, \n \"origin\": \"78.147.173.108\", \n \"url\": \"http://httpbin.org/post\"\n}\n" q)/ request gzipped data, which is unzipped & returned in json and formatted appropriately q).j.k .Q.hg "http://httpbin.org/gzip" gzipped| 1b headers| `Accept-Encoding`Host`X-Amzn-Trace-Id!("gzip";"httpbin.org";"Root=1-665710aa-50bd49d724b532913348a62a") method | "GET" origin | "78.147.173.108" In addition, kdb+ provides a low level HTTP request mechanism: `:http://host:port "string to send as HTTP request" which returns the HTTP response as a string. An HTTP request generally consists of: - a request line (URL, method, protocol version), terminated by a carriage return and line feed, - zero or more header fields (field name, colon, field value), terminated by a carriage return and line feed - an empty line (consisting of a carriage return and a line feed) - an optional message body e.g. q)/ perform HTTP DELETE q)`:http://httpbin.org "DELETE /anything HTTP/1.1\r\nConnection: close\r\nHost: httpbin.org\r\n\r\n" "HTTP/1.1 200 OK\r\ndate: Wed, 29 May 2024 12:23:54 GMT\r\ncontent-type: application/json\r\ncontent-length: 290\r\nconnection: close\r\nserver: gunicorn/19.9.0\r\naccess-control-allow-origin: *\r\naccess-control-allow-credentials: true\r\n\r\n{\n \"args\": {},... q)postdata:"hello" q)/ perform HTTP POST (inc Content-length to denote the payload size) q)`:http://httpbin.org "POST /anything HTTP/1.1\r\nConnection: close\r\nHost: httpbin.org\r\nContent-length: ",(string count postdata),"\r\n\r\n",postdata "HTTP/1.1 200 OK\r\ndate: Wed, 29 May 2024 13:08:41 GMT\r\ncontent-type: application/json\r\ncontent-length: 321\r\nconnection: close\r\nserver: gunicorn/19.9.0\r\naccess-control-allow-origin: *\r\naccess-control-allow-credentials: true\r\n\r\n{\n \"args\": {}, \n \"data\": \"hello\"... An HTTP response typically consists of: - a status line (protocol version, status code, reason), terminated by a carriage return and line feed - zero or more header fields (field name, colon, field value), terminated by a carriage return and line feed - an empty line (consisting of a carriage return and a line feed) - an optional message body e.g. q)/ x will be complete HTTP response q)x:`:http://httpbin.org "DELETE /delete HTTP/1.1\r\nConnection: close\r\nHost: httpbin.org\r\n\r\n" q)/ separate body from headers, get body q)@["\r\n\r\n" vs x;1] "{\n \"args\": {}, \n \"data\": \"\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Host\": \"httpbin.org\", \n \"X-Amzn-Trace-Id\": \"Root=1-66572924-7396cee34f268fcd406e94d5\"\n }, \n \"json\": null, \n \"origin\": \"78.147.173.108\", \n \"url\": \"http://httpbin.org/delete\"\n}\n" If a server uses chunked transfer encoding, the response is constructed from the chunks prior to returning (since V3.3 2014.07.31). SSL/TLS¶ To use SSL/TLS, kdb+ should first be configured to use SSL/TLS. For any request requiring SSL/TLS, replace http with https . HTTP/HTML markup¶ The .h namespace provides a range of markup and HTTP protocol formatting tools. 
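As a brief illustration (a sketch, not taken from the original page), the markup helpers can be combined with the JSON serializer .j.j when answering requests, for example inside a custom .z.ph handler: .h.htc wraps content in an HTML tag and .h.hy builds a complete HTTP response for a given content type.

q).h.htc[`b;"hello"]   / wrap content in an HTML tag
"<b>hello</b>"
q)/ build a full HTTP response (status line, headers, body) carrying a JSON payload
q).h.hy[`json] .j.j enlist[`status]!enlist "ok"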
Q for Mortals §11.7.1 HTTP Connections inetd, xinetd¶ On *nix-like operating systems, inetd (or its successor xinetd ) maintains a list of passive sockets for various services configured to run on that particular machine. When a client attempts to connect to one of the services, inetd will start a program to handle the connection based on the configuration files. This way, inetd will run the server programs as they are needed by spawning multiple processes to service multiple network connections. A kdb+ server can work under inetd to provide a private server for each connection established on a designated port. For Windows you might be able to have kdb+ run under inetd using Cygwin. Configuration¶ To configure a kdb+ server to work under inetd or xinetd you have to decide on the name of the service and the port on which this server should run and declare it in the /etc/services configuration file. Note This operation can be performed only by an administrative user (root). /etc/services : … # Local services kdbtaq 2015/tcp # kdb server for the taq database … If you have multiple databases which should be served over inetd , add multiple entries in the /etc/services file and make sure you are using different ports for each service name. Also, as a safety measure, create one applicative group (e.g. kdb ) and two applicative users on your system, one (e.g. kdb ) owning the q programs and the databases and another one (e.g. kdbuser ) having the rights to execute and read data from the database directories. This can be achieved by assigning the two users to the applicative group mentioned above and setting the permissions on the programs to be readable and executable by the group, and the database directories readable and executable (search) by the group: rwxr-x--- . Once this is configured, you’ll need to configure inetd /xinetd to make it aware of the new service. If you are running inetd , you’ll need to add the service configuration into /etc/inetd.conf (see the inetd.conf man page for more details). /etc/inetd.conf : … kdbtaq stream tcp nowait kdbuser /home/kdb/q/l64/q q /home/kdb/taq -s 4 … For xinetd , you’ll need to create a configuration file (kdbtaq for example) for the new service in the /etc/xinetd.d directory (see the xinetd.conf man page for more details). /etc/xinetd.d/kdbtaq : # default: on service kdbtaq { flags = REUSE socket_type = stream wait = no user = kdbuser env = QHOME=/home/kdb/q QLIC=/home/kdb/q server = /home/kdb/q/l64/q server_args = /home/kdb/taq -s 4 -q -g 1 # use taskset to conform to license # server = /bin/taskset # server_args = -c 0,1 /home/kdb/q/l64/q -q -g 1 # only_from = 127.0.0.1 localhost # bind = 127.0.0.1 # instances = 5 # per_source = 2 } After the configuration is finished, you will have to find the process ID of your inetd /xinetd server and send it the SIGHUP signal to read the new configuration: $ ps -e|grep inetd 3848 ?
00:00:00 xinetd $ kill -HUP 3848 \1 and \2 for stdout/stderr redirect

Reference architecture for Azure¶ Lift and shift your kdb+ plants to the cloud and leverage virtual machines (VM) with storage kdb Insights provides a range of tools to build, manage and deploy kdb+ applications in the cloud. kdb Insights supports: - interfaces for deployment and common ‘DevOps’ orchestration tools such as Docker, Kubernetes, Helm, and others. - integrations with major cloud logging services.
For financial use cases, data may be ingested from B-pipe (Bloomberg), or Elektron (Refinitiv) or any exchange that provides a data API. Often the streaming data is available on a pub-sub component such as Kafka, or Solace, which are popular for having an open-source interface to kdb+. The datafeeds are in a proprietary format, but always one with which KX has familiarity. Usually this means a feed handler just needs to be aware of the specific data format. Due to the flexible architecture of KX, most underlying kdb+ processes that constitute the system can be placed anywhere in this architecture. For example, for latency, compliance or other reasons, the datafeeds might be relayed through your on-premises data center. Alternatively, the connection from the feedhandlers might be made directly from the Azure Virtual Network (VNet) into the market-data source. The kdb+ infrastructure is often used to store internally derived data. This can optimize internal data flow and help remove latency bottlenecks. The pricing of liquid products, for example on B2B markets, is often calculated by a complex distributed system. This system often changes due to new models, new markets or other internal system changes. Data in kdb+ that is generated by these internal steps also require processing and handling huge amounts of timeseries data. When all the internal components of these systems send data to kdb+, a comprehensive impact analysis captures any changes. Feedhandler¶ A feedhandler is a process that captures external data and translates it into kdb+ messages. Multiple feedhandlers can be used to gather data from several different sources and feed it to the kdb+ system for storage and analysis. There are a number of open-source (Apache 2 licensed) Fusion interfaces between KX and other third-party technologies. Feedhandlers are typically written in Java, Python, C++ and q. Tickerplant¶ A tickerplant (TP) is a specialized, single-threaded kdb+ process that operates as a link between the client’s data feed and a number of subscribers. It implements a pub-sub pattern,specifically, it receives data from the feedhandler, stores it locally in a table then saves it to a log file. It publishes this data to a realtime database (RDB) and any clients who have subscribed to it. It then purges its local tables of data. Tickerplants can operate in two modes: | mode | operation | |---|---| | batch | Collects updates in its local tables, batches up for a period of time and then forwards the update to realtime subscribers in a bulk update. | | realtime (zero latency) | Forwards the input immediately. This requires smaller local tables but has higher CPU and network costs. Each message has a fixed network overhead. | API calls: | call | operation | |---|---| | subscribe | Add subscriber to message receipt list and send subscriber table definitions. | | unsubscribe | Remove subscriber from message receipt list. | End of Day event: at midnight, the TP closes its log files, auto creates a new file, and notifies the realtime database about the start of the new day. Realtime database¶ The realtime database holds all the intraday data in memory to allow for fast powerful queries. For example, at the start of the business day, the RDB sends a message to the tickerplant and receives a reply containing the data schema, the location of the log file, and the number of lines to read from the log file. It then receives subsequent updates from the tickerplant as they are published. 
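The start-of-day handshake and log replay described above can be sketched using the conventions of the standard kdb+tick scripts (u.q / r.q); the hostname and port below are placeholders, and a production RDB wraps this in more careful recovery logic.

h:hopen`:tickerplant-host:5010     / connect to the TP (placeholder host:port)
upd:insert                         / incoming updates are inserted into local tables
res:h"(.u.sub[`;`];`.u `i`L)"      / subscribe to all tables/syms; also get (msg count;log file)
(.[;();:;].)each res 0             / define the empty table schemas returned by .u.sub
-11! res 1                         / replay (count;logfile) from the TP log through upd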
One of the key design choices for Microsoft Azure is the size of memory for this instance, as ideally we need to contain the entire business day/period of data in-memory. Purpose: - Subscribed to the messages from the tickerplant - Stores (in-memory) the messages received - Allows this data to be queried intraday Actions: - On message receipt: insert into local, in-memory tables. - End of Day receipt: usually writes intraday data down then sends a new End-of-Day message to the HDB. Optionally RDB sorts certain tables (for example, by sym and time) to speed up queries. An RDB can operate in single or multi-input mode. The default mode is single input, in which user queries are served sequentially and queries are queued until an update from the TP is processed (inserted into the local table). In standard tick scripts, the RDB tables are indexed (using hash tables), typically by the product identifier. Indexing has a significant impact on query speed, resulting in slower data ingestion. The insert function takes care of the indexing; during an update it also updates the hash table. The performance of the CPU and memory in the chosen Azure VM instance impacts the rate at which data is ingested and the time taken to execute data queries. Historical database¶ The historical database (HDB) is a simple kdb+ process with a pointer to the persisted data directory. A kdb+ process can read this data and memory map it, allowing for fast queries across a large volume of data. Typically, the RDB is instructed by the TP to save its data to the data directory at EOD. The HDB can then refresh its memory from the data directory mappings. HDB data is partitioned by date in the standard TP. If multiple disks are attached to the box, then data can be segmented and kdb+ makes use of parallel I/O operations. Segmented HDB requires a par.txt file that contains the locations of the individual segments. An HDB query is processed by multiple threads, and map-reduce is applied if multiple partitions are involved in the query. Purpose: - Provides a queryable data store of historical data - In instances involving research and development or data analytics, customers can create reports on order execution times Actions: - End of Day receipt - Reload the database to get the new day’s worth of data from the RDB write down. HDBs are often expected to be mirrored locally. If performance is critical, some users, (for example, quants) need a subset of the data for heavy analysis and backtesting. KX Gateway¶ In production, a kdb+ system may be accessing multiple timeseries datasets, usually each one representing a different market data source, or using the same data, refactored for different schemas. All core components of a kdb+tick can handle multiple tables. However, you can introduce multiple TPs, RDBs and HDBs based on your fault-tolerance requirements. This can result in a large number of kdb+ components and a high infrastructure segregation. A KX gateway generally acts as a single point of contact for a client. A gateway collects data from the underlying services, combines datasets and may perform further data operations (for example, aggregation, joins, pivoting, and so on) before it sends the result back to the user. The specific design of a gateway can vary in several ways according to expected use cases. For example, in a hot-hot setup, the gateway can be used to query services across availability zones. 
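A very small sketch of that routing idea is shown below; the hostnames, ports, table name and function name are all assumptions rather than part of the reference architecture, and a production gateway would add entitlement checks and asynchronous dispatch.

rdb:hopen`:rdb-host:5011    / placeholder RDB connection
hdb:hopen`:hdb-host:5012    / placeholder HDB connection
getTrade:{[s;sd;ed]
  hist:hdb({[s;sd;ed]select from trade where date within(sd;ed),sym=s};s;sd;ed);  / historical dates from the HDB
  live:$[ed<.z.d;0#hist;rdb({[s]select from trade where sym=s};s)];               / current day from the RDB
  hist uj live}                                                                   / combine before returning
q)getTrade[`BTC_USD;2019.05.09;.z.d]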
The implementation of a gateway is largely determined by the following factors: - Number of clients or users - Number of services and sites - Requirement for data aggregation - Support of free-form queries - Level of redundancy and failover The task of the gateway is to: - Check user entitlements and data-access permissions - Provide access to stored procedures, utility functions and business logic - Gain access to data in the required services (TP, RDB, HDB) - Provide the best possible service and query performance The KX Gateway must be accessible through Azure security rules from all clients of the kdb+ service. In addition, the location of the gateway service also needs to have visibility of the remaining kdb+ processes constituting the full KX service. Storage and filesystem¶ kdb+tick architecture needs storage space for three types of data: - TP log - If the TP needs to handle many updates, then writing to it needs to be fast since slow I/O may delay updates and can even cause data loss. Optionally, you can write updates to the TP log in batches, for example, every second as opposed to in real time. Data loss occurs if the TP process or the Azure VM instance is halted unexpectedly, or stops or restarts, as the most recently received updates are not persisted. The extra second of potential data loss from batching is probably marginal compared to the whole outage window. If the RDB process goes down, then it can replay data to recover from the TP log. The faster it can recover, the less data is waiting in the TP output queue to be processed by the restarted RDB. Hence, a fast read operation is critical for resilience reasons. Using Azure Premium SSD Managed Disk or Ultra disk, or a subsection of an existing Lustre filesystem on Azure is a recommended solution. Managed disks are more resilient and would still contain the data despite any Azure VM restart or loss. - Sym file (and par.txt for segmented databases) - The sym file is written by the realtime database after end-of-day, when new data is appended to the historical database. The HDB processes read the sym file to reload new data. Time to read and write the sym file is often marginal compared to other I/O operations. It is beneficial to be able to write down to a shared filesystem, thereby adding huge flexibility in the Azure Virtual Network (VNet). Any other Azure VM instance can assume this responsibility in a stateless fashion. - HDB data - Performance of the filesystem solution determines the speed and operational latency for kdb+ to read its historical (at rest) data. The solution needs to be designed to cater for good query execution times for the most important business queries. These may splay across many partitions or segments of data or may deeply query on few or single partitions of data. The time to write a new partition impacts RDB EOD work. For systems that are queried around the clock the RDB write time needs to be very short. One advantage of storing your HDB within the Azure ecosystem is the flexibility of storage. This is usually distinct from “on-prem” storage, whereby you may start at one level of storage capacity and grow the solution to allow for dynamic capacity growth. One huge advantage of most Azure storage solutions is that permanent disks can grow dynamically without the need to halt instances; this allows you to change resources dynamically. For example, start with small disk capacity and grow capacity over time. The reference architecture recommends replicating data.
This can either be tiered out to lower cost or lower performance object storage in Azure or the data can be replicated across availability zones. The latter may be useful if there is client-side disconnection from other time zones. You may consider failover of service from Europe to North America, or vice-versa. kdb+ uses POSIX filesystem semantics to manage HDB structure directly on a POSIX-style file system stored in persistent storage, for example Azure Disk Storage. There are many solutions that offer full operational functionality for the POSIX interface. Azure Blob Storage¶ Azure Blob Storage is an object store that scales to exabytes of data. There are different storage classes (Premium, Hot, Cool, Archive) for different availability. Infrequently used data can use cheaper but slower storage. The kdb Insights native object store functionality allows users to read HDB data from Azure Blob object storage. The HDB par.txt file can have segment locations that are on Azure Blob object storage. In this pattern, the HDB can reside entirely on Azure Blob storage or spread across Azure Disks, Azure Files or Azure Blob Storage as required. There is a relatively high latency when using Azure Blob cloud storage compared to local storage, such as Azure Disks. The performance of kdb+ when working with Azure Blob Storage can be improved by taking advantage of the caching feature of the kdb+ native objectstore. The results of requests to Azure Blob Storage can be cached on a local high-performance disk thus increasing performance. The cache directory is continuously monitored and a size limit is maintained by deleting files according to a LRU (least recently used) algorithm. Caching coupled with enabling secondary threads can increase the performance of queries against a HDB on Azure Blob Storage. The larger the number of secondary threads, irrespective of CPU core count, the better the performance of kdb+ object storage. Conversely, the performance of cached data appears to be better if the secondary-thread count matches the CPU core count. It is recommended to use compression on the HDB data residing on Azure Blob Storage. This can reduce the cost of object storage and possible egress costs, and also counteract the relatively high-latency and low bandwidth associated with Azure Blob Storage. Furthermore, Azure Blob Storage is useful for archiving, tiering, and backup purposes. The TP log file and the sym can be stored each day and archived for a period of time. The lifecycle management of the object store simplifies clean-up, whereby one can set the expiration time on any file. The versioning feature of Azure Blob Storage is particularly useful when a sym file bloat happens due to feed misconfiguration or upstream change. Migrating back to a previous version saves the health of the whole database. Azure Blob Storage provides strong read-after-write consistency. After a successful write or update of an object, any subsequent read request immediately receives the latest version of the object. Azure Blob Storage also provides strong consistency for list operations, so after a write, you can immediately perform a listing of the objects in a bucket with all changes reflected. This is especially useful when there are many kdb+ processes reading from Azure Blob Storage, as it ensures consistency. A kdb+ feed can subscribe to a Azure Blob Storage file update that the upstream drops into a bucket and can start its processing immediately. 
The data is available earlier compared to the solution when the feed is started periodically, for example, every hour. Azure Disk Storage¶ Azure Disk Storage can be used to store HDB and Tickerplant data, and is fully compliant with kdb+. It supports all the POSIX semantics required. Azure Ultra Disk volumes offers increased performance of 300 IOPS/GiB, up to a maximum of 160 K IOPS per disk and more durability, reducing the possibility of a storage volume failure. Azure Files¶ Azure Files over NFS offers NFS service for nodes in the same availability zone, and can run across zones, or can be exposed externally. Azure Files can be used to store HDB and tickerplant data and is fully compliant with kdb+. Microsoft plan to release to general availability shortly. Lustre FS¶ Lustre FS is POSIX compliant and built on Lustre, a popular open-source parallel filesystem that provides scale-out performance that increases linearly with a filesystem’s size. Lustre filesystems scale to hundreds of GB/s of throughput and millions of IOPS. It also supports concurrent access to the same file or directory from thousands of compute instances and provides consistent, sub-millisecond latencies for file operations, making it especially suitable for storing and retrieving HDB data. A Lustre FS persistent file system provides highly available and durable storage for kdb+ workloads. The file servers in a persistent file system are highly available and data is automatically replicated within the same Availability Zone. Memory¶ The TP uses very little memory during normal operation in realtime mode, while a full record of intraday data is maintained in the realtime database. Abnormal operation occurs if a realtime subscriber (including RDB) is unable to process the updates. TP stores these updates in the output queue associated with the subscriber. Large output queue needs a large memory. TP may exceed memory limits and exit in extreme cases. Also, TP in batch mode needs to store data. This also increases memory need. Consequently, the memory requirement of the TP box depends on the setup of the subscribers and the availability requirements of the tick system. The main consideration for an instance hosting the RDB is to use a memory optimized VM instance such as the Standard_E16s_v5 (with 128 GB memory), or Standard_E32s_v5 (256 GB memory). Azure also offers VM with extremely large memory, S896oom (BareMetal) , with 36TiB of memory, for clients who need to store large amounts of high-frequency data in memory, in the RDB, or to keep more than one partition of data in the RDB form. There is a tradeoff however, of large memory and RDB recovery time. The larger the tables, the longer it takes for the RDB to start from TP log. To alleviate this problem, you can split a large RDB into two. The rule for separating the tables into two clusters is the join operation between them. If two tables are never joined, then they can be placed into separate RDBs. It is recommended that HDB boxes have large memories. User queries may require large temporal space for complex queries. Query execution times are often dominated by IO cost to get the raw data. OS-level caching stores frequently used data. The larger the memory, the less cache miss and the faster the queries run. CPU¶ The CPU load generated by the TP depends on the number of publishers and their verbosity (number of updates per second), and the number of subscribers. Subscribers may subscribe to partial data, but any filtering applied consumes further CPU cycles. 
The CPU requirement of the realtime database comes from:

- appending updates to local tables
- user queries

Local table updates are very efficient, especially if the TP sends batch updates; a faster CPU results in faster ingestion and lower latency. User queries are often CPU-intensive: they perform aggregations and joins and call expensive functions. If the RDB is started in multithreaded input mode (with a negative port number) then user queries are executed in parallel. Furthermore, kdb+ 4.0 supports multithreading in most primitives, including sum, avg, dev, etc.; if the RDB process is heavily used and hit by many queries, it is also worth starting it with secondary threads via the -s command-line option (see the start-up sketch at the end of this section). VMs with many cores are recommended for RDB processes serving large numbers of user queries. If the infrastructure is sensitive to the RDB end-of-day work, then powerful CPUs are recommended: sorting tables before splaying is a CPU-intensive task.

Historical databases are used for user queries. In many cases IO dominates execution times, but if the box has large memory and OS-level caching reduces IO operations effectively, then CPU performance directly impacts execution times. Azure VM instances optimized for HPC applications, such as the HBv4-series (Standard_HB120rs_v4 with 120 AMD EPYC vCPUs), are recommended for the CPU-bound services described in the use cases above.

Locality, latency and resiliency¶

The standard on-premises tick set-up requires the components to be placed on the same server: the tickerplant and realtime database are linked via the TP log file, and the RDB and historical database are bound together by the RDB's end-of-day splaying. Customized tickerplants relax this constraint in order to improve resilience. One motivation could be to avoid HDB queries impacting data capture in the TP. You can set up an HDB writer on the HDB box; the RDB then sends its tables over IPC at midnight and delegates the IO work, together with the sorting and attribute handling. It is also recommended that the feedhandlers are placed outside the TP box, on another VM between the TP and the data feed; this minimises the impact on TP stability if a feedhandler malfunctions.

Placement groups¶

The kdb+ tick architecture can be set up with placement groups, depending on the use case. A Proximity Placement Group is a configuration option that Azure offers which lets you control how a group of interdependent instances is placed across the underlying hardware: the instances can be packed close together, spread across different racks, or spread across different Availability Zones.

Cluster placement group¶

The cluster placement group configuration lets you place your group of interrelated instances close together to achieve the best possible throughput and lowest latency. This option only lets you pack the instances together inside the same Availability Zone, either in the same Virtual Network (VNet) or between peered VNets.

Spread placement groups¶

With spread placement groups, each instance runs on a separate physical hardware rack. So, if you deploy five instances and put them into this type of placement group, each of those five instances resides on a different rack with its own network access and power, either within a single AZ or in a multi-AZ architecture.
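To make the threading options discussed above concrete, a hypothetical pair of start-up commands is sketched below. The script path, database directory, host names, ports and thread counts are illustrative only; r.q is the standard tick RDB script, taking the tickerplant and HDB addresses as arguments.

# RDB: a negative port number enables multithreaded input mode, so client queries run in parallel
q tick/r.q localhost:5010 localhost:5012 -p -5011
# HDB: secondary threads let multithreaded primitives and peach use the available cores
q /data/hdb -p 5012 -s 16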
Recovery-time and recovery-point objectives¶

A disaster-recovery plan is usually based on requirements from both the Recovery Time Objective and Recovery Point Objective specifications, which can guide the design of a cost-effective solution. However, every system has its own unique requirements and challenges. Here we suggest best-practice methods for dealing with the various possible failures one needs to be aware of, and plan for, when building a kdb+ tick-based system. Whatever combination of failover operations is designed, the end goal is always to maintain availability of the application and minimize any disruption to the business.

In a production environment some level of redundancy is always required. Requirements vary by use case, but in nearly all instances requiring high availability the best option is a hot-hot (or active-active) configuration. There are four main configurations found in production: hot-hot, hot-warm, hot-cold, and pilot-light (or cold hot-warm).

| Term | Description |
|---|---|
| Hot-hot | Describes an identical mirrored secondary system running, separate to the primary system, capturing and storing data but also serving client queries. In a system with a secondary server available, hot-hot is the typical configuration as it is sensible to use all available hardware to maximize operational performance. The KX gateway handles client requests across availability zones and collects data from several underlying services, combining data sets and, if necessary, performing an aggregation operation before returning the result to the client. |
| Hot-warm | The secondary system captures data but does not serve queries. In the event of a failover, the KX gateway reroutes client queries to the secondary (warm) system. |
| Hot-cold | The secondary system has a complete backup or copy of the primary system at some previous point in time (recall that kdb+ databases are a series of operating-system files and directories) with no live processes running. A failover in this scenario involves restoring from this latest backup, with the understanding that there may be some data loss between the time of failover and the time the latest backup was made. |
| Pilot light (or cold hot-warm) | The secondary is on standby and the entire system can quickly be started to allow recovery in a shorter time period than a hot-cold configuration. |

Typically, kdb+ is deployed in a high-value system, so downtime can impact the business, which justifies the hot-hot setup to ensure high availability. Usually the secondary system runs on completely separate infrastructure, with a separate filesystem, and saves the data to a secondary database directory, separate from the primary system. In this way, if the primary system or its underlying infrastructure goes offline, the secondary system is able to take over completely. The usual strategy for failover is to have a complete mirror of the production system (feed handler, tickerplant, and realtime subscriber); when any critical process goes down, the secondary is able to take over. Switching from production to disaster-recovery systems can be implemented seamlessly using kdb+ interprocess communication.

Disaster-recovery planning for kdb+ tick systems
Data recovery for kdb+ tick

Network¶

Network bandwidth needs to be considered if the TP components are not located on the same VM. The network bandwidth between Azure VMs depends on the type of the VMs.
For example, a VM of type Standard_D8as_v4 has an expected network bandwidth of 3.125 Gbps, while a larger instance such as Standard_D32as_v4 can sustain 12.5 Gbps. For a given update frequency you can calculate the required bandwidth by employing the -22! internal function, which returns the length of the IPC byte representation of its argument. The TP copes with large amounts of data if batch updates are sent; make sure that the network is not your bottleneck in processing the updates.

Azure Load Balancer¶

Azure Load Balancer is a load-balancing service from Azure. It is used for ultra-high performance, TLS offloading at scale, centralized certificate deployment, support for UDP, and static IP addresses for your application. Operating at the connection level, network load balancers are capable of securely handling millions of requests per second while maintaining ultra-low latencies.

Load balancers can distribute load among applications that offer the same service. kdb+ is single-threaded by default, so the recommended approach is to use a pool of HDB processes. Distributing the queries can be done either by the gateway using async calls or by a load balancer. If the gateways send sync queries to the HDB load balancer, then a gateway load balancer is also recommended to avoid query contention in the gateway. Furthermore, other tick components also benefit from load balancers to better handle simultaneous requests.

Adding a load balancer on top of a historical database (HDB) pool requires only three steps:

- Create a Network Load Balancer with protocol TCP. Set the name, Availability Zone, Target Group name and Security group. The security group needs an inbound rule to the HDB port.
- Create a launch template. A key part is the User Data window where you can type a startup script: it mounts the volume that contains the HDB data and the q interpreter, sets environment variables (e.g. QHOME) and starts the HDB. The HDB accepts incoming TCP connections from the Load Balancer, so you must set up an inbound firewall rule using a Security Group. You can also leverage a Custom Image that you already created from an existing Azure VM.
- Create an Azure Virtual Machine scale set (a set of virtual machines) with autoscale rules to better handle peak loads. You can set the recently created instance group as a Target Group.

All clients access the HDB pool using the Load Balancer's DNS name (together with the HDB port) and the load balancer distributes the requests among the HDB servers seamlessly.

General TCP load balancers with an HDB pool offer better performance than a stand-alone HDB; however, utilization of the underlying HDBs is not optimal. Consider three clients C1, C2, C3 and two servers HDB1 and HDB2. C1 is directed to HDB1 when establishing the TCP connection, C2 to HDB2 and C3 to HDB1 again. If C1 and C3 send heavy queries and C2 sends a few lightweight queries, then HDB1 is overloaded while HDB2 is idle. To improve the load distribution, the load balancer would need to look beneath the TCP layer and understand the kdb+ protocol.

Logging¶

Azure provides a fully managed logging service that performs at scale and can ingest application and system log data. Azure Monitor allows you to view, search and analyze system logs, and provides an easy-to-use, customizable interface so that, for example, DevOps teams can quickly troubleshoot applications. Azure Monitor Logs enables you to see all of your logs, regardless of their source, as a single and consistent flow of events ordered by time.
Events are organized into log streams and each stream is part of a log group; related applications typically belong to the same log group. You don't need to modify your tick scripts to enjoy the benefits of Azure Monitor: the Azure Monitor agent can be installed and configured to forward your application logs to Azure Monitor. Azure policies and policy initiatives can be used to install the agent automatically and associate it with a data-collection rule every time you create a virtual machine. In the host configuration file you provide the log file to watch and the log stream to which new entries should be sent.

Almost all kdb+ tick components can benefit from cloud logging. Feedhandlers log the arrival of new data, and data and connection issues. The TP logs new or disappearing publishers and subscribers, and can log when an output queue grows above a threshold. The RDB logs all steps of the end-of-day process, which includes sorting and splaying all tables. The HDB and gateway can log every user query.

kdb+ users often prefer to save log messages in kdb+ tables. Tables that are unlikely to change are specified by a schema, while entries that require more flexibility use key-value columns. Log tables are ingested by a logging tickerplant, and these Ops tables are kept separate from the tables required for the business. One benefit of storing log messages in kdb+ is the ability to process them with qSQL, including the timeseries join functions as-of join and window join. For example, gateway functions are executed hundreds of times during the day; each gateway query executes RDB and HDB queries, often via a load balancer, and all of these components write their own log entries. You can simply employ window join to find the relevant entries and perform aggregations to gain insight into the performance characteristics of the execution chain (a small sketch follows at the end of this section). Please note that you can log both to kdb+ and to Azure Monitor.

kdb Insights QLog provides kdb+ cloud-logging functionality. QLog supports multiple endpoint types through a simple interface and provides the ability to write to them concurrently. The logging endpoints in QLog are encoded as URLs with two main types: file descriptors and REST endpoints. The file-descriptor endpoints supported are:

:fd://stdout
:fd://stderr
:fd:///path/to/file.log

REST endpoints are encoded as standard HTTP/S URLs such as: https://<CustomerId>.ods.opinsights.azure.com/api/logs?api-version=2016-04-01

QLog generates structured, formatted log messages tagged with a severity level and component name. Routing rules can also be configured to suppress or route messages based on these tags. Existing q libraries that implement their own formatting can still use QLog via the base APIs; this lets them do their own formatting but still take advantage of the QLog-supported endpoints. Integration with cloud logging providers can easily be achieved using logging agents, which can be set up alongside running containers or virtual machines to capture their output and forward it to logging endpoints such as Azure Monitor.

Azure Monitor supports monitoring, alerting and dashboards. It is simple to create a metric filter based on a pattern and set an alarm (for example, sending an email) if a certain criterion holds. You may also wish to integrate your KX monitoring for kdb+ components into this cloud logging and monitoring framework. This gives you insight into the performance, uptime and overall health of the applications and the server pool, and you can visualize trends using dashboards.
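A minimal sketch of that window-join analysis, with hypothetical Ops tables and column names, might look like the following: for each gateway query we total the HDB work logged in the minute that follows it.

/ hypothetical log tables: one row per gateway query, one row per HDB call
gwlog:([] time:2025.01.15D09:00:00 2025.01.15D09:05:00 2025.01.15D09:10:00; qid:1 2 3; user:`alice`bob`carol)
hdblog:([] time:2025.01.15D09:00:01 2025.01.15D09:00:04 2025.01.15D09:05:02 2025.01.15D09:10:03; qid:1 1 2 3; ms:120 340 80 45)
/ one window per gateway query: [query time, query time + 1 minute]
w:(gwlog`time;gwlog[`time]+0D00:01:00)
/ total the HDB milliseconds logged inside each window
wj[w;`time;gwlog;(hdblog;(sum;`ms))]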
Interacting with Azure services¶

You can use Azure services through the web console, but you may also need to interact with them from a q process. The following demonstration shows how to get the list of Virtual Machines for a specific Azure tenant from a q process using either:

- a system call to the Azure CLI
- embedPy and the Azure SDK for Python
- the Kurl REST API client

Azure CLI¶

A q process can run shell commands by calling the function system. The example below shows how we can get the list of virtual machines; we assume the Azure CLI is installed on the machine running the script.

(38env) stran@amon:~$ q
KDB+ 4.0 2020.07.15 Copyright (C) 1993-2020 Kx Systems
l64/ 4(16)core 7959MB stran amon 127.0.1.1 EXPIRE ..

q) system "az vm list --output table"
"Name ResourceGroup Location Zones"
"--------------- ---------------------- ---------- -------"
"staging-bastion STAGING-RESOURCE-GROUP westeurope"
"tempVM STAGING-RESOURCE-GROUP westeurope"
"windowsvm WINDOWSVM_GROUP westeurope"

This example shows how the Azure CLI can be driven from q using the system function. Unfortunately, this approach requires string manipulation, so it is not always convenient.

EmbedPy¶

Azure provides a Python client library to interact with Azure services. Using embedPy, a q process can load a Python environment and easily query the list of virtual machines for a given Azure tenant.

(38env) stran@amon:~/development/Azure$ q
KDB+ 4.0 2020.07.15 Copyright (C) 1993-2020 Kx Systems
l64/ 4(16)core 7959MB stran amon 127.0.1.1 EXPIRE ..

q)\l p.q
q)p)from azure.identity import ClientSecretCredential
q)p)from azure.mgmt.compute import ComputeManagementClient
q)p)Subscription_Id = "xxxxxxxx"
q)p)Tenant_Id = "xxxxxxxxx"
q)p)Client_Id = "xxxxxxxxx"
q)p)Secret = "xxxxxxxxx"
q)p)credential = ClientSecretCredential(tenant_id=Tenant_Id, client_id=Client_Id, client_secret=Secret)
q)p)compute_client = ComputeManagementClient(credential, Subscription_Id)
q)p)for vm in compute_client.virtual_machines.list_all(): print(vm.name)
staging-bastion
tempVM
windowsvm
q)

Kurl REST API¶

Finally, you can send HTTP requests to the Azure REST API endpoints. KX Insights provides a native q REST API called Kurl. Kurl provides ease-of-use cloud integration by registering Azure authentication information. When running on a cloud instance, and a role is available, Kurl discovers and registers the instance metadata credentials. When running outside the cloud, OAuth2, ENV, and file-based credential methods are supported. Kurl takes care of your credentials and properly formats the requests. In the code below we call the Azure Resource Manager REST API to pull the list of VMs for a specific tenant; the example uses a simple Bearer token for authorization.

(38env) stran@amon:~$ q
KDB+ 4.1t 2021.07.12 Copyright (C) 1993-2021 Kx Systems
l64/ 4(16)core 7959MB stran amon 127.0.1.1 EXPIRE ..
q)url:"https://management.azure.com/subscriptions"
q)url,:"/c4f7ecef-da9e-4336-a9d6-d11d5838caff/resources"
q)url,:"?api-version=2021-04-01"
q)url,:"&%24filter=resourceType%20eq%20%27microsoft.compute%2Fvirtualmachines%27"
q)params:``headers!(::;enlist["Authorization"]!enlist "bearer XXXXXXXXXXXXXXX")
q)resp:.kurl.sync (`$url;`GET;params)
q)t: (uj) over enlist each (.j.k resp[1])[`value]
q)select name, location, tags from t
name location tags
-----------------------------------------------------------------------
"staging-bastion" "westeurope" `cluster_name`name!("staging";"staging")
"tempVM" "westeurope" `cluster_name`name!("";"")
"windowsvm" "westeurope" `cluster_name`name!("";"")
q)

Package, manage, and deploy¶

QPacker (QP) is a tool to help developers package, manage and deploy kdb+ applications to the cloud. It automates the creation of containers and virtual machines using a simple configuration file, qp.json, and packages kdb+ applications with common shared code dependencies, such as Python and C. QPacker can build and run containers locally as well as push them to container registries such as DockerHub and Azure Container Registry.

Software is often built by disparate teams, who may individually have remit over a particular component, and package that component for consumption by others. QPacker stores all artefacts for a project in a QPK file. While this file is intended for binary dependencies, it is also portable across environments.

QPacker can interface with HashiCorp Packer to generate virtual-machine images for Azure. These VM images can then be used as templates for a VM instance running in the cloud. When a cloud target is passed to QPacker (qp build -azure), an image is generated for each application defined in the top-level qp.json file. The QPK file resulting from each application is installed into the image and integrated with systemd, so that the startq.sh launch script starts the application on boot.

Azure Functions¶

Function as a service (FaaS) lets developers create an application without considering the complexity of building and maintaining the infrastructure that runs it. Cloud providers natively support only a handful of programming languages; Azure's FaaS solution, Functions, supports Bash scripts that can start any executable, including a q script. One use case is to have Azure Functions start a gateway to execute a client query. This provides cost transparency, zero cost when the service is not used, and full client-query isolation.

Access management¶

We distinguish application-level and infrastructure-level access control. Application-level access management controls who can access kdb+ components and run commands. TP, RDB and HDB are generally restricted to kdb+ infrastructure administrators only, and the gateway is the access point for users. One responsibility of the gateway is to check whether the user can access the tables (columns and rows) they are querying. This generally requires checking the user ID (returned by .z.u) against some organizational entitlement database, cached locally in the gateway and refreshed periodically.

Secure access – Azure Bastion¶

Azure Bastion, a fully platform-managed PaaS service that you provision inside your virtual network, lets you manage your kdb+ Azure Virtual Machines through an RDP or SSH session directly in the Azure portal with a single-click, seamless experience.
Azure Bastion provides secure and auditable instance management for your kdb+ tick deployment without the need to open inbound ports, maintain bastion hosts, or manage SSH keys. Use it to permission access to the KX gateway. This is a key task for the administrators of the KX system, since both user and API access to the entire system is controlled through the KX gateway process.

Hardware specifications¶

A number of Azure VM types are especially performant for kdb+ workloads. Azure offers five different sub-groups of VMs in the memory-optimized family, each with a high memory-to-vCPU ratio. Although the DSv2-series 11-15 and the Esv4-series have comparable vCPU-to-memory ratios, DSv2-series instances support maximum uncached disk throughput of 62.5K IOPS and expected network bandwidth of 24.4 Gbps, around 2× the disk throughput and network bandwidth of Esv4 instances. The Azure Hypervisor, based on Windows Hyper-V, is the native hypervisor powering the Azure Cloud Services platform and providing the building blocks for delivering Azure Virtual Machine types with a selection of compute, storage, memory, and networking options.

| service | Azure VM type | storage | CPU, memory, I/O |
|---|---|---|---|
| tickerplant | Memory optimized: Dv4/DSv4, Ev4/Esv4, Ev5/Esv5, M; HPC-optimized: HBv3 | Azure Managed Premium SSD/Ultra Disk, Lustre FS | High Perf, Medium Memory, Medium IO |
| realtime database | Memory optimized: Dv4/DSv4, Ev4/Esv4, Ev5/Esv5, M; HPC-optimized: HBv3 | – | High Perf, High Capacity, Medium IO |
| historical database | Memory optimized: Dv4/DSv4, Ev4/Esv4, Ev5/Esv5, M | Azure Managed Premium SSD/Ultra Disk, Lustre FS | Medium Perf, Medium Memory, High IO |
| complex event processing (CEP) | Memory optimized: Dv4/DSv4, Ev4/Esv4, Ev5/Esv5, M | – | Medium Perf, Medium Memory, High IO |
| gateway | Memory optimized: Dv4/DSv4, Ev4/Esv4, Ev5/Esv5, M | – | Medium Perf, Medium Memory, High IO |

Resources¶

GitHub repository with standard tick.q scripts
Building Real-time Tick Subscribers
Data recovery for kdb+ tick
Disaster-recovery planning for kdb+ tick systems
Intraday writedown solutions
Query Routing: a kdb+ framework for a scalable load-balanced system
Order Book: a kdb+ intraday storage and access methodology
kdb+tick profiling for throughput optimization
KX Cloud Edition

kdb+ and FIX messaging¶

Electronic trading volumes have increased significantly in recent years, prompting financial institutions, both buy side and sell side, to invest in increasingly sophisticated Order Management Systems (OMS). An OMS efficiently manages the execution of orders using a set of pre-defined conditions to obtain the best price of execution. OMSs typically use the Financial Information eXchange (FIX) protocol, which has become the industry standard for electronic trade messaging since it was first developed in 1992. The demand for post-trade analytics and compliance requirements (for example, proving a client order was filled at the best possible price) creates a need to retain all the FIX messages produced by an OMS.
For large volumes of data this can prove extremely challenging; however, kdb+ provides an ideal platform to capture and process the FIX messages. It allows efficient querying of large volumes of historical data and, in conjunction with a kdb+ tick set-up, can produce powerful real-time post-trade analytics for front-office users. This paper introduces the key steps to capture a FIX message feed from an OMS and to understand the data contained within each message. We produce an example that demonstrates a kdb+ set-up that captures a FIX feed and produces a final order-state table. All tests were run using kdb+ version 3.1 (2013.12.27).

FIX messages¶

FIX message format¶

FIX messages consist of a series of key-value pairs that contain all the information for a particular state of a transaction. Each tag relates to a field defined in the FIX specification for a given system. In FIX4.4, tags 1-956 are predefined and values for these fields must comply with the values outlined in the FIX protocol. Outside of this range custom fields may be defined; these may be unique to the trading system or firm. Some common tags are tabulated below.

| tag | field | tag | field |
|---|---|---|---|
| 1 | Account | 29 | LastCapacity |
| 6 | AvgPx | 30 | LastMkt |
| 8 | BeginString | 31 | LastPx |
| 9 | BodyLength | 32 | LastQty |
| 10 | CheckSum | 34 | MsgSeqNum |
| 11 | ClOrdID | 35 | MsgType |
| 12 | Commission | 37 | OrderID |
| 13 | CommType | 38 | OrderQty |
| 14 | CumQty | 39 | OrderStatus |
| 15 | Currency | 49 | SenderCompID |
| 17 | ExecID | 52 | SendingTime |
| 19 | ExecRefID | 56 | TargetCompID |
| 21 | HandlInst | 151 | LeavesQty |

Some common FIX tags and respective fields

A FIX message is composed of a header, body and trailer. All messages must begin with a header consisting of the BeginString (8), BodyLength (9), MsgType (35), SenderCompID (49), TargetCompID (56), MsgSeqNum (34) and SendingTime (52) tags. BeginString states the FIX version used, BodyLength is a character count of the message body, and MsgType gives the type of message, for instance New Order, Execution Report, etc. SenderCompID and TargetCompID identify the firms sending and receiving the message respectively. The message must finish with tag CheckSum (10), a checksum computed over all preceding characters of the message, including all delimiters. The body of the message consists of all other relevant tags, depending on the message type. FIX messages are delimited by ASCII SOH (Start of Heading); however, as this is unprintable, we use | as the delimiter in this paper. Below is an example of some FIX messages that we will use for this whitepaper.
8=FIX.4.4|9=178|35=D|49=A|56=B|1=accountA|6=0|11=00000001|12=0.0002| 13=2|14=|15=GBp|17=|1 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=0| 11=00000001|12=0.0002|13=2|14=|15=GBp|17=|19=| 21=|29=|30=|31=|32=|37=|38=10000|39=0|41=|44=|48=VOD.L| 50=AB|52=20131218-09:01:13|54=1|55=VOD|58=|59=1| 60=20131218-09:01:13|10=168 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.5| 11=00000001|12=|13=|14=1500|15=GBp|17=1 00000001|19=| 21=1|29=1|30=XLON|31=229.5|32=1500|37=|38=10000|39=1| 41=|44=|48=VOD.L|50=AB|52=20131218-09:02:01|54=1|55=VOD|58=|59=1| 60=20131218-09:02:01|10=193 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.6125| 11=00000001|12=|13=|14=6000|15=GBp|17=100000002|19=| 21=1|29=1|30=XLON|31=229.65|32=4500|37=|38=10000|39=1| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:03|54=1|55=VOD|58=|59=1| 60=20131218-09:01:03|10=197 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.6353846| 11=00000001|12=|13=|14=6500|15=GBp|17=100000003|19=| 21=1|29=1|30=XLON|31=229.91|32=500|37=|38=10000|39=1| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:14|54=1|55=VOD|58=|59=1| 60=20131218-09:01:14|10=199 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.7496933| 11=00000001|12=|13=|14=8150|15=GBp|17=100000004|19=| 21=1|29=1|30=XLON|31=230.2|32=1650|37=|38=10000|39=1|41=|44=|48=VOD.L| 50=AB|52=20131218-09:01:15|54=1|55=VOD|58=|59=1|60=20131218-09:01:15|10=199 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.6295| 11=00000001|12=|13=|14=10000|15=GBp|17=100000005|19=| 21=1|29=1|30=XLON|31=229.1|32=1850|37=|38=10000|39=2| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:46|54=1|55=VOD|58=|59=1| 60=20131218-09:01:46|10=197 Feed handler¶ A feed handler may be used to deliver the messages to kdb+. The feed handler should receive the flow of FIX messages from the OMS, parse the messages to extract the required fields and send them to the kdb+ tickerplant. Feed handlers are generally written in Java or C++ and are widely available, for example from KX. For the example provided in this white paper we load a file of FIX messages to a q feed handler. Our feed handler reads each FIX message from the file, extracts the tags and casts to the desired q type. The FIX tag and field names are stored in a FIX specification, which should include all possible tags from the OMS including any custom tags unique to our setup. The FIX specification allows us to create reference dictionaries to map the tags to the correct column names. q)fixTagToName 1 | Account 6 | AvgPx 8 | BeginString 11| ClOrdID 12| Commission 13| CommType 14| CumQty ... We include functions to parse the FIX messages and extract the desired tags. These functions can also be included in the RDB to allow us to extract additional information from the raw FIX message for fields not included in our schema. getAllTags:{[msg](!)."S=|"0:msg} getTag:{[tag;msg](getAllTags[msg])[tag]} We read the file containing the FIX messages, parse each message to extract the information and flip into a table. fixTbl:(uj/) {flip fixTagToName[key d]!value enlist each d:getAllTags x} each fixMsgs We need to extract the desired fields and cast to the correct type. Functions are used to match the schema of our FIX messages to a predefined schema in the RDB. 
colConv:{[intype;outtype]
 $[(intype in ("C";"c"))&(outtype in ("C";"c")); eval';
   (intype in ("C";"c")); upper[outtype]$;
   (outtype in ("C";"c")); string;
   upper[outtype]$string ] }

matchToSchema:{[t;schema]
 c:inter[cols t;cols schema];
 metsch:exec "C"^first t by c from meta schema;
 mett:exec "C"^first t by c from meta t;
 ?[t;();0b;c!{[y;z;x](colConv[y[x];z[x]];x)}[mett;metsch] each c] }

We add the full FIX message to the table as a column of strings. This ensures no data is lost from the original message that was received, and information can easily be obtained from it when necessary. The FIX message is then sent to the tickerplant.

genFixMsgs:{[]
 //read fix message file
 fixMsgs:read0 hsym `$path,"/fixMsgs.txt";
 // extract each tag, map to name and convert to table
 fixTbl:(uj/) {flip fixTagToName[key d]!value enlist each d:getAllTags x} each fixMsgs;
 // cast fixTbl to correct types
 t:matchToSchema[fixTbl;fixmsgs];
 // Add the original fix message as a column
 update FixMessage:fixMsgs from t }

runFIXFeed:{[]
 t:genFixMsgs[];
 tick_handle["upd";`fixmsgs;t]; }

FIX tags¶

In this section we look at some of the most important FIX tags.

MsgType¶

MsgType (tag 35) is a required field in the FIX message. It defines the type of message received, for example order, execution, allocation, heartbeat, Indication of Interest, etc. For the purposes of this paper we limit ourselves to handling the following message types, which are the most common from an OMS.

| code | meaning |
|---|---|
| 8 | Execution report |
| D | Order – single |
| G | Order cancel/replace request |
| F | Order cancel request |

Every time we receive a new order, the first message should contain MsgType D, and we should only receive one D message per order. If the order has to be amended at any stage we should receive an order replace request (MsgType G) to replace the original order. As the order executes we receive execution reports (MsgType 8) for each execution; these are linked back to the original order through one of the ID fields, generally ClOrdID. The execution message contains some important updates to the overall state of the order, particularly CumQty and AvgPx. If the order is cancelled before the full order quantity is executed, a Cancel Request (MsgType F) message is sent. This can be rejected with an Order Cancel Reject (MsgType 9), in which case the order will continue to execute. It is important to note that a cancel only cancels any outstanding shares not yet executed, not the full order.

OrdStatus¶

OrdStatus tells us the current state the order is in. It is an important indicator in cases where the order has not been filled, showing whether it is still executing, cancelled, done for the day (for multi-day orders), etc. The valid values are:

| value | meaning | value | meaning |
|---|---|---|---|
| 0 | New | 7 | Stopped |
| 1 | Partially filled | 8 | Rejected |
| 2 | Filled | 9 | Suspended |
| 3 | Done for day | A | Pending New |
| 4 | Canceled | B | Calculated |
| 5 | Replaced | C | Expired |
| 6 | Pending Cancel/Replace | | |

Commission¶

In FIX there are two fields needed to obtain the correct commission on an order: Commission (12) and CommType (13). Both carry a numerical value; the latter is a code defined as follows:

| code | meaning |
|---|---|
| 1 | per unit (implying shares, par, currency, etc) |
| 2 | percentage |
| 3 | absolute (total monetary amount) |
| 4 | (for CIV buy orders) percentage waived – cash discount |
| 5 | (for CIV buy orders) percentage waived – enhanced units |
| 6 | points per bond or contract |

We will only be concerned with the first three cases for our example in this paper.
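To make the first three CommType cases concrete, here is a quick worked check of the arithmetic in q, using the percentage commission and the figures from the fully filled example order later in this paper (the per-unit rate is purely illustrative):

q)0.002*10000              / CommType 1: per-unit rate of 0.002 on 10,000 shares
20f
q)0.0002*229.6295*10000    / CommType 2: rate of 0.0002 applied to the notional (AvgPx * CumQty)
459.259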
We define a function to calculate the commission value:

calcComm:{[comval;comtyp;px;qty]
 $[comtyp=`1; comval*qty;
   comtyp=`2; comval*px*qty;
   comtyp=`3; comval] }

LastCapacity¶

LastCapacity tells us the broker capacity in the execution: it indicates whether a fill on an order was executed as principal or agency. A principal transaction occurs when the broker fills part of the order from its own inventory, while an agency transaction involves the broker filling the order on the market. Distinguishing between principal and agency flow is vital when calculating benchmarks or client loss ratios. The valid values are:

| value | meaning |
|---|---|
| 1 | Agent |
| 2 | Cross as agent |
| 3 | Cross as principal |
| 4 | Principal |

Example – order state¶

Approach¶

Our aim is to create a final-state table for all orders. In our example the RDB subscribes to the tickerplant, receives all the messages and generates an order state. For large volumes this could be separated into two processes: the RDB would just capture all messages from the tickerplant and store them in a single table, while a separate process subscribes to this table and generates the order and execution tables. This example details an approach to handling the most common messages expected from an OMS. The standard fields expected from an OMS are included, along with some derived fields.

Schema¶

We set up the schema below for the fixmsgs table. It contains columns for every tag defined by our FIX spec, as well as a column called FixMessage, which contains the full FIX message as a string, and a column containing the tickerplant time. The FixMessage field is important because any information in the FIX message that is missing from our schema can still be extracted from it.

fixmsgs:([] Account:`$(); AvgPx:`float$(); ClOrdID:(); Commission:`float$(); CommType:`$(); CumQty:`float$(); Currency:`$(); ExecID:(); ExecRefID:(); HandlInst:`$(); LastCapacity:`$(); LastMkt:`$(); LastPx:`float$(); LastQty:`int$(); LeavesQty:`float$(); MsgType:`$(); OrderID:(); OrderQty:`int$(); OrdStatus:`$(); OrigClOrdID:(); Price:`float$(); SecurityID:`$(); SenderSubID:`$(); SendingTime:`datetime$(); Side:`$(); Symbol:`$(); Text:(); TimeInForce:`$(); TransactTime:`datetime$(); FixMessage:(); Time:`datetime$() )

The order schema contains the core fields from the fixmsgs schema as well as two derived fields: OrderTime and AmendTime. These fields are not included in the FIX spec but will be required by end users, and as such are added in the RDB. The order table is keyed on OrderID. In practice ClOrdID, or a combination of ClOrdID and OrigClOrdID, may be needed: if an order is cancelled and replaced, OrigClOrdID contains the ClOrdID of the previous version of the order. Only the final version is required in the final state, so we need to track these orders.
order:([OrderID:()] ClOrdID:(); OrigClOrdID:(); SecurityID:`$(); Symbol:`$(); Side:`$(); OrderQty:`int$(); CumQty:`float$(); LeavesQty:`float$(); AvgPx:`float$(); Currency:`$(); Commission:`float$(); CommType:`$(); CommValue:`float$(); Account:`$(); MsgType:`$(); OrdStatus:`$(); OrderTime:`datetime$(); TransactTime:`datetime$(); AmendTime:`datetime$(); TimeInForce:`$() ) Processing orders¶ We define the following upd function on the RDB: upd:{[t;x] t insert x; x:`TransactTime xasc x; updNewOrder[`order;select from x where MsgType in `D]; x:select from x where not MsgType in `D; {$[(first x`MsgType)=`8; updExecOrder[`order;x]; (first x`MsgType)=`G; updAmendOrder[`order;x]; (first x`MsgType) in `9`F; updCancelOrder[`order;x]; :()]; } each (where 0b=(=':)x`MsgType) cut x } And a series of functions to handle each MsgType : updNewOrder:{[t;x] ...} updAmendOrder:{[t;x] ...} updCancelOrder:{[t;x] ...} updExecOrder:{[t;x] ...} We first ensure the messages are ordered correctly, according to TransactTime . This is so the messages are processed in the order they were generated, which is important when looking at the final state of an order. New orders are processed first since we should only ever receive one D message per order. updNewOrder[`order;select from x where MsgType in `D] For all subsequent updates for each order we need to ensure that all amendments, cancellations and executions are handled in the correct order. We separate the remaining messages into chunks of common MsgType and process each chunk sequentially. This is particularly important in the case where we receive an amended order in the middle of a group of executions. This is essential for the final order state to show the correct TransactTime , MsgType and OrdStatus of the final order. {$[(first x`MsgType)=`8; updExecOrder[`order;select from x where MsgType in `8]; updAmendOrder[`order;select from x where MsgType in `G`F]] } each (where 0b=(=':)x`MsgType) cut x New orders¶ Whenever a new order is received we must ensure it is entered into our final-state table. We define the following function: updNewOrder:{[t;x] x:update OrderTime:TransactTime from x; t insert inter[cols t;cols x]#x; } For each order, users will want to know the time the order was received. TransactTime is not sufficient here, since it will be overwritten in the final-state table by subsequent updates. We introduce a custom field called OrderTime . This contains the TransactTime of the new order message and will not be updated by any other messages. For a new order message we want to insert all the columns provided in the FIX message. We extract all common columns between our message and the schema. We also note the order table is keyed on OrderID . t insert inter[cols t;cols x]#x We receive the following new-order FIX messages from the OMS. 
8=FIX.4.4|9=178|35=D|49=A|56=B|1=accountA|6=0| 11=0000001|12=0.0002|13=2|14=|15=GBp|17=|19=| 21=|29=|30=|31=|32=|151=10000|37=00000001|38=10000|39=| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:00|54=1|55=VOD|58=|59=1| 60=20131218-09:01:00|10=184 8=FIX.4.4|9=178|35=D|49=A|56=B|1=accountB|6=0| 11=0000002|12=0.0002|13=2|14=|15=GBp|17=|19=| 21=|29=|30=|31=|32=|151=4000|37=00000002|38=4000|39=| 41=|44=|48=RIO.L|50=AD|52=20131218-10:24:07|54=2|55=RIO|58=|59=1| 60=20131218-10:24:07|10=182 8=FIX.4.4|9=178|35=D|49=A|56=B|1=accountA|6=0| 11=0000003|12=0.0002|13=2|14=|15=GBp|17=|19=| 21=|29=|30=|31=|32=|151=20100|37=00000003|38=20100|39=| 41=|44=|48=BARC.L|50=AR|52=20131218-11:18:22|54=1|55=BARC|58=|59=1| 60=20131218-11:18:22|10=186 8=FIX.4.4|9=178|35=D|49=A|56=B|1=accountC|6=0| 11=0000004|12=0.0002|13=2|14=|15= The order state shows a series of unfilled orders. The CumQty and OrdStatus are initially null, as they are not present on the new order message. They will be populated by subsequent execution updates. q)select OrderID, MsgType, OrdStatus, SecurityID, Account, OrderQty, CumQty, Commission from order OrderID MsgType OrdStatus SecurityID Account OrderQty CumQty Commission --------------------------------------------------------------------------- "00000001" D VOD.L accountA 10000 0.0002 "00000002" D RIO.L accountB 4000 0.0002 "00000003" D BARC.L accountA 20100 0.0002 "00000004" D EDF.PA accountC 15000 0.0002 "00000005" D VOD.L accountD 3130 0.0002 Amendments and cancellations¶ Any value of the order may be amended by sending a message with MsgType G . This could reflect a correction to commission value, a change in the order quantity etc. The function to update amendments differs slightly from that for new orders. A field to display the latest amend time is added – this provides the end user with the TransactTime of the last change to the order. Every amend message should have been preceded by a new order message, so the amendment is upserted (rather than inserted) into the order state table. A production system could include some sanity checks to ensure we have received an order for any amendment. updAmendOrder:{[t;x] x:update AmendTime:TransactTime from x; t upsert inter[cols t;cols x]#x; } The following example shows an update to the commission value. We have received a new order with commission specified in percent. An update modifies this to an absolute value. The amendment is reflected in the order state and the total value of the commission is extracted using the calcComm function outlined earlier. 8=FIX.4.4|9=178|35=G|1=accountA|6=253.8854627| 11=0000003|12=700|13=3|14=20100| 15=GBp|17=|19=| 21=|29=|30=|31=|32=|151=0|37=00000003|38=20100|39=2| 41=|44=|48= BARC.L|50=AR|52=20131218-16:33:12|54=1|55=BARC|58=|59=1| 60=20131218- 16:33:12|10=195 q)select OrderID,MsgType,Commission,CommType from fixmsgs where OrderID like "00000003",MsgType in `D`G`F`9 OrderID MsgType Commission CommType -------------------------------------- "00000003" D 0.0002 2 "00000003" G 700 3 q)select OrderID, MsgType, CumQty, AvgPx, Commission, CommType, CommValue:calcComm'[Commission;CommType;AvgPx;CumQty] from order where OrderID like "00000003" OrderID MsgType CumQty AvgPx Commission CommType CommValue ---------------------------------------------------------------- "00000003" G 20100 253.8855 700 3 700 An Order Cancel Request (MsgType F ) indicates the cancellation of any outstanding unfilled order quantity. It can be rejected with an Order Cancel Reject (MsgType 9 ). 
Along with the order cancel message we should get an Execution Report to confirm the cancellation, with OrdStatus 4 to indicate the order is cancelled. As such this may be sufficient to indicate to end users a cancellation, with the Order Cancel Request and Order Cancel Reject omitted from the Order State logic. For this example we upsert only the MsgType and AmendTime from the cancel messages. updCancelOrder:{[t;x] x:update AmendTime:TransactTime from x; t upsert `OrderID xkey select OrderID,MsgType,AmendTime from x; } When the order is cancelled we receive the following FIX message to request a cancel. The order table shows an order that is not fully filled, but cancelled with nothing left to fill. 8=FIX.4.4|9=178|35=F|1=accountC|6=25.3156| 11=0000004|12=|13=|14=12500|15=EUR| 17=100000018|19=| 21=3|29=1|30=XPAR|31=0|32=0|151=2500|37=00000004|38=15000|39 =| 41=|44=|48=EDF.PA|50=CD|52=20131218-13:33:11|54=1|55=EDF|58=|59=1| 60=20131218-13:33:11|10=206 q)select OrderID,MsgType,OrdStatus,OrderQty,CumQty from order where MsgType=`F OrderID MsgType OrdStatus OrderQty CumQty -------------------------------------------- "00000004" F 1 15000 12500 The execution report should follow the cancel request to confirm the order has been cancelled and update the status of the order. The confirmation updates the OrdStatus and changes the LeavesQty to reflect the cancellation. We will see how to handle the execution report in the next section. 8=FIX.4.4|9=178|35=8|1=accountC|6=25.3156| 11=0000004|12=|13=|14=12500|15=EUR| 17=100000018|19=| 21=3|29=1|30=XPAR|31=0|32=0|151=2500|37=00000004|38=15000|39 =4| 41=|44=|48=EDF.PA|50=CD|52=20131218- 13:33:11|54=1|55=EDF|58=|59=1| 60=20131218-13:33:11|151=0|10=210 q)select OrderID,MsgType,OrdStatus,OrderQty,CumQty,LeavesQty from order where MsgType=`F OrderID MsgType OrdStatus OrderQty CumQty LeavesQty ------------------------------------------------------ "00000004" 8 4 15000 12500 0 Execution reports¶ Execution reports (MsgType 8 ) are sent every time there is a change in the state of the order. We are only interested in certain fields from execution messages. In our case we want to update OrderID , MsgType , OrdStatus , LastQty , LastPx , AvgPx , CumQty , LeavesQty and LastMkt in the order table. AvgPx , CumQty and LeavesQty are derived columns, giving the latest information for the full order. They should be calculated by the OMS and upserted straight into the order state. The LastQty contains the quantity executed on the last fill, and LastPx the price of the last fill. It is important to always take the latest OrdStatus from the execution messages, this ensures the order state always reflects the current state of the order. updExecOrder:{[t;x] t upsert select OrderID, MsgType, OrdStatus, LastQty, LastPx, AvgPx, CumQty, LeavesQty, LastMkt from x; } The following messages show all the execution reports received for one order. The first message is a confirmation of the new order and sets the OrdStatus to 0. The subsequent messages show each fill on the order. The OrdStatus is set to 1 for each fill until order is complete, when we receive an OrdStatus of 2. 
8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=0| 11=0000001|12=0.0002|13=2|14=|15=GBp|17=|19=| 21=|29=|30=|31=|32=|151=10000|37=00000001|38=10000|39=0| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:00|54=1|55=VOD|58=|59=1| 60=20131218-09:01:00|10=185 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=0| 11=0000001|12=0.0002|13=2|14=|15=GBp|17=|19=| 21=|29=|30=|31=|32=|151=10000|37=00000001|38=10000|39=0| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:03|54=1|55=VOD|58=|59=1| q60=20131218-09:01:03|10=185 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.5| 11=0000001|12=|13=|14=1500|15=GBp|17=100000001|19=| 21=1|29=1|30=XLON|31=229.5|32=1500|151=8500|37=00000001|38=10000|39=1| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:11|54=1|55=VOD|58=|59=1| 60=20131218-09:01:11|10=209 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.6125| 11=0000001|12=|13=|14=6000|15=GBp|17=100000002|19=| 21=1|29=1|30=XLON|31=229.65|32=4500|151=4000|37=00000001|38=10000|39=1| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:13|54=1|55=VOD|58=|59=1| 60=20131218-09:01:13|10=213 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.6353846| 11=0000001|12=|13=|14=6500|15=GBp|17=1##|19=| 21=1|29=1|30=XLON|31=229.91|32=500|151=3500|37=0000 0001|38=10000|39=1| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:14|54=1|55=VOD|58=|59=1| 60=20131218-09:01:14|10=215 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.7496933| 11=0000001|12=|13=|14=8150|15=GBp|17=100000004|19=| 21=1|29=1|30=XLON|31=230.2|32=1650|151=1850|37=00000001|38=10000|39=1| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:15|54=1|55=VOD|58=|59=1| 60=20131218-09:01:15|10=215 8=FIX.4.4|9=178|35=8|49=A|56=B|1=accountA|6=229.6295| 11=0000001|12=|13=|14=10000|15=GBp|17=100000005|19=| 21=1|29=1|30=XLON|31=229.1|32=1850|151=0|37=00000001|38=10000|39=2| 41=|44=|48=VOD.L|50=AB|52=20131218-09:01:46|54=1|55=VOD|58=|59=1| 60=20131218-09:01:46|10=210 The final table shows this order (OrderID "00000001" ) as fully filled. We can also see the cancelled order ("00000004" ) reflected with OrdStatus 4. The order we amended ("00000003" ) shows an amended commission value of 700. q)select OrderID, SecurityID, Side, MsgType, OrdStatus, OrderQty, CumQty, AvgPx, CommValue:calcComm'[Commission;CommType;AvgPx;CumQty] from order OrderID SecurityID Side MsgType OrdStatus OrderQty CumQty AvgPx CommValue ------------------------------------------------------------------------------- "00000001" VOD.L 1 8 2 10000 10000 229.6295 459.259 "00000002" RIO.L 2 8 2 4000 400 3253.537 260.283 "00000003" BARC.L 1 G 2 20100 20100 253.8855 700 "00000004" EDF.PA 1 8 4 15000 12500 25.3156 63.289 "00000005" VOD.L 2 8 2 3130 3130 229.7559 143.8272 Author¶ Damien Barker is a financial engineer who has worked as a consultant for some of the world's largest financial institutions. 
Based in London, Damien is currently working on trading and analytics applications at a US investment bank.

if[not[app.describeOnly] and not app.passOnly; / Only want to print this when running to see results .tst.callbacks.expecRan:{[s;e]; app.expectationsRan+:1; r:e[`result]; if[r ~ `pass; app.expectationsPassed+:1]; if[r in `testFail`fuzzFail; app.expectationsFailed+:1]; if[r like "*Error"; app.expectationsErrored+:1]; if[.tst.output.interactive; 1 $[r ~ `pass;"."; r in `testFail`fuzzFail;"F"; r ~ `beforeError;"B"; r ~ `afterError;"A"; "E"]; ]; if[(app.failFast or app.failHard) and not r ~ `pass; s[`expectations]:enlist e; 1 "\n",.tst.output.spec s; if[app.failHard;.tst.halt:1b]; if[app.exit and not app.failHard;exit 1]; ]; } ]; \d . (.tst.loadTests hsym `$) each .tst.app.args; \d .tst if[app.failHard;.tst.app.specs[;`failHard]: 1b]; if[not app.runPerformance;.tst.app.specs[;`expectations]: {x .[;();_;]/ where x[;`type] = `perf} each app.specs[;`expectations]]; if[0 <> count app.runSpecs;.tst.app.specs: app.specs where (or) over app.specs[;`title] like/: app.runSpecs]; if[0 <> count app.excludeSpecs;.tst.app.specs: app.specs where not (or) over app.specs[;`title] like/: app.excludeSpecs]; app.results: $[not app.describeOnly;.tst.runSpec each app.specs;app.specs] if[not .tst.halt; app.passed:all `pass = app.results[;`result]; if[not app.passOnly; if[.tst.output.interactive and not app.describeOnly;-1 "\n"]; if[.tst.output.always or not app.passed; -1 {-1 _ x} .tst.output.top app.results; ]; if[not app.describeOnly; if[.tst.output.interactive; -1 "For ", string[count app.specs], " specifications, ", string[app.expectationsRan]," expectations were run."; -1 string[app.expectationsPassed]," passed, ",string[app.expectationsFailed]," failed. 
",string[app.expectationsErrored]," errors."; ]; ]; ]; if[app.exit; exit `int$not app.passed]; ]; ================================================================================ FILE: qspec_lib_fixture.q SIZE: 2,984 characters ================================================================================ / Need to manage directories or only attempt to use absolute paths (latter is probably easier) .tst.fixtureAs:{[fixtureName;name]; dirPath: (` vs .tst.tstPath) 0; fixtureInDir:{$[any mp:x = (` vs' ps:(key y))[;0];` sv y,first ps where mp;`]}; fixture: $[not ` ~ fp:fixtureInDir[fixtureName;dirPath]; .tst.loadFixture[fp;name]; (`fixtures in key dirPath) and not ` ~ fp:fixtureInDir[fixtureName;` sv dirPath,`fixtures]; .tst.loadFixture[fp;name]; '"Error loading fixture '", (string fixtureName), "', not found in:\n\t", (1 _ string dirPath),"\n\t", (1 _ string ` sv dirPath,`fixtures)]; fixture ^ name } .tst.loadFixture:{[path;name]; $[2 = count fixtureName:` vs (` vs path) 1; / If there is an extension on the file path of the fixture .tst.loadFixtureTxt[path;name]; -11h = type key path; .tst.loadFixtureFile[path;name]; all -11h = (type key@) each ` sv' path,'key path; / If the path is a directory of files (splayed dir) .tst.loadFixtureFile[path;name]; .tst.loadFixtureDir[path;name]]; first fixtureName } .tst.fixture:.tst.fixtureAs[;`] .tst.currentDirFixture:` .tst.loadFixtureDir:{[f;name]; fixtureName: (` vs f) 1; dirFixtureLoaded: not ` ~ .tst.currentDirFixture; if[not dirFixtureLoaded;.tst.saveDir[];]; if[not fixtureName ~ .tst.currentDirFixture; if[dirFixtureLoaded;.tst.removeDirVars[];]; system "l ", 1 _ string f; .tst.currentDirFixture: fixtureName; ]; } .tst.loadFixtureTxt:{[f;name]; fname: ((` vs (` vs f) 1) 0) ^ name; .tst.mock[fname;(raze l[0;1] vs l[0];enlist l[0;1]) 0: 1 _ l: read0 f]; fname } .tst.loadFixtureFile:{[f;name]; .tst.mock[fname:((` vs f) 1) ^ name;get f]; fname } .tst.savedDir:.tst.defaultSavedDir:`directory`vars!("";(`,())!(),(::)) .tst.saveDir:{ if[not () ~ dirVars: .tst.findDirVars[]; .tst.savedDir:`directory`vars!(system "cd";(!).(::;get each)@\:` sv' `.,'dirVars); .tst.removeDirVars dirVars]; } .tst.removeDirVars:{![`.;();0b;] $[(::) ~ x;.tst.findDirVars[];x]} .tst.restoreDir:{ if[not ` ~ .tst.currentDirFixture; .tst.removeDirVars[]; .tst.currentDirFixture:`]; if[not "" ~ .tst.savedDir.directory; system "l ", .tst.savedDir.directory; (key .tst.savedDir.vars) set' value .tst.savedDir.vars; .tst.savedDir: .tst.defaultSavedDir;] } / Get a list of files (and thus variables) from the partition directory that do not match special partition directory files: / ie: Exclue the par.txt file and any partition directories (contained in the list .Q.ps), include the partition variable (.Q.pf) and the known partition tables (.Q.pt) / These will be the variables to delete from the top level namespace when we swap out a partition directory fixture .tst.findDirVars:{ $[count where -1h = (type .Q.qp get@) each ` sv' `.,'tables `.; /.Q.qp returns a boolean only when a table is a partition table or a splayed table distinct @[get;`.Q.pf;()],@[get;`.Q.pt;()],pvals where not any (pvals:key `:.) 
like/:(string @[get;`.Q.pv;()]),enlist "par.txt"; ()] } ================================================================================ FILE: qspec_lib_init.q SIZE: 598 characters ================================================================================ .utl.require .utl.PKGLOADING,"/mock.q" .utl.require .utl.PKGLOADING,"/fixture.q" .utl.require .utl.PKGLOADING,"/tests/internals.q" .utl.require .utl.PKGLOADING,"/tests/assertions.q" .utl.require .utl.PKGLOADING,"/tests/ui.q" .utl.require .utl.PKGLOADING,"/tests/spec.q" .utl.require .utl.PKGLOADING,"/tests/expec.q" .utl.require .utl.PKGLOADING,"/tests/fuzz.q" .utl.require .utl.PKGLOADING,"/loader.q" .tst.PKGNAME: .utl.PKGLOADING .tst.loadOutputModule:{[module]; if[not module in ("text";"xunit";"junit"); '"Unknown OutputModule ",module]; .utl.require .tst.PKGNAME,"/output/",module,".q" } ================================================================================ FILE: qspec_lib_loader.q SIZE: 416 characters ================================================================================ \d .tst loadTests:{[paths]; .utl.require each findTests[paths]} findTests:{[paths]; distinct raze suffixMatch[".q"] each distinct (),paths } suffixMatch:{[suffix;path]; if[path like "*",suffix;:enlist path]; f: ` sv' path,'f where not (f:(),key path) like ".*"; d: f where 11h = (type key@) each f; f: f where f like "*",suffix; raze f, .z.s[suffix] each d } testFilePath:{` sv (` vs .tst.tstPath)[0],x} ================================================================================ FILE: qspec_lib_mock.q SIZE: 1,136 characters ================================================================================ \d .tst initStore:store:(enlist `)!enlist (::) removeList:() / Used to replace the variable specified by name with newVal. Existing values will be clobbered / until restored. Standard variable re-assignment caveats apply / CAUTION: Mocking out the mock functions and variables is inadvisable mock:{[name;newVal]; name:$[$[1 = c:count vn:` vs name;1b;not null first vn]; / Create fully qualified name if given a local one ` sv .tst.context,name; (2 = c) and ` ~ first vn; '"Can't mock top-level namespaces!"; name]; / Early abort if name will be removed later if[name in removeList; :name set newVal]; if[`dne ~ @[get;name;`dne]; removeList,:name; :name set newVal]; if[not name in key store; store[name]:get name]; name set newVal } / Restores the environment to the previous state before any .tst.mock calls were made restore:{ / Restore all fully qualified symbols (set') . (key;value) @\: 1 _ store; `.tst.store set initStore; / Drop each fully qualified symbol from its respective namespace if[count removeList;(.[;();_;]') . 
flip ((` sv -1 _;last) @\: ` vs) each removeList]; `.tst.removeList set (); } ================================================================================ FILE: qspec_lib_output_junit.q SIZE: 2,840 characters ================================================================================ .utl.require .tst.PKGNAME,"/output/xml.q" \d .tst printJUnitTime:{string[`int$`second$x],$["000" ~ ns:3#((9 - count n)#"0"),9#n:string nano:(`long$x) mod 1000000000;"";".",ns]} expecTypes:`test`fuzz`perf!("should";"it holds that";"performs") output:()!() output[`top]:{[specs] xml.node["testsuites";()!()] raze output.spec each specs } output[`spec]:{[spec]; e:spec`expectations; attrs:`name`skipped`tests`errors`failures`time!(spec`title;0;count e;sum e[;`result] like "*Error";sum e[;`result]=`testFail;printJUnitTime sum e[;`time]); xml.node["testsuite";attrs;-1 _ ` sv output[`expectation] each e] } output[`expectation]:{[e]; label: expecTypes[e`type]," ",name:e[`desc]; outstr:output[e`type][e]; atr:`name`time!(label;printJUnitTime e[`time]); //if[e[`result] like "*Error";'blah;]; xml.node["testcase";atr] $[(e[`result] like "*Error") or count e`failures; output[e`type][e]; "" ] } output[`code]:{[e]; o:""; if[not "{}" ~ last value e[`before];o,:"Before code: \n", (last value e[`before]),"\n"]; o,:"Test code: \n",(last value e[`code]),"\n"; if[not "{}" ~ last value e[`after];o,:"After code: \n", (last value e[`after]),"\n"]; o } output[`anyFailures]:{[t];(`failures in key t) and count t[`failures]} output[`assertsRun]:{[t]; (string t[`assertsRun]), $[1 = t[`assertsRun];" assertion was";" assertions were"]," run.\n" } codeOutput:{[e] (output[`assertsRun] e),output.code e} output[`error]:{[e]; o:$[count e[`errorText]; xml.node["error";`type`message!(e[`errorText];xml.safeString[e`result], " occurred in test execution");xml.cdata codeOutput e]; "" ]; o } output[`test]:{[t]; o:""; o,:output.error[t]; if[output[`anyFailures] t; o,:raze {xml.node["failure";`type`message!(y;"Assertion failure occured during test");xml.cdata codeOutput x]}[t] each t`failures; ]; o } output[`fuzzLimit]:10; output[`fuzz]:{[t]; o:""; o,:output.error[t]; / If the fuzz assertions errors out after tests have been run, but not all failure processing has completed, the output will not pring correctly / Consider trying to figure out how to print the fuzz that the test failed on (store last fuzz?) 
if[(o~"") and output[`anyFailures] t; o,:raze {[t;f] h:"Maximum accepted failure rate: ", (string t[`maxFailRate]), "\n"; h,:"Failure rate was ", (string t[`failRate]), " for ", (string t[`runs]), " runs\n"; h,:"Displaying ", (string displayFuzz:min (.tst.output.fuzzLimit;count t[`fuzzFailureMessages])), " of ", (string count t[`fuzzFailureMessages]), " fuzz failure messages\n"; h,:raze (raze displayFuzz # t[`fuzzFailureMessages]),\:"\n"; xml.node["failure";`type`message!(f;"Fuzz failure occurred during test");xml.cdata h,codeOutput t] }[t] each t`failures; ]; o } output[`perf]:{[p]; } output[`always]:1b output[`interactive]:0b ================================================================================ FILE: qspec_lib_output_text.q SIZE: 2,307 characters ================================================================================ \d .tst

replay:{[tabs;realsubs;schemalist;logfilelist] // realsubs is a dict of `subtabs`errtabs`instrs // schemalist is a list of (tablename;schema) // logfilelist is a list of (log count; logfile) .lg.o[`subscribe;"replaying the log file(s)"]; // store the orig version of upd origupd:@[value;`..upd;{{[x;y]}}]; // only use tables user has access to subtabs:realsubs[`subtabs]; if[count where nullschema:0=count each schemalist; tabs:(schemalist where not nullschema)[;0]; subtabs:tabs inter realsubs[`subtabs]]; // set the replayupd function to be upd globally if[not (tabs;realsubs[`instrs])~(`;`); .lg.o[`subscribe;"using the .sub.replayupd function as not replaying all tables or instruments"]; @[`.;`upd;:;.sub.replayupd[origupd;subtabs;realsubs[`instrs]]]]; {[d] @[{.lg.o[`subscribe;"replaying log file ",.Q.s1 x]; -11!x;};d;{.lg.e[`subscribe;"could not replay the log file: ", x]}]}each logfilelist; // reset the upd function back to original upd @[`.;`upd;:;origupd]; .lg.o[`subscribe;"finished log file replay"]; // return updated version of realsubs @[realsubs;`subtabs;:;subtabs] } subscribe:{[tabs;instrs;setschema;replaylog;proc] // if proc dictionary is empty then exit - no connection if[0=count proc;.lg.o[`subscribe;"no connections made"]; :()]; // check required flags are set, and add a definition to the reconnection logic // when the process is notified of a new connection, it will try and resubscribe if[(not .sub.reconnectinit)&.sub.AUTORECONNECT; $[.servers.enabled; [.servers.connectcustom:{x@y;.sub.autoreconnect[y]}[.servers.connectcustom]; .sub.reconnectinit:1b]; .lg.o[`subscribe;"autoreconnect was set to true but server functionality is disabled - unable to use autoreconnect"]]; ]; // work out from the remote connection what type of tickerplant we are subscribing to // default to `standard tptype:@[proc`w;({@[value;`tptype;`standard]};`);`]; if[null tptype; .lg.e[`subscribe;e:"could not determine tickerplant type"]; 'e]; // depending on the type of tickerplant being subscribed to, change the functions for requesting // the tables and subscriptions $[tptype=`standard; [tablesfunc:{key `.u.w}; subfunc:{`schemalist`logfilelist`rowcounts`date!(.u.sub\:[x;y];enlist(.u`i`L);(.u `icounts);(.u `d))}]; tptype in `chained`segmented; [tablesfunc:`tablelist; subfunc:`subdetails];
[.lg.e[`subscribe;e:"unrecognised tickerplant type: ",string tptype]; 'e]]; // pull out the full list of tables to subscribe to utabs:@[proc`w;(tablesfunc;`);()]; // reduce down the subscription list realsubs:reducesubs[tabs;utabs;instrs;proc]; // check if anything to subscribe to, and jump out if[0=count realsubs`subtabs; .lg.o[`subscribe;"all tables have already been subscribed to"]; :()]; // pull out subscription details from the TP details:@[proc`w;(subfunc;realsubs[`subtabs];realsubs[`instrs]);{.lg.e[`subscribe;"subscribe failed : ",x];()}]; if[count details; if[setschema;createtables[details[`schemalist]]]; if[replaylog;realsubs:replay[tabs;realsubs;details[`schemalist];details[`logfilelist]]]; .lg.o[`subscribe;"subscription successful"]; updatesubscriptions[proc;;realsubs[`instrs]]each realsubs[`subtabs]]; // return the names of the tables that have been subscribed for and // the date from the name of the tickerplant log file (assuming the tp log has a name like `: sym2014.01.01 // plus .u.i and .u.icounts if existing on TP - details[1;0] is .u.i, details[2] is .u.icounts (or null) logdate:0Nd; if[tptype in `standard`chained; d:(`subtables`tplogdate!(details[`schemalist][;0];(first "D" $ -10 sublist string last first details[`logfilelist])^logdate)); :d,{(where 101 = type each x)_x}(`i`icounts`d)!(details[`logfilelist][0;0];details[`rowcounts];details[`date])]; if[tptype~`segmented; retdic:`logdir`subtables!(details[`logdir];details[`schemalist][;0]); :retdic,{(where 101 = type each x)_x}`i`icounts`d`tplogdate!details[`logfilelist`rowcounts`date`date]; ] } // wrapper function around upd which is used to only replay syms and tables from the log file that // the subscriber has requested replayupd:{[f;tabs;syms;t;x] // escape if the table is not one of the subscription tables if[not (t in tabs) or tabs ~ `;:()]; // if subscribing for all syms then call upd and then escape if[(syms ~ `)or 99=type syms; f[t;x];:()]; // filter down on syms // assuming the the log is storing messages (x) as arrays as opposed to tables c:cols[`. 
t]; // convert x into a table x:select from $[type[x] in 98 99h; x; 0>type first x;enlist c!x;flip c!x] where sym in syms; // call upd on the data f[t;x] } checksubscriptions:{update active:0b from `.sub.SUBSCRIPTIONS where not w in key .z.W;} retrysubscription:{[row] subscribe[row`table;$[((),`) ~ insts:row`instruments;`;insts];0b;0b;3#row]; } // if something becomes available again try to reconnect to any previously subscribed tables/instruments autoreconnect:{[rows] s:select from SUBSCRIPTIONS where ([]procname;proctype)in (select procname, proctype from rows), not active; s:s lj 2!select procname,proctype,w from rows; if[count s;.sub.retrysubscription each s]; } pc:{[result;W] update active:0b from `.sub.SUBSCRIPTIONS where w=W;result} // set .z.pc handler to update the subscriptions table .dotz.set[`.z.pc;{.sub.pc[x y;y]}@[value;.dotz.getcommand[`.z.pc];{[x]}]]; // if timer is set, trigger reconnections $[.timer.enabled and checksubscriptionperiod > 0; .timer.rep[.proc.cp[];0Wp;checksubscriptionperiod;(`.sub.checksubscriptions`);0h;"check all subscriptions are still active";1b]; checksubscriptionperiod > 0; .lg.e[`subscribe;"checksubscriptionperiod is set but timer is not enabled"]; ()] ================================================================================ FILE: TorQ_code_common_timer.q SIZE: 5,024 characters ================================================================================ // Functionality to extend the timer \d .timer enabled:@[value;`enabled;1b] // whether the timer is enabled debug:@[value;`debug;0b] // print when the timer runs any function logcall:(not @[value;`.proc.lowpowermode;0b]) & @[value;`logcall;1b] // log each timer call by passing it through the 0 handle nextscheduledefault:@[value;`nextscheduledefault;2h] // the default way to schedule the next timer // Assume there is a function f which should run at time T0, actually runs at time T1, and finishes at time T2 // if mode 0, nextrun is scheduled for T0+period // if mode 1, nextrun is scheduled for T1+period // if mode 2, nextrun is scheduled for T2+period id:0 getID:{:id+::1} // Store a table of timer values timer:([id:`int$()] // the of the timer timerchange:`timestamp$(); // when the function was added to the timer periodstart:`timestamp$(); // the first time to fire the timer periodend:`timestamp$(); // the the last time to fire the timer period:`timespan$(); // how often the timer is run funcparam:(); // the function and parameters to run lastrun:`timestamp$(); // the last run time nextrun:`timestamp$(); // the next scheduled run time active:`boolean$(); // whether the timer is active nextschedule:`short$(); // determines how the next schedule time should be calculated description:()); // a free text description // utility function to check funcparam comes in the correct format check:{[fp;dupcheck] if[dupcheck; if[count select from timer where fp~/:funcparam; '"duplicate timer already exists for function ",(-3!fp),". Use .timer.rep or .timer.one with dupcheck set to false to force the value"]]; $[0=count fp; '"funcparam must not be an empty list"; 10h=type fp; '"funcparam must not be string. 
Use (value;\"stringvalue\") instead"; fp]} // add a repeating timer rep:{[start;end;period;funcparam;nextsch;descrip;dupcheck] if[not nextsch in `short$til 3; '"nextsch mode can only be one of ",-3!`short$til 3]; `.timer.timer upsert (getID[];cp;start;0Wp^end;period;check[funcparam;dupcheck];0Np;$[start<cp;period*ceiling(cp-start)%period;0D]+start:(cp:.proc.cp[])^start;1b;nextsch;descrip);} // add a one off timer one:{[runtime;funcparam;descrip;dupcheck] `.timer.timer upsert (getID[];.proc.cp[];.proc.cp[];0Np;0Nn;check[funcparam;dupcheck];0Np;runtime;1b;0h;descrip);} // projection to add a default repeating timer. Scheduling mode 2 is the safest - least likely to back up repeat:rep[;;;;nextscheduledefault;;1b] once:one[;;;1b] // Remove a row from the timer remove:{[timerid] delete from `.timer.timer where id=timerid} removefunc:{[fp] delete from `.timer.timer where fp~/:funcparam} // run a timer function and reschedule if required run:{ // Pull out the rows to fire // Assume we only use period start/end when creating the next run time // sort asc by lastrun so the timers which are due and were fired longest ago are given priority torun:`lastrun xasc 0!select from timer where active,nextrun<x; runandreschedule each torun} nextruntime:-0Wp // run a timer function and reschedule it if required runandreschedule:{ // if debug mode, print out what we are doing if[debug; .lg.o[`timer;"running timer ID ",(string x`id),". Function is ",-3!x`funcparam]]; start:.proc.cp[]; @[$[logcall;0;value];x`funcparam;{update active:0b from `.timer.timer where id=x`id; .lg.e[`timer;"timer ID ",(string x`id)," failed with error ",y,". The function will not be rescheduled"]}[x]]; // work out the next run time n:x[`period]+(x[`nextrun];start;.proc.cp[]) x`nextschedule; // check if the next run time falls within the scheduled period // either update the nextrun info, or switch off the timer $[n within x`periodstart`periodend; update lastrun:start,nextrun:n from `.timer.timer where id=x`id; [if[debug;.lg.o[`timer;"setting timer ID ",(string x`id)," to inactive as next schedule time is outside of scheduled period"]]; update lastrun:start,active:0b from `.timer.timer where id=x`id]]; .timer.nextruntime:exec min[nextrun] from .timer.timer; } //Set .z.ts if[.timer.enabled; .dotz.set[`.z.ts;$[@[{value x;1b};.dotz.getcommand[`.z.ts];0b]; {[x;y] .timer.run now:.proc.cp[]; x@y}[value .dotz.getcommand[`.z.ts]]; {if[.proc.cp[]>.timer.nextruntime;.timer.run[.proc.cp[]]]}]];

Dictionary programs¶ From GeeksforGeeks Python Programming Examples Follow links to the originals for more details on the problem and Python solutions. Sort dictionary by keys or values¶ Sort keys ascending¶ >>> kv = {2:'56', 1:'2', 5:'12', 4:'24', 6:'18', 3:'323'} >>> sorted(kv.keys()) [1, 2, 3, 4, 5, 6] q)kv:2 1 4 5 6 3!64 69 23 65 34 76 q)asc key kv `s#1 2 3 4 5 6 A dictionary is a mapping between two lists: the keys and the values. Keys are commonly of the same datatype; as are values. So most dictionaries are a mapping between two vectors. (Homogeneous lists.) Above, dictionary kv is formed from two vectors by the Dict operator ! . A list of key-value pairs can be flipped into two lists, and passed to (!).
to form a dictionary. q)(!). flip(2 56;1 2;5 12;4 24;6 18;3 323) 2| 56 1| 2 5| 12 4| 24 6| 18 3| 323 Sort entries ascending by key¶ >>> [[k, kv[k]] for k in sorted(kv.keys())] [[1, 2], [2, 56], [3, 323], [4, 24], [5, 12], [6, 18]] q)k!kv k:asc key kv 1| 2 2| 56 3| 323 4| 24 5| 12 6| 18 Sort entries ascending by value¶ >>> sorted(kv.items(), key = lambda x:(x[1], x[0])) [(1, 2), (5, 12), (6, 18), (4, 24), (2, 56), (3, 323)] q)asc kv 1| 2 5| 12 6| 18 4| 24 2| 56 3| 323 The value of kv is the dictionary’s values. q)value kv 56 2 12 24 18 323 So an ascending sort of the dictionary returns it in ascending order of values. Sum of values¶ >>> d = {'a': 100, 'b':200, 'c':300} >>> sum(d.values()) 600 Dictionaries are first-class objects in q, and keywords apply to their values. q)d:`a`b`c!100 200 300 q)sum d 600 Delete an entry¶ >>> d = {"Arushi" : 22, "Anuradha" : 21, "Mani" : 21, "Haritha" : 21} >>> # functional removal >>> {key:val for key, val in d.items() if key != 'Mani'} {'Arushi': 22, 'Anuradha': 21, 'Haritha': 21} >>> # removal in place >>> d.pop('Mani') 21 >>> d {'Anuradha': 21, 'Haritha': 21, 'Arushi': 22} q)d:`Anuradha`Haritha`Arushi`Mani!21 21 22 21 q)delete Mani from d / functional removal Anuradha| 21 Haritha | 21 Arushi | 22 q)delete Haritha from `d / removal in place `d q)d Anuradha| 21 Arushi | 22 Mani | 21 Removal in place in q is effectively restricted to global tables. Within functions, use functional methods. Sort list of dictionaries by value¶ >>> lis = [{ "name" : "Nandini", "age" : 20}, ... { "name" : "Manjeet", "age" : 20 }, ... { "name" : "Nikhil" , "age" : 19 }] >>> >>> sorted(lis, key=itemgetter('age', 'name')) [{'name': 'Nikhil', 'age': 19}, {'name': 'Manjeet', 'age': 20}, {'name': 'Nandini', 'age': 20}] >>> sorted(lis, key=itemgetter('age'),reverse = True) [{'name': 'Nandini', 'age': 20}, {'name': 'Manjeet', 'age': 20}, {'name': 'Nikhil', 'age': 19}] A list of q same-key dictionaries is… a table. q)show lis:(`name`age!(`Nandini;20); `name`age!(`Manjeet;20); `name`age!(`Nikhil;19)) name age ----------- Nandini 20 Manjeet 20 Nikhil 19 q)lis iasc lis`age / sort ascending by age name age ----------- Nikhil 19 Nandini 20 Manjeet 20 q)lis{x iasc x y}/`name`age / sort by name within age name age ----------- Nikhil 19 Manjeet 20 Nandini 20 Merge two dictionaries¶ Using Python 2 def merge(dict1, dict2): d = {} d.update(dict1) d.update(dict2) return d >>> d1 = {'a': 10, 'b': 8, 'c': 42} >>> d2 = {'d': 6, 'c': 4} >>> merge(d1, d2) {'a': 10, 'b': 8, 'c': 4, 'd': 6} or in Python 3 >>> d1 = {'a': 10, 'b': 8, 'c': 42} >>> d2 = {'d': 6, 'c': 4} >>> {**d1, **d2} {'a': 10, 'b': 8, 'c': 4, 'd': 6} The Join operator (, ) in q has upsert semantics. 
q)d1:`a`b`c!10 8 42 q)d2:`d`c!6 4 q)d1,d2 a| 10 b| 8 c| 4 d| 6 Grade calculator¶ grades.py jack = { "name":"Jack Frost", "assignment" : [80, 50, 40, 20], "test" : [75, 75], "lab" : [78.20, 77.20] } james = { "name":"James Potter", "assignment" : [82, 56, 44, 30], "test" : [80, 80], "lab" : [67.90, 78.72] } dylan = { "name" : "Dylan Rhodes", "assignment" : [77, 82, 23, 39], "test" : [78, 77], "lab" : [80, 80] } jess = { "name" : "Jessica Stone", "assignment" : [67, 55, 77, 21], "test" : [40, 50], "lab" : [69, 44.56] } tom = { "name" : "Tom Hanks", "assignment" : [29, 89, 60, 56], "test" : [65, 56], "lab" : [50, 40.6] } def get_average(marks): total_sum = sum(marks) total_sum = float(total_sum) return total_sum / len(marks) def calculate_total_average(students): assignment = get_average(students["assignment"]) test = get_average(students["test"]) lab = get_average(students["lab"]) # Result based on weightings return (0.1 * assignment + 0.7 * test + 0.2 * lab) def assign_letter_grade(score): if score >= 90: return "A" elif score >= 80: return "B" elif score >= 70: return "C" elif score >= 60: return "D" else : return "E" def class_average_is(student_list): result_list = [] for student in student_list: stud_avg = calculate_total_average(student) result_list.append(stud_avg) return get_average(result_list) students = [jack, james, dylan, jess, tom] for i in students : print(i["name"]) print("=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=") print("Average marks of %s is : %s " %(i["name"], calculate_total_average(i))) print("Letter Grade of %s is : %s" %(i["name"], assign_letter_grade(calculate_total_average(i)))) print() class_av = class_average_is(students) print( "Class Average is %s" %(class_av)) print("Letter Grade of the class is %s " %(assign_letter_grade(class_av))) $ python3 grades.py Jack Frost =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+= Average marks of Jack Frost is : 72.79 Letter Grade of Jack Frost is : C James Potter =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+= Average marks of James Potter is : 75.962 Letter Grade of James Potter is : C Dylan Rhodes =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+= Average marks of Dylan Rhodes is : 75.775 Letter Grade of Dylan Rhodes is : C Jessica Stone =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+= Average marks of Jessica Stone is : 48.356 Letter Grade of Jessica Stone is : E Tom Hanks =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+= Average marks of Tom Hanks is : 57.26 Letter Grade of Tom Hanks is : E Class Average is 72.79 Letter Grade of the class is C Median not average The output above displays the class median score and letter grade, not the average. Oops. Shorter programs are easier to get right. 
grades.q / grade calculator students:flip`name`assignment`test`lab!flip( (`JackFrost; 80 50 40 20; 75 75; 78.20 77.20); (`JamesPotter; 82 56 44 30; 80 80; 67.90 78.72); (`DylanRhodes; 77 82 23 39; 78 77; 80 80); (`JessicaStone; 67 55 77 21; 40 50; 69 44.56); (`TomHanks; 29 89 60 56; 65 56; 50 40.6) ) students[`mark]:sum .1 .7 .2*(avg'')students `assignment`test`lab lg:{"EDCBA"sum 60 70 80 90<\:x} / letter grade from mark update letter:lg mark from `students; show students "Class average: ",string ca:avg students`mark "Class letter grade: ",lg ca q)\l grades.q name assignment test lab mark letter ------------------------------------------------------- JackFrost 80 50 40 20 75 75 78.2 77.2 72.79 C JamesPotter 82 56 44 30 80 80 67.9 78.72 75.962 C DylanRhodes 77 82 23 39 78 77 80 80 75.775 C JessicaStone 67 55 77 21 40 50 69 44.56 48.356 E TomHanks 29 89 60 56 65 56 50 40.6 57.26 E "Class average: 66.0286" "Class letter grade: D" Mirror characters in a string¶ def mirrorChars(s, k): original = 'abcdefghijklmnopqrstuvwxyz' reverse = 'zyxwvutsrqponmlkjihgfedcba' m = dict(zip(original,reverse)) lst = list(s) ti = range(k-1, len(lst)) for i in ti: lst[i] = m[lst[i]] return ''.join(lst) >>> mirrorChars('paradox', 3) 'paizwlc' mirrorChars:{[s;k] m:{x!reverse x}.Q.a; / mirror dictionary ti:(k-1)_ til count s; / target indexes @[s;ti;m] } q)mirrorChars["paradox";3] "paizwlc" Python and q solutions implement the same strategy: - write a mirror dictionary m - identify the indexes to be targeted ti - replace the characters at those indexes with their mirrors The Python has the further steps of converting the string to a list and back again. Count frequency¶ >>> lst = ([1, 1, 1, 5, 5, 3, 1, 3, 3, 1, 4, 4, 4, 2, 2, 2, 2]) >>> from collections import Counter >>> Counter(lst) Counter({1: 5, 2: 4, 3: 3, 4: 3, 5: 2}) q)lst:1 1 1 5 5 3 1 3 3 1 4 4 4 2 2 2 2 q)count each group lst 1| 5 5| 2 3| 3 4| 3 2| 4 Tuples to dictionary¶ >>> tups = [("akash", 10), ("gaurav", 12), ("anand", 14), ("suraj", 20), ... ("akhil", 25), ("ashish", 30)] >>> {t[0]:t[1] for t in tups} {'akash': 10, 'gaurav': 12, 'anand': 14, 'suraj': 20, 'akhil': 25, 'ashish': 30} q)tups:(("akash";10);("gaurav";12);("anand";14);("suraj";20);("akhil";25);("ashish";30)) q)(!).flip tups "akash" | 10 "gaurav"| 12 "anand" | 14 "suraj" | 20 "akhil" | 25 "ashish"| 30 Here we flip the tuples to get two lists, which we pass to Apply (. ) as the arguments to Dict (! ). The heading suggests a more general problem than turning a list of pairs into a dictionary. 
In q, the general case, with tuples of unspecified length, is handled by keyed tables.

// @kind function // @category utility // @desc Retrieve a previously generated model from disk // @param config {dictionary} Information about a previous run of AutoML // including the feature extraction procedure used and the best model // produced // @returns {<} Model previously fitted and saved to disk, retrieved using // the configuration information utils.loadModel:{[config] modelLibrary:config`modelLib; loadFunction:$[modelLibrary~`sklearn; .p.import[`joblib][`:load]; modelLibrary~`keras; $[check.keras[]; .p.import[`keras.models][`:load_model]; '"Keras model could not be loaded" ]; modelLibrary~`torch; $[0~checkimport 1; .p.import[`torch][`:load]; '"Torch model could not be loaded" ]; modelLibrary~`theano; $[0~checkimport 5; .p.import[`joblib][`:load]; '"Theano model could not be loaded" ]; '"Model Library must be one of 'sklearn', 'keras', 'torch' or 'theano'" ]; modelPath:config[`modelsSavePath],string config`modelName; modelFile:$[modelLibrary in`sklearn`theano; modelPath; modelLibrary in`keras; modelPath,".h5"; modelLibrary~`torch; modelPath,".pt"; '"Unsupported model type provided" ]; loadFunction pydstr modelFile } // @kind function // @category utility // @desc Generate the path to a model based on user-defined dictionary // input. This assumes no knowledge of the configuration, rather this is the // gateway to retrieve the configuration and models. // @param dict {dictionary} Configuration detailing where to retrieve the // model which must contain one of the following: // 1. Dictionary mapping `startDate`startTime to the date and time // associated with the model run. // 2. Dictionary mapping `savedModelName to a model named for a run // previously executed.
// @returns {char[]} Path to the model information utils.modelPath:{[dict] pathStem:path,"/outputs/"; model:$[all `startDate`startTime in key dict;utils.nearestModel[dict];dict]; keyDict:key model; pathStem,$[all `startDate`startTime in keyDict; $[all(-14h;-19h)=type each dict`startDate`startTime; "dateTimeModels/", ssr[string[model`startDate],"/run_",string[model`startTime],"/";":";"."]; '"Types provided for date/time retrieval must be a date and", " time respectively" ]; `savedModelName in keyDict; $[10h=type model`savedModelName; "namedModels/",model[`savedModelName],"/"; -11h=type model`savedModelName; "namedModels/",string[model`savedModelName],"/"; '"Types provided for model name based retrieval must be a string/symbol" ]; '"A user must define model start date/time or model name."; ] } // @kind function // @category utility // @desc Extract model meta while checking that the directory for the // specified model exists // @param modelDetails {dictionary} Details of current model // @param pathToMeta {symbol} Path to previous model metadata hsym // @returns {dictionary} Returns either extracted model metadata or errors out utils.extractModelMeta:{[modelDetails;pathToMeta] details:raze modelDetails; modelName:$[10h=type raze value modelDetails;;{sv[" - ";string x]}]details; errFunc:{[modelName;err]'"Model ",modelName," does not exist\n"}modelName; @[get;pathToMeta;errFunc] } // @kind data // @category utility // @desc Dictionary outlining the keys which must be equivalent for // data retrieval in order for a dataset not to be loaded twice (assumes // tabular return under equivalence) // @type dictionary utils.dataType:`ipc`binary`csv! (`port`select;`directory`fileName;`directory`fileName) // @kind data // @category utility // @desc Dictionary with console print statements to reduce clutter // @type dictionary utils.printDict:(!) . flip( (`describe;"The following is a breakdown of information for each of the ", "relevant columns in the dataset"); (`errColumns;"The following columns were removed due to type restrictions", " for "); (`preproc;"Data preprocessing complete, starting feature creation"); (`sigFeat;"Feature creation and significance testing complete"); (`totalFeat;"Total number of significant features being passed to the ", "models = "); (`select;"Starting initial model selection - allow ample time for large", " datasets"); (`scoreFunc;"Scores for all models using "); (`bestModel;"Best scoring model = "); (`modelFit;"Continuing to final model fitting on testing set"); (`hyperParam;"Continuing to hyperparameter search and final model fitting ", "on testing set"); (`kerasClass;"Test set does not contain examples of each class, removing ", "multi-class keras models"); (`torchModels;"Attempting to run Torch models without Torch installed, ", "removing Torch models"); (`theanoModels;"Attempting to run Theano models without Theano installed, ", "removing Theano models"); (`latexError;"The following error occurred when attempting to run latex", " report generation:\n"); (`score;"Best model fitting now complete - final score on testing set = "); (`confMatrix;"Confusion matrix for testing set:"); (`graph;"Saving down graphs to "); (`report;"Saving down procedure report to "); (`meta;"Saving down model parameters to "); (`model;"Saving down model to ")) // @kind data // @category utility // @desc Dictionary of warning print statements that can be turned // on/off.
If two elements are within a key, the first element is the warning // given when ignoreWarnings=2, the second is the warning given when // ignoreWarnings=1. // @type dictionary utils.printWarnings:(!) . flip( (`configExists;("A configuration file of this name already exists"; "A configuration file of this name already exists and will be ", "overwritten")); (`savePathExists;("The savePath chosen already exists, this run will be", " exited"; "The savePath chosen already exists and will be overwritten")); (`loggingPathExists;("The logging path chosen already exists, this run ", "will be exited"; "The logging path chosen already exists and will be overwritten")); (`printDefault;"If saveOption is 0, logging or printing to screen must be ", "enabled. Defaulting to .automl.utils.printing:1b"); (`pythonHashSeed;"For full reproducibility between q processes of the NLP ", "word2vec implementation, the PYTHONHASHSEED environment variable must ", "be set upon initialization of q. See ", "https://code.kx.com/q/ml/automl/ug/options/#seed for details."); (`neuralNetWarning;("Limiting the models being applied. No longer running ", "neural networks or SVMs. Upper limit for number of targets set to: "; "It is advised to remove any neural network or SVM based models from ", "model evaluation. Currently running with a number of data points in", " excess of: ")) ) // @kind data // @category utility // @desc Decide how warning statements should be handled. // 0=No warning or action taken // 1=Warning given but no action taken. // 2=Warning given and appropriate action taken. // @type int utils.ignoreWarnings:2 // @kind data // @category utility // @desc Default printing and logging functionality // @type boolean utils.printing:1b utils.logging :0b // @kind function // @category api // @desc Print string to stdout or log file // @param filename {symbol} Filename to apply to log of outputs to file // @param val {string} Item that is to be displayed to standard out of any type // @param nline1 {int} Number of new line breaks before the text that are // needed to 'pretty print' the display // @param nline2 {int} Number of new line breaks after the text that are needed // to 'pretty print' the display // @return {::} String is printed to std or to log file utils.printFunction:{[filename;val;nline1;nline2] if[not 10h~type val;val:.Q.s val]; newLine1:nline1#"\n"; newLine2:nline2#"\n"; printString:newLine1,val,newLine2; if[utils.logging; h:hopen hsym`$filename; h printString; hclose h; ]; if[utils.printing;-1 printString]; } // @kind function // @category utility // @desc Retrieve the model which is closest in time to // the user specified `startDate`startTime where nearest is // here defined as the closest preceding model // @param dict {dictionary} information about the start date and // start time of the model to be retrieved mapping `startDate`startTime // to their associated values // @returns {dictionary} The model whose start date and time most closely // matches the input utils.nearestModel:{[dict] timeMatch:sum dict`startDate`startTime; datedTimed :utils.getTimes[]; namedModels:utils.parseNamedFiles[]; if[(();())~(datedTimed;namedModels); '"No named or dated and timed models in outputs folder,", " please generate models prior to model retrieval" ]; allTimes:asc raze datedTimed,key namedModels; binLoc:bin[allTimes;timeMatch]; if[-1=binLoc;binLoc:binr[allTimes;timeMatch]]; nearestTime:allTimes binLoc; modelName:namedModels nearestTime; if[not (""~modelName)|()~modelName; :enlist[`savedModelName]!enlist
neg[1]_2_modelName]; `startDate`startTime!("d";"t")$\:nearestTime } // @kind function // @category utility // @desc Retrieve the timestamp associated // with all dated/timed models generated historically // @return {timestamp[]} The timestamps associated with // each of the previously generated non-named models utils.getTimes:{ dateTimeFiles:key hsym`$path,"/outputs/dateTimeModels/"; $[count dateTimeFiles;utils.parseModelTimes each dateTimeFiles;()] } // @kind function // @category utility // @desc Generate a timestamp for each timed file within the // outputs folder // @param folder {symbol} name of a dated folder within the outputs directory // @return {timestamp} an individual timestamp denoting the date+time of a run utils.parseModelTimes:{[folder] fileNames:string key hsym`$path,"/outputs/dateTimeModels/",string folder; "P"$string[folder],/:"D",/:{@[;2 5;:;":"] 4_x}each fileNames,\:"000000" } // @kind function // @category utility // @desc Retrieve the dictionary mapping timestamp of // model generation to the name of the associated model // @return {dictionary} A mapping between the timestamp associated with // start date/time and the name of the model produced utils.parseNamedFiles:{ (!).("P*";"|")0:hsym`$path,"/outputs/timeNameMapping.txt" } // @kind function // @category utility // @desc Delete files and folders recursively // @param filepath {symbol} File handle for file or directory to delete // @return {::|err} Null on success, an error if attempting to delete // folders outside of automl utils.deleteRecursively:{[filepath] if[not filepath>hsym`$path;'"Delete path outside of scope of automl"]; orderedPaths:{$[11h=type d:key x;raze x,.z.s each` sv/:x,/:d;d]}filepath; hdel each desc orderedPaths; } // @kind function // @category utility // @desc Delete models based on user provided information // surrounding the date and time of model generation // @param config {dictionary} User provided config containing start date/time // information; these can be date/time types in the former case or a // wildcarded string // @param pathStem {string} the start of all paths to be constructed, this // is in the general case .automl.path,"/outputs/" // @return {::|err} Null on success, error if attempting to delete folders // which do not have a match utils.deleteDateTimeModel:{[config;pathStem] dateInfo:config`startDate; timeInfo:config`startTime; pathStem,:"dateTimeModels/"; allDates:key hsym`$pathStem; relevantDates:utils.getRelevantDates[dateInfo;allDates]; dateCheck:(1=count relevantDates)&0>type relevantDates; relevantDates:string $[dateCheck;enlist;]relevantDates; datePaths:(pathStem,/:relevantDates),\:"/"; fileList:raze{x,/:string key hsym`$x}each datePaths; relevantFiles:utils.getRelevantFiles[timeInfo;fileList]; utils.deleteRecursively each hsym`$relevantFiles; emptyPath:where 0=count each key each datePaths:hsym`$datePaths; if[count emptyPath;hdel each datePaths emptyPath]; }

.dotz.set[`.z.pw;p0[`pw;value .dotz.getcommand[`.z.pw];;]]; .dotz.set[`.z.po;p1[`po;value .dotz.getcommand[`.z.po];]]; .dotz.set[`.z.pc;p1[`pc;value .dotz.getcommand[`.z.pc];]]; .dotz.set[`.z.wo;p1[`wo;value .dotz.getcommand[`.z.wo];]];
.dotz.set[`.z.wc;p1[`wc;value .dotz.getcommand[`.z.wc];]]; .dotz.set[`.z.ws;p2[`ws;value .dotz.getcommand[`.z.ws];]]; .dotz.set[`.z.exit;p2[`exit;value .dotz.getcommand[`.z.exit];]]; .dotz.set[`.z.pg;p2[`pg;value .dotz.getcommand[`.z.pg];]]; .dotz.set[`.z.pi;p2[`pi;value .dotz.getcommand[`.z.pi];]]; .dotz.set[`.z.ph;p2[`ph;value .dotz.getcommand[`.z.ph];]]; .dotz.set[`.z.pp;p2[`pp;value .dotz.getcommand[`.z.pp];]]; .dotz.set[`.z.ps;p3[`ps;value .dotz.getcommand[`.z.ps];]];] ================================================================================ FILE: TorQ_code_handlers_permissions.q SIZE: 10,977 characters ================================================================================ \d .pm if[@[1b; `.access.enabled;0b]; ('"controlaccess.q already active";exit 1) ] enabled:@[value;`enabled;0b] // whether permissions are enabled maxsize:@[value;`maxsize;200000000] // the maximum size of any returned result set readonly:@[value;`.readonly.enabled;0b] val:$[readonly;reval;eval] valp:$[readonly;{reval parse x};value] / constants ALL:`$"*"; / used to indicate wildcard/superuser access to functions/data err.:(::); err[`func]:{"pm: user role does not permit running function [",string[x],"]"} err[`selt]:{"pm: no read permission on [",string[x],"]"} err[`selx]:{"pm: unsupported select statement, superuser only"} err[`updt]:{"pm: no write permission on [",string[x],"]"} err[`expr]:{"pm: unsupported expression, superuser only"} err[`quer]:{"pm: free text queries not permissioned for this user"} err[`size]:{"pm: returned value exceeds maximum permitted size"} / determine whether the system outputs booleans (permission check only) or evaluates query runmode:@[value;`runmode;1b] / determine whether unlisted variables are auto-allowlisted permissivemode:@[value; `permissivemode; 0b] / schema user:([id:`symbol$()]authtype:`symbol$();hashtype:`symbol$();password:()) groupinfo:([name:`symbol$()]description:()) roleinfo:([name:`symbol$()]description:()) usergroup:([]user:`symbol$();groupname:`symbol$()) userrole:([]user:`symbol$();role:`symbol$()) functiongroup:([]function:`symbol$();fgroup:`symbol$()) access:([]object:`symbol$();entity:`symbol$();level:`symbol$()) function:([]object:`symbol$();role:`symbol$();paramcheck:()) virtualtable:([name:`symbol$()]table:`symbol$();whereclause:()) publictrack:([name:`symbol$()] handle:`int$()) / api adduser:{[u;a;h;p] if[u in key groupinfo;'"pm: cannot add user with same name as existing group"]; user,:(u;a;h;p)} removeuser:{[u]user::.[user;();_;u]} addgroup:{[n;d] if[n in key user;'"pm: cannot add group with same name as existing user"]; groupinfo,:(n;d)} removegroup:{[n]groupinfo::.[groupinfo;();_;n]} addrole:{[n;d]roleinfo,:(n;d)} removerole:{[n]roleinfo::.[roleinfo;();_;n]} addtogroup:{[u;g] if[not g in key groupinfo;'"pm: no such group, .pm.addgroup first"]; if[not (u;g) in usergroup;usergroup,:(u;g)];} removefromgroup:{[u;g]if[(u;g) in usergroup;usergroup::.[usergroup;();_;usergroup?(u;g)]]} assignrole:{[u;r] if[not r in key roleinfo;'"pm: no such role, .pm.addrole first"]; if[not (u;r) in userrole;userrole,:(u;r)];} unassignrole:{[u;r]if[(u;r) in userrole;userrole::.[userrole;();_;userrole?(u;r)]]} addfunction:{[f;g]if[not (f;g) in functiongroup;functiongroup,:(f;g)];} removefunction:{[f;g]if[(f;g) in functiongroup;functiongroup::.[functiongroup;();_;functiongroup?(f;g)]]} grantaccess:{[o;e;l]if[not (o;e;l) in access;access,:(o;e;l)]} revokeaccess:{[o;e;l]if[(o;e;l) in access;access::.[access;();_;access?(o;e;l)]]} grantfunction:{[o;r;p]if[not 
(o;r;p) in function;function,:(o;r;p)]} revokefunction:{[o;r]if[(o;r) in t:`object`role#function;function::.[function;();_;t?(o;r)]]} createvirtualtable:{[n;t;w]if[not n in key virtualtable;virtualtable,:(n;t;w)]} removevirtualtable:{[n]if[n in key virtualtable;virtualtable::.[virtualtable;();_;n]]} addpublic:{[u;h]publictrack::publictrack upsert (u;h)} removepublic:{[u]publictrack::.[publictrack;();_;u]} cloneuser:{[u;unew;p] adduser[unew;ul[0] ;ul[1]; value (string (ul:raze exec authtype,hashtype from user where id=u)[1]), " string `", p]; addtogroup[unew;` sv value(1!usergroup)[u]]; assignrole[unew;` sv value(1!userrole)[u]]} / permissions check functions / making a dictionary of the parameters and the argument values pdict:{[f;a] d:enlist[`]!enlist[::]; d:d,$[not ca:count a; (); f~`select; (); (1=count a) and (99h=type first a); first a; /if projection first obtain a list of function and fixed parameters (fnfp) 104h=type value f; [fnfp:value value f; (value[fnfp 0][1])!fnfp[1],a]; /get paramaters and make a dictionary with the arguments 101h<>type fp:value[value[f]][1]; fp!a; ((),(`$string til ca))!a ]; d} fchk:{[u;f;a] r:exec role from userrole where user=u; / list of roles this user has o:ALL,f,exec fgroup from functiongroup where function=f; / the func and any groups that contain it c:exec paramcheck from function where (object in o) and (role in r); k:@[;pdict[f;a];::] each c; / try param check functions matched for roles k:`boolean$@[k;where not -1h=type each k;:;0b]; / errors or non-boolean results treated as false max k} / any successful check is sufficient - e.g. superuser trumps failed paramcheck from another role achk:{[u;t;rw;pr] if[fchk[u;ALL;()]; :1b]; if[pr and not t in key 1!access; :1b]; t: ALL,t; g:raze over (exec groupname by user from usergroup)\[u]; / groups can contain groups - chase all exec 0<count i from access where object in t, entity in g, level in (`read`write!(`read`write;`write))[rw]} / expression identification xqu:{(first[x] in (?;!)) and (count[x]>=5)} / Query xdq:{first[x] in .q} / Dot Q isq:{(first[x] in (?;!)) and (count[x]>=5)} query:{[u;q;b;pr] if[not fchk[u;`select;()]; $[b;'err[`quer][]; :0b]]; / must have 'select' access to run free form queries / update or delete in place if[((!)~q[0])and(11h=type q[1]); if[not achk[u;first q[1];`write;pr]; $[b;'err[`updt][first q 1]; :0b]]; $[b; :qexe q; :1b]; ]; / nested query if[isq q 1; $[b; :qexe @[q;1;.z.s[u;;b;pr]]; :1b]]; / select on named table if[11h=abs type q 1; t:first q 1; / virtual select if[t in key virtualtable; vt:virtualtable[t]; q:@[q;1;:;vt`table]; q:@[q;2;:;enlist first[q 2],vt`whereclause]; ]; if[not achk[u;t;`read;pr]; $[b; 'err[`selt][t]; :0b]]; $[b; :qexe q; :1b]; ]; / default - not specifally handled, require superuser if[not fchk[u;ALL;()]; $[b; 'err[`selx][]; :0b]]; $[b; :qexe q; :1b]} dotqd:enlist[`]!enlist{[u;e;b;pr]if[not (fchk[u;ALL;()] or fchk[u;`$string(first e);()]);$[b;'err[`expr][]];:0b];$[b;qexe e;1b]}; dotqd[`lj`ij`pj`uj]:{[u;e;b;pr] $[b;val @[e;1 2;expr[u]];1b]} dotqd[`aj`ej]:{[u;e;b;pr] $[b;val @[e;2 3;expr[u]];1b]} dotqd[`wj`wj1]:{[u;e;b;pr] $[b;val @[e;2;expr[u]];1b]} dotqf:{[u;q;b;pr] qf:.q?(q[0]); p:$[null p:dotqd qf;dotqd`;p]; p[u;q;b;pr]} / flatten an arbitrary data structure, maintaining any strings flatten:{raze $[10h=type x;enlist enlist x;1=count x;x;.z.s'[x]]} / string non-strings, maintain strings str:{$[10h=type x;;string]x}' lamq:{[u;e;b;pr] / get names of all defined variables to look for references to in expression rt:raze 
.api.varnames[;"v";1b]'[.api.allns[]]; / allow public tables to always be accessed rt:rt except distinct exec object from access where entity=`public; / flatten expression & tokenize to extract any possible variable references pq:`$distinct -4!raze(str flatten e),'" "; / filter expression tokens to those matching defined variables rqt:rt inter pq; prohibited:rqt where not achk[u;;`read;pr] each rqt; if[count prohibited;'" | " sv .pm.err[`selt] each prohibited]; $[b; :exe e; :1b]} exe:{v:$[(104<>a)&100<a:abs type first x;val;valp]x; if[maxsize<-22!v; 'err[`size][]]; v} qexe:{v:val x; if[maxsize<-22!v; 'err[`size][]]; v} / check if arg is symbol, and if so if type is <100h i.e. variable - if name invalid, return read error isvar:{$[-11h<>type x;0b;100h>type @[get;x;{[x;y]'err[`selt][x]}[x]]]} mainexpr:{[u;e;b;pr] / store initial expression to use with value ie:e; e:$[10=type e;parse e;e]; / variable reference if[isvar f:first e; if[not achk[u;f;`read;pr]; $[b;'err[`selt][f]; :0b]]; :$[b;qexe $[f in key virtualtable;exec (?;table;enlist whereclause;0b;()) from virtualtable[f];e];1b]; ]; / named function calls if[-11h=type f; if[not fchk[u;f;1_ e]; $[b;'err[`func][f]; :0b]]; $[b; :exe ie; :1b]; ]; / queries - select/update/delete if[isq e; :query[u;e;b;pr]]; / .q keywords if[xdq e;:dotqf[u;e;b;pr]]; / lambdas - value any dict args before razing if[any (100 104h)in type each raze @[e;where 99h=type'[e];value]; :lamq[u;ie;b;pr]]; / if we get down this far we don't have specific handling for the expression - require superuser if[not (fchk[u;ALL;()] or fchk[u;`$string(first e);()]); $[b;'err[`expr][f]; :0b]]; $[b; exe ie; 1b]} / projection to determine if function will check and execute or return bool, and in second arg run in permissive mode expr:mainexpr[;;runmode;permissivemode] allowed:mainexpr[;;0b;0b]

defeps:(!) . flip ( (L2R_LR;0.01); (L2R_L2LOSS_SVC;0.01); (L2R_L2LOSS_SVR;0.001); (L2R_L2LOSS_SVC_DUAL;0.1); (L2R_L1LOSS_SVC_DUAL;0.1); (MCSVM_CS;0.1); (L2R_LR_DUAL;0.1); (L1R_L2LOSS_SVC;0.01); (L1R_LR;0.01); (L2R_L1LOSS_SVR_DUAL;0.1); (L2R_L2LOSS_SVR_DUAL;0.1)) defparam:{[prob;param] if[0f>=param`eps;param[`eps]:defeps param`solver_type]; param} sparse:{{("i"$1+i)!x i:where not 0f=x} each flip x} prob:{`x`y!(sparse x;y)} read_problem:{[s] i:s?\:" "; y:i#'s; x:{(!/)"I: "0:x _y}'[1+i;s]; if[3.5>.z.K;x:("i"$key x)!value x]; `bias`x`y!-1f,"F"$(x;y)} write_problem:{ s:(("+";"")0>x`y),'string x`y; s:s,'" ",/:{" " sv ":" sv' string flip(key x;value x)} each x`x; s:s,\:" "; s} ================================================================================ FILE: funq_linreg.q SIZE: 3,690 characters ================================================================================ \c 20 100 \l funq.q plt:.ut.plot[30;15;.ut.c10] -1"generating 2 sets of independent normal random variables"; / NOTE: matrix variables are uppercase -1 .ut.box["**"]( "suppress the desire to flip matrices"; "matlab/octave/r all store data in columns"; "the following matrix *is* a two column matrix in q"); show X:(.ml.bm 10000?)
each 1 1f / perhaps q needs the ability to tag matrices so they can be displayed / (not stored) flipped -1"plotting uncorrelations x,y"; show plt[sum] X -1"using $ to generate correlated x and y"; rho:.8 / correlation X[1]:(rho;sqrt 1f-rho*rho)$X -1"plotting correlations x,y"; show plt[sum] X -1 .ut.box["**"] ( "mmu is usually used for matrix multiplication"; "$ is usually used for vector dot product"; "but they can be used interchangeably"); Y:-1#X X:1#X -1"linear algebra often involves an operation such as"; -1"Y times X transpose or Y*X'. Matlab and Octave can parse"; -1"this syntax and perform the multiplication/transpose"; -1"by a change of indexation rather than physically moving the data"; -1"to get this same effect in q, we can change the"; -1"operation from 'Y mmu flip X' to 'X$/:Y'"; -1"timing with the flip"; \ts:100 Y mmu flip X -1"and without"; \ts:100 X$/:Y -1"fitting a line *without* intercept"; show THETA:Y lsq 1#X -1"to fit intercept, prepend a vector of 1s"; show .ml.prepend[1f] X -1"fitting a line with intercept"; show THETA:Y lsq .ml.prepend[1f] 1#X -1"plotting data with fitted line"; show plt[avg] .ml.append[0N;X,Y],'.ml.append[1]X,.ml.plin[X] THETA; -1"fitting with normal equations (fast but not numerically stable)"; .ml.normeq[Y;.ml.prepend[1f] X] if[2<count key `.qml; -1"qml uses QR decomposition for a more numerically stable fit"; 0N!.qml.mlsqx[`flip;.ml.prepend[1f] X;Y]; ]; -1"its nice to have closed form solution, but what if we don't?"; -1"we can use gradient descent as well"; alpha:.1 / learning rate THETA:enlist theta:2#0f / initial values -1"by passing a learning rate and function to compute the gradient"; -1".ml.gd will take one step in the steepest direction"; mf:.ml.gd[alpha;.ml.lingrad[();Y;X]] mf THETA -1"we can then use q's iteration controls"; -1"to run a fixed number of iterations"; 2 mf/ THETA -1"iterate until the cost is within a tolerance"; cf:.ml.lincost[();X;Y] (.4<0N!cf::) mf/ THETA -1"or even until convergence"; mf over THETA -1"to iterate until cost reductions taper off, we need our own function"; -1"we can change the logging behavior by changing the file handle"; -1"no logging"; first .ml.iter[0N;.01;cf;mf] THETA -1"in-place progress"; first .ml.iter[1;.01;cf;mf] THETA -1"new-line progress"; first .ml.iter[-1;.01;cf;mf] THETA -1"by passing an integer for the limit, we can run n iterations"; first .ml.iter[-1;20;cf;mf] THETA l:1000f / l2 regularization factor -1"we can reduce over-fitting by adding l2 regularization"; gf:.ml.lingrad[.ml.l2[l];Y;X] first .ml.iter[1;.01;cf;.ml.gd[alpha;gf]] THETA -1"we can also use the fmincg minimizer to obtain optimal theta values"; cgf:.ml.lincostgrad[.ml.l2[l];Y;X] first .fmincg.fmincg[1000;cgf;theta] -1"linear regression with l2 regularization has a closed-form solution"; -1"called ridge regression"; -1"in this example, we fit an un-regularized intercept"; .ml.ridge[0f,count[X]#l;Y;.ml.prepend[1f]X] -1"let's check that we've implemented the gradient calculations correctly"; cf:.ml.lincost[.ml.l2[l];Y;X]enlist:: gf:first .ml.lingrad[.ml.l2[l];Y;X]enlist:: .ut.assert . .ut.rnd[1e-6] .ml.checkgrad[1e-4;cf;gf;theta] cgf:.ml.lincostgrad[.ml.l2[l];Y;X] cf:first cgf:: gf:last cgf:: .ut.assert . 
.ut.rnd[1e-6] .ml.checkgrad[1e-4;cf;gf;theta] ================================================================================ FILE: funq_liver.q SIZE: 328 characters ================================================================================ liver.f:"bupa.data" liver.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/" liver.b,:"liver-disorders/" -1"[down]loading liver data set"; .ut.download[liver.b;;"";""] liver.f; liver.XY:((6#"E"),"H";",")0:`$liver.f liver.X:-1_liver.XY liver.c:`mcv`alkphos`sgpt`sgot`gammagt`drinks`train liver.t:flip liver.c!liver.XY ================================================================================ FILE: funq_logreg.q SIZE: 3,371 characters ================================================================================ \c 20 100 \l funq.q \l wdbc.q -1"partitioning wdbc data into train and test"; show d:.ut.part[`train`test!3 1;0N?] "f"$update "M"=diagnosis from wdbc.t y:first get first `Y`X set' 0 1 cut value flip d`train yt:first get first `Yt`Xt set' 0 1 cut value flip d`test -1"the sigmoid function is used to represent a binary outcome"; plt:.ut.plot[30;15;.ut.c10;sum] show plt .ml.sigmoid .1*-50+til 100 / logistic regression cost -1"to use gradient descent, we must first define a cost function"; THETA:enlist theta:(1+count X)#0f; -1"compute cost of initial theta estimate"; .ml.logcost[();Y;X;THETA] if[2<count key `.qml; -1"qml comes with a minimizer that can be called"; -1"with just this cost function:"; opts:`iter,1000,`full`quiet; /`rk`slp`tol,1e-8 0N!first 1_.qml.minx[opts;.ml.logcost[();Y;X]enlist::;THETA]; ]; -1"we can also define a gradient function to make this process faster"; .ml.loggrad[();Y;X;THETA] -1"check that we've implemented the gradient correctly"; rf:.ml.l2[1] cf:.ml.logcost[rf;Y;X]enlist:: gf:first .ml.loggrad[rf;Y;X]enlist:: .ut.assert . .ut.rnd[1e-6] .ml.checkgrad[1e-4;cf;gf;theta] cgf:.ml.logcostgrad[rf;Y;X] cf:first cgf:: gf:last cgf:: .ut.assert . .ut.rnd[1e-6] .ml.checkgrad[1e-4;cf;gf;theta] if[2<count key `.qml; -1"qml can also use both the cost and gradient to improve performance"; 0N!first 1_.qml.minx[opts;.ml.logcostgradf[();Y;X];THETA]; ]; -1"but the gradient calculation often shares computations with the cost"; -1"providing a single function that calculates both is more efficient"; -1".fmincg.fmincg (function minimization conjugate gradient) permits this"; -1 .ut.box["**"]"use '\\r' to create a progress bar with in-place updates"; theta:first .fmincg.fmincg[1000;.ml.logcostgrad[();Y;X];theta] -1"compute cost of initial theta estimate"; .ml.logcost[();Y;X;enlist theta] -1"test model's accuracy"; avg yt="i"$p:first .ml.plog[Xt;enlist theta] -1"lets add some regularization"; theta:(1+count X)#0f; theta:first .fmincg.fmincg[1000;.ml.logcostgrad[.ml.l1[10];Y;X];theta] -1"test model's accuracy"; avg yt="i"$p:first .ml.plog[Xt;enlist theta] show .ut.totals[`TOTAL] .ml.cm["i"$yt;"i"$p] -1"demonstrate a few binary classification evaluation metrics"; -1"how well did we fit the data"; tptnfpfn:.ml.tptnfpfn . "i"$(yt;p) -1"accuracy: ", string .ml.accuracy . tptnfpfn; -1"precision: ", string .ml.precision . tptnfpfn; -1"recall: ", string .ml.recall . tptnfpfn; -1"F1 (harmonic mean between precision and recall): ", string .ml.f1 . tptnfpfn; -1"FMI (geometric mean between precision and recall): ", string .ml.fmi . tptnfpfn; -1"jaccard (0 <-> 1 similarity measure): ", string .ml.jaccard . tptnfpfn; -1"MCC (-1 <-> 1 correlation measure): ", string .ml.mcc . 
tptnfpfn; -1"plot receiver operating characteristic (ROC) curve"; show .ut.plt roc:2#.ml.roc[yt;p] -1"area under the curve (AUC)"; .ml.auc . 2#roc fprtprf:(0 0 .5 .5 1;0 .5 .5 1 1;0w .8 .4 .35 .1) -1"confirm accurate roc results"; .ut.assert[fprtprf] .ml.roc[0 0 1 1;.1 .4 .35 .8] -1"use random values to confirm large vectors don't explode memory"; y:100000?0b p:100000?1f show .ut.plt roc:2#.ml.roc[y;p] -1"confirm auc for random data is .5"; .ut.assert[.5] .ut.rnd[.01] .ml.auc . roc ================================================================================ FILE: funq_mansfield.q SIZE: 339 characters ================================================================================ / mansfield park mansfield.f:"141.txt" mansfield.b:"https://www.gutenberg.org/files/141/old/" -1"[down]loading mansfield park text"; .ut.download[mansfield.b;;"";""] mansfield.f; mansfield.txt:read0 `$mansfield.f mansfield.chapters:1_"CHAPTER" vs "\n" sv 35_-373_mansfield.txt mansfield.s:{(2+first x ss"\n\n")_x} each mansfield.chapters ================================================================================ FILE: funq_markov.q SIZE: 830 characters ================================================================================ \l funq.q \l iris.q / markov clustering / https://www.cs.ucsb.edu/~xyan/classes/CS595D-2009winter/MCL_Presentation2.pdf / example from mcl man page / http://micans.org/mcl/man/mcl.html t:flip `k1`k2`v!"ssf"$\:() t,:`cat`hat,0.2 t,:`hat`bat,0.16 t,:`bat`cat,1.0 t,:`bat`bit,0.125 t,:`bit`fit,0.25 t,:`fit`hit,0.5 t,:`hit`bit,0.16 / take max of bidirectional links, enumerate keys k:() m:.ml.inflate[1;0f] .ml.addloop m|:flip m:.ml.full enlist[2#count k],exec (v;`k?k1;`k?k2) from t .ut.assert[(`hat`bat`cat;`bit`fit`hit)] (get`k!) each .ml.interpret .ml.mcl[2;1.5;0f] over m

Predicting floods with q and machine learning¶ The Frontier Development Lab (FDL) is a public-private partnership run annually with both the European Space Agency (ESA) and National Aeronautics and Space Administration (NASA). The objective of FDL is to bring together researchers from the Artificial Intelligence (AI) and space science sectors to tackle a broad spectrum of challenges in the space industry. The projects this year include challenges in lunar and heliophysics research, astronaut health and disaster prevention. This paper will focus on the Disaster Prevention, Progress and Response (Floods) challenge, for which KX was a partner. The need for AI in disaster prevention¶ Floods are one of the most destructive natural disasters worldwide. All regions can be affected by flooding events and, with the increased variability in weather patterns due to global warming, this is likely to become even more prevalent. The speed at which flooding events can occur, and difficulties in predicting their occurrence, create huge logistical problems for both governmental and non-governmental agencies. Over the past 10 years, floods have caused on average 95 deaths a year in the US alone, making them one of the deadliest weather-related phenomena. Worldwide, floods cost in excess of 40 billion dollars per year, impacting property, agriculture and the health of individuals.
For the duration of the project, we collaborated with the United States Geological Survey (USGS), a scientific agency within the US Department of the Interior. The objective of the organization is to study the landscape of the US and provide information about its natural resources and the natural hazards that affect them. Currently, hydrologists use physical models to help predict floods. These models require predictions to be carefully calibrated for each stream or watershed and careful consideration must be taken for dams, levees, etc. Producing these models is extremely costly due to resource requirements. This limits the areas within the US that can use such systems to better prepare for flood events. The challenge¶ To predict the flood susceptibility of a stream area, the project was separated into two distinct problems. Monthly model Predicting, per month, if a stream height will reach a flood threshold or not. These flood thresholds were set by the National Oceanic and Atmospheric Administration (NOAA) and were location-specific. Knowing which areas are susceptible to flooding, allows locations to better prepare for a flood event. Time-to-peak model Predicting the time to peak of a flood event. When a major rain event occurs, knowing how long it will take for a river to reach its peak height is necessary in order to inform potentially affected individuals if and when they need to evacuate. This can help to reduce structural damage and loss of life during a disaster. Dependencies¶ All development was done with the following software versions: kdb+ 3.6 Python 3.7.0 The following Python modules were also used: TensorFlow 1.14.0 NumPy 1.17.2 pandas 0.24.2 Matplotlib 2.2.2 scikit_learn 1.1.0 xgboost 0.9.0 gmaps 0.9.0 geopandas 0.5.1 ipywidgets 7.5.1 In addition, a number of kdb+ libraries and interfaces were used: embedPy 1.3.2 JupyterQ 1.1.7 ML toolkit 0.3.2 The data¶ This project focuses on six states within the US, over a period of 10 years. Data was taken from ~800 gauge sites, between July 2009 and June 2019. Not all gauge sites had continuous historical data over the period, but all the available data for each site was used. The six states were: New Jersey, Nebraska, South Carolina, New York, South Dakota, and Virginia. The primary reason for choosing these states, was that they exhibit similar climate and landscape to one another. Climate and landscape play a major role in predicting floods, meaning that building a model to predict flood forecasting for the entire US would be extremely difficult as the climates and landscapes vary dramatically between locations. After discussions with hydrologists at USGS, it was decided that focusing on a subset of areas with similar geographies would help to ensure that the models being produced created meaningful results that could be used in a real world scenario. Required datasets and providers: USGS¶ USGS provided its Surface Water dataset. This consisted of the height of a stream as measured by gauges for over 11,000 sites in the US. The data was updated every 15 minutes, with some locations having historical data for over 50 years. As previously mentioned, the data chosen in this case was a subset of these sites based on geographical location. q)meta max_ht_str c | t f a -------| ----- site_no| C date | d height | f PRISM¶ The PRISM Climate Group provides climate observations across the whole of the US. 
This data contains information on total precipitation, minimum/maximum temperature and dew point for each requested latitude and longitude. Spatial and climate datasets are then developed to reveal short and long-term climate patterns. This dataset was used to extract daily precipitation readings from the site locations for each day during the requested 10-year period. q)meta precipall c | t f a -------| ----- site_no| s long | f lat | f elv | f date | d ppt | f National Land Cover Database (NLCD)¶ The NLCD database was collected using Landsat. The Landsat satellite program is a NASA/USGS collaboration which provides the longest continuous space-based record of Earth’s landscape. Landsat’s ground resolution and observation wavelengths allow the current use of land and its change over time to be detected and documented. This provides information such as land-use classification (urban, agriculture, forest, etc.), how well the land allows water to pass through it (impervious surface information) and tree cover. This dataset has updated records every five years from 2006. The granularity of this dataset is related to how quickly land use changes over time. q)meta nlcd c | t f a ---------| ----- site_no | s INTPTLAT | f INTPTLON | f Measure | f REACHCODE| f distance | f imp | f year | j National Hydrology Dataset Plus (NHDPlus)¶ NHDPlus is a geo-spatial hydrologic framework dataset associated with USGS, released in 2006. It is based off the NHD dataset, which provides information about the streams, rivers, canals, lakes and ponds throughout the US. The features used from this dataset were the catchment and watershed (an area of land containing streams that drain into a single stream/river) area characteristics at the stream site locations. Catchment areas are particularly important, as these are the areas of a river/stream in which water is collected and accumulates. This is vital information for predicting whether a flood is likely to occur or not. A description of some important features contained within this dataset are found below: CatAreaSqKm Area of catchments WsAreaSqKm Area of all watersheds Elev Mean of all elevation WtDep Mean of all water table depths Om Mean of all organic matter Perm Mean of all permeability of soils RckDep Mean of all depth to bedrock of soils Clay Mean of all clay values Sand Mean of all sand values Runoff Mean of all runoff values WetIndex Mean wetness index BFI Ratio of base flow to total flow as a percentage DamNrmStor Volume of all reservoirs per unit area q)meta basin c | t f a --------------------| ----- site_no | s CatAreaSqKm | f WsAreaSqKm | f CatAreaSqKmRp100 | f WsAreaSqKmRp100 | f CanalDensCat | f CanalDensWs | f ElevCat | f ElevWs | f .. Flooded Locations And Simulated Hydrographs Project (FLASH)¶ FLASH is a database containing information about flood events within the US. The main goal of the FLASH project was to improve the accuracy and timing when predicting these flash floods. The information used from this dataset was the time taken for a river to reach its peak height after a major rain event. q)meta peak c | t f a ----------| ----- site_no | s lat | f lon | f start_time| z s end_time | z peak_q | f peak_time | z delta_time| f date | d NOAA¶ NOAA is a US governmental agency for monitoring and preserving the climate and environment. This dataset provided flood-level thresholds for locations across the US. These thresholds consisted of four warning stages and the river height for a given location that causes them to reach these stages. 
The four warning stages are: | warning | meaning | |---|---| | Action | Mitigation action needs to be taken in order to prepare for possible hydrological activity. | | Flood | The river height poses a threat to lives, property and businesses. | | Moderate | Some inundation of roads or buildings. A flood warning should be released. | | Major | Extensive inundation of roads and buildings. Considerable evacuations of areas may occur. | q)meta warning c | t f a ----------| ----- GaugeLID | C Status | C Location | C Latitude | f Longitude | f Waterbody | C State | C Observed | C ObsTime | C Units | C Action | C Flood | C ... Feature engineering¶ Given all the different data sources available, it was possible to split the information into three datasets. The datasets differ based on the features contained within them. These features were created by extracting aggregated information from the rainfall and stream height datasets. The features defining each dataset are described below: - Ungauged basin - Features included the past rainfall information of stream sites. This dataset can be used at locations where there is no stream height gauge present, therefore no features were extracted from the stream gauge dataset. The land use data was also included in this dataset. - Gauged basin - Features were extracted containing information about past stream height values before an event. This was also joined with the data from the Ungauged basin above. The Gauged basin dataset can be used for locations containing historical data from stream gauges. - Perfect Forecasts - This dataset extracted features using the current rainfall values for a short period of time after an event. Data from the Ungauged basin and Gauged basin were also included. This dataset is used when weather predictions are available for locations along with historical stream height information. These three datasets were chosen to reflect how these models could be used in a real-world scenario. For the Perfect Forecasts dataset, we assume that future forecasted rainfall is available for the location; however, in some applications this future rainfall prediction cannot always be given with certainty, which is when the Gauged basin or Ungauged basin datasets could be used. The Gauged basin dataset is suitable for stream sites that have historical stream height data recorded over a period of time. When predicting on new sites that have neither this historical data nor future rainfall predictions available, the Ungauged basin dataset is applicable. To obtain these features, the addFeat function was used. This enabled features pertaining to previous information to be extracted. This previous information included the past rainfall and stream height values during a set period of time before an event. Windowed features were also added using this function. These window features provided a summary of how certain values changed, such as rainfall or stream height, over a fixed window of time. Windowed features were included because knowing how fast the rainfall is accumulating in an area, along with how the stream height is changing before an event, can have a major impact on predicting if a flood will occur.
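As a toy illustration of such a windowed summary (the values below are made up), the maximum moving average used later for this purpose is simply max composed with mavg over the chosen window length:

q)ppt:0 1 5 0 2 0 8 1f          / hypothetical daily rainfall at one site
q)3 mavg ppt                    / 3-day moving average
0 0.5 2 2 2.333333 0.6666667 3.333333 3
q)max 3 mavg ppt                / the windowed feature: the fastest 3-day accumulation
3.333333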
addFeat takes the following arguments as inputs: - table that is being updated - how many values to calculate (integer list) - column to apply the function to (symbol) - new column name (symbol) - dictionary of where, group-by and what type of functional statement is used - function that is applied to the grouped columns // Upstream, previous, windowed and lagged extraction function addFeat:{[tab;n;col;newcol;dict;fnc] dict[`w][ tab; dict`wh; dict`gr; $[1<count col;raze;]applyFunc[n;newcol;;fnc] each col] } The following functions are also called within addFeat : // Create new column name colname:{enlist`$string[y],"_",string[x],"_",string[z]} // Apply functions to appropriate columns // and rename columns appropriately applyFunc:{[n;newcol;col;fnc] raze{[n;newcol;col;fnc] colname[n;newcol;col]!enlist fnc[n;col] }[col;newcol;;fnc] each raze n } Whether or not flooding will occur, also depends on what happens in the stream prior to reaching the stream site. This information can be extracted by looking at what happens at locations upstream (direction towards the source of the river), such as the upstream precipitation and stream height values. The ordering of the stream ID number allows the addFeat function to extract the upstream values from the precipitation and stream height datasets. The ID number of each stream site consisted of at least eight digits. The first two digits were the grouping number of the river-basin catchment. The remaining digits were in ascending order based on the location of the gauge along the stream. An example of this grouping is given below q)select distinct site_no by catch_id:2#'string each site_no from precip catch_id| site_no .. --------| ------------------------------------------------------------.. "01" | `01303500`01304000`01304500`01305000`01305500`01306460`01308.. "02" | `02077000`02079500`02079640`02110400`02110500`02110550`02110.. "03" | `03010674`03010820`03011020`03014500`03164000`03165000`03165.. "04" | `04213319`04213376`04213394`04213401`04213500`04214060`04214.. "05" | `05050000`05290000`05291000 .. "06" | `06334500`06354881`06355500`06356500`06357800`06359500`06360.. This characteristic of the site ID numbering is then applied in order to extract the upstream features. Only the first upstream value of each stream site was obtained as this is the most influential upstream information affecting the results. // Calculate value of lagged features prv:{(xprev;y;x)} // Grouping site numbers by catchment, using the first 2 digits catch_site:((';#);2;($:;`site_no)) site_date :`site_no`date!(catch_site;`date) dict:`w`wh`gr!(!;();site_date) // Extract the 1st upstream precipitation for each site per day upstr_ppt :addFeat[precip;1;`ppt;`upstr;dict;prv] // Extract the 1st upstream stream height for each site per day upstr_height:addFeat[maxht;1;`height;`upstr;dict;prv] When predicting flood events, it is important to also look at what happens in the days prior to the event, such as the amount of rainfall that fell or how the height of the stream changed. The addFeat function was applied to obtain these historical values of rainfall and stream gauge height, for both the current and upstream locations. After discussions with hydrologists, it was decided that the 10 days prior to an event would be included. Any longer than that was deemed to be irrelevant and had the potential to negatively affect the predictions. 
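To make the mechanics concrete, here is a minimal sketch on a toy table (hypothetical sites and rainfall values) of what the upstream and lagged extractions reduce to: grouped appropriately, xprev simply pulls the value from the previous row of each group.

/ toy data: two sites in the same catchment observed on two days (values are made up)
t:([]site_no:`01303500`01304000`01303500`01304000;date:2009.07.01 2009.07.01 2009.07.02 2009.07.02;ppt:1.2 0.4 2.5 0.7)
/ upstream value: previous site (ascending site number) within the same catchment and day
update upstr_ppt_1:1 xprev ppt by catch:2#'string site_no,date from t
/ lagged value: previous day's rainfall at the same site
update prev_ppt_1:1 xprev ppt by site_no from `date xasc t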
dict[`gr]:site:(enlist `site_no)!enlist `site_no prev_rain:addFeat[ upstr_ppt; enlist 1_til 10; `ppt`upstr_ppt_1; `prev; sited:dict; prv] all_height:addFeat[ upstr_height; enlist 1_til 10; `height`upstr_height_1; `prev; dict; prv] q)reverse cols prev_rain //print the new columns created `prev_upstr_height_1_9`prev_upstr_height_1_8`prev_upstr_height_1_7`p.. q)reverse cols all_height //print the new columns created `prev_upstr_ppt_1_9`prev_upstr_ppt_1_8`prev_upstr_ppt_1_7`prev_upstr.. The above features were applicable to both the monthly and time to peak models. However, additional features that were problem-specific were also added. Monthly model When forecasts were provided for each model, it was important to include information about the maximum moving average rainfall over different time windows. This windowed feature gives an insight into how fast the rain was falling over a set time period, which may indicate whether a flood will occur or not. This feature was added to the dataset using the functions displayed below. // Calculate maximal moving average max_mavg:{(max;(mavg;y;x))} dict[`gr]:(`date`site_no)!(($;enlist`month;`date);`site_no) all_rain:addFeat[ prev_rain; enlist 1_til 15; `ppt`upstr_ppt_1; `window; dict; max_mavg] Time-to-peak model The hours before a flood event can provide important information about how quickly the stream gauge height is moving. This is incredibly useful to a model predicting how long it will take for the stream to reach its peak height. The information extracted at a given stream location comprised of the maximum moving averages over different bucket sizes for the two days before the event. This was found using stream height data from USGS, which was updated at 15-minute intervals. To make the times from the stream-height dataset consistent with the FLASH dataset, named below as peak_data , the times were converted to be time-zone agnostic. These zones are: EDT Eastern Daylight Time CDT Central Daylight Time EST Eastern Standard Time // Obtain timezone (unk) information for each site // from the gauges hdb (str) time_zone:raze{ si:x 0; dd:x 1; select `$first site_no,`$first unk from str where date=first dd,si=`$site_no } each site_date q)time_zone site_no unk ------------ 01312000 EST 01315000 EST 01315500 EST 01318500 EST 01321000 EST q)// Join time-zone information to the FLASH dataset q)5#peak_data:peak_data ij`site_no xkey time_zone site_no lat lon start_time end_time .. ---------------------------------------------------------------------.. 01396500 40.6778 -74.87917 2009.07.01T16:30:00.000 2009.07.01T20:15:0.. 01397000 40.5722 -74.86806 2009.07.01T19:00:00.000 2009.07.02T02:00:0.. 01377500 40.9928 -74.02111 2009.07.01T23:45:00.000 2009.07.02T00:15:0.. 06478500 43.1858 -97.63528 2009.07.06T16:45:00.000 2009.07.22T15:45:0.. 04215500 42.8297 -78.77528 2009.07.10T21:15:00.000 2009.07.11T01:45:0.. // Modify the time-zone based on extracted information from gauges hdb change_zone:{ tm:x 0; tz:x 1; $[tz=`EDT;tm-04:00;tz=`CDT;tm-05:00;tm-06:00] } After change_zone was applied to the peak_data table, the start_time, end_time and peak_time values were updated in order to reflect the correct time-zone. q)5#peak_data site_no lat lon start_time end_time .. ---------------------------------------------------------------------.. 01396500 40.6778 -74.87917 2009.07.01T12:30:00.000 2009.07.01T16:15:0.. 01397000 40.5722 -74.86806 2009.07.01T15:00:00.000 2009.07.01T22:00:0.. 01377500 40.9928 -74.02111 2009.07.01T19:45:00.000 2009.07.01T20:15:0.. 
06478500 43.1858 -97.63528 2009.07.06T11:45:00.000 2009.07.22T10:45:0.. 04215500 42.8297 -78.77528 2009.07.10T15:15:00.000 2009.07.10T19:45:0.. The features, along with information about the projected rainfall in the days following the event, were also extracted and joined onto the dataset. // The date range of interest range:{(within;x;(,;(+;(-:;2);y);y))} // The where clause to be applied wh:{( range[`date;x 1]; range[`datetime;x 2]; (=;enlist first x; ($;enlist`;`site_no)) ) } // Dictionary to be passed to the feat function dict:{`w`wh`gr!(?;wh x;0b)} // Extract the windowed height features for // 2, 4, 12 and 48 hours before each event raze window_ht_prev:{ addFeat[str; enlist 2 4 12 48; `height; `window_prev; dict x; max_mavg] } each flip peak_data `site_no`date`start_time q)10#raze window_ht_prev window_prev_height_2 window_prev_height_4 window_prev_height_12 windo.. ---------------------------------------------------------------------.. 6.25 6.25 6.245 6.211.. 2.89 2.89 2.89 2.89 .. 1.92 1.92 1.92 1.92 .. 12.875 12.8625 12.84833 12.81.. 2.38 2.38 2.38 2.38 .. 3.22 3.22 3.22 3.22 .. 2.82 2.82 2.82 2.816.. 1.52 1.52 1.52 1.52 .. 0.47 0.47 0.47 0.47 .. 1.67 1.67 1.668333 1.657.. wh:{((within;`date;(,;y 1;(+;y 1;x)));(=;enlist first y;`site_no))} dict:{`w`wh`gr!(?;wh[x;y];0b)} // Extract the rainfall predictions for each site // up to 3 days after each event raze rain_pred:{addFeat[ all_rain; enlist 1_til x; `ppt`upstr_ppt_1; `fut_window; dict[x;y]; max_mavg] }[3] each flip peak_data `site_no`date q)10#raze rain_pred fut_window_ppt_1 fut_window_ppt_2 fut_window_upstr_ppt_1_1 fut_window.. ---------------------------------------------------------------------.. 3.06 1.665 2.29 1.265 .. 1.53 0.935 2.49 1.39 .. 0.71 0.605 0.71 0.605 .. 1.07 0.535 0.32 0.16 .. 0.44 0.22 0.62 0.31 .. 0.62 0.31 1.78 0.89 .. 1.8 0.97 0.66 0.57 .. 1.61 0.935 1.58 0.93 .. 2.12 1.095 0.43 0.245 .. 0.87 0.79 0.69 0.51 .. Target data¶ Monthly model The target data used in this case was the flood-level warning, extracted from the NOAA dataset. The latitude and longitude of these provided thresholds did not exactly match the stream-gauge locations. As such, the latitudes and longitudes of both the stream locations and NOAA threshold readings were joined using a k-dimensional tree (kd-tree) nearest-neighbors algorithm. This algorithm is explained in Appendix 1. The code used to achieve this nearest-neighbors calculation is seen below with the algorithm implementation contained in full in the GitHub repository associated with this paper. // Get latitudes and longitudes of the warning and stream gauge sites latl:raze each warning[`Latitude`Longitude],'gauges[`dec_lat_va`dec_long_va] // Create a kd tree of the latitudes and longitudes tabw:kd.buildtree[latl;2] // Get indices of where the stream gauge lat and long are. 
// Will be used for indexing in the kd tree gauge_val:count[warning]+til count gauges // Calculate the threshold nearest neighbor of each stream-gauge // location using the kdtree nnwarn:kd.nns[ ; tabw; (count[warning]#0),count[gauges]#1; flip wlatl; `edist] each gauge_val // Make a table indicating the index of the nearest neighbor // to each gauge site along with the corresponding distance joins:([site_no:gauges`site_no]nn:nnwarn[;0];ndw:nnwarn[;1]) q)10#joins site_no | nn ndw --------| --------------- 01303500| 4652 0.1774627 01304000| 1545 0.1393672 01304500| 3475 0.04823593 01305000| 2800 0.1609363 01305500| 1508 0.07574579 01306460| 1508 0.1871804 01308000| 2458 0.05487667 01308500| 2458 0.04829199 01309500| 2458 0.100588 01309950| 1596 0.07899928 q)10#floodlvl:(maxht ij joins)lj`nn xkey warning site_no date height nn ndw GaugeLID Status Loca.. ---------------------------------------------------------------------.. 01303500 2009.07.01 0.75 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.02 0.53 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.03 0.36 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.04 0.32 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.05 0.31 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.06 0.3 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.07 0.84 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.08 0.6 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.09 0.4 4652 0.1774627 "WESN6" "no_flooding" "Gle.. 01303500 2009.07.10 0.35 4652 0.1774627 "WESN6" "no_flooding" "Gle.. This dataset was then joined onto the stream-gauge data, adding columns counting the number of times a given stream gauge reached each warning level per month. For the sake of this project, we only wanted to focus on the Flood stage. This level was chosen in an attempt to achieve a more balanced dataset while still predicting a meaningful target. Choosing either of the more severe levels would result in a very low number of targets making it more difficult to discern events of interest. Our target data was a binary label denoting whether the flood warning level was reached in a given month. Any site that claimed to flood more than 28 days per month were omitted from the dataset as we only wanted to focus on events that occurred infrequently and were more difficult to predict. threshold:0!select first Action, first Flood, first Moderate, first Major, no_Action:count where height>Action, no_Flood:count where height>Flood, no_Mod:count where height>Moderate, no_Major:count where height>Major by site_no, "m"$date from floodlvl threshold:select from threshold where no_Flood<28 threshold[`target]:threshold[`no_Flood]>0 q)threshold site_no date Action Moderate Major target.. ---------------------------------------------.. 01200000 2009.07 6 10 12 0 .. 01200000 2009.08 6 10 12 0 .. 01200000 2009.09 6 10 12 0 .. 01200000 2009.10 6 10 12 0 .. 01200000 2009.11 6 10 12 0 .. 01200000 2009.12 6 10 12 0 .. 01200000 2010.01 6 10 12 0 .. 01200000 2010.02 6 10 12 0 .. 01200000 2010.03 6 10 12 0 .. 01200000 2010.04 6 10 12 0 .. 01200000 2010.05 6 10 12 0 .. 01200000 2010.06 6 10 12 0 .. ... Time-to-peak model The FLASH dataset was then used for the time-to-peak model, which highlights how long it will take a stream gauge location to reach its peak height after the rain event. Only dates within the 10-year period and site numbers within the six states mentioned were included. 
The target data was calculated by subtracting the start-time (denoted by the start of a major rainfall event at the location) from the time that the peak height was found to occur. This was then converted into a binary classification problem by setting a threshold for a ‘flash flood’ at 3.5 hours after the major rainfall event. Any time above this was set to 0b and less than this time was 1b . This threshold was chosen after discussions with hydrologists, who found this to be a reasonable split in the dataset. q)10#peak_data[`delta_peak]:24 * (-). peak_data`peak_time`start_time 0.25 3.5 0.25 130.5 1.5 2.25 6.5 1.5 0.25 20.75 q)10#peak_data[`target]:peak_data[`delta_peak]<3.5 1110110110b Spatial and temporal joins¶ Monthly model After joining the stream-height and precipitation tables from USGS and PRISM, the dataset was then broken up into monthly values. By taking the first day of each month at a site, it was possible to obtain the maximum moving averages of precipitation for different window sizes for a given month, along with the precipitation and height values for the last few days of the month prior. This data was then joined to the stream_char dataset, which consisted of the basin and land-cover characteristics, and the “threshold” dataset, based on month and site number. Lagged features were then added to this dataset, which included information such as did a flood occur in the month prior, the year prior and also how often on average did the given location flood. all_monthly_data:addFeat[ all_monthly_data; enlist 1 12; `target; `lagged; sited; prv] tgts:value exec no_Flood by site_no from all_monthly_data all_monthly_data[`lagged_target_all]:raze{count[x]mavg raze x}each tgts Time-to-peak model The daily rain and height, FLASH, and the stream_char were then joined based on site number and date to create the time-to-peak dataset. q)10#all_peak_data:peak_data ij`site_no`year xkey stream_char site_no lat lon start_time end_time .. ---------------------------------------------------------------------.. 01396500 40.6778 -74.87917 2009.07.02T04:30:00.000 2009.07.02T08:15:0.. 01397000 40.5722 -74.86806 2009.07.02T07:00:00.000 2009.07.02T14:00:0.. 01377500 40.9928 -74.02111 2009.07.02T11:45:00.000 2009.07.02T12:15:0.. 06478500 43.1858 -97.63528 2009.07.07T07:45:00.000 2009.07.23T06:45:0.. 04215500 42.8297 -78.77528 2009.07.11T15:15:00.000 2009.07.11T19:45:0.. 04215000 42.89 -78.64528 2009.07.11T15:45:00.000 2009.07.11T20:15:0.. 06834000 40.3517 -101.1236 2009.07.17T23:00:00.000 2009.07.18T21:15:0.. 01391000 40.9978 -74.11194 2009.07.21T14:45:00.000 2009.07.21T18:15:0.. 02162093 34.0274 -81.04194 2009.07.23T00:00:00.000 2009.07.23T00:45:0.. 01391000 40.9978 -74.11194 2009.07.28T20:30:00.000 2009.07.29T18:00:0.. q)10#monthly_stream_data:monthly_data ij`site_no`year xkey stream_char site_no date height upstr_height_1 prev_height_1 prev_height_2 pr.. ---------------------------------------------------------------------.. 01303500 2009.07 0.75 0.36 .. 01303500 2009.08 0.4 0.45 0.4 0.32 0... 01303500 2009.09 0.27 0.3 0.28 0.31 0... 01303500 2009.10 0.26 0.3 0.27 0.3 0... 01303500 2009.11 0.28 0.33 0.29 0.3 0... 01303500 2009.12 0.49 0.34 0.42 0.28 0... 01303500 2010.01 1.41 0.36 0.73 0.29 0... 01303500 2010.02 0.29 0.32 0.88 0.35 0... 01303500 2010.03 1.81 0.44 1.24 1.04 1... 01303500 2010.04 1.14 0.59 1.66 2.07 1... 
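As an aside, the lagged_target_all feature above uses a small idiom worth spelling out: an n-item moving average where n is the length of the vector is just the running mean of everything seen so far. A toy illustration with made-up monthly flood counts for one site:

q)x:0 1 0 0 2 1f               / hypothetical flood counts per month at one site
q)count[x] mavg x              / average of all values observed up to each month
0 0.5 0.3333333 0.25 0.6 0.6666667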
Train and test split¶ A dictionary was created for each of the three separate datasets: - Ungauged - Gauged - Perfect Forecasts The dictionary contained the different feature columns created above along with the basin characteristics required to make up the above datasets for each of the monthly (M ) and time to peak (P ) models. fc:{x where x in y} / find column ungauged_colsM: fc[ungauged_cols;cols cleaned_monthly] gauged_colsM: fc[gauged_cols;cols cleaned_monthly] perfect_forecast_colsM:fc[perfect_forecast_cols;cols cleaned_monthly] ungauged_colsP: fc[ungauged_cols;cols cleaned_peak] gauged_colsP: fc[gauged_cols;cols cleaned_peak] perfect_forecast_colsP:fc[perfect_forecast_cols;cols cleaned_peak] ungauge: `M`P!(ungauged_colsM;ungauged_colsP) gauge: `M`P!(ungauge[`M],gauged_colsM;ungauge[`P],gauged_colsP) forecast:`M`P! (gauge[`M],perfect_forecast_colsM;gauge[`P],perfect_forecast_colsP) q)forecast M| `month`cos_t`sin_t`elv`imp`CatAreaSqKm`WsAreaSqKm`CatAreaSqKmRp100.. P| `month`cos_t`sin_t`elv`imp`CatAreaSqKm`WsAreaSqKm`CatAreaSqKmRp100.. These dictionaries were then used to extract the appropriate columns from each table, to make them suitable inputs to machine-learning models. This was achieved by using the split_dict function which takes a table as input, as well as M or P indicating which model was being used. split_dict:{(!). flip( (`ungauged;flip x ungauge y); (`gauged;flip x gauge y); (`forecast;flip x forecast y) ) } q)split_dict[all_monthly_data;`M] ungauged| 7i -0.959493 -0.2817326 456f 1.454468 0.7407 526.9086.. gauged | 7i -0.959493 -0.2817326 456f 1.454468 0.7407 526.9086.. forecast| 7i -0.959493 -0.2817326 456f 1.454468 0.7407 526.9086.. The function returned a dictionary containing the matrix for each of the ungauged, gauged and perfect forecast datasets. q)(split_dict[all_monthly_data;`M])`ungauged 7i -0.959493 -0.2817326 456f 1.454468 0.7407 526.9086 0.1926 87.. 8i -0.6548607 -0.7557496 456f 1.454468 0.7407 526.9086 0.1926 87.. 9i -0.1423148 -0.9898214 456f 1.454468 0.7407 526.9086 0.1926 87.. 10i 0.415415 -0.909632 456f 1.454468 0.7407 526.9086 0.1926 87.. 11i 0.8412535 -0.5406408 456f 1.454468 0.7407 526.9086 0.1926 87.. 12i 1f -2.449294e-16 456f 1.454468 0.7407 526.9086 0.1926 87.. 2i 0.8412535 0.5406408 456f 1.454468 0.7407 526.9086 0.1926 87.. 3i 0.415415 0.909632 456f 1.454468 0.7407 526.9086 0.1926 87.. 4i -0.1423148 0.9898214 456f 1.454468 0.7407 526.9086 0.1926 87.. 5i -0.6548607 0.7557496 456f 1.454468 0.7407 526.9086 0.1926 87.. 6i -0.959493 0.2817326 456f 1.454468 0.7407 526.9086 0.1926 87.. 7i -0.959493 -0.2817326 456f 1.454468 0.7407 526.9086 0.1926 87.. .. Prior to the application of a train-test split on the data, rows containing null values are removed from the dataset as these cannot be passed to machine learning algorithms. cleaned_monthly:all_monthly_data[del_null all_monthly_data] cleaned_peak: all_peak_data[del_null all_peak_data] Monthly model When splitting the data for this model, it was deemed important that no time leakage occurred between the training and test sets (e.g. the training set contained information from 2009 to 2017, while the test set contained the remaining years). This ensured that the model was being tested in a way that was similar to a real-world deployment. A split was chosen so that 20 percent of the data for each site was in the test set. 
// The cutoff dataset is produced and date defined // at which the datasets are to be cutoff cutoff:update cutoff_date:min[date]+floor 0.8*max[date]-min[date] by site_no from cleaned_monthly // Extract data and targets from the dataset XtrainMi:select from cutoff where date<cutoff_date XtestMi :select from cutoff where date>=cutoff_date ytrainM :exec target from cutoff where date<cutoff_date ytestM :exec target from cutoff where date>=cutoff_date // From the master training and testing datasets extract // appropriate information for the monthly data XtrainM:split_dict[XtrainMi;`M] XtestM:split_dict[XtestMi;`M] Time-to-peak model The time-to-peak data was separated so that sites did not appear in both the train and test datasets. This was done to ensure that the models being produced could be generalized to new locations. The target data was binned into a histogram as below and the train-test split completed such that the distribution of targets in the training and testing sets was stratified. sites:0!select sum target by site_no from cleaned_peak plt[`:hist][sites`target] plt[`:xlabel]["Number of events per site"] plt[`:ylabel]["Number of associated sites"] plt[`:show][] // Set the number of events associated with each bin of the dataset bins:0 5 15 25.0 // Split the target data into the associated bin y_binned:bins bin`float$sites`target // Using embedPy, stratify site numbers and targets // into an 80-20 train-test split of the data tts:train_test_split[ sites `site_no; sites `target; `test_size pykw 0.2; `random_state pykw 607; `shuffle pykw 1b; `stratify pykw y_binned ]` // Update the cleaned_peak data // to add a flag indicating training/testing cleaned_peak[`split]:`TRAIN peak_split:update split:`TEST from cleaned_peak where site_no in`$tts 1 Building models¶ For both problems a variety of models were tested but, for the sake of this paper, models and results from an eXtreme Gradient Boosting (XGBoost) and a random-forest classifier (described in more detail in Appendix 2) are presented below. These models were chosen due to their ability to deal with complex, imbalanced datasets where overfitting is a common feature. Overfitting occurs when the model fits too well to the training set, capturing a lot of the noise from the data. This leads to the model performing successfully in training, while not succeeding as well on the testing or validation sets. Another problem that can occur is that a naïve model is produced which always predicts that a flood will not occur. This leads to high accuracy but not to meaningful results. As seen below in the results section, XGBoosts and random forests were able to deal much better with these issues by tuning their respective hyper-parameters. To visualize the results, a precision-recall curve was used, illustrating the trade-off between the positive predictive value and the true positive rate over a variety of probability thresholds. This is a good metric for the success of a model when the classes are unbalanced, compared with similar graphs such as the ROC curve. Precision and recall were also used because striking a balance between these metrics is vital when predicting floods: every flood should be given a warning, yet the number of false positives should remain low, since the penalty for too many false alarms is that warnings would be ignored. A function named pr_curve was created to output the desired results from the models.
This function outputs the accuracy of prediction, the meanclass accuracy, a classification report highlighting the precision and recall per class, along with a precision-recall curve. This function also returned the prediction at each location in time for the models used, as can be seen later in this paper to create a map of flooding locations. The arguments to the pr_curve function are: - matrix of feature values - list of targets - dictionary of models being used The dictionary of models consisted of XGBoost and a random-forest model, with varying hyper-parameters for each model. build_model:{[Xtrain;ytrain;dict] rf_hyp_nms:`n_estimators`random_state`class_weight; rf_hyp_vals:(dict`rf_n;0;(0 1)!(1;dict`rf_wgt)); rf_clf:RandomForestClassifier[pykwargs rf_hyp_nms!rf_hyp_vals] [`:fit][Xtrain; ytrain]; xgb_hyp_nms:`n_estimators`learning_rate`random_state, `scale_pos_weight`max_depth; xgb_hyp_vals:(dict`xgb_n;dict`xgb_lr;0;dict`xgb_wgt;dict`xgb_maxd); xgb_clf: XGBClassifier[pykwargs xgb_hyp_nms!xgb_hyp_vals] [`:fit] [np[`:array]Xtrain; ytrain]; `random_forest`XGB!(rf_clf;xgb_clf) } Results¶ The results below were separated based on the three datasets. Model testing¶ Ungauged models¶ Monthly dict:`rf_n`rf_wgt`rf_maxd`xgb_n`xgb_lr`xgb_wgt`xgb_maxd! (200;1;8;200;.2;15;7) models:build_model[XtrainM`ungauged;ytrainM;dict] q)pltU1:pr_curve[XtestM`ungauged;ytestM;models] Accuracy for random_forest: 0.9380757 Meanclass accuracy for random_forest: 0.8382345 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.9424622 0.9939699 0.967531 13101 1 | 0.7340067 0.2152024 0.3328244 1013 avg/total| 0.8382345 0.6045861 0.6501777 14114 Accuracy for XGB: 0.9197959 Meanclass accuracy for XGB: 0.69065 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.9512177 0.9629799 0.9570627 13101 1 | 0.4300823 0.3613031 0.3927039 1013 avg/total| 0.69065 0.6621415 0.6748833 14114 Time-to-peak dict:`rf_n`rf_wgt`rf_maxd`xgb_n`xgb_lr`xgb_wgt`xgb_maxd! (220;1;17;340;.01;1.5;3) models:build_model[XtrainP`ungauged;ytrainP;dict] q)pltU2 :pr_curve[XtestP`ungauged;ytestP;models] Accuracy for random_forest: 0.7330896 Meanclass accuracy for random_forest: 0.7312101 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.7336066 0.9572193 0.8306265 374 1 | 0.7288136 0.2485549 0.3706897 173 avg/total| 0.7312101 0.6028871 0.6006581 547 Accuracy for XGB: 0.7751371 Meanclass accuracy for XGB: 0.7474176 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.7995227 0.8957219 0.8448928 374 1 | 0.6953125 0.5144509 0.5913621 173 avg/total| 0.7474176 0.7050864 0.7181275 547 Gauged models¶ Monthly dict:`rf_n`rf_wgt`rf_maxd`xgb_n`xgb_lr`xgb_wgt`xgb_maxd! 
(100;16;8;100;0.2;16;9) models:build_model[XtrainM`gauged;ytrainM;dict] q)pltG1:pr_curve[XtestM`gauged;ytestM;models] Accuracy for random_forest: 0.9430843 Meanclass accuracy for random_forest: 0.9163495 Accuracy for random_forest: 0.9422559 Meanclass accuracy for random_forest: 0.9000509 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.9439867 0.9969468 0.9697442 13101 1 | 0.8561151 0.2349457 0.3687064 1013 avg/total| 0.9000509 0.6159463 0.6692253 14114 Accuracy for XGB: 0.9332578 Meanclass accuracy for XGB: 0.7507384 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.9559055 0.9729792 0.9643668 13101 1 | 0.5455712 0.4195459 0.4743304 1013 avg/total| 0.7507384 0.6962625 0.7193486 14114 Time-to-peak dict:`rf_n`rf_wgt`rf_maxd`xgb_n`xgb_lr`xgb_wgt`xgb_maxd! (100;1;17;350;0.01;1.5;3) models:build_model[XtrainP`gauged;ytrainP;dict] q)pltG2 :pr_curve[XtestP`gauged;ytestP;models] Accuracy for random_forest: 0.7367459 Meanclass accuracy for random_forest: 0.763421 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.7309237 0.973262 0.8348624 374 1 | 0.7959184 0.2254335 0.3513514 173 avg/total| 0.763421 0.5993478 0.5931069 547 Accuracy for XGB: 0.7842779 Meanclass accuracy for XGB: 0.7650789 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.7990654 0.9144385 0.8528678 374 1 | 0.7310924 0.5028902 0.5958904 173 avg/total| 0.7650789 0.7086643 0.7243791 547 Perfect Forecasts models¶ Monthly dict:`rf_n`rf_wgt`xgb_n`xgb_lr`xgb_wgt`xgb_maxd! (100;15;100;0.2;15;7) models:build_model[XtrainM`forecast;ytrainM;dict] q)pltP1:pr_curve[XtestM`forecast;ytestM;models] Accuracy for random_forest: 0.9448066 Meanclass accuracy for random_forest: 0.9130627 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.9462553 0.9971758 0.9710484 13101 1 | 0.8798701 0.2675222 0.4102952 1013 avg/total| 0.9130627 0.632349 0.6906718 14114 Accuracy for XGB: 0.9471447 Meanclass accuracy for XGB: 0.8045102 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.9695219 0.9736661 0.9715896 13101 1 | 0.6394984 0.6041461 0.6213198 1013 avg/total| 0.8045102 0.7889061 0.7964547 14114 Time-to-peak dict:`rf_n`rf_wgt`rf_maxd`xgb_n`xgb_lr`xgb_wgt`xgb_maxd! (100;1;17;300;0.01;2.5;3) models:build_model[XtrainP`forecast;ytrainP;dict] q)pltP2 :pr_curve[XtestP`forecast;ytestP;models] Accuracy for random_forest: 0.7550274 Meanclass accuracy for random_forest: 0.7668274 class | precision recall f1_score support ---------| ------------------------------------ 0 | 0.751046 0.959893 0.842723 374 1 | 0.7826087 0.3121387 0.446281 173 avg/total| 0.7668274 0.6360159 0.644502 547 Accuracy for XGB: 0.7440585 Meanclass accuracy for XGB: 0.7027966 class | precision recall f1_score support ---------| ------------------------------------- 0 | 0.8031088 0.828877 0.8157895 374 1 | 0.6024845 0.5606936 0.5808383 173 avg/total| 0.7027966 0.6947853 0.6983139 547 Scoring summary¶ Ungauged models¶ Monthly The accuracies of both classifiers in the monthly model were relatively high in this case. Random forests achieved a slightly higher score of 0.938. The meanclass accuracy was lower for both classifiers, ranging from ~0.7-0.84 in the random forests and XGBoost respectively. 
However, considering that the class distribution was extremely imbalanced, the accuracy is an unreliable metric for evaluating the models fairly. Both classifiers returned low precision and recall scores when evaluating the positive class, indicating that the models were not adept at discerning flood events. Low scores of ~0.4 were also seen in the precision-recall curves for both classifiers. Time-to-peak XGBoost achieved both a higher accuracy of 0.78 and a more stable precision-recall ratio, 0.7 to 0.51, for the positive class when compared with random forests. This indicates that a relatively large number of flood events occurring under the 3.5 hour threshold were being identified by the model. The meanclass accuracies for both classifiers were similar at ~0.75. The areas under the precision-recall curves were also comparable for both classifiers. Gauged models¶ Monthly Improvements in both the accuracy and meanclass accuracy were evident in the gauged models when compared to the ungauged example. In this case, higher accuracies were achieved by the random-forest classifier. The meanclass accuracy also performed better at 0.9 when compared with the XGBoost classifier result of 0.75. Although still low, a slightly improved balance between the precision and recall scores, 0.54 to 0.42, for the positive class was reached by the XGBoost. The area under the precision-recall curve improved in both classifiers to 0.51 (XGBoost) and 0.54 (random forests) from the previous ungauged model. Time-to-peak The accuracy and meanclass accuracy achieved with the gauged datasets were very similar to the results obtained in the ungauged model. This, in conjunction with the similar precision and recall scores, indicates that the addition of previous stream/river heights did not have a large impact on the models. The areas under the precision-recall curves were likewise similar to those of the previous models. Perfect Forecast models¶ Monthly Similar accuracy results were seen between the random forests and XGBoost, with results on the order of 0.945. The random-forest classifier achieved a greater meanclass accuracy score of 0.91 compared with the XGBoost score of 0.8. Precision and recall scores for the positive class were also high at ~0.62 for both metrics for the XGBoost. However, the random-forest classifier produced a high imbalance between the precision and recall scores, which were 0.88 and 0.27 respectively. Both precision-recall curves improved from the previous gauged model, achieving areas of 0.69 and 0.64 for the XGBoost and random-forest classifiers respectively. Time-to-peak A slight decrease in accuracy occurred in both classifiers compared with previous models, although an improved balance between the precision and recall scores of 0.6 and 0.56 was seen for the XGBoost. The area under the precision-recall curves increased slightly when compared to the previous models’ results, reaching scores of 0.65 for the XGBoost and 0.62 for the random-forest classifier. Feature significance¶ There was also a lot to be learned from determining which features contributed to predicting the target for each model. To do this, the function .ml.fresh.significantfeatures was applied to the data to return the statistically significant features based on a p-value. Combining this with .ml.fresh.ksigfeat[x] enabled the top x most significant features to be extracted from each dataset. title:{"The top 15 significant features for ",x," predictions are:"} nums:{string[1+til x],'x#enlist". "}
"} q_kfeat:.ml.fresh.ksigfeat 15 Monthly model title["monthly"] X_Month:flip forecast[`M]!cleaned_monthly forecast[`M] y_Month:cleaned_monthly`target 3 cut`$nums[15],' string .ml.fresh.significantfeatures[X_Month;y_Month;kfeat] "The top 15 significant features for monthly predictions are:" 1. lagged_target_all 2. window_ppt_1 3. window_ppt_2 4. window_ppt_3 5. window_ppt_4 6. window_ppt_5 7. window_ppt_6 8. window_upstr_ppt_1_1 9. window_upstr_ppt.. 10. window_upstr_ppt_1_3 11. window_upstr_ppt_1_4 12. lagged_target_1 13. lagged_target_12 14. window_upstr_ppt_1_5 15. window_ppt_7 From the above results, it is evident that the most important features for predicting flooding in the monthly models were the features that were created throughout the notebook. In particular the windowed precipitation features (both in the site and upstream locations, window_ppt_ and window_upstr_ppt_ ) along with the lagged target values (the average target value overall being the most important, lagged_target_all , and the target value from the previous month before the event holding the least amount of information compared, lagged_target_1 ). It is also evident that the basin characteristics from the NLCDPlus dataset do impact the model predictions as strongly as the created features. Time-to-peak model title["time-peak"] X_t2p:flip forecast[`P]!cleaned_peak[forecast[`P]] y_t2p:cleaned_peak`target 3 cut`$nums[15],' string .ml.fresh.significantfeatures[X_t2p;y_t2p;kfeat] "The top 15 significant features for time-peak predictions are:" 1. WsAreaSqKmRp100 2. WsAreaSqKm 3. window_prev_height_48 4. prev_upstr_height_1_1 5. window_prev_height_12 6. prev_height_1 7. WetIndexCat 8. prev_height_5 9. prev_height_4 10. prev_height_6 11. prev_height_7 12. prev_height_2 13. window_prev_height_4 14. prev_height_8 15. prev_height_3 Compared with the monthly model, it is clear that the basin characterics such as the area of a watershed (WsAreaSqKmRp100 , WsAreaSqKm ) and the wetness index of a catchment (WetIndexCat ) along with the previous height of the stream before the flood event (in both the current and upstream location) have the most impact on the models predictions. The maximum moving averages of the height up to 48 hours before the flood event (window_prev_height_ ) also played an important role. The previous and windowed precipitation values did not appear as a top feature for this model. Graphics¶ Monthly model Using these results, it was also possible to build a map that highlighted per month which areas were at risk of flooding. This could be used by governmental bodies to prioritize funding in the coming weeks. // Extract the prediction values from the monthly perfect forecast model preds:last pltP1`model newtst:update preds:preds from XtestMi // Find all the locations that the model predicted will flood in 2018. newt:select from newtst where date within 2018.01 2018.12m,preds=1 // Convert the table to a pandas dataframe dfnew:.ml.tab2df newt // Show the monthly flood predictions using gmaps graphs:.p.get`AcledExplorer graphs[`df pykw dfnew][`:render][]; Time-to-peak model Data relating to the peak height of a stream from an actual flooding event was also compared with the upper bound peak time from our model. 
// The predictions for the ungauged model are extracted pred:last pltU2`model // For a specific site the start, peak and end times of an event are produced pg:raze select site_no, start_time, end_time, peak_time from XtrainPi where unk=`EDT,i in where pred=XtestPi`target,site_no=`02164110, target=1,delta_peak>2 // Define parameters to be taken into account in plotting rainfall :`x_val`col`title!(pg[`start_time];`r;`rainfall) actual_peak:`x_val`col`title!(pg[`peak_time];`g;`actual_peak) pred_bound :`x_val`col`title! (03:30+pg[`start_time];`black;`predicted_upper_bound) // Extract relevant information for each site // at the time of a major rainfall event graph:select from str where date within (`date$pg[`start_time];`date$pg[`end_time]), datetime within (neg[00:15]+pg[`start_time];[00:10]+pg[`end_time]), (pg`site_no)=`$site_no // Plot stream height as a function of time times :graph`datetime heights:graph`height plt_params:`label`linewidth!(`height;3) plt[`:plot][times;heights;pykwargs plt_params] // Plot lines indicating relevant events pltline:{ dict:`color`label`linewidth!(x`col;x`title;3); plt[`:axvline][x`x_val;pykwargs dict]; } pltline each(rainfall;actual_peak;pred_bound) plt[`:legend][`loc pykw `best] plt[`:title]["Time to Peak"] plt[`:ylabel]["Height"] plt[`:xlabel]["Time"] plt[`:xticks][()] plt[`:show][] The above plot gives an indication of how close the time-to-peak model’s prediction was to the time at which the actual flood peak occurred. In a real-life scenario, being able to place an upper bound on when a flood will reach its peak allows emergency services to take appropriate action, possibly reducing the damage caused by floods. Conclusion¶ From the above results we could predict, with relatively high accuracy, whether an area was likely to flood or not in the next month. We could also produce a model to predict if a stream would reach its peak height within 3.5 hours. For the monthly models, the future weather predictions played an important role in predicting whether an area would flood or not. Accuracy increased as the weather predictions and gauged information columns were added to the dataset. This corresponds with the results from the significant feature tests, where lagged_target information and the windowed rain volumes of the current month were deemed to be the most important features for inclusion. For the majority of the models, the random-forests classifier obtained high-accuracy results; however, this often coincided with imbalanced precision and recall scores. In some scenarios, high precision scores were achieved along with correspondingly low recall, indicating that flooding events could be missed. Although the XGBoost models didn’t achieve as high an accuracy, the precision and recall scores were much more balanced, which is a favorable trait to have in this type of model when predicting complex events such as flooding. The opposite was true for the time-to-peak models, as previous rain- and stream-gauge information, along with the basin characteristics, were deemed to be the most significant features when predicting these values. Including additional information about the future predicted rainfall did not improve the accuracy of the results. The best results were obtained from the gauged model by the XGBoost classifier. Despite this, the Perfect Forecasts dataset achieved the best balance between the precision and recall of the positive class, compared with the ungauged model, which favoured high precision alongside low recall scores.
Both of these results are to be physically expected. In the case of the monthly prediction, information regarding future rainfall is vital to predicting if an area will flood in the next month. In the case of a time-to-peak value, by contrast, it would be extremely unlikely that information about rainfall in the days following the peak height being reached would add any predictive power to the model. The features that contribute to flood susceptibility, and the length of time it takes for a river to reach its peak height, are important pieces of information to extract from the model. From this, organizations such as USGS can better prepare for flood events and understand how changing climates and the placement of impervious surfaces can affect the likelihood of flooding. The best results from the models above were obtained by continuously adjusting the hyper-parameters of the model. The unbalanced target data in the monthly model meant that weighting the classes was an important parameter to experiment with. This was particularly important when trying to obtain high precision and recall results. Between the two models, the balance between recall and precision was better for the XGBoost model. Author¶ Diane O’Donoghue joined First Derivative in 2018 as a data scientist in the Capital Markets Training Program and is currently on the Machine Learning team based in London. Within the team, Diane has been involved with expanding the Machine Learning Toolkit and the automated machine-learning platform. Code¶ The code presented in this paper is available from kxcontrib/fdl2019. Acknowledgements¶ I gratefully acknowledge the Disaster Prevention team at FDL: Piotr Bilinski, Chelsea Sidrane, Dylan Fitzpatrick, and Andrew Annex for their contribution and support, along with my colleagues in the Machine Learning team. Appendixes¶ 1. Kd-tree¶ A kd-tree is used in k-dimensional space to create a tree structure. In the tree, each node represents a hyperplane which divides the space into two separate parts (the left and the right branch) based on a given direction. This direction is associated with a certain axis dimension, with the hyperplane perpendicular to that axis. Which side of the hyperplane a data point falls on is determined by whether the point being added to the tree is greater or less than the node value at the splitting dimension. For example, if the splitting dimension of the node is x, all data points with a smaller x value than the value at the splitting dimension of the node will be to the left of the hyperplane, while all points equal to or greater than it will be in the right subplane. The tree is used to efficiently find a datapoint’s nearest neighbor, potentially eliminating a large portion of the dataset using the kd-tree’s properties. This is done by starting at the root and moving down the tree recursively, calculating the distance between each node and the datapoint in question, allowing branches of the dataset to be eliminated based on whether this node-point distance is less than or greater than the current nearest-neighbor distance. This enables rapid lookups for each point in a dataset. Visual representation of a kd-tree 2. Ensemble methods¶ An ensemble learning algorithm combines multiple outputs from a wide variety of predictors to achieve improved results. A combination of ‘weak’ learners is typically used with the objective of achieving a ‘strong’ learner.
A weak predictor is a classifier that is only slightly correlated with the true predictions, while a strong learner is highly correlated. One of the advantages of using ensemble methods is that overfitting is reduced by diversifying the set of predictors used and averaging the outcome, lowering the variance in the model. - XGBoost - XGBoost, commended for its speed and performance, is an ensemble method built on a gradient-boosting framework of decision trees. This method uses boosting techniques by building the model sequentially, using the results from the previous step to improve the next. It relies on subsequent classifiers to learn from the mistakes of the previous classifier. - Random Forests - This is also an ensemble method, where classifiers are trained independently using a randomized subsample of the data. This randomness reduces overfitting, while making the model more robust than if just a single decision tree was used. To obtain the output of the model, the decisions of multiple trees are merged together, represented by the average.

SSL/TLS¶ Since V3.4t 2016.05.12, kdb+ can use Secure Sockets Layer (SSL)/Transport Layer Security (TLS) to encrypt connections using the OpenSSL libraries. Configuration¶ OpenSSL library¶ Ensure that your OS has the latest OpenSSL libraries installed, and that they are in your LD_LIBRARY_PATH (Unix), DYLD_LIBRARY_PATH (MacOS), or PATH (Windows). OpenSSL version¶ Beginning with v4.1t 2022.03.25, kdb+ will try to load versioned shared libraries for OpenSSL. It will load the first library that it can locate from the lists below: libssl.so libssl.so.3 libssl.1.1 libssl.so.1.0 libssl.so.10 libssl.so.1.0.2 libssl.so.1.0.1 libssl.1.0.0 libssl.3.dylib libssl.1.1.dylib On Windows, where both libssl and libcrypto are loaded, the library names in priority order are | | libssl | libcrypto | |---|---|---| | w64 | libssl-3-x64.dll libssl-1_1-x64.dll | libcrypto-3-x64.dll libcrypto-1_1.dll | | w32 | libssl-3.dll libssl-1_1.dll | libcrypto-3.dll libcrypto-1_1.dll | The Windows build was tested with the pre-compiled libs Win32 OpenSSL v1.1.1L Light, Win64 OpenSSL v1.1.1L Light. Prior to V4.1t 2022.03.25, kdb+ would load the following files: libssl.so libssl.dylib libssl-1_1-x64.dll libcrypto-1_1-x64.dll (w64 build) libssl-1_1.dll libcrypto-1_1.dll (w32 build) Keys/Certificates¶ Since TLS uses certificates, prior to enabling TLS in a kdb+ server, ensure that you have the necessary certificates in place. The minimum for a TLS-enabled server is to provide a certificate and its associated key, both in PEM format. To locate these files, q uses the default path as reported by the openssl version -d command as a base, e.g. $ openssl version -d OPENSSLDIR: "/opt/local/etc/openssl" Configuration of keys/certificates and the checks performed is controlled by the following environment variables. KX first: since V3.6, kdb+ gives preference to the KX_ prefix for the SSL_* environment variables to avoid clashes with other OpenSSL-based products. For example, the value for getenv `KX_SSL_CERT_FILE has a higher precedence than getenv `SSL_CERT_FILE for determining config. SSL_CERT_FILE¶ The certificate file.
This must be in PEM format and must be sorted starting with the subject's certificate (the actual client or server certificate), followed by intermediate CA certificates if applicable, and ending at the highest level (root). Default value is <OPENSSLDIR>/server-crt.pem SSL_CA_CERT_FILE¶ A file containing certificate authority (CA) certificates in PEM format. The file can contain several CA certificates identified by -----BEGIN CERTIFICATE----- ... (CA certificate in base64 encoding) ... -----END CERTIFICATE----- sequences. Text is allowed before, between, and after the certificates; it can be used, for example, for descriptions of the certificates. Default value is <OPENSSLDIR>/cacert.pem SSL_CA_CERT_PATH¶ A directory containing certificate authority (CA) certificates in PEM format. Default value is <OPENSSLDIR> SSL_KEY_FILE¶ Private key in PEM format. Default value is <OPENSSLDIR>/server-key.pem SSL_CIPHER_LIST¶ The default cipher list is set to the Intermediate compatibility (default) list as recommended by Mozilla.org. You may override this to reduce the list to whatever your IT security policy requires. Default value is ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS A good source for what is generally recommended can be found at Ciphersuites. If you select a set which is not compatible with the peer process, you’ll observe a message at the q console similar to the following. 140735201689680:error:1408A0C1:SSL routines:ssl3_get_client_hello:no shared cipher:s3_srvr.c:1417: SSL_VERIFY_CLIENT¶ Controls the processing of certificates from a client. Uses the certificates from SSL_CA_CERT_FILE or SSL_CA_CERT_PATH to verify the client’s certificate. Can be set to one of the following values:
NO (default): kdb+ does not request nor validate the certificate from a client
YES: the server requests a client certificate and disconnects the client if the provided certificate is missing or invalid
REQUESTONLY (since 4.1t 2024.02.07): the server requests a client certificate but allows the connection if the client certificate is missing or invalid
IFPRESENT (since 4.1t 2024.02.07): the server requests a client certificate and terminates the connection if an invalid certificate is provided; the server continues if the certificate is missing or valid
SSL_VERIFY_SERVER¶ Controls the processing of certificates from a server. Default value is YES . Checks the X509 certificate the peer presented by verifying the server’s certificate against a trusted source, using the certificates from SSL_CA_CERT_FILE or SSL_CA_CERT_PATH. Expired certificates will fail the verification process. If the verification process fails or no server certificate is provided, the TLS/SSL handshake is immediately terminated with an alert message containing the reason for the verification failure. Setting to NO does not terminate the connection due to a failure verifying the certificate.
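The variables a q process will actually consider can be checked from within the session (the values returned will of course depend on your own environment); recall that a KX_-prefixed value, if set, takes precedence over the plain name:

q){x!getenv each x}`KX_SSL_CERT_FILE`SSL_CERT_FILE`SSL_KEY_FILE`SSL_CA_CERT_FILE`SSL_VERIFY_CLIENT`SSL_VERIFY_SERVER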
Checking Configuration¶ Configured TLS settings for a kdb+ process can be viewed with (-26!)[] . Certificates¶ If you don’t have a certificate, you can create a self-signed certificate using the openssl program. An example script (makeCerts.sh ) to do so follows; customize as necessary. mkdir $HOME/certs && cd $HOME/certs # create private key for CA (certificate authority) openssl genrsa -out ca-private-key.pem 2048 # create X509 certificate for CA (certificate authority) openssl req -x509 -new -nodes -key ca-private-key.pem -sha256 -days 365 -out ca-cert.pem -subj /C=US/ST=CA/L=Somewhere/O=Someone/CN=FoobarCA # create server private key openssl genrsa -out server-private-key.pem 2048 # create servers certificate signing request (CSR) # CSR contains the common name(s) you want your certificate to secure, information about your company, and your public key (taken from provided private key) openssl req -new -sha256 -key server-private-key.pem -subj /C=US/ST=CA/L=Somewhere/O=Someone/CN=Foobar -out server.csr # create X509 certificate for the server (signed by CA) openssl x509 -req -in server.csr -CA ca-cert.pem -CAkey ca-private-key.pem -CAcreateserial -out server-cert.pem -days 365 -sha256 # create client private key openssl genrsa -out client-private-key.pem 2048 # create clients certificate signing request (CSR) # CSR contains the common name(s) you want your certificate to secure, information about your company, and your public key (taken from provided private key) openssl req -new -sha256 -key client-private-key.pem -subj /C=US/ST=CA/L=Somewhere/O=Someone/CN=Foobar -out client.csr # create X509 certificate for the client (signed by CA) openssl x509 -req -in client.csr -CA ca-cert.pem -CAkey ca-private-key.pem -CAcreateserial -out client-cert.pem -days 365 -sha256 Using this script the server settings can be configured as: $ export SSL_CERT_FILE=$HOME/certs/server-cert.pem $ export SSL_KEY_FILE=$HOME/certs/server-private-key.pem $ export SSL_CA_CERT_FILE=$HOME/certs/ca-cert.pem with the client as: $ export SSL_CERT_FILE=$HOME/certs/client-cert.pem $ export SSL_KEY_FILE=$HOME/certs/client-private-key.pem $ export SSL_CA_CERT_FILE=$HOME/certs/ca-cert.pem mkcert is a simple tool for making locally-trusted development certificates. Secure your certificates Store your certificates outside of the directories accessible from within kdb+, otherwise remote users can easily steal your server’s key file! TLS Server Mode¶ When the certificates are in place, and the environment variables are set, TLS Server Mode can be enabled through the command-line option -E. TLS Client Mode¶ TLS connections can be opened to TLS-enabled servers with q)h:hopen`:tcps://hostname:port[:username:password] Clients can also request secure HTTP (HTTPS) and WebSockets (WSS) connections. If you don't wish to verify a server’s certificate, set $ export SSL_VERIFY_SERVER=NO To allow verification of certificates which were not issued by you, you can import the CA bundle from reputable sources, e.g. $ curl https://curl.se/ca/cacert.pem > $HOME/certs/cabundle.pem $ export SSL_CA_CERT_FILE=$HOME/certs/cabundle.pem If you open the downloaded cabundle.pem with a text editor you’ll see a list of certificates, and you can append your own self-signed ca.pem to this file if you wish. 
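Assuming the certificates above are in place, a server has been started with -E enabled, and the client environment variables are exported as shown (the hostname, port and credentials below are placeholders), a client session can then open and use an encrypted handle:
q)h:hopen `:tcps://localhost:5010:username:password   / TLS-encrypted IPC connection
q)h"2+2"                                              / queries run exactly as over a plain handle
4
q)hclose h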
If there is an issue loading the CA certificate, an error similar to the following is printed at the q console
q).Q.hg`$":https://www.kx.com"
140735201689680:error:02001002:system library:fopen:No such file or directory:bss_file.c:175:fopen('/opt/local/etc/openssl/cacert.pem','r')
140735201689680:error:2006D080:BIO routines:BIO_new_file:no such file:bss_file.c:178:
140735201689680:error:0B084002:x509 certificate routines:X509_load_cert_crl_file:system lib:by_file.c:253:
'conn. OS reports: Protocol not available
Connection information¶
Extra protocol details for a connection handle are available via .z.e, including information about whether the current handle's TLS certificate was successfully verified.
Suitability and restrictions¶
Currently we would recommend TLS be considered only for long-standing, latency-insensitive, low-throughput connections. The overhead of hopen on localhost appears to be 40-50× that of a plain connection; once handshaking is complete, the overhead is ~1.5×, assuming your OpenSSL library can utilize AES-NI. OpenSSL 1.1 is supported since V4.0 2020.03.17.
Thread Support¶
The following associated features are not implemented for TLS:
- multithreaded input mode
- use within secondary threads
- hopen timeout (implemented in V3.5)
Since 4.1t 2023.11.10 this was altered to allow use on any thread where messaging was previously supported, i.e.:
- incoming connections in multithreaded input queue mode (mtiqm)
- one-shot sync requests within peach or mtiqm socket thread
- HTTPS client requests within peach or mtiqm socket thread
How to handle temporal data in q¶
Stepped attribute¶
In traditional RDBMSs, temporal changes in data are often represented by adding valid-time-interval information to each relationship, usually by adding start and end columns to the relational tables. This approach is often wasteful because in many cases the end of each interval is the start of the next, leading to a lot of repetition. Q offers a better alternative. Recall that adding an `s attribute to a dictionary makes it behave as a step function. Compare
q)d:(10*til 10)!til 10
q)d 5 10 15
0N 1 0N
and
q)s:`s#d
q)s 5 10 15
0 1 1
Since keyed tables in q are a special case of a dictionary, adding an `s attribute to a keyed table has a similar effect. For example, consider a table that records changes in a phone book:
q)show ph
name date      | phone
---------------| -----
Bill 2000.01.01| 4444
John 2000.01.01| 5555
John 2000.06.10| 4444
Without an `s attribute, this table will readily produce, for example, John's phone number on the date of a change, but any other date will produce an 0N.
q)show ph ((`John;2000.01.01);(`John;2000.03.01);(`John;2000.06.10))
phone
-----
5555

4444
Adding the attribute changes the result to
q)ph:`s#ph
q)show ph ((`John;2000.01.01);(`John;2000.03.01);(`John;2000.06.10))
phone
-----
5555
5555
4444
Such tables can be used with lj:
q)show x
name date      | x
---------------| -
John 2000.06.08| 0
John 2000.06.09| 1
John 2000.06.10| 2
John 2000.06.11| 3
John 2000.06.12| 4
q)show x lj ph
name date      | x phone
---------------| -------
John 2000.06.08| 0 5555
John 2000.06.09| 1 5555
John 2000.06.10| 2 4444
John 2000.06.11| 3 4444
John 2000.06.12| 4 4444
Upsert into a stepped dictionary¶
If you try to upsert into a dict flagged as stepped, a 'step error will be signalled.
q)d:`s#`a`b!1 2;
q)`d upsert `c`d!3 4
'step
To update such a dict, remove the `s attribute, upsert, and add the `s attribute again.
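A minimal sketch of that workaround, continuing the example above (the intermediate copy via `# is purely illustrative):
q)d:(`#key d)!value d    / rebuild the dict without the stepped attribute
q)`d upsert `c`d!3 4     / the upsert from above now succeeds
q)d:`s#d                 / reapply `s to restore step-function behaviour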
Comparing temporals¶ Note the comparison of ordinal with cardinal datatypes, particularly when the types differ.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="15"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">================================================================================ FILE: TorQ-Finance-Starter-Pack_code_processes_metrics.q SIZE: 4,096 characters ================================================================================ / schemas for tables sumstab:([] time:`timestamp$(); sym:`g#`symbol$(); sumssize:`int$(); sumsps:`float$(); sumspricetimediff:`float$()); latest:([sym:`u#`symbol$()] time:`timestamp$(); sumssize:`int$(); sumsps:`float$(); sumspricetimediff:`float$()); \d .metrics / load settings windows:@[value;`windows;0D00:01 0D00:05 0D01]; enableallday:@[value;`enableallday;1b]; tickerplanttypes:@[value;`tickerplanttypes;`segmentedtickerplant]; rdbtypes:@[value;`rdbtypes;`rdb]; tpconsleep:@[value;`tpconsleep;10]; requiredprocs:rdbtypes,tickerplanttypes; tpcheckcycles:@[value;`tpcheckcycles;0W]; rdbconnsleep:@[value;`rdbconnsleep;10]; \d . / define upd to keep running sums upd:{[t;x] / join latest to x, maintaining time col from x, then calc running sums r:ungroup select time,sumssize:(0^sumssize)+sums size,sumsps:(0^sumsps)+sum price*size,sumspricetimediff:(0^sumspricetimediff)+sums price*0^deltas[first lt;time] by sym from x lj delete time from update lt:time from latest; / add latest values for each sym from r to latest latest,::select by sym from r; / add records to sumstab for all records in update message sumstab,::`time`sym xcols 0!r } / function to calc twap/vwap / calculates metrics for windows in .metrics.windows metrics:{[syms] / allow calling function with ` for all syms syms:$[syms~`;exec distinct sym from latest;syms,()]; / metric calcs t:select sym,timediff,vwap:(lsumsps-sumsps)%lsumssize-sumssize,twap:(lsumspricetimediff-sumspricetimediff)%.z.p - time / get sums asof time each window ago from aj[`sym`time;([]sym:syms) cross update time:.z.p - timediff from ([]timediff:.metrics.windows);sumstab] / join latest sums for each sym lj 1!select sym,lsumssize:sumssize, lsumsps:sumsps, lsumspricetimediff:sumspricetimediff from latest; / add allday window if[.metrics.enableallday; if[not all syms in key .metrics.start;.metrics.start::exec first time by sym from sumstab]; t:`sym`timediff xasc t,select sym,timediff:0Nn,vwap:sumsps%sumssize,twap:sumspricetimediff%.z.p - .metrics.start[sym] from latest where sym in syms ]; :t; } // Define top-level functions for receiving messages from an STP endofperiod:{[currp;nextp;data] .lg.o[`endofperiod;"Received endofperiod. 
currentperiod, nextperiod and data are ",(string currp),", ", (string nextp),", ", .Q.s1 data]}; endofday:{[dt;data] .lg.o[`endofday;"Received endofday for ",string dt]}; \d .metrics / get handle for TP & subscribe subscribe:{ / exit if no handles found if[0=count s:.sub.getsubscriptionhandles[tickerplanttypes;();()!()];:()]; subproc:first s; / subsribe to trade table .lg.o[`subscribe;"subscribing to ", string subproc`procname]; .sub.subscribe[`trade;`;0b;0b;subproc] } / get subscribed to TP, recover up until now from RDB init:{ r:subscribe[]; // Block process until all required processes are connected .servers.startupdepcycles[requiredprocs;tpconsleep;tpcheckcycles]; r:subscribe[]; / check if updates have already been sent from TP, if so recover from RDB if[0<r[`icounts]`trade; / get handle for RDB h:exec first w from s:.sub.getsubscriptionhandles[rdbtypes;();()!()]; .lg.o[`recovery;"recovering ",(a:string r[`icounts]`trade)," records from trade table on ",string first s`procname]; / query data from before subscription from RDB t:h"select time,sym,size,price from trade where i<",a; .lg.o[`recovery;"recovered ",(string count t)," records"]; / insert data recovered from RDB into relevant tables t:select time,sym,sumssize,sumsps,sumspricetimediff from update sumssize:sums size,sumsps:sums price*size,sumspricetimediff:sums price*time-prev time by sym from t; @[`.;`sumstab;:;t]; @[`.;`latest;:;select by sym from t]; ]; / setup empty start dict for use in all day calculation start::()!(); } \d . / get connections to TP, & RDB for recovery .servers.CONNECTIONS:.metrics.rdbtypes,.metrics.tickerplanttypes; .servers.startup[]; / run the initialisation function to get subscribed & recover .metrics.init[]; ================================================================================ FILE: TorQ-Finance-Starter-Pack_code_processes_vwapsub.q SIZE: 3,702 characters ================================================================================ \d .vwapsub // enter vwapsub namespace tickerplanttypes:@[value;`tickerplanttypes;`segmentedtickerplant]; // tickerplant types to subscribe to hdbtypes:@[value;`hdbtypes;`hdb]; // hdbtypes to connect to // datareplay settings realtime:@[value;`realtime;0b]; // use realtime feed or datareplay. default is 0b (datareplay) replayinterval:@[value;`replayinterval;0D00:10:00.00]; // interval to run calcvwap at replaysts:@[value;`replaysts;2015.01.07D01:00:00.00]; // start time of data to retreive from hdb replayets:@[value;`replayets;2015.01.08D17:00:00.00]; // end time of data to retrieve from hdb requiredprocs:value(`hdbtypes`tickerplanttypes)realtime; // required processes tpcheckcycles:@[value;`tpcheckcycles;0W]; // specify the number of times the process will check for requiredprocs tpconnsleep:@[value;`tpconnsleep;10]; // number of seconds between attempts to connect to the source tickerplant // Add hdb and tickerplant to connections list for TorQ .servers.CONNECTIONS:tickerplanttypes,hdbtypes; // upd function gets sum of price*size and sum of size by sym // and adds it to the running total inside the vwap table // This can be used to calculate current vwap quickly. upd:{[t;d] if[t~`trade; `vwap set (`.[`vwap]) + select spts:sum price*size,ssize:sum size by sym from d; ]; }; // Calculates vwap at current time and adds it to the vwaptimes table, at time t. 
calcvwap:{ //`vwaptimes insert `time`vwap!(t;(select vwap:spts%ssize by sym from `.[`vwap])); select vwap:spts%ssize by sym from `.[`vwap] }; // replay data set datareplay:{[] // Turn off timer system"t 0"; // Block process until all required processes are connected .servers.startupdepcycles[requiredprocs;tpconnsleep;tpcheckcycles]; // Retrieve handle to hdb from TorQ serverlist h:first exec w from .servers.SERVERS where proctype in .vwapsub.hdbtypes; // whc:(parse"select from t where ex=\"N\"") 2; // example of optional where clause params: (!) . flip ((`tabs;`trade); (`h;h); (`sts;replaysts); (`ets;replayets); //(`where;whc); // Optional where clause (`interval;replayinterval); (`timer;1b); (`timerfunc;`.vwapsub.logvwap)); // Run datareplay utility using avove parameters msgs:.datareplay.tablesToDataStream params; // Execute each message. value each msgs`msg; }; logvwap:{`vwaptimes insert `time`vwap!(x;.vwapsub.calcvwap[])}; // subscribe to tickerplant types subscribe:{[] // Block process until all required processes are connected .servers.startupdepcycles[requiredprocs;tpconnsleep;tpcheckcycles]; if[count s:.sub.getsubscriptionhandles[tickerplanttypes;();()!()]; .lg.o[`subscribe;"found available tickerplant, attempting to subscribe"]; .sub.subscribe[`trade;`;0b;0b;first s]; ]; .timer.rep[`timestamp$.proc.cd[]+00:00;0Wp;replayinterval;(`logvwapnow;`);0h;"Run logvwapnow at set interval";1b] } \d . vwap:([sym:`$()]spts:`float$();ssize:`int$()); vwaptimes:([]time:`timestamp$();vwap:()); logvwapnow:{.vwapsub.logvwap[.z.p]}; // set upd function at top level upd:.vwapsub.upd; // Perform server discovery .servers.startup[]; // use tickerplant or datareplay $[.vwapsub.realtime; .vwapsub.subscribe[]; // sub to tickerplant .vwapsub.datareplay[]]; // replay hdb data ================================================================================ FILE: TorQ-Finance-Starter-Pack_code_rdb_examplequeries.q SIZE: 641 characters ================================================================================ /- RDB query for counting by sym /- if not today, return an empty table countbysym:{[startdate;enddate] $[.z.d within (startdate;enddate); select sum size, tradecount:count i by sym from trade; ([sym:`symbol$()] size:`long$(); tradecount:`long$())]} /- time bucketted count hloc:{[startdate;enddate;bucket] $[.z.d within (startdate;enddate); select high:max price, low:min price, open:first price,close:last price,totalsize:sum `long$size, vwap:size wavg price by sym, bucket xbar time from trade; ([sym:`symbol$();time:`timestamp$()] high:`float$();low:`float$();open:`float$();close:`float$();totalsize:`long$();vwap:`float$())]} ================================================================================ FILE: TorQ-Finance-Starter-Pack_code_tick_feed.q SIZE: 2,985 characters ================================================================================ / generate data for rdb demo sn:2 cut ( `AMD;"ADVANCED MICRO DEVICES"; `AIG;"AMERICAN INTL GROUP INC"; `AAPL;"APPLE INC COM STK"; `DELL;"DELL INC"; `DOW;"DOW CHEMICAL CO"; `GOOG;"GOOGLE INC CLASS A"; `HPQ;"HEWLETT-PACKARD CO"; `INTC;"INTEL CORP"; `IBM;"INTL BUSINESS MACHINES CORP"; `MSFT;"MICROSOFT CORP") s:first each sn n:last each sn p:33 27 84 12 20 72 36 51 42 29 / price m:" ABHILNORYZ" / mode c:" 89ABCEGJKLNOPRTWZ" / cond e:"NONNONONNN" / ex src:`BARX`GETGO`SUN`DB side:`buy`sell / init.q cnt:count s pi:acos -1 gen:{exp 0.001 * normalrand x} normalrand:{(cos 2 * pi * x ? 1f) * sqrt neg 2 * log x ? 
1f} randomize:{value "\\S ",string "i"$0.8*.z.p%1000000000} rnd:{0.01*floor 0.5+x*100} vol:{10+`int$x?90} randomize[] / ========================================================================================= / generate weights to stop even distribution of counts and sizes weight:0.1*1+neg[cnt]?2*cnt / assign multipliers to skew size columns volmap:s!neg[cnt]?weight bidmap:s!neg[cnt]?weight askmap:s!neg[cnt]?weight / returns list where count of each item is given by random permutation of integer weights skewitems:{[weights;items]raze weights#'neg[count items]?items} / skew sym counts with weighted list of indices weightedsyms:skewitems[`long$weight*10;til cnt]</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="16"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Installing kdb+¶ You can run kdb+ on Linux, macOS, or Windows Step 1: Download¶ The 64-bit kdb+ Personal Edition interpreter is licensed for non-commercial use. It is not licensed for use on cloud servers. The provided license-key file (kc.lic ) requires an always-on Internet connection. Commercial versions of kdb+ are available to customers from downloads.kx.com. Credentials are available from the customer's Designated Contacts. Requires a 64-bit interpreter and a k4.lic or kc.lic license-key file OR a 32-bit interpreter. 32-bit applications will not run in macOS 10.15+ (Catalina and later) Internal distribution at customer sites Most customers download the latest release of kdb+ (along with the accompanying README.txt , the detailed change list) and make a limited number of approved kdb+ versions available from a central file server. Designated Contacts should encourage developers to keep production systems up to date with these versions of kdb+. This can greatly simplify development, deployment and debugging. Platforms and versions The names of the ZIPs denote the platform: l64.zip – 64-bit Linux; w32.zip – 32-bit Windows, etc. m64 contains a universal binary suitable for both Intel and Apple Silicon Macs. l64 contains the Linux x86 build, with l64arm containing the Linux build suitable for ARM processors. Numerical release versions of the form 3.5, or 4.0 are production code. Versions of kdb+ with a trailing t in the name such as 3.7t are test versions and are neither intended nor supported for production use. Step 2: Unzip your download¶ Here we assume you install kdb+ in your HOME directory on Linux or macOS; or in C:\ on Windows, and set the environment variable QHOME accordingly. os QHOME --------------- Linux ~/q macOS ~/q Windows c:\q You can install kdb+ anywhere as long as you set the path in QHOME . Open a command shell and cd to your downloads directory. Unzip the downloaded ZIP to produce a folder q in your install location. unzip l64.zip -d $HOME/q unzip m64.zip -d $HOME/q Expand-Archive w64.zip -DestinationPath C:\q How to run 32-bit kdb+ on 64-bit Linux Use the uname -m command to determine whether your machine is using the 32-bit or 64-bit Linux distribution. If the result is i686 ori386 or similar, you are running a 32-bit Linux distributionx86_64 , you are running a 64-bit Linux distribution To install 32-bit kdb+ on a 64-bit Linux distribution, you need a 32-bit library. 
Use your usual package manager to install i686 or i386: for example, sudo apt-get install libc6-i386 . Step 3: Install the license file¶ If you have a license file, k4.lic or kc.lic , put it in the QHOME directory. Your QHOME directory will then contain: ├── kc.lic ├── l64/ │ └── q └── q.k ├── kc.lic ├── m64/ │ └── q └── q.k ├── kc.lic ├── w64/ │ └── q └── q.k (32-bit versions have 32 in the folder name instead of 64 .) kdb+ looks for a license file in QHOME . To keep your license file elsewhere, set its path in environment variable QLIC . Step 4: Confirm success¶ Confirm kdb+ is working: launch your first q session. cd q/l64/q cd spctl --add q/m64/q xattr -d com.apple.quarantine q/m64/q q/m64/q Authorizing macOS to run kdb+ MacOS Catalina (10.15) introduced tighter security. It may display a warning that it does not recognize the software. If the spctl and xattr commands above have not authorized the OS to run q, open System Preferences > Security & Privacy. You should see a notification that q has been blocked – and a button to override the block. c:\q\w64\q The q session opens with a banner like this. KDB+ 4.0 2020.06.01 Copyright (C) 1993-2020 Kx Systems m64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 EXPIRE… q) License files and 32-bit kdb+ 32-bit kdb+ does not require a license file to run, but if it finds one at launch it will signal a license error if the license is not valid. Try your first expression. q)til 6 0 1 2 3 4 5 End the q session and return to the command shell. q)\\ $ Step 5: Edit your profile¶ Defining q as a command allows you to invoke kdb+ without specifying the path to it. The q interpreter refers to environment variable QHOME for the location of certain files. Without this variable, it will guess based on the path to the interpreter. Better to set the variable explicitly. The QLIC environment variable tells kdb+ where to find a license key file. Absent the variable, QHOME is used. - Open ~/.bash_profile in a text editor, append the following line, and save the file. (Edit~/.bashrc to define a q command for non-console processes.) export QHOME=~/q export PATH=~/q/l64/:$PATH - In the command shell, use the revised profile: source .bash_profile - Open ~/.zshrc in a text editor, append the following lines, and save the file. export QHOME=~/q export PATH=~/q/m64/:$PATH - In the command shell, use the revised profile: source ~/.zshrc In the command shell issue the following commands: setx QHOME "C:\q" setx PATH "%PATH%;C:\q\w64" (In the above, substitute 32 for 64 if you are installing 32-bit kdb+.) Test the new command. Open a new command shell and type q . Last login: Sat Jun 20 12:42:49 on ttys004 ❯ q KDB+ 4.0 2020.06.01 Copyright (C) 1993-2020 Kx Systems m64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 EXPIRE… q) Further customization¶ rlwrap for Linux and macOS¶ On Linux and macOS, the rlwrap command allows the Up arrow to retrieve earlier expressions in the q session. This can be very useful and it is recommended you install it. Run rlwrap -v to check if it's currently installed. If not, install rlwrap using your package manager. Common package managers are: apt , dnf and yum for Linux, and Homebrew and MacPorts for macOS. After installation, the q command can be changed to always run with rlwrap : alias q="rlwrap -r q" This can be added to the end of the user's profile to take effect on every session. 
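With the profile in place (and optionally rlwrap), a new session should pick everything up. A quick sanity check from inside q, assuming the paths used above:
q)getenv `QHOME      / e.g. "/home/user/q" (or "c:\q" on Windows)
q)getenv `QLIC       / empty if the license key file sits in QHOME
q)til 6              / the interpreter is licensed and working
0 1 2 3 4 5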
Interactive development environments¶ If you are a solo student, we recommend learning q by running it from a command shell, as a REPL, writing scripts in a text editor. The examples on this site are produced that way; visual fidelity should help you as you learn. Jupyter notebooks are an interactive publishing format. We are producing lessons in this form and the library is growing. The JupyterQ interface lets you run q code in notebooks. Notebooks are not, however, an IDE, and are unsuitable for studying features such as event handlers. For more advanced study, use either the bare q REPL, or download and install our interactive development environment, KX Developer. Multiple versions¶ Multiple versions of kdb+ can be installed on a system by following this guide. What’s next?¶ Learn the q programming language, look through the reference card, or see in the Database what you can do with kdb+. Licensing¶ Who needs a license for kdb+?¶ Everyone. All use of kdb+ is governed by a license. Without one, kdb+ signals an error 'k4.lic and aborts. 64-bit installations require a license key file: k4.lic , or kc.lic for kdb+ On Demand. If both are found, the kc.lic file is used. Obtain a license key file¶ A license file can be a commercial license or an on-demand person license (for non-commercial use). On-Demand License¶ It requires a kc.lic license key file and an always-on internet connection to operate. Licensing server¶ If kdb+ with an on-demand license cannot contact the KX license server it will abort with a timestamped message. '2018.03.28T11:20:03.831 couldn't connect to license daemon -- exiting If an HTTP proxy is required, the environment variables http_proxy or HTTP_PROXY define the URL of the HTTP proxy to use. Since 4.1t 2022.11.01,4.0 2022.10.26,4.0ce 2022.09.16 the on-demand system honours the NO_PROXY/no_proxy environment variables, with the lowercase version taking precedence. Commercial License¶ Use of commercial kdb+ is covered by your license agreement with KX. Your copy of kdb+ will need access to a valid license key file. If you wish to begin using kdb+ commercially, please contact <a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="97e4f6fbf2e4d7fcefb9f4f8fa">[email protected]</a>. Install the license key file¶ Save a copy of the license key file (k4.lic or kc.lic ) in the QHOME folder. (See installation instructions for your operating system.) Restart your kdb+ session and note the change in the banner. tom@mb13:~/q$ q KDB+ 3.6 2018.07.30 Copyright (C) 1993-2018 Kx Systems m64/ 2()core 8192MB tom mb13.local 192.168.1.44 EXPIRE 2019.05.15 <a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="23574c4e63485b0d404c4e">[email protected]</a> #400 q)til 6 0 1 2 3 4 5 q) Note the license number (#400 in the example) and quote it in any correspondence about the license. If you are sharing use of a commercial license, you will probably want to set the environment variable QLIC to the path of the license key file, as below. License errors¶ A list of possible errors can be found here. Keeping the license key file elsewhere¶ The default location for the license key file is the QHOME folder. You do not have to keep the license key file there. You can use the environment variable QLIC to specify a different filepath. Folder not file Like QHOME , QLIC points to a folder, not a file. 
For example, QLIC='/Users/simon/q'
Core restrictions¶
If the license is for fewer cores than the total number on the machine, the number of cores available to kdb+ must be restricted with OS programs, or kdb+ will signal 'cores and abort.
KDB+ 3.6 2018.07.30 Copyright (C) 1993-2018 Kx Systems
m64/ 4(3)core 16384MB simon simon-macos.local 127.0.0.1 [email protected] #40000
'cores
As long as you use taskset or numa correctly, the binary will not abort itself. You can see the number of cores entitled to a q process:
- by looking at the banner, e.g. …m64/ 4(3)core… – the 4 here is the number of cores reported by the OS, and the 3 is the number of cores licensed
- with .z.c – not the physical cores of the system, but rather the number the process is allowed to use
- as the first element of .z.l
The number of licensed cores is always 16 for the on-demand license.
On the road
The license key file binds the interpreter to your computer's hostname. For example, for a Mac named mymbp the hostname might be mymbp.local. When traveling you may find a network has changed the hostname, for example to mymbp.lan or mymbp.fritz.box. kdb+ then signals a host error on launch. Linux and macOS users can restore their hostnames from the command shell, e.g.
scutil --set HostName "mymbp.local"
License questions¶
Designated Contacts should send license questions to [email protected].
Emergency failover licenses¶
In case of an emergency, such as a hardware or infrastructure failure that renders your license key file unusable, the Designated Contact can email [email protected] to request a temporary failover license to allow use of a different machine or IP address.
Q by Puzzles¶
The q language favors terse code that exploits the iteration implicit in the primitives. Accustomed to analyzing problems as loops, tests, and cases? The shift to array thinking can be a challenge. Play is an excellent way to explore new techniques. Q by Puzzles studies array-oriented programming techniques in the context of simple problems. The puzzles explore distinctive language features in some depth. Use them to train your brain for q.
| problem | solution features | code lines |
|---|---|---|
| 12 Days of Christmas | Map a simple data structure to a complex one; Index At with nested indexes; Amend and Amend At to make changes at depth | 2 |
| ABC Problem | Search a tree of possibilities, stop when found; recursion; Each right; Index At with nested indexes | 3 |
| Abundant Odds | Find values in a series that pass a test; Each, Do, and While iterators; composition | 7 |
| Four is Magic | Convergence and finite-state machines; Converge iterator; composition; Index, Index At | 7 |
| The Name Game | Make substitutions in a string or list of strings; projection; ssr; Amend At; Do and Over iterators | 5 |
| Summarize and Say | Analyze a dictionary of results; map between dictionaries; composition; hash dictionary; Do iterator; reverse lookup; iterator syntax | 7 |
| Word Wheel | Examine the differences between dictionaries; compile test results, and index into them; composition; parallelization | 12 |
// Load all utilities
if[not @[get;".ml.registry.q.main.utils.init";0b];
  loadfile`:registry/q/main/utils/requirements.q;
  loadfile`:registry/q/main/utils/check.q;
  loadfile`:registry/q/main/utils/create.q;
  loadfile`:registry/q/main/utils/copy.q;
  loadfile`:registry/q/main/utils/delete.q;
  loadfile`:registry/q/main/utils/misc.q;
  loadfile`:registry/q/main/utils/path.q;
  loadfile`:registry/q/main/utils/search.q;
  loadfile`:registry/q/main/utils/set.q;
  loadfile`:registry/q/main/utils/update.q;
  loadfile`:registry/q/main/utils/load.q;
  loadfile`:registry/q/main/utils/get.q;
  loadfile`:registry/q/main/utils/query.q
  ]
registry.q.main.utils.init:1b
================================================================================ FILE: ml_ml_registry_q_main_utils_load.q SIZE: 1,851 characters ================================================================================
// load.q - Utilities related to loading items into the registry
// Copyright (c) 2021 Kx Systems Inc
//
// @overview
// Utilities relating to object loading within the registry
//
// @category Model-Registry
// @subcategory Utilities
//
// @end
\d .ml
// @private
//
// @overview
// Load any code with a file extension '*.p','*.py','*.q'
// that has been saved with a model. NB: at the moment there is
// no idea of precedence within this load process so files should
// not be relied on to be loaded in a specific order.
// // @todo // Add some level of load ordering to the process // // @param codePath {string} The absolute path to the 'code' // folder containing any source code // // @return {null} registry.util.load.code:{[codePath] files:key hsym`$codePath; if[0~count key hsym`$codePath;:(::)]; qfiles:files where files like "*.q"; registry.util.load.q[codePath;qfiles]; pfiles:files where files like "*.p"; registry.util.load.p[codePath;pfiles]; pyfiles:files where files like "*.py"; mlops.load.py[codePath;pyfiles]; } // @private // // @overview // Load code with the file extension '*.q' // // @param codePath {string} The absolute path to the 'code' // folder containing any source code // @param files {symbol|symbols} q files which should be loadable // // @return {null} registry.util.load.q:{[codePath;files] sfiles:string files; {system "l ",x,y}[codePath]each $[10h=type sfiles;enlist;]sfiles } // @private // // @overview // Load code with the file extension '*.p' // // @param codePath {string} The absolute path to the 'code' // folder containing any source code // @param files {symbol|symbol[]} Python files which should be loadable // // @return {null} registry.util.load.p:{[codePath;files] pfiles:string files; {system "l ",x,y}[codePath]each $[10h=type pfiles;enlist;]pfiles; } ================================================================================ FILE: ml_ml_registry_q_main_utils_misc.q SIZE: 5,535 characters ================================================================================ // misc.q - Miscellaneous utilities for interacting with the registry // Copyright (c) 2021 Kx Systems Inc // // @overview // Miscellaneous utilities for interacting with the registry // // @category Model-Registry // @subcategory Utilities // // @end \d .ml // @private // // @overview / Protected execution of set function. If an error occurs, any created // folders will be deleted. // // @param function {fn} Function to be applied // @param arguments {list} Arguments to be applied // @param config {dict} Configuration information provided by the user // // @return {null} registry.util.protect:{[function;arguments;config] $[`debug in key .Q.opt .z.x; function . arguments; .[function;arguments;registry.util.checkDepth[config;]] ] } // @private // // @overview // Check the depth of the failing model. If first model in experiment, remove // the entire experiment, otherwise simply remove all folders associated with // the failing model. // // @param config {dict} Configuration information provided by the user // @param err {string} Error string generated when upserting to table // // @return {null} registry.util.checkDepth:{[config;err] logging.warn "'",err,"' flagged when adding new model to modelStore."; // Check if experiment is already in modelStore modelStoreExperiments:?[config`modelStorePath;();();`experimentName]; $[any config[`experimentName]in distinct modelStoreExperiments; // Yes: delete current model version as other models will be // present within the experiment registry.delete.model . config`folderPath`experimentName`modelName`version; // No: delete the entire experiment registry.delete.experiment . 
config`folderPath`experimentName ]; } // @private // // @overview // Generate paths to object and modelStore // // @param folderPath {string|null} A folder path indicating the location // the registry containing the model to be deleted // or generic to remove registry in the current directory // @param objectType {symbol} ````experiment `allModels or `modelVersion``` // @param experimentName {string} Name of experiment // @param modelName {string} Name of model // @param modelVersion {long[]} Model version number (major;minor) // @param config {dict} Configuration information provided by the user // // @return {dict} Paths to object and modelStore registry.util.getObjectPaths:{[folderPath;objectType;experimentName;modelName;modelVersion;config] paths:registry.util.getRegistryPath[folderPath;config]; registryPath:paths`registryPath; modelStorePath:paths`modelStorePath; if[any experimentName ~/: (::;"");experimentName:"undefined"]; experimentName:"",experimentName; experimentPath:$[unnamed:experimentName in("undefined";""); "/unnamedExperiments"; "/namedExperiments/",experimentName ]; additionalFolders:$[objectType~`allModels; modelName; objectType~`modelVersion; modelName,"/",registry.util.strVersion modelVersion; unnamed&modelName~""; string first key hsym`$registryPath,experimentPath; "" ]; objectPath:hsym`$registryPath,experimentPath,"/",additionalFolders; `objectPath`modelStorePath!(objectPath;modelStorePath) } // @private // // @overview // Generate path to file // // @param folderPath {string|null} A folder path indicating the location // the registry containing the file to be deleted // or generic to remove registry in the current directory // @param experimentName {string} Name of experiment // @param modelName {string} Name of model // @param modelVersion {long[]} Model version number (major;minor) // @param localFolder {symbol} Local folder code/metrics/params/config // @param config {dict} Extra details on file to be located // // @return {#hsym} Path to file. 
registry.util.getFilePath:{[folderPath;experimentName;modelName;modelVersion;localFolder;config] cfg:registry.util.check.config[folderPath;()!()]; registryPath:registry.util.getRegistryPath[folderPath;cfg]`registryPath; if[any experimentName ~/: (::;"");experimentName:"undefined"]; experimentName:"",experimentName; experimentPath:$[unnamed:experimentName in("undefined";""); "/unnamedExperiments"; "/namedExperiments/",experimentName ]; prefix:registryPath,experimentPath,"/",modelName,"/",registry.util.strVersion[modelVersion]; $[localFolder~`code; hsym `$prefix,"/code/",config`codeFile; localFolder~`metrics; hsym `$prefix,"/metrics/","metric"; localFolder~`params; hsym `$prefix,"/params/",(config`paramFile),".json"; localFolder~`config; hsym `$prefix,"/config/",string[config`configType],".json"; logging.error"No such local folder in model registry"] } // @private // // @overview // Check user specified folder path and generate corresponding regisrty path // // @param folderPath {string|null} A folder path indicating the location // the registry containing the model to be deleted // or generic to remove registry in the current directory // @param config {dict} Configuration information provided by the user // // @return {string} Path to registry folder registry.util.getRegistryPath:{[folderPath;config] registry.util.check.registry[config] } // @private // // @overview // Parse version as a string // // @param version {long[]} Version number represented as a duple of // major and minor version // // @return {string} Version number provided as a string registry.util.strVersion:{[version] if[0h=type version;version:first version]; "." sv string each version } ================================================================================ FILE: ml_ml_registry_q_main_utils_path.q SIZE: 1,531 characters ================================================================================ // path.q - Utilities for generation of registry paths // Copyright (c) 2021 Kx Systems Inc // // @overview // Utilities for generation of registry paths // // @category Model-Registry // @subcategory Utilities // // @end \d .ml // @private // // @overview // Generate the path to the model/parameter/metric/version folder based // on provided registry path and model information // // @param registryPath {string} Full/relative path to the model registry // @param config {dict} Information relating to the model // being saved, this includes version, experiment and model names // @param folderType {symbol|null} Which folder is to be accessed? 
'model'/ // 'params'/'metrics', if '::' then the path to the versioned model is // returned // // @return {string} The full path to the requested folder within a versioned // model registry.util.path.modelFolder:{[registryPath;config;folderType] folder:$[folderType~`model; "/model/"; folderType~`params; "/params/"; folderType~`metrics; "/metrics/"; folderType~`code; "/code/"; folderType~(::); ""; logging.error"Unsupported folder type" ]; experiment:config`experimentName; expBool:any experiment like "undefined"; experimentType:$[expBool;"un",;]"namedExperiments/"; if[not expBool; experimentType:experimentType,/experiment,"/" ]; modelName:raze config`modelName; modelVersion:"/",/registry.util.strVersion config`version; registryPath,"/",experimentType,modelName,modelVersion,folder } ================================================================================ FILE: ml_ml_registry_q_main_utils_query.q SIZE: 1,216 characters ================================================================================ // query.q - Utilities relating to querying the modelStore // Copyright (c) 2021 Kx Systems Inc // // @overview // Utilities relating to querying the modelStore // // @category Model-Registry // @subcategory Utilities // // @end \d .ml // @private // // @overview // Check user-defined keys in config and generate the correct format for // where clauses // // @param config {dict} Any additional configuration needed for // retrieving the modelStore. Can also be an empty dictionary `()!()`. // @param whereClause {(fn;symbol;any)[]|()} List of whereClauses. Can // initially be an empty list which will be popultated within the below. // Individual clauses will contain the function (like/=) to use in // the where clause, followed by the column name as a symbol and the // associated value to check. 
// @param keys2check {symbol[]} List of config keys to check // @param function {function} `like/=` to be used in where clause // // @return {(fn;symbol;any)[]|()} Updated whereClause registry.util.query.checkKey:{[config;whereClause;key2check;function] if[any b:key2check in key config; key2check@:where b; whereClause,:{(x;z;y z)}[function;config]each key2check ]; whereClause } ================================================================================ FILE: ml_ml_registry_q_main_utils_requirements.q SIZE: 2,591 characters ================================================================================ // requirements.q - Utilities for the addition of requirements with a model // Copyright (c) 2021 Kx Systems Inc // // @overview // Utilities for the addition of requirements with a model // // @category Model-Registry // @subcategory Utilities // // @end \d .ml // @private // // @overview // Generate a requirements file using pip freeze and save to the // model folder, this requires the user to be using a virtual environment // as allowing ad-hoc pip freeze results in incompatible requirements due // to on prem files generated over time // // @param config {dict} Configuration provided by the user to // customize the experiment // // @return {::} registry.util.requirements.pipfreeze:{[config] sys:.p.import`sys; if[(sys[`:prefix]`)~sys[`:base_prefix]`; logging.error"Cannot execute a pip freeze when not in a virtualenv" ]; destPath:config[`versionPath],"/requirements.txt"; requirements:system"pip freeze"; hsym[`$destPath]0:requirements }</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="18"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// Server connection details \d .servers CONNECTIONS:() // sortworker doesn't need to connect to other processes STARTUP:1b // create connections ================================================================================ FILE: TorQ_config_settings_tickerlogreplay.q SIZE: 4,106 characters ================================================================================ // default configuration for the tickerplant replay .merge.mergebybytelimit:0b // merge limit configuration - 0b is by row count, 1b is by byte size. \d .replay // Variables firstmessage:0 // the first message to execute lastmessage:0W // the last message to replay messagechunks:0W // the number of messages to replay at once schemafile:` // the schema file to load data in to tablelist:enlist `all // the tables to replay into (to allow subsets of tp logs to be replayed). `all means all hdbdir:` // the hdb directory to write to tplogfile:` // the tp log file to replay. Only this or tplogdir should be used (not both) tplogdir:` // the tp log directory to read the log files from. Only this or tplogfile should be used (not both) partitiontype:`date // the partitioning of the database. Can be date, month or year (int would have to be handled bespokely) emptytables:1b // whether to overwrite any tables at start up sortafterreplay:1b // whether to re-sort the data at the end of the replay. Sort order is determined by the result of sortandpart[`tablename] partafterreplay:1b // whether to apply the parted attribute after the replay. 
Parted column is determined by result of first sortandpart[`tablename] basicmode:0b // do a basic replay, which replays everything in, then saves it down with .Q.hdpf[`::;d;p;`sym] segmentedmode:1b // use if logs are written using the segmented tickerplant exitwhencomplete:1b // exit when the replay is complete checklogfiles:0b // check if the log file is corrupt, if it is then write a new "good" file and replay it instead gc:1b // garbage collect at appropriate points (after each table save and after the full log replay) autoreplay:1b // start replaying logs at the end of the script without any further user input clean:1b // clean existing folders on start up. Needed if a replay screws up and we are replaying by chunk or multiple tp logs upd:{[t;x] insert[t;x]} // default upd function used for replaying data sortcsv:hsym first .proc.getconfigfile["sort.csv"] //location of sort csv file compression:() //specify the compress level, empty list if no required partandmerge:0b //setting to do a replay where the data is partitioned and then merged on disk mergemethod:`part //can merge data from temporary storage to the hdb in three ways: // 1. part - the entire partition is merged to the hdb // 2. col - each column in the temporary partitions are merged individually // 3. hybrid - partitions merged by column or entire partittion based on byte limit tempdir:`:tempmergedir //location to save data for partandmerge replay mergenumrows:10000000; //default number of rows for merge process mergenumtab:`quote`trade!10000 50000; //specify number of rows per table for merge process mergenumbytes:500000000 // default partition bytesize for merge limit in merge process (only used when .merge.mergebybytelimit=1b) / - settings for the common save code (see code/common/save.q) .save.savedownmanipulation:()!() // a dict of table!function used to manipuate tables at EOD save .save.postreplay:{{[d;p] }} // post replay function, invoked after all the tables have been written down for a given log file // turn off some of the standard stuff \d .proc loadhandlers:0b logroll:0b ================================================================================ FILE: TorQ_config_settings_tickerplant.q SIZE: 742 characters ================================================================================ // Tickerplant config \d .proc loadcommoncode:0b // do not load common code logroll:0b // do not roll logs // Configuration used by the usage functions - logging of client interaction \d .usage enabled:0b // switch off the usage logging // Client tracking configuration // This is the only thing we want to do // and only for connections being opened and closed \d .clients enabled:1b // whether client tracking is enabled opencloseonly:1b // only log open and closing of connections // Server connection details \d .servers enabled:0b // disable server tracking \d .timer enabled:0b // disable the timer \d .hb enabled:0b // disable heartbeating \d .zpsignore enabled:0b // disable zpsignore - zps should be empty ================================================================================ FILE: TorQ_config_settings_wdb.q SIZE: 9,620 characters ================================================================================</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="19"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div 
class="" dir="auto"> <div> <span class="block ">// maintain a dictionary of the db partitions which have been written to by the loader partitions:()!() // maintain a list of files which have been read filesread:() // loader function loaddata:{[loadparams;rawdata] .lg.o[`dataloader;"reading in data chunk"]; // check if we have already read some data from this file // if this is the first time we've seen it, then the first row // may contain the header information // in both cases we want to return a table with the same column names data:$[not loadparams[`filename] in filesread; // it hasn't been seen - the first row may or may not be column headers [filesread,::loadparams[`filename]; loadparams[`headers] xcol $[0h>type loadparams[`separator];{flip x!y}[loadparams[`headers]];::]@(loadparams[`types];loadparams[`separator])0:rawdata]; // if it hasn't been read then we have to just read it as a list of lists flip loadparams[`headers]!(loadparams[`types];first loadparams[`separator])0:rawdata]; .lg.o[`dataloader;"Read ",(string count data)," rows"]; // do some optional extra processing .lg.o[`dataloader;"processing data"]; data:0!loadparams[`dataprocessfunc] . (loadparams;data); // enumerate the table - best to do this once .lg.o[`dataloader;"Enumerating"]; data:$[`symdir in key loadparams; .Q.en[loadparams[`symdir];data]; .Q.en[loadparams[`dbdir];data]]; writedatapartition[loadparams[`dbdir];;loadparams[`partitiontype];loadparams[`partitioncol];loadparams[`tablename];data] each distinct loadparams[`partitiontype]$data[loadparams`partitioncol]; // garbage collection if[loadparams`gc; .gc.run[]]; } writedatapartition:{[dbdir;partition;partitiontype;partitioncol;tablename;data] // sub-select the data to write towrite:data where partition=partitiontype$data partitioncol; // generate the write path writepath:` sv .Q.par[dbdir;partition;tablename],`; .lg.o[`dataloader;"writing ",(string count towrite)," rows to ",string writepath]; // splay the table - use an error trap .[upsert;(writepath;towrite);{.lg.e[`dataloader;"failed to save table: ",x]}]; // make sure the written path is in the partition dictionary partitions[writepath]:(tablename;partition); } finish:{[loadparams] if[count loadparams`compression; .lg.o[`dataloader;"setting compression parameters to "," " sv string loadparams`compression]; .dotz.set[`.z.zd;loadparams`compression]]; // re-sort and set attributes on each partition {.sort.sorttab(x;where partitions[;0]=x)} each distinct value partitions[;0]; // unset .z.zd @[.dotz.unset;`.z.zd;()]; // garbage collection if[loadparams`gc; .gc.run[]]; } // load all the files from a specified directory loadallfiles:{[loadparams;dir] // reset the partitions and files read variables partitions::()!(); filesread::(); // Check the input if[not 99h=type loadparams; .lg.e[`dataloader; ".loader.loadallfiles requires a dictionary parameter"]]; // required fields req:`headers`types`tablename`dbdir`separator; if[not all req in key loadparams; .lg.e[`dataloader;"loaddata requires a dictionary parameter with keys of ",(", " sv string req)," : missing ",", " sv string req except key loadparams]]; // join the loadparams with some default values loadparams:(`dataprocessfunc`chunksize`partitioncol`partitiontype`compression`gc!({[x;y] y};`int$100*2 xexp 20;`time;`date;();0b)),loadparams; // required types reqtypes:`headers`types`tablename`dbdir`symdir`chunksize`partitioncol`partitiontype`gc!`short$(11;10;-11;-11;-11;-6;-11;-11;-1); // check the types if[count w:where not (type each loadparams key 
reqtypes)=reqtypes; .lg.e[`dataloader;"incorrect types supplied for ",(", " sv string w)," parameter(s). Required type(s) are ",", " sv string reqtypes w]]; if[not 10h=abs type loadparams`separator; .lg.e[`dataloader;"separator must be a character or enlisted character"]]; if[not 99h<type loadparams`dataprocessfunc; .lg.e[`dataloader;"dataprocessfunc must be a function"]]; if[not loadparams[`partitiontype] in `date`month`year`int; .lg.e[`dataloader;"partitiontype must be one of `date`month`year`int"]]; if[not count[loadparams`headers]=count loadparams[`types] except " "; .lg.e[`dataloader;"headers and non-null separators must be the same length"]]; if[c:count loadparams[`compression]; if[not (3=c) and type[loadparams[`compression]] in 6 7h; .lg.e[`dataloader;"compression parameters must be a 3 item list of type int or long"]]]; // if a filepattern was specified ensure that it's a list if[(`filepattern in key loadparams) & 10h=type loadparams[`filepattern];loadparams[`filepattern]:enlist loadparams[`filepattern]]; // get the contents of the directory based on optional filepattern filelist:$[`filepattern in key loadparams;(key dir:hsym dir) where max like[key dir;] each loadparams[`filepattern];key dir:hsym dir]; // create the full path filelist:` sv' dir,'filelist; // Load each file in chunks {[loadparams;file] .lg.o[`dataloader;"**** LOADING ",(string file)," ****"]; .Q.fsn[loaddata[loadparams,(enlist`filename)!enlist file];file;loadparams`chunksize]}[loadparams] each filelist; // finish the load finish[loadparams]; } ================================================================================ FILE: TorQ_code_common_datareplay.q SIZE: 4,066 characters ================================================================================ \d .datareplay // Generate times between two input times in p intervals getBuckets:{[s;e;p](s+p*til(ceiling 1+e%p)-(ceiling s%p))} // params[`t] is table data // params[`tc] is time column to cut on // params[`tn] is table name // params[`interval] is the time interval to bucket the messages into. tableDataToDataStream:{[params] // Sort table by time column. params[`t]:params[`tc] xasc delete date from params[`t]; // get all times from table t_times:params[`t][params[`tc]]; $[not null params[`interval]; [ // if there is an interval, bucket messages into this interval // make bukets of ten second intervals times:getBuckets[params[`sts];params[`ets];params[`interval]]; // put start time in fornt of t_times t_times:params[`sts],t_times; //Get places to cut cuts:distinct t_times bin times; cuts:cuts where cuts>-1; // fill first cut if[0<>first cuts;cuts:0,cuts]; //cut table by time interval msgs:cuts cut params[`t]; // get times that match data time:{first x[y]}[;params[`tc]] each msgs; // Return table of times and message chunks -1_([]time:time;msg:{(`upd;x;y)}[params[`tn]] each msgs) ]; // if there is no intevral, cut by distinct time. 
([] time:distinct t_times; msg:{(`upd;x;$[1<count y;flip y;first y])}[params[`tn]] each (where differ t_times) cut params[`t] ) ] }; // params[`h] is handle to hdb process // params[`tn] is table name used to query hdb // params[`syms] is list of instruments to get // params[`where] is an additional where clause in functional form - Not Reuqired // params[`sts] is start of time window to get // params[`ets] is end of time window to get tableToDataStream:{[params] // Build where clause wherec:(enlist (within;`date;(enlist;`date$params[`sts];`date$params[`ets]))) // date in daterange ,$[count params[`syms];enlist (in;`sym;enlist params[`syms]);()] //if syms is empty, omit sym in syms ,$[count params[`where];params[`where];()] // custom where clause (optional) ,enlist (within;params[`tc];(enlist;params[`sts];params[`ets])); // time within (sts;ets) // Have hdb evaluate select statement. t:@[params[`h]; (eval;(?;params[`tn];enlist wherec;0b;())); {.lg.e[`dataloader;"Failed to evauluate query on hdb: ",x]} ]; tableDataToDataStream[params,enlist[`t]!enlist t] }; // params[`sts] is start of time window to get // params[`ets] is end of time window to get // params[`tp] is the inrement between times // params[`timerfunc] is the timer function to use getTimers:{[params] times:getBuckets[params[`sts];params[`ets];params[`interval]]; ([]time:times;msg:params[`timerfunc],'times) } // params[`tabs] is list of tables to get - Required // params[`sts] is start of time window to get - Required // params[`ets] is end of time window to get - Required // params[`syms] is list of instruments to get - Default all syms // params[`where] is an additional where clause in functional form - Not Reuqired // params[`timer] is whether or not to retrieve timer - Default 0b // params[`h] is handle to hdb - Default 0 (self) // params[`interval] is the time interval to bucket the messages into. - Not Required // prarms[`tc] is the time column of the tables specified - Defualt `time // params[`timerfunc] is the timer function to use in timer messages - Default `.z.ts tablesToDataStream:{[params] defaults:`timer`h`syms`interval`tc`timerfunc`where!(0b;0;`symbol$();`timespan$0n;`time;`.z.ts;()); params:defaults,params; // check for default parameters `tabs`sts`ets if[count missing:`tabs`sts`ets except key params;'"mising prameters: "," " sv string missing;]; params[`tabs]:(),params[`tabs]; ds:raze {tableToDataStream x,(enlist `tn)!enlist y}[params] each params[`tabs]; $[params[`timer]; `time xasc ds,getTimers[params,enlist[`interval]! enlist $[null k:params[`interval];0D00:00:10.00;k]]; `time xasc ds] }; \d . ================================================================================ FILE: TorQ_code_common_dbwriteutils.q SIZE: 4,789 characters ================================================================================ / - this scrip contains the code which is used to apply data manipulation at save down, sort and apply attributes to data and garbage collect / - typically used by the TorQ process that persist data to disk e.g. rdb, tickerlogreplay, wdb, ... 
\d .sort / - create an initial .sort.params table (this will be populated later by the getsortcsv function) params:([] tabname:`symbol$(); att:`symbol$(); column:`symbol$(); sort:`boolean$()); / - setting default location for the sort csv file / - will be used if there is a null parameter passed to the getsortcsv function defaultfile:first .proc.getconfigfile["sort.csv"]; // @private // @kind function // @category nlpSentUtility // @desc Decrease the weight of valences before "but", and increase // the weight of valences after it // @param tokens {symbol[]} The tokenized sentence // @param valences {number[]} The sentiment of each token // @returns {number[]} The modified valences sent.i.butCheck:{[tokens;valences] valences:"f"$valences; i:tokens?`but; j:count[tokens]-i; $[j;@[;til i;*;.5]@[;i+1+til j-1;*;1.5]@;]valences } // @private // @kind data // @category nlpSentUtility // @desc These are terms that negate what follows them // @type symbol[] sent.i.NEGATE:`$( "aint";"arent";"cannot";"cant";"couldnt";"darent";"didnt";"doesnt"; "ain't";"aren't";"can't";"couldn't";"daren't";"didn't";"doesn't"; "dont";"hadnt";"hasnt";"havent";"isnt";"mightnt";"mustnt";"neither"; "don't";"hadn't";"hasn't";"haven't";"isn't";"mightn't";"mustn't"; "neednt";"needn't";"never";"none";"nope"; "nor";"not";"nothing"; "nowhere";"oughtnt";"shant";"shouldnt";"uhuh";"wasnt";"werent"; "oughtn't";"shan't";"shouldn't";"uh-uh";"wasn't";"weren't";"without"; "wont";"wouldnt";"won't";"wouldn't";"rarely";"seldom";"despite") // @private // @kind data // @category nlpSentUtility // @desc The co-efficient for sentiments following a negation // @type float sent.i.N_SCALAR:-0.74 // @private // @kind function // @category nlpSentUtility // @desc Check if the preceding words increase, decrease, // or negate the valence // @param tokens {symbol[]} The tokenized sentence // @param valences {float[]} The sentiment of each token // @returns {float} The modified valences sent.i.negationCheck:{[tokens;valences] valences,:3#0f; // "never so/as/this" act like boosters s:tokens in`so`as`this; posNever:where(tokens=`never)&(next next s)|next s; valences:@[valences;posNever+/:2 3;*;1.5 1.25]; // Tokens in NEGATE or ending in "n't" i:where(tokens in sent.i.NEGATE)|tokens like"*n't"; valences:@[valences;1 2 3+\:i except posNever;*;sent.i.N_SCALAR]; // Occurences of "least" that are not part of "at/very least" j:where(tokens=`least)&not prev tokens in`at`very; valences:@[valences;j+1;*;sent.i.N_SCALAR]; -3_ valences } // @private // @kind data // @category nlpSentUtility // @desc Load the dictionary of terms and their sentiment // Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model // for Sentiment Analysis of Social Media Text. Eighth International // Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI,June 2014 // @type dictionary sent.i.lexicon :(!).("SF";"\t")0: hsym `$.nlp.path,"/vader/lexicon.txt"; // @private // @kind data // @category nlpSentUtility // @desc Additional lexicon sentiments // @type dictionary sent.i.lexicon,:(!).
flip( (`$"the shit"; 3f); (`$"the bomb"; 3f); (`$"bad ass"; 1.5f); (`$"yeah right"; -2f); (`$"cut the mustard"; 2f); (`$"kiss of death"; -1.5f); (`$"hand to mouth"; -2f)); // @private // @kind function // @category nlpSentUtility // @desc Calculate the sentiment, given the individual valences // @param valences {float[]} The sentiment of each token // @param text {string} A piece of text // @returns {dictionary} The sentiment of the text along the dimensions // `pos`neg`neu and`compound sent.i.scoreValence:{[valences;text] if[not count valences;:`compound`pos`neg`neu!0 0 0 0f]; compound:sum valences; // Punctuation can increase the intensity of the sentiment punctAmplifier:sent.i.amplifyEP[text]+sent.i.amplifyQM text; compound+:signum[compound]*punctAmplifier; // Normalize score compound:{x%sqrt 15+x*x}compound; // Discriminate between positive, negative and neutral sentiment scores positive:sum 1+valences where valences>0; negative:sum -1+valences where valences<0; neutral:count where valences=0; // If punctuation affects the sentiment, apply emphasis to dominant sentiment if[positive>abs negative;positive+:punctAmplifier]; if[positive<abs negative;negative-:punctAmplifier]; // Used to noramlize the pos, neg and neutral sentiment total:positive+neutral+abs negative; `compound`pos`neg`neu!(compound,abs(positive;negative;neutral)%total) } ================================================================================ FILE: ml_nlp_code_utils.q SIZE: 6,339 characters ================================================================================ // code/utils.q - NLP utilities // Copyright (c) 2021 Kx Systems Inc // // General nlp utility functions \d .nlp // @private // @kind function // @category nlpUtility // @desc Import python functions i.np:.p.import`numpy i.str:.p.import[`builtins]`:str i.bool:.p.import[`builtins]`:bool // @private // @kind function // @category nlpUtility // @desc A fast way to sum a list of dictionaries in // certain cases // @param iter {long} The number of iterations. Note that within this // library iter is set explicitly to 2 for all present invocations // @param dict {dictionary[]} A list of dictionaries // @returns {dictionary} The dictionary values summed together i.fastSum:{[iter;dict] // Summing a large number of dictionaries is expensive if there are many // distinct keys. // This splits them into groups, which have fewer distinct keys, and then // adds those groups. 
dictGroup:(ceiling sqrt count dict)cut dict; sum$[iter;.z.s iter-1;sum]each dictGroup }[2] // @private // @kind function // @category nlpUtility // @desc Replace empty dicts with (,`)!,0f // @param docs {dictionary[]} Documents of text // @returns {dictionary[]} Any empty dictionaries are filled i.fillEmptyDocs:{[docs] $[98=type docs; 0^docs; @[docs;i;:;count[i:where not count each docs]#enlist(1#`)!1#0f] ] } // @private // @kind function // @category nlpUtility // @desc Given a monotonically increasing list of integral numbers, // this finds any runs of consecutive numbers // @param array {number[]} Array of values // @returns {long[][]} A list of runs of consecutive indices i.findRuns:{[array] prevVals:array=1+prev array; inRun:where prevVals|next prevVals; (where array<>1+prev array)_ array@:inRun } // @private // @kind function // @category nlpUtility // @desc Index of the first occurrence of the minimum // value of an array // @param array {number[]} Array of values // @return {number} The index of the minimum element of the array i.minIndex:{[array] array?min array } // @private // @kind function // @category nlpUtility // @desc Index of the first occurrence of the maximum // value of the array // @param array {number[]} Array of values // @return {number} The index of the maximum element of the array i.maxIndex:{[array] array?max array } // @private // @kind function // @category nlpUtility // @desc Calculate the harmonic mean // @param array {number[]} Array of values // @returns {float} The harmonic mean of the input i.harmonicMean:{[array] 1%avg 1%array } // @private // @kind function // @category nlpUtility // @desc Calculate a vector's magnitude // @param array {number[]} Array of values // @returns {float} The magnitude of the vector i.magnitude:{[array] sqrt sum array*array } // @private // @kind function // @category nlpUtility // @desc Normalize a list or dictionary so the highest value is 1f // @param vals {float[]|dictionary} A list or dictionary of numbers // @returns {float[]|dictionary} The input, normalized i.normalize:{[vals] vals%max vals } // @private // @kind function // @category nlpUtility // @desc Takes the largest N values // @param n {long} The number of elements to take // @param vals {any[]} A list of values // @returns {any[]} The largest N values i.takeTop:{[n;vals] n sublist desc vals } // @private // @kind function // @category nlpUtility // @desc Calculate the Jaro similarity score of two strings // @param str1 {string|string[]} A string of text // @param str2 {string|string[]} A string of text // @returns {Float} The similarity score of two strings i.jaro:{[str1;str2] lenStr1:count str1; lenStr2:count str2; if[0=lenStr1;:0f]; // The range to search for matching characters range:1|-1+floor .5*lenStr1|lenStr2; // The low end of each window lowWin:deltas 0|til[lenStr1]+/:(-1 1)*range; k:lowWin[0]+where each str1='sublist\:[flip lowWin]str2; j:raze k[0;0]{x,(y except x)0}/1_k; nonNull:where not null j; n:count nonNull; // Find the number of transpositions trans:.5*sum str1[nonNull]<>str2 asc j nonNull; avg(n%lenStr1;n%lenStr2;(n-trans)%n) } // @private // @kind function // @category nlpUtility // @desc Generating symmetric matrix from triangle (ragged list) // This is used to save time when generating a matrix where the upper // triangular component is the mirror of the lower triangular component // @param raggedList {float[][]} A list of lists of floats representing // an upper triangular matrix where the diagonal values are all 0. // eg. 
(2 3 4f; 5 6f; 7f) for a 4x4 matrix // @returns {float[][]} An n x n two dimensional array // The input, mirrored across the diagonal, with all diagonal values being 1 i.matrixFromRaggedList:{[raggedList] // Pad the list with 0fs to make it an array,and set the diagonal values to // .5 which become 1 when the matrix is added to its flipped value matrix:((til count raggedList)#'0.),'.5,'raggedList; matrix+flip matrix } // @private // @kind data // @category nlpUtility // @desc Parts-of-speech not useful as keywords // @type symbol[] i.stopUniPOS:asc`ADP`PART`AUX`CONJ`DET`SYM`NUM`PRON`SCONJ i.stopPennPOS:asc`CC`CD`DT`EX`IN`LS`MD`PDT`POS`PRP`SYM`TO`WDT`WP`WRB`, `$("PRP$";"WP$";"$") // @private // @kind function // @category nlpUtility // @desc Get the count of individual terms in a corpus // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @returns {dictionary} The count of terms in the corpus i.getTermCount:{[parsedTab] tokens:parsedTab[`tokens]@'where each not parsedTab`isStop; i.fastSum{1+log count each group x}each tokens } // @kind function // @category nlpUtility // @desc Calculate the probability of words appearing in a text // @param tokens {symbol[]} The tokens in the text // @param occurance {dictionary} The total times a token appears in the text // @param token {symbol} A single token // @param nextToken {symbol} The next token in the list of tokens // @returns {dictionary} The probability that the secondary word in the // sequence follows the primary word. i.biGram:{[tokens;occurance;token;nextToken] returnKeys:enlist(token;nextToken); countToken:count where nextToken=tokens 1+where token=tokens; returnVals:countToken%occurance[token]; returnKeys!enlist returnVals } ================================================================================ FILE: ml_nlp_init.q SIZE: 426 characters ================================================================================ // init.q - Load nlp libraries // Copyright (c) 2021 Kx Systems Inc path:{string`nlp^`$@[{"/"sv -1_"/"vs ssr[;"\\";"/"](-3#get .z.s)0};`;""]}` system"l ",path,"/","nlp.q" \d .nlp loadfile`:code/utils.q loadfile`:code/regex.q loadfile`:code/sent.q loadfile`:code/parser.p loadfile`:code/parser.q loadfile`:code/dateTime.q loadfile`:code/extractRtf.p loadfile`:code/email.q loadfile`:code/cluster.q loadfile`:code/nlpCode.q ================================================================================ FILE: ml_nlp_nlp.q SIZE: 927 characters ================================================================================ // nlp.q - Setup for nlp namespace // Copyright (c) 2021 Kx Systems Inc // // Define version, path, and loadfile \d .nlp if[not `e in key `.p; @[{system"l ",x;.pykx.loaded:1b};"pykx.q"; {@[{system"l ",x;.pykx.loaded:0b};"p.q"; {'"Failed to load PyKX or embedPy with error: ",x}]}]]; if[not `loaded in key `.pykx;.pykx.loaded:`import in key `.pykx]; if[.pykx.loaded;.p,:.pykx]; ^ Fill, fills ¶ Replace nulls ^ Fill¶ Replace nulls x^y ^[x;y] Where x and y are conforming lists or dictionaries returns y with any nulls replaced by the corresponding item of x .
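In practice Fill is often applied inside a qSQL update to replace nulls in a column. A small illustrative session (table and values assumed, not from the source; the operator's basic behaviour is shown in the session extracts that follow):

q)t:([]sym:`a`b`c;qty:10 0N 30)
q)update 0^qty from t
sym qty
-------
a   10
b   0
c   30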
q)0^1 2 3 0N 1 2 3 0 q)100^1 2 -5 0N 10 0N 1 2 -5 100 10 100 q)1.0^1.2 -4.5 0n 0n 15 1.2 -4.5 1 1 15 q)`nobody^`tom`dick``harry `tom`dick`nobody`harry q)1 2 3 4 5^6 0N 8 9 0N 6 2 8 9 5 Integer x items are promoted when y is float or real. q)a:11.0 2.1 3.1 0n 4.5 0n q)type a 9h q)10^a 11 2.1 3.1 10 4.5 10 q)type 10^a 9h When x and y are dictionaries, both null and missing values in y are filled with those from x . q)(`a`b`c!1 2 3)^`b`c!0N 30 a| 1 b| 2 c| 30 Fill is an atomic function. Domain and range: b g x h i j e f c s p m d z n u v t ---------------------------------------- b | b . x h i j e f c . p m d z n u v t g | . g . . . . . . . . . . . . . . . . x | x . x h i j e f c . p m d z n u v t h | h . h h i j e f c . p m d z n u v t i | i . i i i j e f c . p m d z n u v t j | j . j j j j e f c . p m d z n u v t e | e . e e e e e f c . p m d z n u v t f | f . f f f f f f c . p m d z n u v t c | c . c c c c c c c . p m d z n u v t s | . . . . . . . . . s . . . . . . . . p | p . p p p p p p p . p p p p n u v t m | m . m m m m m m m . p m d . . . . . d | d . d d d d d d d . p d d z . . . . z | z . z z z z z z z . p . z z n u v t n | n . n n n n n n n . n . . n n n n n u | u . u u u u u u u . u . . u n u v t v | v . v v v v v v v . v . . v n v v t t | t . t t t t t t t . t . . t n t t t Range: bcdefghijmnpstuvxz ^ Coalesce where x and y are keyed tables fills ¶ Replace nulls with preceding non-nulls fills x fills[x] Where x is a list, returns x with any null items replaced by their preceding non-null values, if any. fills is a uniform function. q)fills 0N 2 3 0N 0N 7 0N 0N 2 3 3 3 7 7 To back-fill, reverse the list and the result: q)reverse fills reverse 0N 2 3 0N 0N 7 0N 2 2 3 7 7 7 0N For a similar function on infinities, first replace them with nulls: q)fills {(x where x=0W):0N;x} 0N 2 3 0W 0N 7 0W 0N 2 3 3 3 7 7 The keyword fills is defined as ^\ , which fills forward, meaning that non-null items are filled over succeeding null items. q)fills 1 0N 3 0N 0N 5 1 1 3 3 3 5 q)fills `x``y```z `x`x`y`y`y`z q)update fills c2 from ([] `a`b`c`d`e`f; c2:1 0N 3 0N 0N 5) x c2 ---- a 1 b 1 c 3 d 3 e 3 f 5 To fill initial nulls apply the derived function as a binary. q)fills 0N 0N 3 0N 5 0N 0N 3 3 5 q)0 ^\ 0N 0N 3 0N 5 0 0 3 3 5 Domain and range: b g x h i j e f c s p m d z n u v t ---------------------------------------- b | b . x h i j e f c . p m d z n u v t g | . g . . . . . . . . . . . . . . . . x | x . x h i j e f c . p m d z n u v t h | h . h h i j e f c . p m d z n u v t i | i . i i i j e f c . p m d z n u v t j | j . j j j j e f c . p m d z n u v t e | e . e e e e e f c . p m d z n u v t f | f . f f f f f f c . p m d z n u v t c | c . c c c c c c c . p m d z n u v t s | . . . . . . . . . s . . . . . . . . p | p . p p p p p p p . p p p p n u v t m | m . m m m m m m m . p m d . . . . . d | d . d d d d d d d . p d d z . . . . z | z . z z z z z z z . p . z z n u v t n | n . n n n n n n n . n . . n n n n n u | u . u u u u u u u . u . . u n u v t v | v . v v v v v v v . v . . v n v v t t | t . t t t t t t t . t . . t n t t t Range: bcdefghijmnpstuvxz ? Find¶ Find the first occurrence of an item in a list. x?y ?[x;y] where x is a list or a null, returns for - atom y the smallest index ofy - list y the smallest index of each item ofy Where y or an item of it is not found in x , the smallest index is the smallest integer not found in key x , i.e. count x . Comparisons are exact and are not subject to to comparison tolerance. 
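A short illustration of the exact-match behaviour described above (assumed values; the point is that Find does not apply comparison tolerance, so a float that differs only in its last bits is reported as not found):

q)v:1.0 2.0 3.0
q)v?3f             / exact match found at index 2
2
q)v?3f+1e-13       / differs by a tiny amount, so not found
3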
q)w:10 -8 3 5 -1 2 3 q)w?-8 1 q)w[1] -8 q)w?3 / the first occurrence of 3 2 q)w?17 / not found 7 q)w[7] 0N q)"abcde"?"d" 3 ? (find) is a multithreaded primitive. Type-specific¶ Find is type-specific relative to x . Where x is a - simple list and y a list whose atoms are all the same type asx , and whose first item is a list, the result corresponds toy item-by-item; i.e. Find is right-atomic.q)rt:(10 5 -1;-8;3 17) q)i:w?rt q)i 0 3 4 7 2 7 q)w[i] 10 5 -1 0N 3 0N (If the first item of y is an atom, a type error is signalled.) - list of lists and y is a simple list, items ofx are matched with the whole ofy .q)u:("abcde";10 2 -6;(2 3;`ab)) q)u?10 2 -6 1 q)u?"abcde" 0 - list of lists and y is a mixed list then items ofx are matched with items ofy .q)u?(2 3;`ab) 3 3 In this case Find matches items of x with2 3 and`ab , not(2 3;`ab) . The rank of y should match the rank of the list element for x . Rank-sensitive¶ x?y can’t deal with mixed-rank x . If rank x is n then x?y looks for objects of rank n-1. 2 3?2 3#til 6 / looks for rank 0 objects (0 1 2;4 5)?2 3#til 6 / looks for rank 1 objects A solution to find (2 3;`ab) is q)f:{where x~\:y} q)f[u;(2 3;`ab)] ,2 Searching tables¶ Where x is a table then y must be a compatible record (dictionary or list) or table. That is, each column of x , paired with the corresponding item of y , must be valid arguments of Find. q)\l sp.q q)sp?(`s1;`p4;200) 3 q)sp?`s`p`qty!(`s2;`p5;450) 12 Implicit Find¶ Find is implicit in the definitions of except , in , within and dictionary lookup. first , last ¶ first ¶ First item of a list first x first[x] Where x is a list or dictionary, returns its first item, else x . Often used with Each to get the first item of each item of a list, or of each key in a dictionary. q)first 1 2 3 4 5 1 q)first 42 42 q)RaggedArray:(1 2 3;4 5;6 7 8 9;0) q)first each RaggedArray 1 4 6 0 q)RaggedDict:`a`b`c!(1 2;3 4 5;"hello") q)first RaggedDict / value of first key 1 2 q)first each RaggedDict a| 1 b| 3 c| "h" Returns the first row of a table. q)\l sp.q q)first sp s | `s$`s1 p | `p$`p1 qty| 300 first is the dual to enlist . q)a:10 q)a~first enlist 10 1b q)a~first first enlist enlist 10 1b first is an aggregate function. last ¶ Last item of a list last x last[x] Where x is a list or dictionary, returns its last item; otherwise x . q)last til 10 9 q)last `a`b`c!1 2 3 3 q)last 42 42 fkeys ¶ Foreign-key columns of a table fkeys x fkeys[x] Where x is a table, returns a dictionary that maps foreign-key columns to their tables. q)f:([x:1 2 3]y:10 20 30) q)t:([]a:`f$2 2 2;b:0;c:`f$1 1 1) q)meta t c| t f a -| ----- a| i f b| i c| i f q)fkeys t a| f c| f flip ¶ flip x flip[x] Returns x transposed, where x may be a list of lists, a dictionary or a table. In a list of lists, each list must be the same length. q)flip (1 2 3;4 5 6) 1 4 2 5 3 6 The flip of a dictionary is a table, and vice versa. If x is a dictionary where the keys are a list of symbols, and the values are lists of the same count (or atoms), then flip x returns a table. The flip of a table is a dictionary. q)D:`sym`price`size!(`IBM`MSFT;10.2 23.45;100 100) q)flip D sym price size --------------- IBM 10.2 100 MSFT 23.45 100 q)D~flip flip D 1b If an atom(s) are provided, they are extended to match the length of the list(s). q)flip (1 2 3;4) 1 4 2 4 3 4 q)flip `sym`price`size!(`I;10.2 23.45 45.67;100) sym price size -------------- I 10.2 100 I 23.45 100 I 45.67 100 ! 
Flip Splayed or Partitioned¶ x!y ![x;y] Where x is a symbol list and y is - an hsym symbol atom denoting a splayed table - a non-hsym symbol atom denoting a partitioned table returns the flip of y . floor ¶ Round down floor x floor[x] Returns the greatest integer less than or equal to numeric x . q)floor -2.1 0 2.1 -3 0 2 floor is a multithreaded primitive. Implicit iteration¶ floor is an atomic function. q)floor(1.2;3.4 5.6) 1 3 5 q)floor`a`b!(1.2;3.4 5.6) a| 1 b| 3 5 q)floor([]a:1.2 3.4;b:5.6 7.8) a b --- 1 5 3 7 domain: b g x h i j e f c s p m d z n u v t range: . . . h i j j j c s . . . s . . . . Prior to V3.0¶ Prior to V3.0, floor - used comparison tolerance - accepted datetime (Since V3.0, use "d"$ instead.) q)floor 2 - 10 xexp -12 -13 1 2 q)floor 2009.10.03T13:08:00.222 /type error since V3.0 2009.10.03 q)"d"$2009.10.03T13:08:00.222 2009.10.03 Domain and range¶ domain b g x h i j e f c s p m d z n u v t range . . . h i j j j c s . . . . . . . . Range: hijcs Exposed infrastructure¶ The k programming language¶ Q is an embedded domain-specific language for time-series analysis, implemented in the proprietary programming language k. As such, q leaves features of k exposed. They should be avoided. The k language has no public documentation and is for use by KX system programmers only. It changes from version to version of q. These changes are not documented. Use of k in q scripts The use of k expressions in kdb+ applications is unsupported and strongly discouraged. Internal functions¶ The operator ! with a negative left argument calls an internal function. Q cover functions should be substituted where available. Unary forms¶ Many q binary operators have unary forms. They can be evaluated in q but this use is discouraged as poor q style. q)(#:)"zero" / discouraged 4 q)count "zero" / supported 4 Instead, use the corresponding q keywords. !: key/til #: count $: string %: reciprocal &: where *: first +: flip ,: enlist -: neg .: get 0:: read0 1:: read1 <: iasc =: group >: idesc ?: distinct @: type ^: null _: floor |: reverse ~: not Variadic keywords¶ Q keywords, such as deltas , that are simple covers of extensions inherit their variadic syntax, though they cannot be applied infix as the extensions can. q)deltas / cover for the extension -': q)y:1 1 3 5 8 13 q)-':[y] / unary 1 0 2 2 3 5 q)deltas[y] / unary 1 0 2 2 3 5 q)-':[10;y] / binary, bracket -9 0 2 2 3 5 q)deltas[10;y] / binary, bracket -9 0 2 2 3 5 q)10-':y / binary, infix -9 0 2 2 3 5 q)10 deltas y / cannot be applied infix 'Cannot write to handle 10. OS reports: Bad file descriptor [0] 10 deltas y ^ The keywords are intended as covers for the unary application of the extension. For binary application, use the extension, as shown above. Binary application of variadic keywords The binary application of variadic keywords is deprecated. Support for it may be withdrawn in the future. The variadic keywords are: deltas differ max maxs min mins prd prds ratios sum sums sv and vs ¶ The keywords sv and vs cover overloads of /: and \: .
q)(0x40\:)2 /poor q style 0x00000000000000000002 q)0x40 vs 2 0x00000000000000000002 /good q style The keywords are defined for readability. Use them. File system¶ kdb+ communicates with the filesystem through - one-shot operations - handles to persistent connections Handles are more efficient for multiple operations on a file. File paths are displayed separated with forward slashes, regardless of the operating system. One-shot operations¶ get set read/write or memory-map a data file¹ value read a data file¹ hcount file size hdel delete a file or folder hsym symbol/s to file symbol/s¹ 0: File Text read/write chars¹ read0 read chars¹ 1: File Binary read/write bytes¹ read1 read bytes¹ 2: Dynamic Load load shared object save load a variable rsave rload a splayed table dsave tables ? Enum Extend ¹ Has application beyond the file system. Setting and getting¶ Keywords set and get let you treat files as variables that persist in the filesystem. q)`:data/foo`:data/bar set'(42;"thin white duke") `:data/foo`:data/bar q)get `:data/foo 42 q)get `:data/bar "thin white duke" File utilities¶ Writing and reading¶ Any file can be read or written as bytes (binary). Text-file primitives handle text files. 0 associates with text; 1 with bytes. The File Text operator 0: can also represent a table as strings, and interpret key-value pairs. Tables¶ kdb+ uses files and directories to persist database tables. Partitioning a table divides its rows across multiple directories. Splaying a table stores each column as a separate file. Connections¶ A persistent connection enables multiple operations on a file without repeatedly opening and closing it. Opening a connection to a file returns a handle to the connection. The handle takes the form of an int that is also an applicable value. System handles 0, 1, and 2 are to the console, stdout, and stderr. They are always open. Opening a connection to a non-existent file creates it and any missing ancestor directories. Applying the handle to data appends it to the file as bytes. Applying the neg of the handle to char data appends it as text. The result of a successful operation is the positive or negative handle. Text¶ q)key `:foo/ / does not exist q)show h:hopen `:foo/bar.txt 12i q)key `:foo/ / file and dir created ,`bar.txt q)neg[h] "hear the lark and hearken" -12i q)-12i "to the barking of the dog fox" -12i q)neg[h] "gone to ground" -12i q)hclose h q)hcount `:foo/bar.txt 71 q)read0 `:foo/bar.txt "hear the lark and hearken" "to the barking of the dog fox" "gone to ground" q)read0 (`:foo/bar.txt;10;20) "ark and hearken" "to t" Bytes¶ q)hopen ":foo/hello.dat" 7i q)7i 0x68656c6c6f776f726c64 7i q)hclose 7i q)read1 `:foo/hello.dat 0x68656c6c6f776f726c64 Relative filepaths¶ Relative filepaths are sought in the following locations, in order. File compression Q for Mortals §14 Introduction to kdb+ Function notation¶ Function notation enables the definition of functions. Function notation is also known as the lambda notation and the defined functions as lambdas. Anonymity Although the term lambda originated elsewhere as a name for an anonymous function, we use it to denote any function defined using the lambda notation. In this usage a lambda assigned a name is still a lambda. For example, if plus:{x+y} , then plus is a lambda. Lambdas have datatype 100. A lambda is defined as a pair of braces (curly brackets) enclosing an optional signature (a list of up to 8 argument names) followed by a zero or more expressions separated by semicolons. 
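For example, a lambda is an ordinary value that can be assigned, inspected and applied; datatype 100 identifies it (a small illustrative session):

q)f:{[a;b](a*a)+b*b}   / a signed lambda assigned a name is still a lambda
q)type f
100h
q)f[3;4]
25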
Signature¶ q){[a;b] a2:a*a; b2:b*b; a2+b2+2*a*b}[20;4] / binary function 576 Functions with 3 or fewer arguments may omit the signature and instead use default argument names x , y and z . A lambda with a signature is signed; without, unsigned. q){[x;y](x*x)+(y*y)+2*x*y}[20;4] / signed lambda 576 q){(x*x)+(y*y)+2*x*y}[20;4] / unsigned lambda 576 Use x , y , and z only as names of the first three arguments Using other names for the first arguments of a lambda often helps the reader. But using x , y , or z for any other argument sows confusion. Pattern matching¶ The function signature can include patterns - see pattern matching. Rank¶ The rank of a function is the number of arguments it takes. The rank of a signed lambda is the number of names in its signature. The rank of an unsigned lambda is the here highest-numbered of the three default argument names x (1), y (2) and z (3) used in the function definition. {[h;l;o;c].5*(h-l;c-o)} / rank 4 {x+y*10} / rank 2 {x+z*10} / rank 3 Result¶ The result of the lambda is the result of the last statement evaluated. If the last statement is empty, the result is the generic null, which is not displayed. q)f:{2*x;} / last statement is empty q)f 10 / no result shown q)(::)~f 10 / matches generic null 1b Explicit return¶ To terminate evaluation successfully and return a value, use an empty assignment, which is : with a value to its right and no variable to its left. q)c:0 q)f:{a:6;b:7;:a*b;c::98} q)f 0 42 q)c 0 Abort¶ To abort evaluation immediately, use Signal, which is ' with a value to its right. q)c:0 q)g:{a:6;b:7;'`TheEnd;c::98} q)g 0 {a:6;b:7;'`TheEnd;c::98} 'TheEnd q)c 0 Name scope¶ Within the context of a function, - name assignments with : are local to it and end after evaluation - assignments with :: are global (in the session root) and persist after evaluation unless the name assigned is an argument or already defined as a local q)a:b:0 / set globals a and b to 0 q)f:{a:10+3*x;b::100+a;} / f sets local a, global b q)f 1 2 3 / apply f q)a / global a is unchanged 0 q)b / global b is updated 113 116 119 q)b:42 q){[a;b]b::99;a+b}[10;20] / assignment is local 109 q)b 42 q){b:x=y;b::99;x+b}[10;20] / assignment is local 109 q)b 42 References to names not assigned locally are resolved in the session root. Local assignments are strictly local: invisible to other functions applied during evaluation. q)a:42 / assigned in root q)f:{a+x} q)f 1 / f reads a in root 43 q){a:1000;f x}1 / f reads a in root 43 Local variables are identified on parsing and initialized as () (empty list). Assignments within code branches (never recommended) can produce unexpected results. q)t:([]0 1) q){select from t}[] / global t x - 0 1 q){if[x;t:([]`a`b)];select from t} 1b / local t x - a b q) {if[x;t:([]`a`b)];select from t} 0b / local t is () 'type [4] {if[x;t:([]`a`b)];select from t} ^ Within lambdas, read and set global variables with get and set Multiline definition¶ In scripts function definitions can straddle multiple lines. 
sqsum:{[a;b] / square of sum
  a2:a*a;
  b2:b*b;
  a2+b2+2*a*b / implicit result
  }
Variables and constants¶ A lambda definition can include up to:

|           | in use        | current | V3.5 | <V3.5 |
|-----------|---------------|---------|------|-------|
| arguments | 8             | 8       | 8    |       |
| locals    | \(m\)         | 110     | 23   | 23    |
| globals   | \(n\)         | 110     | 31   | 31    |
| constants | \(239-(m+n)\) | 95      | 96   |       |

/ The environment variables to query proxy information for each URL scheme and the 'bypass' configuration .http.cfg.proxyEnvVars:(`symbol$())!`symbol$(); .http.cfg.proxyEnvVars[`$("http://"; "ws://")]: 2#`HTTP_PROXY; .http.cfg.proxyEnvVars[`$("https://"; "wss://")]: 2#`HTTPS_PROXY; .http.cfg.proxyEnvVars[`bypass]: `NO_PROXY; / The new line separator for HTTP requests .http.newLine:"\r\n"; / The HTTP version to send to the target server .http.httpVersion:"HTTP/1.1"; / The valid TLS-enabled URL schemes .http.tlsSchemes:`https`wss; / The cached or latest proxy information .http.proxy:key[.http.cfg.proxyEnvVars]!count[.http.cfg.proxyEnvVars]#""; / The user agent to send with each HTTP request .http.userAgent:""; / If .Q.gz is available, checked on init .http.gzAvailable:0b; / The default 'Accept' header to send if one isn't specified by the caller / Set to empty string to disable sending 'Accept' header .http.acceptHeader:"*/*"; / Step dictionary of HTTP response codes to their types for additional information .http.responseTypes:`s#100 200 300 400 500i!`informational`success`redirect`clientError`serverError; / Headers that are extracted in '.http.i.parseResponse' for post processing .http.responseExtractHeaders:`contentType`contentEncoding!`$("content-type";"content-encoding"); .http.init:{ if[.http.cfg.cacheProxy; .log.if.info "Querying environment variables for HTTP / HTTPS proxy settings"; .http.proxy:.http.i.getProxyConfig[]; ]; if[.http.cfg.sendUserAgent; if["" ~ .http.userAgent; .http.userAgent:"-" sv string `kdbplus,.z.K,.z.k,.z.i; ]; .log.if.info "Send user agent with HTTP requests enabled [ User Agent: ",.http.userAgent," ]"; ]; .http.gzAvailable:.ns.isSet `.Q.gz; .log.if.info "HTTP compression with GZIP [ Available: ",string[`no`yes .http.gzAvailable]," ]"; }; / Performs a HTTP GET to the target URL and parses the response / NOTE: The header "Connection: close" is sent with this request / @see .http.send .http.get:{[url; headers] headers[`Connection]:"close"; :.http.send[`GET; url; ""; ""; headers]; }; / Performs a HTTP POST to the target URL and parses the response / NOTE: The header "Connection: close" is sent with this request / @see .http.send .http.post:{[url; body; contentType; headers] headers[`Connection]:"close"; :.http.send[`POST; url; body; contentType; headers]; }; / Performs a HTTP PUT to the target URL and parses the response / NOTE: The header "Connection: close" is sent with this request / @see .http.send .http.put:{[url; body; contentType; headers] headers[`Connection]:"close"; :.http.send[`PUT; url; body; contentType; headers]; }; / Performs a HTTP DELETE to the target URL and parses the response / NOTE: The header "Connection: close" is sent with this request / @see .http.send .http.delete:{[url; body; contentType; headers] headers[`Connection]:"close"; :.http.send[`DELETE; url;
body; contentType; headers]; }; / Performs a HTTP PATCH to the target URL and parses the response .http.patch:{[url; body; contentType; headers] headers[`Connection]:"close"; :.http.send[`PATCH; url; body; contentType; headers]; }; / Sends a HTTP request and parses the response / @param method (Symbol) The HTTP method that the request will be sent as / @param url (String) The target URL to send data to / @param body (String) The body content to send / @param contentType (String) The optional type of the content being sent. If empty, will default to 'text/plain' / @param headers (Dict) A set of headers to optionally send with the POST request. This dictionary must have symbol keys and string values. / @throws InvalidHeaderKeyTypeException If any of the header names are not a symbol / @throws InvalidHeaderValueTypeException If any of the header values are not a string / @see .http.i.getUrlDetails / @see .http.i.buildRequest / @see .http.i.send / @see .http.i.parseResponse .http.send:{[method; url; body; contentType; headers] if[not all (.type.isSymbol,(3#.type.isString),.type.isDict) @' (method; url; body; contentType; headers); '"IllegalArgumentException"; ]; if[0 < count headers; if[not all .type.isSymbol each key headers; '"InvalidHeaderKeyTypeException"; ]; if[not all .type.isString each value headers; '"InvalidHeaderValueTypeException"; ]; ]; if[0 < count body; if[0 = count contentType; contentType:"text/plain"; ]; headers[`$"Content-Type"]:contentType; ]; urlParts:.http.i.getUrlDetails url; response:.http.i.parseResponse .http.i.send[urlParts;] .http.i.buildRequest[method; urlParts; headers; body]; if[.http.cfg.followRedirects & `redirect = response`statusType; location:response[`headers] key[response`headers] first where `location = lower key response`headers; if["/" = first location; location:raze urlParts[`scheme`baseUrl],location; ]; if[0 < count location; .log.if.info "Following HTTP redirect as configured [ Original URL: ",url," ] [ New URL: ",location," ]"; response:.http.send[method; location; body; contentType; headers]; ]; ]; :response; }; / Builds the HTTP request string: / * If proxy is enabled, ensure the target request is an absolute path / * Basic authorisation is supported via 'user:pass@' syntax in the URL / * If configured, the "User-Agent" header will be sent / * If available, the request will request 'gzip' compressed responses (with 'Accept-Encoding') / * The "Host" header is always appended to the headers during request building / @param requestType (Symbol) The HTTP request type (e.g. GET) / @param urlParts (Dict) The URL breakdown and proxy details (using '.http.i.getUrlDetails') / @param headers (Dict) The set of headers to sent with the request / @param body (String) The body of content to send as part of the request / @returns (String) A complete HTTP request string that can be sent to the remote server / @see .http.newLine / @see .http.userAgent / @see .http.httpVersion / @see .http.i.headerToString .http.i.buildRequest:{[requestType; urlParts; headers; body] headers:(1#.q),headers; urlPath:urlParts`path; if["?" in urlPath; urlArgs:last "?" vs urlPath; urlArgs:"=" vs/: "&" vs urlArgs; if[not all 2 = count each urlArgs; .log.if.error "URL query string is invalid, must be ampersand separated 'key=value' pairs [ URL: ",urlParts[`path]," ]"; '"InvalidUrlQueryStringException"; ]; urlArgs:.h.hu@/:/: urlArgs; urlArgs:"&" sv "=" sv/: urlArgs; urlPath:first["?" 
vs urlPath],"?",urlArgs; ]; if[urlParts`proxy; urlPath:raze urlParts[`scheme`baseUrl],urlPath; ]; if[0 < count body; headers[`$"Content-Length"]:string count body; if[not (`$"Content-Type") in key headers; headers[`$"Content-Type"]:"text/plain"; ]; body,:.http.newLine; ]; if[0 < count urlParts`auth; headers[`Authorization]:"Basic ",.Q.btoa urlParts`auth; ]; if[.http.cfg.sendUserAgent; headers[`$"User-Agent"]:.http.userAgent; ]; if[.http.gzAvailable; headers[`$"Accept-Encoding"]:"gzip"; ]; if[not `accept in lower key headers; if[0 < count .http.acceptHeader; headers[`Accept]:.http.acceptHeader; ]; ]; headers[`host]:urlParts`baseUrl; request:enlist " " sv (string requestType; urlPath; .http.httpVersion); request,:.http.i.headerToString ./: flip (key;value)@\: enlist[`]_ headers; :.http.newLine sv request,enlist .http.newLine,body; }; / @param url (String) The URL to breakdown into its constituent parts / @returns (Dict) The URL as broken down by '.Q.hap' with keys assigned to it and the target host/port information / @throws InvalidUrlException If the result from '.Q.hap' is not exactly 4 strings / @see .Q.hap .http.i.getUrlDetails:{[url] urlParts:.Q.hap url; if[not 4 = count urlParts; '"InvalidUrlException"; ]; details:`scheme`auth`baseUrl`path!urlParts; details,:.http.i.getTargetHp details; :details; }; / Converts a header key and value into the correct string format for the HTTP request / @param hKey (Symbol) The header key / @param hVal () The header value / @returns (String) The header value as 'key: value' .http.i.headerToString:{[hKey; hVal] keyStr:@[string hKey; 0; upper]; valStr:.type.ensureString hVal; :keyStr,": ",valStr; }; / Sends the specified request string to the URL / @param urlParts (Dict) The URL breakdown (using '.http.i.getUrlDetails') / @param requestStr (String) The string to send to the target URL / @returns (String) The response as receieved from the target server / @throws TlsNotAvailableException If a TLS-encrypted URL scheme is specified and TLS is not available / @throws HttpConnectionFailedException If the connection to the target URL fails .http.i.send:{[urlParts; requestStr] if[any urlParts[`scheme] like/: string[.http.tlsSchemes],\:"://"; if[not .util.isTlsAvailable[]; .log.if.error "Cannot open TLS-based connection as TLS is not available in the current process"; '"TlsNotAvailableException"; ]; ]; urlForLog:.http.i.urlForLog urlParts; .log.if.info "Sending HTTP request [ URL: ",urlForLog," ] [ Via Proxy: ",string[`no`yes urlParts`proxy]," ]"; .log.if.trace "HTTP request:\n",requestStr; httpResp:@[urlParts`hp; requestStr; { (`HTTP_REQUEST_FAIL; x) }]; if[`HTTP_REQUEST_FAIL ~ first httpResp; .log.if.error "Failed to connect to HTTP endpoint [ URL: ",urlForLog," ]. 
Error - ",last httpResp; '"HttpConnectionFailedException"; ]; .log.if.info "HTTP request returned OK [ URL: ",urlForLog," ]"; :httpResp; }; / Converts the URL parts dictionary into a string that is suitable for printing to the log by removing any / password specified with the 'user:pass@' syntax / @param urlParts (Dict) The URL breakdown (using '.http.i.getUrlDetails') / @returns (String) The URL to print to log .http.i.urlForLog:{[urlParts] urlParts:`scheme`auth`baseUrl`path#urlParts; if[0 = count urlParts`auth; :raze urlParts; ]; authPassSplit:first where ":" = urlParts`auth; if[not null authPassSplit; urlParts[`auth]:@[urlParts`auth; authPassSplit + 1_ til count[urlParts`auth] - authPassSplit; :; "*"]; ]; :raze @[urlParts; `auth; ,[;"@"]]; }; / Loads the proxy configuration from the specified environment variables. If the upper-case variants of the / environment variables are not set, the function will also look for the lower-case variants as well / The 'bypass' environment variable is split on comma before being returned / @returns (Dict) The proxy configuration with the keys as specified in '.http.cfg.proxyEnvVars' / @see .http.cfg.proxyEnvVars .http.i.getProxyConfig:{ proxy:getenv each .http.cfg.proxyEnvVars; notSet:where proxy~\:""; if[0 < count notSet; proxy[notSet]:getenv each lower .http.cfg.proxyEnvVars notSet; ]; proxy[`bypass]:"," vs proxy`bypass; :proxy; }; // @private // @kind function // @category nlpClusteringUtility // @desc Graph clustering that works on a similarity matrix // @param matrix {boolean[][]} NxN adjacency matrix // @returns {long[][]} Lists of indices in the corpus where each row // is a cluster cluster.i.similarityMatrix:{[matrix] matrix:"f"$matrix; // Make the matrix stochastic and run MCL until stable normMatrix:cluster.i.columnNormalize matrix; attractors:cluster.i.MCL/[normMatrix]; // Use output of MCL to get the clusters clusters:where each attractors>0; // Remove empty clusters and duplicates distinct clusters where 0<>count each clusters } // @private // @kind function // @category nlpClusteringUtility // @desc SM Van Dongen's MCL clustering algorithm // @param matrix {float[][]} NxN matrix // @return {float[][]} MCL algorithm applied to matrix cluster.i.MCL:{[matrix] // Expand matrix by raising to the nth power (currently set to 2) do[2-1;mat:{i.np[`:matmul;x;x]`}matrix]; mat:cluster.i.columnNormalize mat*mat; @[;;:;0f] ./:flip(mat;where each(mat>0)&(mat<.00001)) } // @kind function // @category nlpClustering // @desc Uses the top ten keywords of each document in order to cluster // similar documents together // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param k {long} The number of clusters to return // @returns {long[][]} The documents' indices grouped into clusters cluster.summarize:{[parsedTab;k] if[0=count parsedTab;:()]; docs:i.takeTop[10]each cluster.i.asKeywords parsedTab; summary:i.fastSum[docs]%count docs; centroids:(); do[k; // Find the document that summarizes the corpus best // and move that document to the centroid list centroids,:nearest:i.maxIndex docs[;i.maxIndex summary]; summary-:docs nearest; summary:(where
summary<0)_ summary ]; cluster.groupByCentroids[docs centroids;docs] } // @kind function // @category nlpClustering // @desc Use the top 50 keywords of each document to calculate the // cohesiveness as measured by the mean sum of sqaures // @param keywords {dictionary[]} A parsed document containing keywords and // their associated significance scores // @returns {float} The cohesion of the cluster cluster.MSE:{[parsedTab] n:count parsedTab; if[(0=n)|0=sum count each parsedTab,(::);:0n]; if[1=n;:1f]; centroid:i.takeTop[50]i.fastSum parsedTab; docs:i.fillEmptyDocs parsedTab; // Don't include the current document in the centroid, or for small clusters // it just reflects its similarity to itself dists:0^compareDocToCentroid[centroid]each docs; avg dists*dists } // @kind function // @category nlpClustering // @desc The bisecting k-means algorithm which uses k-means to // repeatedly split the most cohesive clusters into two clusters // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param k {long} The number of clusters to return // @param iters {long} The number of times to iterate the refining step // @returns {long[][]} The documents' indices, grouped into clusters cluster.bisectingKMeans:{[parsedTab;k;iters] docs:cluster.i.asKeywords parsedTab; if[0=n:count docs;:()]; (k-1)cluster.i.bisect[iters;docs]/enlist til n } // @kind function // @category nlpClustering // @desc k-means clustering for documents // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param k {long} The number of clusters to return // @param iters {long} The number of times to iterate the refining step // @returns {long[][]} The documents' indices, grouped into clusters cluster.kmeans:{[parsedTab;k;iters] docs:cluster.i.asKeywords parsedTab; numDocs:count docs; iters cluster.i.kmeans[docs]/(k;0N)#neg[numDocs]?numDocs } // @kind function // @category nlpClustering // @desc Given a list of centroids and a list of documents, match each // document to its nearest centroid // @param centroids {dictionary[]} Centroids as keyword dictionaries // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @returns {long[][]} Lists of document indices where each list is a cluster // N.B. These don't line up with the number of centroids passed in, // and the number of lists returned may not equal the number of centroids. // There can be documents which match no centroids (all of which will end up // in the same group), and centroids with no matching documents. cluster.groupByCentroids:{[centroids;parsedTab] // If there are no centroids, everything is in one group if[not count centroids;:enlist til count parsedTab]; value group cluster.i.findNearestNeighbor[centroids]each parsedTab } // @kind function // @category nlpClustering // @desc Uses the Radix clustering algorithm and bins are taken from // the top 3 terms of each document // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param k {long} The number of clusters desired, though fewer may // be returned. 
This must be fairly high to cover a substantial amount of the // corpus, as clusters are small // @returns {long[][]} The documents' indices, grouped into clusters cluster.radix:{[parsedTab;k] docs:cluster.i.asKeywords parsedTab; // Bin on keywords, taking the 3 most significant keywords from each document // and dropping those that occur less than 3 times reduced:{distinct 4#key desc x}each docs; // Remove any keywords that occur less than 5 times keywords:where (count each group raze reduced) >= 5; keywords:keywords except `; clusters:{[reduced;keyword]where keyword in/:reduced}[reduced]each keywords; // Score clusters based on the harmonic mean of their cohesion and log(size) cohesion:i.normalize cluster.MSE each docs clusters; size:i.normalize log count each clusters; score:i.harmonicMean each flip(cohesion;size); // Take the n*2 highest scoring clusters, as merging will remove some // but don't run it on everything, since merging is expensive. // This may lead to fewer clusters than expected if a lot of merging happens clusters:clusters sublist[2*k]idesc score; sublist[k]cluster.i.mergeOverlappingClusters/[clusters] } // @kind function // @category nlpClustering // @desc Uses the Radix clustering algorithm and bins by the most // significant term // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param k {long} The number of clusters desired, though fewer may // be returned. This must be fairly high to cover a substantial amount of the // corpus, as clusters are small // @returns {long[][]} The documents' indices, grouped into clusters cluster.fastRadix:{[parsedTab;k] docs:cluster.i.asKeywords parsedTab; // Group documents by their most significant term grouped:group i.maxIndex each docs; // Remove the entry for empty documents grouped:grouped _ `; // Remove all clusters containing only one element clusters:grouped where 1<count each grouped; // Score clusters based on the harmonic mean of their cohesion and log(size) cohesion:i.normalize cluster.MSE each docs clusters; size:i.normalize log count each clusters; score:i.harmonicMean each flip(cohesion;size); // Return the n highest scoring clusters clusters sublist[k]idesc score } // @kind function // @category nlpClustering // @desc Cluster a subcorpus using graph clustering // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param minimum {float} The minimum similarity that will be considered // @param sample {boolean} If this is true, a sample of sqrt(n) documents is // used, otherwise all documanets are used // @returns {long[][]} The documents' indices, grouped into clusters cluster.MCL:{[parsedTab;minimum;sample] docs:cluster.i.asKeywords parsedTab; idx:$[sample;(neg"i"$sqrt count docs)?count docs;til count docs]; keywords:docs idx; n:til count keywords; similarities:i.matrixFromRaggedList compareDocToCorpus[keywords]each n; // Find all the clusters clusters:cluster.i.similarityMatrix similarities>=minimum; clustersOfOne:1=count each clusters; if[not sample;:clusters where not clustersOfOne]; // Any cluster of 1 documents isn't a cluster, so throw it out outliers:raze clusters where clustersOfOne; // Only keep clusters where the count is greater than one clusters@:where 1<count each clusters; // Find the centroid of each cluster centroids:avg each keywords clusters; // Move each non-outlier to the nearest centroid nonOutliers:(til count docs)except idx outliers; nonOutliers 
cluster.groupByCentroids[centroids;docs nonOutliers] } ================================================================================ FILE: ml_nlp_code_dateTime.q SIZE: 5,914 characters ================================================================================ // code/dateTime.q - Nlp time utilities // Copyright (c) 2021 Kx Systems Inc // // Utilities for handling dates and times \d .nlp // @private // @kind function // @category nlpTimeUtility // @desc Pads a string containing a single integer to two digits // or extracts the last 2 digits from a string // @param day {string} Contains a date // @returns {string} Padded date to two digits tm.i.parseDay:{[day] -2#"0",day where day in .Q.n } // @private // @kind dictionary // @category nlpTimeUtility // @desc Dictionary mapping the months of the year // to a symbol denoting integer representation // @type dictionary tm.i.months:`jan`feb`mar`apr`may`jun`jul`aug`sep`oct`nov`dec!`$string 1+til 12 // @private // @kind function // @category nlpTimeUtility // @desc Convert a long-form or short-form month string to // a string denoting the month as an integer "feb"/"february" // become "02" // @param month {string} A month of the year in English // @returns {string} A padded integer representing the month of the year tm.i.parseMonth:{[month] -2#"0",string month^tm.i.months month:lower`$3 sublist month } // @private // @kind function // @category nlpTimeUtility // @desc Pad a string denoting a year to 4 digits // if input > 35 this is deemed to be 1900s // i.e. "20" -> "2020" / "44" -> "1944" // @param year {string} Contains a year // @returns {string} Padded year value tm.i.parseYear:{[year] -4#$[35<"I"$-2#year;"19";"20"],year } // @private // @kind function // @category nlpTimeUtility // @desc Convert year string to the entire date // encapsulating that year // @param year {string} A year // @returns {string} Date range from Jan 1 to Dec 31 of // the specified year tm.i.convY:{[year] "D"$year,/:(".01.01";".12.31") } // @private // @kind function // @category nlpTimeUtility // @desc Convert string containing yearMonth // to the date range encapsulating that month // i.e. "test 2020.02" -> 2020.02.01 2020.02.29 // "2019.02 test" -> 2019.02.01 2019.02.28 // @param text {string} Text containing yearMonth value // @returns {string} Date range for the month of the // provided yearMonth tm.i.convYearMonth:{[text] txt:regex.matchAll[;text]each regex.objects`year`month; matches:ungroup([format:"ym"]txt); updMatches:matches,'flip`txt`s`e!flip matches`txt; matches:value select format,last txt by s from updMatches; format:tm.i.formatYM/[matches`format]; format:raze@[format;i where 1<count each i:group format;:;" "]; 0 -1+"d"$0 1+"M"$"."sv tm.i[`parseYear`parseMonth]@'matches[`txt]idesc format } // @private // @kind function // @category nlpTimeUtility // @desc Separate YearMonth formats to year and month // i.e "ym" -> "y","m" // @param ym {string[]} The format for each date object // @returns {string} Formats of YearMonth objects separated tm.i.formatYM:{[ym] @[ym;where not counts;except[;raze ym where counts:1=count each ym]] } // @private // @kind function // @category nlpTimeUtility // @desc Convert string containing yearMonthDay // to the date range encapsulating that day // i.e. "test 2020.01.01" -> 2020.01.01 2020.01.01 // "2010.01.01 test" -> 2010.01.01 2010.01.01 // @param text {string} Text containing yearMonthDay value // @returns {string} Date range associated with the // provided yearMonthDay tm.i.convYearMonthDay:{[text] txt:regex.matchAll[;text]each regex.objects`year`month`day; matches:ungroup([format:"ymd"]txt); updMatches:matches,'flip`txt`s`e!flip matches`txt; matches:value select format,.nlp.cstring last txt by s from updMatches; format:tm.i.formatYMD/[matches`format]; format:tm.i.resolveFormat raze@[format;where 1<count each format;:;" "]; 2#"D"$"."sv tm.i[`parseYear`parseMonth`parseDay]@'matches[`txt]idesc format } // Get high-level stats per second with bulk multiplier seconds:$[`bulk=last scenario;.observer.bulkrows;1]*select count i by time.second from midtab; // 1_??? // Conglomerate transit time stats into a keyed table and grab message per second data from table and return stats1:`stats xcols update stats:`med`avg`max`drift,batching:first scenario,mode:last scenario from cl !/: vals,enlist drift; stats2:select maxmps:max x,medmps:med x,avgmps:avg x,batching:first scenario,mode:last scenario from seconds; :(stats1;stats2) }; // Set handle from procfile, open handles to feed, consumer, STP and TP .observer.init:{ .lg.o[`init;"Setting up process..."]; .proc.readprocfile .proc.file; .servers.startup[]; .observer.feedhandle:.servers.gethandlebytype[`feed;`any]; .observer.conshandle:.servers.gethandlebytype[`consumer;`any]; .observer.stphandle:.servers.gethandlebytype[`segmentedtickerplant;`any]; .observer.tphandle:.servers.gethandlebytype[`tickerplant;`any]; .observer.kxhandle:.servers.gethandlebytype[`tick;`any]; // If auto-run is on, begin tests if[.observer.autorun;.observer.startrun[]]; }; // Call init function .observer.init[]; ================================================================================ FILE: TorQ_tests_performance_code_tick.q SIZE: 1,383 characters ================================================================================ // KX Tick code if[200=system "t";system "t 0"] system "l ",(src:first (.Q.opt .z.x)`schemafile),".q" system "l ",getenv[`KDBTESTS],"/performance/settings/u.q" // if[not system"p";system"p 5010"] // \l tick/u.q \d .u ld:{if[not type key L::`$(-10_string L),string x;.[L;();:;()]];i::j::-11!(-2;L);if[0<=type i;-2 (string L)," is a corrupt log.
Truncate to length ",(string last i)," and restart";exit 1];hopen L}; tick:{init[];if[not min(`time`sym~2#key flip value@)each t;'`timesym];@[;`sym;`g#]each t;d::.z.D;if[l::count y;L::`$":",y,"/",x,10#".";l::ld d]}; endofday:{end d;d+:1;if[l;hclose l;l::0(`.u.ld;d)]}; ts:{if[d<x;if[d<x-1;system"t 0";'"more than one day?"];endofday[]]}; if[system"t"; .dotz.set[`.z.ts;{pub'[t;value each t];@[`.;t;@[;`sym;`g#]0#];i::j;ts .z.D}]; upd:{[t;x] if[not -16=type first first x;if[d<"d"$a:.z.P;.z.ts[]];a:"p"$a;x:$[0>type first x;a,x;(enlist(count first x)#a),x]]; t insert x;if[l;l enlist (`upd;t;x);j+:1];}]; if[not system"t";system"t 1000"; .dotz.set[`.z.ts;{ts .z.D}]; upd:{[t;x]ts"d"$a:.z.P; if[not -16=type first first x;a:"p"$a;x:$[0>type first x;a,x;(enlist(count first x)#a),x]]; f:key flip value t;pub[t;$[0>type first x;enlist f!x;flip f!x]];if[l;l enlist (`upd;t;x);i+:1];}]; \d . src:$["/" in src;(1 + last src ss "/") _ src; src]; / if src contains directory path, remove it .u.tick[src;first .proc.params`schemafile]; ================================================================================ FILE: TorQ_tests_performance_settings_consumer.q SIZE: 551 characters ================================================================================ // Settings file for consumer process // Server settings .servers.enabled:1b; .servers.CONNECTIONS:`tickerplant`segmentedtickerplant`feed`tick; .servers.USERPASS:`admin:admin; // Process settings .consumer.singlecols:`time`batch`mode`feedtime; .consumer.bulkcols:`time`sym`price`size`src`ex`cond`mode1`side`batch`mode`feedtime; .consumer.whereclause:((=;`mode;enlist `single);(=;`mode;enlist `bulk)); // Results table schema .consumer.results:flip `batching`pubmode`time`feedtime`consumertime`feedtotp`tptoconsumer`feedtoconsumer!"SSPPPNNN" $\: (); ================================================================================ FILE: TorQ_tests_performance_settings_database.q SIZE: 172 characters ================================================================================ singleupd:flip `time`sym`mode`feedtime!"PSSP" $\: (); updates:update `g#sym from flip `time`sym`price`size`src`ex`cond`mode1`side`batch`mode`feedtime!"PSFJSBCCSSSP" $\: (); ================================================================================ FILE: TorQ_tests_performance_settings_feed.q SIZE: 541 characters ================================================================================ // Server settings .servers.enabled:1b; .servers.CONNECTIONS:`tickerplant`segmentedtickerplant`observer`tick; .servers.USERPASS:`admin:admin; .servers.HOPENTIMEOUT:30000; // Feed parameters .feed.sym:`AMD`AIG`AAPL`DELL`DOW`GOOG`HPQ`INTL`IBM`MSFT; .feed.mode:" ABHILNORYZ"; .feed.cond:" 89ABCEGJKLNOPRTWZ"; .feed.ex:10b; .feed.src:`BARX`GETGO`SUN`DB; .feed.side:`buy`sell; .feed.maxprice:100.0; .feed.maxsize:50; .feed.looptime:00:01:00; // Create bulk update .feed.bulk:.feed.bulkrows ?' 
.feed[`sym`maxprice`maxsize`src`ex`cond`mode`side]; ================================================================================ FILE: TorQ_tests_performance_settings_observer.q SIZE: 778 characters ================================================================================ // Server settings .servers.CONNECTIONS:`tickerplant`segmentedtickerplant`feed`consumer`tick; .servers.USERPASS:`admin:admin; // List of all TP types to go through .observer.tplist:`defaultbatch`memorybatch`immediate`vanillaimm`vanillabatch`tickimm`tickbatch; // .observer.tplist:`vanillaimm`vanillabatch`tickimm`tickbatch; .observer.scenarios:.observer.tplist cross `single`bulk; // System line to reset TP, performance directory .observer.tpreset:"l ",getenv[`KDBCODE],"/processes/tickerplant.q"; .observer.tickreset:"l ",getenv[`KDBTESTS],"/performance/code/tick.q"; .observer.perfdir:getenv[`KDBTESTS],"/performance"; // Run tests on startup or not .observer.autorun:1b; // Size of bulk updates .observer.bulkrows:100; // Write results to disk .observer.savetodisk:1b; ================================================================================ FILE: TorQ_tests_performance_settings_testdb.q SIZE: 172 characters ================================================================================ singleupd:flip `time`sym`mode`feedtime!"PSSP" $\: (); updates:update `g#sym from flip `time`sym`price`size`src`ex`cond`mode1`side`batch`mode`feedtime!"PSFJSBCCSSSP" $\: (); ================================================================================ FILE: TorQ_tests_performance_settings_u.q SIZE: 485 characters ================================================================================ // Kx u namespace \d .u init:{w::t!(count t::tables`.)#()} del:{w[x]_:w[x;;0]?y}; .dotz.set[`.z.pc;{del[;x]each t}]; sel:{$[`~y;x;select from x where sym in y]} pub:{[t;x]{[t;x;w]if[count x:sel[x]w 1;(neg first w)(`upd;t;x)]}[t;x]each w t} add:{$[(count w x)>i:w[x;;0]?.z.w;.[`.u.w;(x;i;1);union;y];w[x],:enlist(.z.w;y)];(x;$[99=type v:value x;sel[v]y;@[0#v;`sym;`g#]])} sub:{if[x~`;:sub[;y]each t];if[not x in t;'x];del[x].z.w;add[x;y]} end:{(neg union/[w[;;0]])@\:(`.u.end;x)} ================================================================================ FILE: TorQ_tests_runall.q SIZE: 1,140 characters ================================================================================ // Define run-all function .k4.runall:{[run;res] // Find all the run scripts, generate strings and execute rundirs:dirs where `run.sh in' key each dirs:.Q.dd[run;] each key run; runtime:string .z.p; command:{1_string[x],"/run.sh -r ",y," -wq"}[;runtime] each rundirs; {show "Executing ",x;system x} each command; // Load in results and error CSVs files:.Q.dd[resdir;] each f where (f:key resdir:.Q.dd[res;`$string .z.d]) like "*.csv"; errors:0:[("PSIISSBSJJBBBI";enlist csv);first files]; results:0:[("PSIISSBSJJBBBBI";enlist csv);last files]; // Get errors from most recent log files and set results to local variables reclogs:.Q.dd[logdir;] each l where not (l:key logdir:.Q.dd[resdir;`logs]) like "*",ssr[string .z.d;".";"_"],"*"; logerr:err!read0 each err:reclogs where reclogs like "*err*"; `results`fails`errors set' (results;errors;logerr); }; // Only execute function if the necessary flags are passed in if[all `rundir`resdir in key args:.Q.opt .z.x; .k4.runall . 
hsym each `$first each args[`rundir`resdir]; show each ("Test results:";results;"Test failures:";fails;"Logged errors:";errors) ]; ================================================================================ FILE: TorQ_tests_runtests.q SIZE: 1,956 characters ================================================================================ // Set up results and logging directories .k4.setup:{[respath;testname] .os.md each (respath;rp:respath,string[.z.d],"/"); if[not 11h=type key hsym `$logpath:raze rp,"/logs/";.os.md logpath]; .proc.createlog[logpath;testname;`$ssr[string .z.p;"[D.:]";"_"];0b]; }; // Generate results files and handles to them .k4.handlesandfiles:{[dir;filename] h:hopen f:hsym `$dir,"/",filename; if[not hcount f;.lg.o[`writeres;"Creating file ",1_string f]]; :(h;f) }; // Write test results to disk .k4.writeres:{[res;err;respath;rtime;testname] // Create results directories and files and open handles, timestamp test results .os.md each (respath;rp:respath,string[.z.d],"/"); hf:.k4.handlesandfiles[rp;] each ("results_";"failures_") ,\: (raze "." vs string .z.d),".csv"; res:`runtime xcols update runtime:first rtime from delete timestamp from res; err:`runtime xcols update runtime:first rtime from delete timestamp from err; // If file is empty, append full results/error table to it, if not, drop the header row before appending .lg.o[testname;"Writing ",string[count KUTR]," results rows and ",string[count KUerr]," error rows"]; {neg[x] $[hcount y;1;0]_csv 0: z} .' hf ,' enlist each (res;err); hclose each first each hf; }; //-- SCRIPT START --// // Grab relevant command-line arguments clargs:({x};{"P"$x};{`$last "/" vs x}) @' first each (.Q.opt .z.x)[`testresults`runtime`test]; // Set up results and logging directories if not in debug mode and results directory defined if[01b~`debug`testresults in key .Q.opt .z.x;.[.k4.setup;clargs 0 2;{.lg.e[`test;"Error: ",x]}]]; // Load & run tests, show results KUltd each hsym`$.proc.params[`test]; KUrt[]; show each ("k4unit Test Results";KUTR;"k4unit Test Errors";KUerr); // If enabled write results to disk if[all `write`testresults in key .Q.opt .z.x;.[.k4.writeres;(KUTR;KUerr),clargs;{.lg.e[`test;"Error: ",x]}]]; if[not `debug in key .Q.opt .z.x;exit count KUerr]; ================================================================================ FILE: TorQ_tests_schemas_insertdata.q SIZE: 327 characters ================================================================================ stpHandle:gethandle[`stp1] wdbHandle:gethandle[`wdb1] testtrade:((5#`GOOG),5?`4;10?100.0;10?100i;10#0b;10?.Q.A;10?.Q.A;10#`buy) testquote:(10?`4;(5?50.0),50+5?50.0;10?100.0;10?100i;10?100i;10?.Q.A;10?.Q.A;10#`3) stpHandle @/: `.u.upd ,/: ((`trade;testtrade);(`quote;testquote)) wdbHandle(`.u.end;`.wdb.currentpartition) exit 0 ================================================================================ FILE: TorQ_tests_schemas_settings.q SIZE: 126 characters ================================================================================ .servers.CONNECTIONS:`wdb`segmentedtickerplant`hdb`idb`gateway`rdb .servers.startup[]; hdbdir: `$getenv[`TORQHOME],"/wdbhdb/" ================================================================================ FILE: TorQ_tests_stp_batching_settings.q SIZE: 395 characters ================================================================================ // IPC connection parameters .servers.CONNECTIONS:`rdb`segmentedtickerplant; .servers.USERPASS:`admin:admin;</span></div> </div></div> </td> </tr><tr class="group cursor-pointer 
Developing with kdb+ and the q language¶
kdb+ is
- a high-performance cross-platform historical timeseries columnar database
- an in-memory compute engine
- a realtime streaming processor
- an expressive query and programming language called q
For cloud deployment, see kdb Insights Core.
Architecture of kdb+ systems¶
A kdb+ tick based architecture can be used to capture, process and analyse vast amounts of real-time and historical data. The following diagram illustrates the components that are often found in a vanilla kdb+ tick setup:
Components¶
Data feed¶
This is a source of real-time data; for example, financial quotes and trades from Bloomberg or Refinitiv, or readings from a network of sensors.
Feedhandler¶
Parses data from the data feed to a format that can be ingested by kdb+. Multiple feed handlers can be used to gather data from a number of different sources and feed it to the kdb+ system for storage and analysis. KX’s Fusion interfaces connect kdb+ to a range of other technologies, such as R, Apache Kafka, Java, Python and C.
Tickerplant (TP)¶
A kdb+ process acting as a TP (tickerplant) captures the initial data feed, writes it to the log file and publishes these messages to any registered subscribers. Aims for zero latency. Includes ingesting data in batch mode. Manages subscriptions: adds and removes subscribers, and sends subscriber table definitions. Handles end-of-day (EOD) processing. tick.q represents a tickerplant and is provided as a starting point for most environments.
Best practices for tickerplants
Tickerplants should be lightweight, not storing data and using very little memory. For best resilience, and to avoid core resource competition, run them on their own cores.
TP Log¶
This is the file to which the Tickerplant logs the q messages it receives from the feedhandler. It is used for recovery: if the RDB has to restart, the log file is replayed to return to the current state.
Best practices for log files
Store the file on a fast local disk to minimize publication delay and I/O waits.
Real-time database (RDB)¶
A kdb+ process acting as an RDB (real-time database) subscribes to messages from the Tickerplant, stores them in memory, and allows this data to be queried intraday. At startup, the RDB sends a message to the tickerplant and receives a reply containing the data schema, the location of the log file, and the number of lines to read from the log file. It then receives subsequent updates from the TP as they are published. At end of day it usually writes intraday data to the Historical Database, and sends it a new EOD message. r.q represents an RDB and is provided as a starting point for most environments.
Best practices for real-time databases
RDBs queried intraday should exploit attributes in their tables. For example, a trade table might be marked as sorted by time (`s#time) and grouped by sym (`g#sym). RDBs require RAM as they are storing the intraday messages. Calculate how much RAM your RDB needs for a given table:
(Expected max # of messages) * schema cost * flexibility ratio
Schema cost: for a given row, a sum of the datatype size.
Flexibility ratio: 1.5 is a common value.
Real-time engine/subscriber (RTE/RTS)¶
A kdb+ process acting as an RTE (real-time engine) subscribes to the intraday messages and typically performs some additional function on receipt of new data – e.g. calculating an order book or maintaining a subtable with the latest price for each instrument. An RTE is sometimes referred to as an RTS (real-time subscriber).
Best practices for real-time subscribers
Write streaming analytics to compute the required results, rather than timed computations. Ensure analytics can deal with multiple messages, so nothing breaks if the tickerplant runs in batch mode. Check analytic run time versus expected TP publish intervals to ensure you don’t bottleneck. In general, look to the busiest and most stressful market day for this, and add additional scaling factors. E.g. if my TP publishes a message ~every 30ms, my analytic should take less than 30ms to run. To allow for message throughput to double in the TP, the analytic should run in <15ms.
Historical database (HDB)¶
A kdb+ process acting as an HDB (historical database) provides a queryable data store of historical data; for example, for creating customer reports on order execution times, or sensor failure analyses. Large tables are usually stored on disk partitioned by date, with each column stored as its own file. The dates are referred to as partitions and this on-disk structure contributes to the high performance of kdb+.
Best practices for historical databases
Attributes are key. Partition tables on disk on the most-queried column. If the first two columns are time and sym, sorting on time within sym partitions is assumed and provides a performance boost. Can add a grouping attribute for other highly-queried columns. When creating the database schema consider the symbol versus string type choice very carefully:
- Symbol type: use symbols for columns with highly repeating data that are queried most frequently e.g. sym, exchange, side etc.
- String type: any highly variable data e.g. order ID
Database sizing follows the same formula as the RDB sizing. Consider using compression for older data, or less-queried columns, to reduce on-disk size. Typically compression sees ⅕ the space usage. When compressing databases, choose compression algorithm and blocksizes through performance comparisons on typical queries.
Example HDB script¶
A q script named hdb.q that can be used by kdb+ to create an HDB process:
/q tick/hdb.q sym -p 5012
if[1>count .z.x;show"Supply directory of historical database";exit 0];
hdb:.z.x 0
/Mount the Historical Date Partitioned Database
@[{system"l ",x};hdb;{show "Error message - ",x;exit 0}]
Usage: q hdb.q SRC [-p 5012]
| Parameter Name | Description | Default |
|---|---|---|
| SRC | The directory used by the RDB to which it saves previous values at end-of-day | <none> |
| -p | listening port for client communication, for example, an RDB instructing the HDB to reload its DB or client queries against the HDB data | <none> |
Standard kdb+ command line options may also be passed. An HDB is empty until the RDB saves its first set of tables at end-of-day.
Gateway¶
The entry point into the kdb+ system. Responsible for routing incoming queries to the appropriate processes, and returning their results. Can connect both the real-time and historical data to allow users to query across both. In some cases, a gateway will combine the result of a series of queries to different processes.
Best practices for gateways
Run only lightweight code.
Track disconnections and queries submitted. Return sensible errors when queries fail. Use the deferred-response feature (V3.6) to avoid additional coding on the side of connecting non-kdb+ processes. Load-management: round-robin might not be the best option for your system. Consider other options specific to your APIs and load. Query Routing: A kdb+ framework for a scalable, load balanced system KX and the cloud¶ KX technology was created to address one of the most basic problems in high-performance computing: the inability of traditional relational database technology to keep up with the explosive escalation of data volumes. Ever since, our singular goal has been to provide clients and partners with the most efficient and flexible tools for ultra-high-speed processing of real-time, streaming and historical data. The resulting KX streaming-analytics platform provides a framework for designing, building and deploying data-capture systems and visualizations. Designed from the start for extreme scale, and running on industry-standard servers, KX technology has been proven to solve complex problems faster than any of its competitors. The basis for KX technology is kdb+, the world’s fastest timeseries database. It is a uniquely integrated platform combining: - a high-performance timeseries columnar database - an in-memory compute engine - a real-time streaming processor - an expressive query and programming language, q Solutions created on the KX framework have extensive redundancy, fault tolerance, query filtering, alerting, reporting and visualization features. They are used for stock-market analysis, algorithmic trading, predictive analytics, scientific analysis, and embedded-sensor data capture for IoT use cases. Why cloud?¶ KX is a certified Amazon Solutions Partner and Google Cloud Partner and has successfully deployed on numerous public, private and hybrid clouds. Some motivations for cloud deployments: - Instant scaling - Data analytics processing and/or storage capacity can be scaled up instantly, on-demand, and without putting extra hardware in your own data center. - Bursty workloads - Cloud may be ideal for burst processing of your compute load. For example, you might need to run hundreds of cores for just 30 minutes in a day for a specific risk-calculation workload. - Periodic data access - Your quants and developers might want to work on kdb+ only for a few hours a day during the work week. This is a suitable model for an on-demand or a spot-pricing service. - Development/UAT/Prod life-cycles - These can be hosted on their own instances and spun down at the end of each phase. Small memory/core instances can be cheap, and enlarged or shrunk at need. Deployments¶ KX customers have deployed kdb+ and other KX solutions successfully in the cloud, including the three main cloud vendors: Database: tables in the filesystem¶ Roughly speaking, kdb+ is what happens when q tables are persisted and then mapped back into memory for operations. — Jeffry A. Borror, Q for Mortals Tables are first-class entities in q. Large q tables can be held in memory, but memory is finite and every process eventually terminates. Sooner or later we need to persist tables in the filesystem. We will also need to perform operations on tables that are too large to hold in memory. Roughly speaking, q tables and columns are represented in the filesystem as eponymous directories and binary files. How you serialize a table depends on its size and how you need to use it. 
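Concretely, the first two options in the table below differ only in the path passed to set : a plain file symbol writes the whole table as a single object, while a trailing slash (with symbol columns enumerated by .Q.en ) splays it into one file per column. A minimal sketch, with hypothetical paths:
q)trades:([]sym:`AAPL`MSFT`AAPL;price:171.2 310.4 171.5;size:100 200 300)
q)`:db/trades_obj set trades                / object: one binary file
q)`:db/trades/ set .Q.en[`:db;trades]       / splayed: one file per column, syms enumerated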
| serialization | representation | best where | |---|---|---| | object | single binary file | small and most queries use most columns | | splayed table | directory of column files | up to 100 million rows | | partitioned table | table partitioned by e.g. date, with a splayed table for each date | more than 100 million records; or growing steadily | | segmented database | partitioned tables distributed across disks | tables larger than disks; or you need to parallelize access | Object¶ Q will serialize and file any object as a single binary file – the simplest way to persist a table. A database with tables trades and quotes , and a sym list: db/ ├── quotes ├── sym └── trades By specifying the extension (e.g. CSV, XLS) you can also export the table in another format. If most queries on a table do not need all the columns for each query consider splaying it. Splayed table¶ A table is splayed by storing each of its columns as a single file. The table is represented by a directory. db/ ├── quotes/ | ├── time | ├── sym | └── price └── trades/ ├── time ├── sym ├── price └── vol With a splayed table, a query deserializes into memory only files for the column/s it requires. If the table either - grows - holds more than 100 million records - has columns that exceed the maximum size of a vector in memory consider partitioning it. Partitioned table¶ The records of a partitioned table are divided in its root directory between multiple partition directories. The table is partitioned by the values of a single column. Each partition contains records that have the same value in the partitioning column. With timeseries data, this is most commonly a date or time. db/ ├── 2020.10.03/ │ ├── quotes/ │ │ ├── price │ │ ├── sym │ │ └── time │ └── trades/ │ ├── price │ ├── sym │ ├── time │ └── vol ├── 2020.10.05/ │ ├── quotes/ │ │ ├── price │ │ ├── sym │ │ └── time │ └── trades/ │ ├── price │ ├── sym │ ├── time │ └── vol └── sym The partition directory is named for its partition value and contains a splayed table with just the records that have that value. If either - your table exceeds the size of your storage - you need to parallelize access to it - you want to partition it by a datatype that is not integer-based, e.g. a symbol consider segmenting it across multiple storage devices. Segmented database¶ The root directory of a segmented database contains only two files: par.txt : a text file listing the paths to the segments- the sym file for enumerated symbol columns Segments are stored outside the root, usually on various volumes. Each segment contains a partitioned table. DISK 0 DISK 1 DISK 2 db/ db/ db/ ├── par.txt ├── 2020.10.03/ ├── 2020.10.04/ └── sym │ ├── quotes/ │ ├── quotes/ │ │ ├── .d │ │ ├── .d │ │ ├── price │ │ ├── price │ │ ├── sym │ │ ├── sym │ │ └── time │ │ └── time │ └── trades/ │ └── trades/ │ ├── .d │ ├── .d │ ├── price │ ├── price │ ├── sym │ ├── sym │ ├── time │ ├── time │ └── vol │ └── vol ├── 2020.10.05/ ├── 2020.10.06/ │ ├── quotes/ │ ├── quotes/ .. .. Dividing the table between storage devices lets you - store very large tables - parallelize queries - optimize updates Queries on serialized tables¶ Deserialization and reserialization is implicit in qSQL queries. 
q)select city,pop,country.code from `:linked/cities city pop code ---------------------- Tokyo 37435191 81 Delhi 29399141 91 Shanghai 26317104 86 q)`:linked/countries upsert (`Brazil;`$"South America";55) `:linked/countries q)get`:linked/countries country| cont code -------| ------------------ China | Asia 86 India | Asia 91 Japan | Asia 81 Brazil | South America 55 Operations on serialized tables¶ Some operators and keywords work on some serialized tables. For example, cols works on tables in memory or mapped to memory, and on filesymbols for splayed tables but not tables serialized as an object. Serialize as an object Q for Mortals §14. Introduction to kdb+ Developer tools¶ KX Developer¶ KX Developer is a visual environment used to manage, manipulate and explore massive datasets in real-time by exploiting kdb+’s server-based analytics technology. KX Analyst¶ KX Analyst is a version of KX Developer extended for use by enterprise customers. It supports a wide range of users, from non-technical analysts to experienced q programmers. KX Libraries¶ KX Libraries are a collection of useful q libraries for q development and build pipelines. The KX Libraries package provides libraries for Markdown documentation generation from q source, unit testing and property-based testing frameworks, static code linting, code profiling, code coverage, and an expressive data visualization library. KX Dashboards¶ KX Dashboards offers an easy-to-use, yet powerful drag-and-drop interface to allow creators to build dashboards without the need for programming experience. KX Delta Platform¶ KX Delta Platform is a suite of products for building kdb+ user interfaces in HTML5/JS. It has three modules. KX Control¶ KX Control is a client-server application that allows you to design, build, deploy and manage data-capture/streaming systems. KX Stream¶ KX Stream is a platform for capturing, storing and enriching large volumes of data. It provides a framework to develop and deploy customized analytics that quickly perform complex calculations on large volumes of real-time and historical market data. Stream for KX deployed in a financial market KX Monitoring¶ KX Monitoring gathers and displays information about a server’s status. Each server requires a SysMon Java application installed and running to collect the server and process statistics. The dashboards provide powerful tools for visualizing and analyzing the current and historical state of the system.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="27"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Tables¶ Tables are first-class objects in q. Construct¶ Construct a small table using table notation. q)ec1:([]city:`Istanbul`Moscow`London`StPetersburg;country:`Turkey`Russia`UK`Russia;pop:15067724 12615279 9126366 5383890) q)ec1 city country pop ----------------------------- Istanbul Turkey 15067724 Moscow Russia 12615279 London UK 9126366 StPetersburg Russia 5383890 Equals means equals In q names are assigned values with the colon. The equals sign = is the Equals operator. It returns a boolean. q)a:5 q)a+2 / a gets 5 7 q)a=2 / no, it is not 0b Unlike classical relational databases, q tables are ordered. You can index them. A table is a list of dictionaries. Any single row is a dictionary. 
q)ec1 2 city | `London country| `UK pop | 9126366 And a list of dictionaries with the same keys is – a table. q)ec1 2 0 city country pop ------------------------- London UK 9126366 Istanbul Turkey 15067724 Flipping a table gets you its columns as a dictionary of vectors. q)flip ec1 city | Istanbul Moscow London StPetersburg country| Turkey Russia UK Russia pop | 15067724 12615279 9126366 5383890 Flipping it again puts you back where you started. q)flip flip ec1 city country pop ----------------------------- Istanbul Turkey 15067724 Moscow Russia 12615279 London UK 9126366 StPetersburg Russia 5383890 So another way to construct a table: q)ec2:flip`city`country`pop!(`Berlin`Kyiv`Madrid;`Germany`Ukraine`Spain;3748148 3703100 3223334) q)ec2 city country pop ---------------------- Berlin Germany 3748148 Kyiv Ukraine 3703100 Madrid Spain 3223334 CSVs are a common source of tables. Work¶ There are two ways to work with tables and you can mix them to suit yourself. QSQL queries are very like SQL. (Perhaps a little less verbose.) q)select city,pop from ec2 upsert ec1 city pop --------------------- Berlin 3748148 Kyiv 3703100 Madrid 3223334 Istanbul 15067724 Moscow 12615279 London 9126366 StPetersburg 5383890 Or you can think in terms of the underlying q objects. The Join operator , catenates lists. q)1 2 3,10 20 1 2 3 10 20 q)"abc","def" "abcdef" Two tables are two lists of dictionaries. q)ec2,ec1 city country pop ----------------------------- Berlin Germany 3748148 Kyiv Ukraine 3703100 Madrid Spain 3223334 Istanbul Turkey 15067724 Moscow Russia 12615279 London UK 9126366 StPetersburg Russia 5383890 Keys¶ Setting one or more columns of a table as its key divides it into two tables (the keyed and non-keyed columns) and from them makes a dictionary. The dictionary’s key is the key column/s of the table. Its value is the unkeyed column/s. Both key and value are tables. Persist¶ Any object can be persisted to a file. q)conts:`Africa`Asia`Australia`Europe`NorthAmerica`SouthAmerica q)`:path/to/continents set conts `:path/to/continents q)get `:path/to/continents `Africa`Asia`Australia`Europe`NorthAmerica`SouthAmerica q)`:path/to/ec set ec `:path/to/ec q)select from `:path/to/ec where pop>5000000 city country pop ----------------------------- Istanbul Turkey 15067724 Moscow Russia 12615279 London UK 9126366 StPetersburg Russia 5383890 Go large¶ Flat tables are limited by the absolute maximum size of a vector in kdb+. Tables up to 100 million rows can be splayed (one file for each column) across directories. If your table is larger – or grows – you can partition it; usually by time period. If your table exceeds disk size, you can segment it. (This can also improve I/O performance of a partitioned table.) get , set , save Splayed tables, Partitioned tables Q for Mortals §8. Tables, §14. Introduction to kdb+ abs ¶ Absolute value abs x abs[x] Where x is a numeric or temporal, returns the absolute value of x . Null is returned if x is null. q)abs -1.0 1f q)abs 10 -43 0N 10 43 0N abs is a multithreaded primitive. Implicit iteration¶ abs is an atomic function. q)abs(10;20 -30) 10 20 30 It applies to dictionaries and tables. Domain and range¶ domain b g x h i j e f c s p m d z n u v t range i . i h i j e f i . 
p m d z n u v t Range: ihjefpmdznuvt</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="28"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of special characters // @type string regex.i.patterns.money:"[$¥€£¤฿]?\\s*((?<![.0-9])([0-9][0-9, ]*(\\.", "([0-9]{0,2})?)?|\\.[0-9]{1,2})(?![.0-9]))\\s*((hundred|thousand|million", "|billion|trillion|[KMB])?\\s*([$¥€£¤฿]|dollars?|yen|pounds?|cad|usd|", "gbp|eur))|[$¥€£¤฿]\\s*((?<![.0-9])([0-9][0-9, ]*(\\.([0-9]{0,2})?)?|", "\\.[0-9]{1,2})(?![.0-9]))\\s*((hundred|thousand|million|billion|", "trillion|[KMB])\\s*([$¥€£¤฿]|dollars?|yen|pounds?|cad|usd|gbp|eur)?)?" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of phone number characters // @type string regex.i.patterns.phoneNumber:"\\b((\\+?\\s*\\(?[0-9]+\\)?[-. /]?)?\\(?[0-9]+", "\\)?[-. /]?)?[0-9]{3}[-. ][0-9]{4}(\\s*(x|ext\\s*.?|extension)[ .-]*[0-9]", "+)?\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of email address characters // @type string regex.i.patterns.emailAddress:"\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of url characters // @type string regex.i.patterns.url:"((https?|ftps?)://(www\\d{0,3}\\.)?|www\\d{0,3}\\.)", "[^\\s()<>]+(?:\\([\\w\\d]+\\)|([^[:punct:]\\s]|/))" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of zipcode characters // @type string regex.i.patterns.zipCode:"\\b\\d{5}\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of postal code characters // @type string regex.i.patterns.postalCode:"\\b[a-z]\\d[a-z] ?\\d[a-z]\\d\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of postal or zip code characters // @type string regex.i.patterns.postalOrZipCode:"\\b(\\d{5}|[a-z]\\d[a-z] ?\\d[a-z]\\d)\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of date separator characters // @type string regex.i.patterns.dateSeparate:"[\\b(of |in )\\b\\t .,-/\\\\]+" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of date characters // @type string regex.i.patterns.day:"\\b[0-3]?[0-9](st|nd|rd|th)?\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of monthly characters // @type string regex.i.patterns.month:"\\b([01]?[0-9]|jan(uary)?|feb(ruary)?|mar(ch)?|", "apr(il)?|may|jun(e)?|jul(y)?|aug(ust)?|sep(tember)?|oct(ober)?|nov(ember)?", "|dec(ember)?)\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of yearly characters // @type string regex.i.patterns.year:"\\b([12][0-9])?[0-9]{2}\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of year characters in full // @type string regex.i.patterns.yearFull:"\\b[12][0-9]{3}\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of am characters // @type string regex.i.patterns.am:"(a[.\\s]?m\\.?)" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of pm characters // @type string regex.i.patterns.pm:"(p[.\\s]?m\\.?)" // @private // @kind data // @category 
nlpRegexUtilityPattern // @desc A string of time (12hr) characters // @type string regex.i.patterns.time12:"\\b[012]?[0-9]:[0-5][0-9](h|(:[0-5][0-9])([.:][0-9]", "{1,9})?)?\\s*(",sv["|";regex.i.patterns`am`pm],")?\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of time (24hr) characters // @type string regex.i.patterns.time24:"\\b[012][0-9][0-5][0-9]h\\b" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of all time characters // @type string regex.i.patterns.time:"(",sv["|";regex.i.patterns`time12`time24],")" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of year/month characters as a list // @type string regex.i.patterns.yearMonthList:"(",sv["|";regex.i.patterns`year`month],")" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of year/month/date characters // @type string regex.i.patterns.yearMonthDayList:"(",sv["|"; regex.i.patterns`year`month`day],")" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of year/month characters along with date separators // @type string regex.i.patterns.yearMonth:"(",sv[regex.i.patterns.dateSeparate; 2#enlist regex.i.patterns.yearMonthList],")" // @private // @kind data // @category nlpRegexUtilityPattern // @desc A string of year/month/date characters along with date // separators // @type string regex.i.patterns.yearMonthDay:"(",sv[regex.i.patterns.dateSeparate; 3#enlist regex.i.patterns.yearMonthDayList],")" // @kind function // @category nlpRegex // @desc Compile a regular expression pattern into a regular // expression embedPy object which can be used for matching // @params patterns {string} A regex pattern // @params ignoreCase {boolean} Whether the case of the string is to be ignored // @return {<} The compiled regex object regex.compile:{[patterns;ignoreCase] case:$[ignoreCase;regex.i.re`:IGNORECASE;0]; regex.i.re[`:compile][pydstr patterns;case] } // @kind function // @category nlpRegex // @desc Finds all the matches in a string of text // @params patterns {<} A regex pattern as an embedPy object // @params text {string} A piece of text // @returns {::|string[]} If the pattern is not present in the text a null // is returned. Otherwise, the pattern along with the index where the // pattern begins and ends is returned regex.i.matchAll:.p.eval["lambda p,t:[[x.group(),x.start(),x.end()]", "for x in p.finditer(t)]";<] regex.matchAll:{cstring regex.i.matchAll[x;pydstr cstring y]} // @kind function // @category nlpRegex // @desc Compile all patterns into regular expression objects // @return {<} The compiled regex object regex.objects:regex.compile[;1b]each 1_regex.i.patterns ================================================================================ FILE: ml_nlp_code_sent.q SIZE: 8,847 characters ================================================================================ // code/email.q - Nlp sentiment utilities // Copyright (c) 2021 Kx Systems Inc // // Utilities for sentiment analysis \d .nlp // @private // @kind function // @category nlpSentUtility // @desc Create a regex patterns used for tokenization // @returns {<} The compiled regex object sent.i.tokenPattern:{ rightFacingEmoticons:"[<>]?[:;=8][\\-o\\*\\']?[\\)\\]\\(\\[dDpP/\\:\\}\\{@", "\\|\\\\]"; / n.b. 
Left-facing rarely used miscEmoticons:"<3|[0o][._][0o]|</3|\\\\o/|[lr]&r|j/[jkptw]|\\*\\\\0/\\*|v\\", ".v|o/\\\\o"; urlStart:"https?://"; // Match any words word:"\\b(?:the shit|the bomb|bad ass|yeah right|cut the mustard|kiss of ", "death|hand to mouth|sort of|kind of|kind-of|sort-of|cover-up|once-in-a-", "lifetime|self-confident|short-sighted|short-sightedness|son-of-a-bitch)", "\\b|[\\w]{2,}(?:'[ts])?"; text:"(?:",urlStart,"|",rightFacingEmoticons,"|",miscEmoticons,"|",word,")"; regex.compile[;1b]text }[] // @private // @kind function // @category nlpSentUtility // @desc Tokenizer specifically for sentiment analyzer // (won't work for general purpose tokenizing) // @param text {string} The text to be tokenized // @returns {symbol[]} The tokens of the text // (each word/emoticon ends up in its own token) sent.i.tokenize:{[text] `$regex.matchAll[sent.i.tokenPattern;text][;0] } // @private // @kind function // @category nlpSentUtility // @desc Check for added emphasis resulting from exclamation points // (up to 4 of them) using empirically derived mean sentiment intensity. // Ratings increase for exclamation points // @param text {string} The complete sentence // @returns {float} An amount to increase the sentiment by sent.i.amplifyEP:{[text] .292*4&sum"!"=text } // @private // @kind function // @category nlpSentUtility // @desc Check for added emphasis resulting from question marks // (2 or 3+) using empirically derived mean sentiment intensity rating. // Ratings increases for question marks // @param text {string} The complete sentence // @returns {float} An amount to increase the sentiment by sent.i.amplifyQM:{[text] (0 0 .36 .54 .96)4&sum"?"=text } // @private // @kind data // @category nlpSentUtility // @desc Positive booster words. This increases positive valences // @type symbol[] sent.i.posBoosters:`$( "absolutely";"amazingly";"awfully";"completely";"considerably";"decidedly"; "deeply";"effing";"enormously";"entirely";"especially";"exceptionally"; "extremely";"fabulously";"flipping";"flippin";"fricking";"frickin"; "frigging";"friggin";"fully";"fucking";"greatly";"hella";"highly";"hugely"; "incredibly";"intensely";"majorly";"more";"most";"particularly";"purely"; "quite";"really";"remarkably";"so";"substantially";"thoroughly";"totally"; "tremendously";"uber";"unbelievably";"unusually";"utterly";"very"); // @private // @kind data // @category nlpSentUtility // @desc Negative booster words. This increase negative valences // @type symbol[] sent.i.negBoosters:`$( "almost";"barely";"hardly";"just enough";"kind of";"kinda";"kindof"; "kind-of";"less";"little";"marginally";"occasionally";"partly";"scarcely"; "slightly";"somewhat";"sort of";"sorta";"sortof";"sort-of"); // @private // @kind data // @category nlpSentUtility // @desc The co-efficient how much boosters increase sentiment // @type float sent.i.BOOSTER_INCR:.293 // @private // @kind data // @category nlpSentUtility // @desc The co-efficient how much allcaps increase sentiment // @type float sent.i.ALLCAPS_INCR:.733 // @private // @kind data // @category nlpSentUtility // @desc A dictionary mapping all possible boosters // to their associated values // @type dictionary sent.i.Boosters:(!). 
flip(sent.i.posBoosters,\:sent.i.BOOSTER_INCR), (sent.i.negBoosters,\:neg sent.i.BOOSTER_INCR) // @private // @kind function // @category nlpSentUtility // @desc Add weight for "booster" words like "really", or "very" // @param tokens {symbol[]} The tokenized sentence // @param isUpperCase {boolean[]} A vector where an element is 1b if the // associated token is upper case // @param valences {float[]} The sentiment of each token // @returns {float} The modified valences sent.i.applyBoosters:{[tokens;isUpperCase;valences] weight:sent.i.Boosters tokens; // Inc degree of capitalized boosters whereUpper:where isUpperCase; weight[whereUpper]+:sent.i.ALLCAPS_INCR*signum weight whereUpper; // Add weight to next 3 tokens (add/remove 3 dummy vals in case booster // is last token) boosts:-3_@[(3+count valences)#0f;i+/:1 2 3;+; weight[i:where not null weight]*/:1 .95 .9]; // Add extra weight valences+boosts*signum valences }</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="29"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Pattern Matching¶ Pattern matching allows an object such as a list or dictionary to be matched to a pattern, assigning variables to its parts, checking types, and/or modifying values via a filter function. It can simplify unpacking multiple objects passed to and returned from functions, and reduce the overhead of type checking. Assignment¶ The left side of the assignment operator may be a pattern. Various kinds of patterns have different effects. When used in an assignment, the pattern must have parentheses around it. A failed match results in no variables being changed. q)(b;c):2 3 q)b 2 q)c 3 The return value of the match is the entire assigned object, including any modifications from filter functions. q)a:(b;:1+):1 2 q)a 1 3 q)b 1 Function parameters¶ Pattern matching can also be used in the parameter list of a function, in which case the parentheses are not necessary unless the pattern requires them (such as a list pattern). q)f:{[(a;b);c]a+b+c} q)f[1 2;3] 6 Pattern conditional¶ The pattern conditional takes the form :[v;p1;r1;p2;r2;...;rd] where v is a value to be matched, p1 , p2 , ... are patterns and r1 , r2 , ... rd are the corresponding return values. The value is matched to the patterns in turn, and the value of the expression is the return value corresponding to the first successful match, or rd if no patterns match. Similarly to the regular conditional, the result expressions corresponding to failed matches, as well as any match after the first successful match, are not evaluated. q)a: :[1f;r:`f;"float";r:`i;"int";"other"] q)a "float" q)r 1f q)a: :[1i;r:`f;"float";r:`i;"int";"other"] q)r 1i q)a "int" q)a: :[1;r:`f;"float";r:`i;"int";"other"] q)a "other" Types of patterns¶ Null¶ The null pattern matches anything. It cannot be used as the main pattern in an assignment, but it can appear as a component of more complex patterns by elision. Constant value¶ The simplest pattern is a constant (atom or list). If the assigned value exactly matches (see ~), the assignment does nothing. If the values don't match, a 'match error is thrown. q)(1):1 q)(`a):`a q)(1):2 'match [0] (1):2 ^ q)(1 2):1 2 q)(1 2):1 3 'match [0] (1 2):1 3 ^ Name¶ A name is an identifier used as a pattern. 
The variable with the name is set to the matched value. On its own, this is equivalent to a simple assignment, but a name pattern can be used as a component of more complex patterns. q)(a):1 3 q)a 1 3 Name with index¶ A name can be augmented with an index, similar to indexed assignment. The index is not a pattern but a value. q)a:1 2 3 q)(a[1]):4 q)a 1 4 3 List¶ A list pattern looks like a general list. Each element of the list is a pattern itself. Combined with name patterns, this can be used to assign multiple variables in a single assignment. q)(b;c):2 3 q)b 2 q)c 3 The length of the pattern must match the length of the assigned value, and each element is matched in turn. q)(b;c):2 3 4 'length q)(b;c;3):2 3 4 'match [0] (b;c;3):2 3 4 ^ Since a table can be used as a list, it can match a list pattern: q)(a;b):([]colA:1 2;colB:3 4) q)a colA| 1 colB| 3 q)b colA| 2 colB| 4 Null patterns can be used by eliding items from the list pattern. In this case, the element is checked for existence but its value is not matched. q)(a;b;):1 2 'length [0] (a;b;):1 2 ^ q)(a;b;):1 2 3 Dictionary¶ A dictionary pattern can be made using the ! operator or using the bracketed dictionary syntax. Each value in the dictionary is a pattern. The values are matched with those with the same key in the assigned value. The assigned value may have additional keys that are ignored. q)(1 2!(one;two)):1 2!"ab" q)one "a" q)two "b" q)(1 2!(one;two)):1 2 3!"abc" q)(1 2!(one;two)):1 3!"ac" 'match [0] (1 2!(one;two)):1 3!"ac" ^ q)([four:d]):`one`two`three`four`five!1 2 3 4 5 q)d 4 As with lists, null patterns can be used. For the bracketed syntax, this means not putting a value after the colon for a key. q)([one:;four:d]):`one`two`three`four`five!1 2 3 4 5 q)([six:;four:d]):`one`two`three`four`five!1 2 3 4 5 'match [0] ([six:;four:d]):`one`two`three`four`five!1 2 3 4 5 ^ Table¶ Tables can also be used as patterns similarly to dictionaries. q)([]cc:e):([]aa:1 2;bb:3 4;cc:5 6) q)e 5 6 q)([k1:f]cc:e):([k1:7 8]aa:1 2;bb:3 4;cc:5 6) q)f 7 8 Operator¶ Certain operators can be used as patterns. Currently only ! and flip (for dict-to-table conversion) can be used in this way. q)(flip([a;b])):([]a:1 2;b:3 4) q)a 1 2 q)b 3 4 q)(a!b):1 2!"ab" q)a 1 2 q)b "ab" q)(a!):1 2!3 4 q)a 1 2 Type check¶ The type check pattern takes the form p:`x , where p is a pattern (including the null pattern) and x is the type character for the type being checked. A lowercase letter matches an atom and an uppercase letter matches a list. If the type is correct, the pattern match proceeds to p , otherwise a 'type error is thrown. q)(:`f):3f q)(:`f):3e 'type [0] (:`f):3e ^ q)((a;b):`F):3 4f q)a 3f q)b 4f q)((a;b):`F):3 4e 'type [0] ((a;b):`F):3 4e ^ Filter function¶ The filter function pattern takes the form p:expr where expr is an expression that returns a callable (such as a lambda, projection or operator). The result of expr is called on the value from the assigned value, and the result is matched to p . q)(a:3+):4 q)a 7 q)tempCheck:{$[x<0;'"too cold";x>40;'"too hot";x]} q)c2f:{[x:tempCheck]32+1.8*x} q)c2f -4.5 'too cold q)c2f 42.8 'too hot q)c2f 20 68f Parallel processing¶ The iterator Each Parallel ': (or its mnemonic keyword peach ) delegates processing to secondary tasks for parallel execution. This can be useful, for example, for computationally expensive functions, or for accessing several drives at once from a single CPU. 
To execute in parallel, start kdb+ with multiple secondary processes, using -s in the command line, and (since V3.5) the \s system command. Each Parallel iterates a unary value: the argument list of the derived function is divided between secondary processes for evaluation. The result of m':[x] is exactly the same as m'[x] . If no secondary tasks are available, performance is the same as well. Syntax: (f':) x , f':[x] , f peach x where f is a unary value and the items of list x are in its domain. q)f:{sum exp x?1.0} q)\t f each 2#1000000 132 q)\t f peach 2#1000000 / with 2 CPUs 70 Use the Apply operator to project a higher-rank value over argument pairs (or triples, etc.). For example, x g'y <=> g'[x;y] <=> .[g;]'[flip(x;y)] . Thus q)g:{sum y*exp x?1.0} q)\ts g'[2#1000000;2 3] 57 16777856 q)\ts .[g;]peach flip(2#1000000;2 3) 32 1744 The secondary processes used by Parallel Each and peach are either threads or processes according to the sign of the value used in the command line. Threads¶ Globals¶ The function f is executed within the secondary processes, unless the list x is a single-item list, in which case the function is executed within the main kdb+ thread. Only the main kdb+ thread may update global variables The function executed with peach is restricted to updating local variables only. Thus: q){`a set x} peach enlist 0 works, as single-item list shortcuts to execute on the main kdb+ thread q){`a set x} peach 0 1 fails and signals noupdate as it is executed from within secondary threads. Table counts in a partitioned database peach defaults to each when no secondary threads are specified on startup. It then executes on the only available thread, the main kdb+ thread. q){`a set x} peach 0 1 works when no secondary threads are specified, as peach defaults to each . The algorithm for grouping symbols differs between secondary threads and the main kdb+ thread. The main kdb+ thread uses an optimization not available to the secondary threads. E.g. kdb+ started with two secondary threads q)s:100000000?`3 q)\t {group s} peach enlist 0 / defaults to main thread as only single item 2580 q)\t {group s} peach 0 1 / group in secondary threads, can't use optimized algorithm 9885 However, grouping integers behaves as expected q)s:100000000?1000 q)\t {group s} peach enlist 0 2308 q)\t {group s} peach 0 1 2802 Perfect scaling may not be achieved, because of resource clashes. Number of cores/secondary threads¶ A vector with n items peached with function f with s secondary processes on m cores is distributed such that threads are preassigned which items they will be responsible for processing, e.g. for 9 jobs over 4 threads, thread #0 will be assigned elements 0, 4, 8; if each job takes the same time to complete, then the total execution time of jobs will be quantized according to #jobs mod #cores, i.e. with 4 cores, 12 jobs should execute in a similar time as 9 jobs (assuming #secondary processes≥#cores). Sockets and handles¶ Handles between threads A handle must not be used concurrently between threads as there is no locking around a socket descriptor, and the bytes being read/written from/to the socket will be garbage (due to message interleaving) and most likely result in a crash. Since V3.0, a socket can be used from the main thread only, or if you use the one-shot sync request syntax as q)`:localhost:5000 "2+2" peach forms the basis for a multithreaded HDB. For illustration, consider the following query. 
q){select max price by date,sym from trade where date=d} peach date This would execute a query for each date in parallel. The multithreaded HDB with par.txt hides the complexity of splitting the query up between threads and aggregating the results. Memory usage¶ Each secondary thread has its own heap, a minimum of 64MB. Since V2.7 2011.09.21, .Q.gc[] in the main thread collects garbage in the secondary threads too. Automatic garbage collection within each thread (triggered by a wsfull ), or hitting the artificial heap limit as specified with -w on the command line) is executed only for that particular thread, not across all threads. Symbols are internalized from a single memory area common to all threads. Processes (distributed each)¶ Since V3.1, peach can use multiple processes instead of threads, configured through the startup command-line option -s with a negative integer, e.g. -s -4 . Unlike multiple threads, the distribution of the workload is not precalculated, and is distributed to the secondary processes as soon as they complete their allocated items. All data required by the peached function must either already exist on all secondary processes, or be passed as an argument. Argument sizes should be minimized because of IPC costs. The motivating use case for this mode is multiprocess HDBs, combined with non-compressed data and .Q.MAP[] . Secondary processes must be started explicitly and .z.pd set to a vector of their connection handles, or a function that returns it. These handles must not be used for other messages: peach will close them if it receives anything other than a response message. e.g. q).z.pd:{n:abs system"s";$[n=count handles;handles;[hclose each handles;:handles::`u#hopen each 20000+til n]]} q).z.pc:{handles::`u#handles except x;} q)handles:`u#`int$(); .Q.fc (parallel on cut) Q for Mortals §A.68 peach</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="30"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Serverless q/kdb+ on AWS Lambda¶ Although kdb+ is already widely used within AWS, we are often asked by users to explain how we run kdb+ under a “serverless” framework. One way of running kdb+ in this mode is to use the AWS Lambda service. AWS Lambda is an event-driven, serverless computing platform. It runs code in response to events and automatically manages the computing resources required by that code. Lambda can be described as a type of serverless Function-as-a-Service (FaaS). FaaS is one approach to building event-driven computing systems. It relies on functions as the unit of deployment and execution and provides provision-free scalability and built-in reliability. Lambda functions can be triggered by a variety of events that occur on AWS or on supporting third-party services. They enable you to build reactive, event-driven systems. When there are multiple, simultaneous events to respond to, Lambda runs more copies of the function in parallel and scales with the size of the workload. Benefits¶ No servers to provision¶ AWS Lambda can run your code without requiring you to provision or manage servers. In this case, with kdb+, you can write your q script and upload it to Lambda, which will manage the execution environment. High availability¶ Serverless applications have built-in availability and fault tolerance. 
You don’t need to architect for these capabilities in your q/kdb+ code as the services running the application provide them by default. Continuous scaling¶ AWS Lambda can scale your kdb+ application by running code in response to each event trigger. Your code runs in parallel and processes each trigger individually, scaling precisely with the size of the workload. Cost per execution¶ AWS Lambda charges for every 100ms your code executes and the number of times your code is triggered. There is no cost when your code isn’t running. Using Lambda with other AWS services¶ AWS Lambda integrates with a range of other AWS services. Triggers can be set up to invoke a function in response to a file being put in an S3 bucket, lifecycle events, response to incoming HTTP requests, consume events from a queue, process records from a Kinesis Data Stream, as well as many more AWS services. Each service that integrates with Lambda sends data to the lambda function as an event in JSON format. The structure of the event document is different for each event type, and contains data about the resource or request that triggered the function. Each Lambda runtime converts the event into an object and passes it to your function. q/kdb+ Lambda runtime¶ The AWS Lambda execution environment provisions and runs secure sandboxed micro virtual machines in order to execute functions. These secure sandboxes can be rapidly provisioned with minimal footprint, enabling performance and security. The q/kdb+ runtime runs in this standard Lambda execution environment. A runtime is responsible for - running the function’s setup code - reading the handler name from an environment variable - reading invocation events from the Lambda runtime API The runtime passes the event data to the function handler, and posts the response from the handler back to the lambda function. A custom runtime’s entry point is an executable file named bootstrap . Initialization tasks¶ The runtime code is responsible for completing some initialization tasks. Then it processes invocation events in a loop until it is terminated. The initialization tasks run once per instance of the function to prepare the environment to handle invocations. - Retrieve settings – Read environment variables to get details about the function and environment. _HANDLER – The location to the handler, from the function’s configuration. The standard format is file.method, where file is the name of the file without an extension, and method is the name of a method or function defined in the file.LAMBDA_TASK_ROOT – The directory that contains the function code (/var/task/ ).AWS_LAMBDA_RUNTIME_API – The host and port of the runtime API. - Initialize the function – Load the handler file and run any global or static code that it contains. Functions should create static resources like SDK clients and database connections once, and reuse them for multiple invocations. - Handle errors – If an error occurs, call the initialization error API and exit immediately. Initialization counts towards billed execution time and timeout. When an execution triggers the initialization of a new instance of your function, you can monitor the behavior of the function in CloudWatch logs and AWS X-Ray Trace. X-Ray traces allow the user to analyze and debug distributed applications and troubleshoot the root cause of performance issues and errors. q/kdb+ bootstrap and code execution¶ To deploy the q/kdb+ runtime to your Lambda environment follow the instructions at Deploy using Serverless Application Repository. 
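Inside the handler these settings surface as ordinary environment variables, so they can be read from q directly. A minimal check, with the variable names as documented above (values are specific to each Lambda environment):
q)getenv each `$("_HANDLER";"LAMBDA_TASK_ROOT";"AWS_LAMBDA_RUNTIME_API")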
Once the q/kdb+ runtime is deployed to your Lambda environment, two files can be seen on the Lambda console. bootstrap script.q A custom runtime’s entry point is an executable file named bootstrap . Once the Lambda function is invoked by an event, the bootstrap file passes the event data to the function handler and posts the response from the handler back to the Lambda function. The following bootstrap takes advantage of using FIFO pipes in Linux for larger payloads and enhanced error handling. The bootstrap also sets some environment variables for QHOME , QLIC and PATH . The q code to execute is placed in a file called script.q . On the Lambda console, the handler is set as script.q . In the bootstrap file, the handler is called using $_HANDLER . q /var/task/$_HANDLER > response & When the function is invoked, the JSON event object is passed into a file called event_data using curl -sSLf -D headers -o event_data [http://${AWS_LAMBDA_RUNTIME_API}/2018-06-01/runtime/invocation/next](about:blank) bootstrap : #!/bin/sh set -eu export QHOME=/opt export QLIC=/tmp export PATH="$PATH:$QHOME/l64:$LAMBDA_TASK_ROOT" while true; do WORKDIR=$(mktemp -d) cd "$WORKDIR" curl -sSLf -D headers -o event_data http://${AWS_LAMBDA_RUNTIME_API}/2018-06-01/runtime/invocation/next AWS_LAMBDA_REQUEST_ID=$(sed -ne 's/^Lambda-Runtime-Aws-Request-Id:\s*\(.*\)\s*\r$/\1/ p' headers) # exported in case we want these in q-land export AWS_LAMBDA_DEADLINE_MS=$(sed -ne 's/^Lambda-Runtime-Deadline-Ms:\s*\(.*\)\s*\r$/\1/ p' headers) export AWS_LAMBDA_TRACE_ID=$(sed -ne 's/^Lambda-Runtime-Trace-Id:\s*\(.*\)\s*\r$/\1/ p' headers) # we stream to support large responses mkfifo response # we call q executable and q code and send response to response pipe q /var/task/$_HANDLER > response & # we want to stop just before the deadline WATCHDOG=$(((AWS_LAMBDA_DEADLINE_MS - $(date -u +%s) * 1000 - 1000) / 1000)) [ $WATCHDOG -ge 3 ] || WATCHDOG=3 curl -sSf -X POST --max-time $WATCHDOG \ -H 'Expect:' \ -H 'Content-Type: application/octet-stream' \ -T response \ http://${AWS_LAMBDA_RUNTIME_API}/2018-06-01/runtime/invocation/${AWS_LAM BDA_REQUEST_ID}/response & PID_C=$! q -q <&- > response & PID_Q=$! wait $PID_Q && RC_Q=0 || RC_Q=$? [$RC_Q -eq 0 ] || error $AWS_LAMBDA_REQUEST_ID q # curl may have not finished yet wait $PID_C || true # we tidy up here in case some external resources were never actually used by our q code PIDS=$(ps --no-headers -o pid --ppid $$) echo "$PIDS" | xargs -r kill -9 2>/dev/null || true cd - >/dev/null unset AWS_LAMBDA_REQUEST_ID AWS_LAMBDA_DEADLINE_MS AWS_LAMBDA_TRACE_ID rm -rf "$WORKDIR" done exit 0 Runtime environment limitations¶ - The disk space (ephemeral) is limited to 512 MB in /tmp directory. - The default deployment package size is 50 MB. - Memory range is from 128 to 3008 MB. - Maximum execution timeout for a function is 15 minutes. - Requests limitations by lambda: - Request and response (synchronous calls) body payload size can be up to 6 MB. - Event request (asynchronous calls) body can be up to 128 KB. Technically the limit for the deployment package size can be much higher if you let your lambda function pull the deployment package from S3. AWS S3 allows for deploying function code with a substantially higher deployment package limit (~250 MB) as compared to directly uploading to Lambda or any other AWS service. Deploy using Serverless Application Repository¶ The q/kdb+ runtime is available from the Serverless Application Repository. 
The AWS Serverless Application Repository is a managed repository for serverless applications. Following the instructions from the above link, you can deploy the latest version of q/kdb+ to your AWS Lambda environment. To register your q/kdb+ Lambda function, visit serverless.kx.com. Option pricing using Black-Scholes pricing model¶ For many financial institutions, HPC grids are a key infrastructure component of their financial and risk modelling. These model computations are becoming increasingly more complex and a growing number of financial institutions are evolving to leverage serverless architectures to achieve high parallelization on AWS Lambda. One use case for financial institutions is to calculate Value at Risk (VaR) using the Black-Scholes pricing model. The following steps show how to set up a q/kdb+ Lambda function to price one call option using the Black-Scholes pricing model written in q. For simplicity, an approximation is used for the Cumulative Normal Distribution function (CNDF). - On the Lambda console, create a new file called Black_Scholes.q and copy in the code below. - Set the handler to Black_Scholes.q . - When invoked, the bootstrap file now calls the $_HANDLER variable to execute the Black Scholes q code. Black_Scholes.q : tbl:.j.k raze read0 hsym `$"event_data" cndf:{ abs(x>0) -(exp[-.5*x*x]%sqrt 2*3.14159265358979323846) *t*.31938153+t*-.356563782+t*1.781477937 +t*-1.8212 55978+1.330274429*t:1%1+.2316419*abs x } BlackScholes:{[s;x;r;t;v;cp] d1:((log s%x)+(r+0.5*(v*v))*t)%v*sqrt t; d2:((log s%x)+(r-0.5*(v*v))*t)%v*sqrt t; $[cp~"c";(s*cndf[d1])-x*(exp neg r*t)*cndf[d2]; cp~"p";(x*(exp neg r*t)*cndf[neg d2])-s*cndf[neg d1]; '"(c)all/(p)ut error"] } tbl[`BlackScholes_Option_Price]:BlackScholes[ tbl.EquityPrice; tbl.Strike; tbl.Rate; tbl.Time; tbl.Volatility; -10h$tbl.CallPut ] .j.j tbl Invoke Lambda function and decode logs¶ The Lambda function is invoked by calling the AWS Lambda invoke CLI command. The input parameters are passed to our Black-Scholes pricing model as a JSON message using the --payload parameter. $ aws lambda invoke \ --invocation-type RequestResponse \ --function-name q_Black_Scholes \ --region us-east-1 \ --log-type Tail \ --payload '{ "Ticker": "AAPL","EquityPrice": 200,"Strike": 220,"Rate": 0.025,"Time": 0.25,"Volatility": 0.2673,"CallPut": "c"}' \ --profile kx_lambda_user output.txt \ --query 'LogResult' \ --output text \ | base64 -D You can also use the testing feature on AWS Lambda to test your function from the AWS Lambda console. In the Black_Scholes.q script, the event_data is parsed into a table using tbl:.j.k raze read0 hsym `$"event_data" Each parameter (Price, Strike, etc.) is then passed to the BlackScholes q function. The .j.j function outputs the result in JSON format. The bootstrap file then passes the result object to the response pipe and the output is returned to your Lambda invoke CLI call. Bootstrap response curl command: curl -sSf -X POST --max-time $WATCHDOG \ -H 'Expect:' \ -H 'Content-Type: application/octet-stream' \ -T response \ http://${AWS_LAMBDA_RUNTIME_API}/2018-06-01/runtime/invocation/${AWS_LAMBDA_REQUEST_ID}/response & The CLI command should return this response in the output.txt file: "{\"Ticker\":\"AAPL\",\"EquityPrice\":200,\"Strike\":220,\"Rate\":0.025,\"Time\":0.25, \"Volatility\":0.2673,\"CallPut\":\"c\",\"BlackScholes_Option_Price\":4.220232} From the response payload we see the function was successful and calculated the Black-Scholes option price. 
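As a quick sanity check, the same price can be reproduced in a local q session by pasting in the cndf and BlackScholes definitions from Black_Scholes.q and applying them to the values from the payload above; the figure matches the BlackScholes_Option_Price in the response:

q)BlackScholes[200;220;0.025;0.25;0.2673;"c"]
4.220232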
For simplicity, one option price was calculated but this function can be easily extended to scale and process multiple option-price calculations/events. In the next section, we will demonstrate scaling and processing multiple events from an S3 bucket. Stream data from Amazon S3¶ To demonstrate a q/kdb+ Lambda function processing multiple events, we detail how to stream data from AWS Simple Storage Service (S3). Using FIFO named pipes and .Q.fps (pipe streaming) within q, data can be streamed in for processing. To illustrate this example, we create 100 files each containing 1 million Black-Scholes input parameters. The files are placed in a S3 bucket. This S3 bucket is the trigger for the Lambda function. Configuring Amazon S3 Event Notifications for how to add an S3 bucket event as a trigger to your Lambda function Each parameter file is space-delimited and has a column for Spot, Strike, Time, Rate and Volatility as illustrated below. 28.54 25.00 21.00 0.05 0.53 25.07 26.00 22.00 0.05 0.54 26.92 27.00 20.00 0.05 0.48 25.97 28.00 45.00 0.05 0.45 23.09 29.00 90.00 0.05 0.48 The Lambda function that is created has an associated AWS IAM role. To access the data in S3, the Lambda roles credentials are updated using IAM to allow permission to access the S3 bucket where you would like to place your parameter files. Writing IAM Policies: How to Grant Access to an Amazon S3 Bucket Next we update the handler to use the q code process_s3data.q . The steps in the process_s3data.q code are as follows. - Parse event data from S3 event to create S3 object key and bucket variables. - Set environment variables that will be used by the Stream_Data script. - Call S3 function from Stream_Data script, initiate FIFO pipe and stream in S3 data. - Load blackScholes.q . - Create inputs table to store input parameters. - Use .Q.fps to stream in the S3 data from the FIFOpipe_stream to inputs table. - Use .Q.fu to run the inputs through theblackScholes formula. black_scholes_data contains the input parameters and the calculated option prices. process_s3data.q : //Parse S3 event data jsonevent:.j.k raze read0 hsym `$"event_data" s3key:raze jsonevent@/`Records`s3`object`key bucket:raze jsonevent@/`Records`s3`bucket`name //Set environment variables `LAMBDAQ_REF setenv "pipe_stream" `LAMBDAQ_REGION setenv "us-east-1" `LAMBDAQ_BUCKET setenv bucket `LAMBDAQ_KEY setenv s3key // Call S3 function from Stream_Data script, // initiate fifo pipe and stream in S3 data. \Stream_Data s3 //Load black scholes q code \l /var/task/blackScholes.q //Create black scholes inputs table inputs: flip `spot`strike`time`rate`vol!"FFFFF"$\:() // Stream S3 data from pipe into inputs table .Q.fps[{`inputs insert ("FFFFF";" ";1)0:x}]`:pipe_stream // Calculate option prices using black scholes formula blackScholesPrices:.Q.fu[blackScholes] inputs black_scholes_data:update blackScholesPrices from inputs blackScholes.q : cndf:{ abs(x>0)-(exp[-0.5*x*x]%sqrt 2*3.14159265358979323846)* t*.31938153+t*-0.356563782+t*1.781477937+t* -1.821255978+1.330274429*t:1.%1+.2316419*abs x } blackScholes:{[table] s:table[`spot]; x:table[`strike]; t:table[`time]; r:table[`rate]; v:table [`vol]; cp:`c; d1:((log s%x)+(r+0.5*(v*v))*t)%v*sqrt t; d2:((log s%x)+(r-0.5*(v*v))*t)%v*sqrt t; $[cp~`c;(s*cndf[d1])-x*(exp neg r*t)*cndf[d2]; cp~`p;(x*(exp neg r*t)*cndf[neg d2])-s*cndf[neg d1]; '"(c)all/(p)ut error"] } The Stream_Data script uses the IAM credentials of the Lambda role to sign the API request and access the S3 bucket where the data resides. 
The files are then streamed into a FIFO pipe which is processed using .Q.fps . The script uses the following environment variables to sign the request. AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN Stream_Data : #!/bin/sh set -eu do_curl () { [ "${LAMBDAQ_REF:-}" -a "${LAMBDAQ_URI:-}" ] || exit 64 mkfifo "$LAMBDAQ_REF" curl -sSf --compressed -o "$LAMBDAQ_REF" "$LAMBDAQ_URI" & } sha256 () { echo -n "$1" | openssl dgst -sha256 | cut -d' ' -f2 | tr -d '\n' } hmac () { echo -n "$1" | openssl dgst -sha256 -mac HMAC -macopt "$2" | cut -d' ' -f2 | tr -d '\n' } do_s3 () { [ "${LAMBDAQ_REF:-}" -a "${LAMBDAQ_REGION:-}" -a "${LAMBDAQ_BUCKET:-}" -a "${LAMBDAQ_KEY:-}" ] || exit 64 AWS_DATE=$(date -u +%Y%m%dT%H%M%S)Z AWS_ALGO=AWS4-HMAC-SHA256 AWS_SERVICE=s3 AWS_HOST=$LAMBDAQ_BUCKET.$AWS_SERVICE.$LAMBDAQ_REGION.amazonaws.com AWS_PATH=/${LAMBDAQ_KEY%\?*} [ "$AWS_PATH" = "/$LAMBDAQ_KEY" ] && AWS_QUERY= || AWS_QUERY=${LAMBDAQ_KEY#*\?} AWS_REQUEST=aws4_request AWS_REGION=$LAMBDAQ_REGION PCHK=$(sha256 '') CCHK=$(sha256 "GET $AWS_PATH $AWS_QUERY host:$AWS_HOST x-amz-content-sha256:$PCHK x-amz-date:$AWS_DATE${AWS_SESSION_TOKEN:+ x-amz-security-token:$AWS_SESSION_TOKEN} host;x-amz-content-sha256;x-amz-date${AWS_SESSION_TOKEN:+;x-amz-securit y-token} $PCHK") SKEY=$(hmac aws4_request hexkey:$(hmac $AWS_SERVICE hexkey:$(hmac $AWS_REGION hexkey:$(hmac ${AWS_DATE%T*} key:"AWS4$AWS_SECRET_ACCESS_KEY")))) SIG=$(hmac "$AWS_ALGO $AWS_DATE ${AWS_DATE%T*}/$AWS_REGION/$AWS_SERVICE/$AWS_REQUEST $CCHK" hexkey:$SKEY) mkfifo "$LAMBDAQ_REF" curl -sSf --compressed -o "$LAMBDAQ_REF" \ -H "Authorization: $AWS_ALGO Credential=$AWS_ACCESS_KEY_ID/${AWS_DATE%T*}/$AWS_REGION/$AWS_SERVICE/$ AWS_REQUEST, SignedHeaders=host;x-amz-content-sha256;x-amz-date${AWS_SESSION_TOKEN:+ ;x-amz-security-token}, Signature=$SIG" \ -H "x-amz-content-sha256: $PCHK" \ -H "x-amz-date: $AWS_DATE" \ ${AWS_SESSION_TOKEN:+-H "x-amz-security-token: $AWS_SESSION_TOKEN"} \ "https://$AWS_HOST$AWS_PATH?$AWS_QUERY" & } case $1 in curl) do_curl;; s3) do_s3;; *) exit 64;; esac exit 0 Process Amazon Kinesis data streams¶ Using q/kdb+ on Lambda, we can send data between the AWS Kinesis Service and a remote q/kdb+ process using IPC. For a simple test, set up a running q process on an Elastic Compute Cloud (EC2) server and note the IP address. kdb+ is available on EC2 from the AWS Marketplace or can be manually installed on a machine of your choosing. - Run the q process on port 5001 ( \p 5001 ) - Open port 5001 on the EC2 Security Group Next update the handler to use Kinesis.q The Kinesis.q script will first deserialize the Kinesis event message. event:.j.k raze read0 hsym `$"event_data" It then handles the IPC connection between Lambda and the remote q process. Using the hopen function, a handle is opened to the IP address of the EC2 server running the remote q process. h:hopen `:34.123.12.123:5001 Kinesis.q : event:.j.k raze read0 hsym `$"event_data" h:hopen `:34.123.12.123:5001 h "show `Sending_Kinesis_Data_Stream" h(set;`kinesis_data;event) hclose h To invoke the Kinesis.q function, a sample Kinesis Data Stream event message can be used. This can be set up by using the testing feature from the AWS Lambda console. There is also a range of test events available for other AWS services. From the AWS Lambda console, Select Configure Test Events > Create New Test Event > Event Template > Amazon Kinesis Data Stream. Select Create, then Test. 
Example Kinesis data stream event: { "Records": [ { "kinesis": { "partitionKey": "partitionKey-03", "kinesisSchemaVersion": "1.0", "data": "SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=", "sequenceNumber": "49545115243490985018280067714973144582180062593244200961", "approximateArrivalTimestamp": 1428537600 }, "eventSource": "aws:kinesis", "eventID": "shardId-000000000000:49545115243490985018280067714973144582180062593 244200961", "invokeIdentityArn": "arn:aws:iam::EXAMPLE", "eventVersion": "1.0", "eventName": "aws:kinesis:record", "eventSourceARN": "arn:aws:kinesis:EXAMPLE", "awsRegion": "us-east-1" } ] } Once the test is run, the following can be seen on the remote q/kdb+ process. Remote q process running on EC2 (port 5001): q)`Sending_Kinesis_Data_Stream q)kinesis_data Records| +`kinesis`eventSource`eventID`invokeIdentityArn`eventVersion`eventName`eventSourceARN`awsRegion! (,`partitionKey`kinesisSchemaVersion`data`se quenceNumber`approximateArrivalTimestamp! ("partitionKey-03";"1.0";"SGV sbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=";"4954511524349098501828006771497314 4582180062593244200961";1.428538e+09); ,"aws:kinesis";,"shardId-0000000 00000:49545115243490985018280067714973144582180062593244200961"; ,"arn: aws:iam::EXAMPLE";,"1.0";,"aws:kinesis:record";,"arn:aws:kinesis:EXAMP LE";,"us-east-1") The Kinesis Data stream data was successfully sent to q and stored in the object called kinesis_data . This simple example illustrates how easy it is to send data from AWS Lambda to another q/kdb+ process. This example can easily be extended to execute based on a trigger event from Kinesis or any other AWS supported services. Serverless gateway process¶ Many kdb+ systems are constructed where data is stored across several processes. These architectures can range from a single real-time and historic database on the same server to multi-site architectures where data of various forms is stored in hundreds of different processes. In either case there is likely to be the same requirement to access data across processes. This is typically achieved using a kdb+ gateway process. The primary objective of a gateway is to act as a single interface point and separate the end user from the configuration of the underlying databases. The task of the gateway can be broken down into the following steps. - Check user entitlements and data-access permissions - Provide access to stored procedures - Gain access to data in the required services - Provide best possible service and query performance Setting up a gateway in a serverless framework has many benefits. - There are no servers to manage. - The gateway can scale to meet user requests. - You only pay for the compute time for each request Trades table on a remote EC2 machine: exchange sym time price size -------------------------------------------------------------------- exchangeA BTCEUR 2020-01-27D22:33:39.163733000 8076.09 -0.00380418 exchangeA BTCUSD 2020-01-27D22:33:39.163867000 8897 -0.03520613 exchangeA BTCUSDC 2020-01-27D22:33:39.163905000 8895.96 -0.06329409 exchangeB BTCGBP 2020-01-27D22:33:39.163967000 6827.19 0.02871252 exchangeB BTCGBP 2020-01-27D22:33:39.225011000 6850.9 -0.01170263 exchangeC BTCGBP 2020-01-27D22:33:39.225011000 6870.8 0.127068 On the remote RDB/HDB process running on the EC2 server, we set up the following q functions. getExchangeData:{[t;x] select from t where exchange = x} getCcyData:{[t;x] select from t where sym = x} The following q code can be set up in a Lambda function to illustrate a simple gateway process. 
A handle h is opened to the remote RDB/HDB process.

// Open handle to remote RDB/HDB
h:hopen `:34.12.123.12:5001
// get all exchange trades
Exchange_trades: h(`getExchangeData;`trades;`exchangeA)
// get all BTCUSD trades
BTCUSD_trades: h(`getCcyData;`trades;`BTCUSD)
hclose h

This simple example demonstrates how a gateway process can be quickly set up on AWS Lambda and from this could be extended to handle additional functionality such as permissioning and user entitlements.

Runtime and trace logs¶
Each Lambda function can be monitored by AWS X-Ray to analyze and debug issues. AWS X-Ray tracing is useful for distributed applications, such as those built using microservices architectures. X-Ray tracing can be enabled from the Lambda console or using the AWS CLI.

$ aws lambda update-function-configuration \
  --function-name q_Function \
  --tracing-config '{"Mode": "Active"}'

Taking one example, we examine the trace logs for a function and see how much time is attributed to function invocation and run time of the q code. For illustration purposes, a function was run 100 times in parallel and the X-Ray trace logs can be seen below.

Figure: response distribution of the 100 invocations of the function

Taking a closer look at one of the traces, we can see the function took a total of 479ms to run. 330ms of this time was spent on running the q code and 138ms was spent preparing the environment.

Figure: trace details

Using X-Ray it’s easy to debug and monitor function execution time and memory usage. Consequently, you can make informed decisions on running the function, such as increasing or decreasing the memory size so the function executes more efficiently.

Reserved concurrency and execution limits¶
The Lambda service for every AWS account starts with a pool of 1000 concurrent executions. All q/kdb+ Lambda functions from this account share executions from this pool. If one q/kdb+ Lambda function were to receive, for example, 1000 requests, AWS would run those from the common pool. If a second q/kdb+ Lambda function in the same account were to receive, for example, 200 requests, it could be completely or partially rejected as the number of combined concurrent executions is over the limit of 1000. To guarantee that executions will be available to your q/kdb+ Lambda function, you can use the Reserved Concurrency parameter. AWS however reserves 100 executions for the common pool at all times. So, if the account limit is 1000, the maximum Reserved Concurrency would be 900. To modify the Reserved Concurrency via the command line use the following command.

$ aws lambda put-function-concurrency \
  --function-name YOUR_FUNCTION_NAME_HERE \
  --reserved-concurrent-executions 50

It is also worth noting that, in order to raise the limit above 1,000 concurrent function executions, a request can be submitted to the AWS Support Centre.

Atomic functions¶
Many q functions iterate recursively through list or dictionary arguments down to items of some depth. Where a function recurses to the atoms of an argument, it is atomic in that domain: typically, left-atomic, right-atomic; or simply atomic for all its arguments. A function that recurses to strings is string-atomic.
Formal definition¶ Where f is a function, and x is a list of its arguments, .[f;x]~.[f';x] . q).[+;(2;(3 4;5))] 5 6 7 q).[+';(2;(3 4;5))] / the iterator is unnecessary 5 6 7 Application, projection, and indexing By extension, for a unary function, f is atomic if f[x]~f'[x] . q)neg (5 2; 3; -8 0 2) -5 -2 -3 8 0 -2 q)neg each (5 2; 3; -8 0 2) / the iterator is unnecessary -5 -2 -3 8 0 -2 Informal definition¶ A unary is atomic if it applies to both atoms and lists, and in the case of a list, applies independently to every atom in the list. For example, the unary neg is atomic. A result of neg is just like its argument, except that each atom in an argument is replaced by its negation. q)neg 3 4 5 -3 -4 -5 q)neg (5 2; 3; -8 0 2) -5 -2 -3 8 0 -2 neg applies to a list by applying independently to every item. Accessing the i th item of a list x is denoted by x[i] , and therefore the rule for how neg applies to a list x is that the i th item of neg x , which is (neg x)[i] , is neg applied to the i th item. neg can be defined recursively for lists in terms of its definition for atoms. To do so we need two language constructs. - Any function f can be applied independently to the items of a list by modifying the function with the Each iterator, as inf' . - The function {0>type x} has the value 1 whenx is an atom, and 0 whenx is a list. Using these constructs, neg can be defined as follows: neg:{$[0>type x; 0-x; neg'[x]]} That is, if x is an atom then neg x is 0-x , and otherwise neg is applied independently to every item of the list x . One can see from this definition that neg and neg' are identical. In general, this is the definition of atomic: a function f of any number of arguments is atomic if f is identical to f' . A binary f is atomic if the following rules apply (these follow from the general definition that was given just above, or can be taken on their own merit): f[x;y] is defined for atomsx andy - for an atom x and a listy , the resultf[x;y] is a list whose ith item isf[x;y[i]] - for a list x and an atomy , the resultf[x;y] is a list whose ith item isf[x[i];y] - for lists x andy , the resultf[x;y] is a list whose ith item isf[x[i];y[i]] For example, the operator Add is atomic. q)2 + 3 q)2 6 + 3 5 5 9 q)2 + 3 -8 q)2 6 + 3 -8 5 -6 5 -2 q)(2; 3 4) + ((5 6; 7 8 9); (10; 11 12)) 7 8 9 10 11 13 15 16 In the last example both arguments have count 2. The first item of the left argument, 2 , is added to the first item of the right argument, (5 6; 7 8 9) , while the second argument of the left argument, 3 4 , is added to the second argument of the right argument, (10; 11 12) . When adding the first items of the two lists, the atom 2 is added to every atom in (5 6; 7 8 9) to give (7 8; 9 10 11) , and when adding the second items, 3 is added to 10 to give 13 , and 4 is added to both atoms of 11 12 to give 15 16 . Add can be defined recursively in terms of Add for atoms as follows: q)Add:{$[(0>type x) & 0>type y; x + y; Add'[x;y]]} Length and type¶ The arguments of an atomic function must be conformable. q)1 2 3 + 4 5 'length [0] 1 2 3 + 4 5 ^ Type errors can arise at depth. q)1 2 3 + (4;"a";5) 'type [0] 1 2 3 + (4;"a";5) ^ Rank¶ Atomic functions are not restricted to ranks 1 and 2. For example, the ternary {x+y xexp z} (“x plus y to the power z”) is atomic. Left- and right-atomic¶ A function can be atomic relative to some of its arguments but not all. 
For example, the Index At operator @[x;y] is an atomic function of its right argument but not its left, and is said to be right-atomic, or atomic in its second argument. That is, for every left argument x the projected unary function x@ is atomic. This primitive function, like x[y] , selects items from x according to the atoms in y , and the result is structurally like y , except that every atom in y is replaced by the item of x that it selects. q)2 4 -23 8 7 @ (0 4 ; 2) 2 7 -23 Index 0 selects 2; index 4 selects 7; and index 2 selects -23. String-atomic¶ Q does not have a string datatype. What we call strings are char vectors. Some functions that apply to strings recurse until they find either strings or char atoms. They are string-atomic. q)upper ("quick";("brown";"fox");"x") "QUICK" ("BROWN";"FOX") "X" Q for Mortals §6.6 Atomic Functions Q language resources by topic¶ Casting and encoding¶ $ Cast between datatypes $ Tok: interpret string as value ! Enumeration parse parse string to function string cast to character sv decode to integer value parse string to function vs encode Comparison¶ < Less Than > Greater Than deltas differences <= Up To >= At Least differ flag changes & Lesser | Greater min least, minimum max greatest, maximum mins running minimums maxs running maximums mmin moving minimums mmax moving maximums Dictionaries¶ Environment¶ | variable | defines | default | |---|---|---| | COLUMNS | \c | 80 | | LINES | \c | 25 | | QHOME | folder searched for q.k and unqualified script names | $HOME/q C:\q | | QINIT | additional file loaded after q.k has initialized,executed in the default namespace | $QHOME/q.q %QHOME%\q.q | | QLIC | folder searched for k4.lic or kc.lic license key file | $QHOME %QHOME% | getenv get value of an environment variable gtime UTC equivalent of local timestamp ltime local equivalent of UTC timestamp setenv set value of an environment variable Evaluation control¶ ' ': /: \: each peach prior $[test;et;ef;…] Cond \ / scan over do if while .[f;x] Apply .[f;x;e] Trap : Return exit @[f;x] Apply-At @[f;x;e] Trap-At ' Signal File system¶ get set read/write or memory-map a data file¹ value read a data file¹ hcount file size hdel delete a file or folder hsym symbol/s to file symbol/s¹ 0: File Text read/write chars¹ read0 read chars¹ 1: File Binary read/write bytes¹ read1 read bytes¹ 2: Dynamic Load load shared object save load a variable rsave rload a splayed table dsave tables ? Enum Extend ¹ Has application beyond the file system. 
Functional qSQL¶ ![t;c;b;a] / update and delete ?[t;i;p] / simple exec ?[t;c;b;a] / select or exec ?[t;c;b;a;n] / select up to n records ?[t;c;b;a;n;(g;cn)] / select up to n records sorted by g on cn Interprocess communication¶ \p -p listen to port hopen hclose open/close connection .z callbacks Joins¶ Keyed As of ej equi aj aj0 as-of ij ijf inner ajf ajf0 lj ljf left asof simple as-of pj plus wj wj1 window uj ujf union upsert , Join ^ Coalesce Logic¶ all whether all items are non-zero & and lesser of two values; logical AND any whether any item is zero not whether if argument is zero null whether is null | or greater of two values; logical OR Math and statistics¶ abs absolute value mins minimums acos arccosine mmax moving maximum asin arcsine mmin moving minimum atan arctangent mmu matrix multiply avg arithmetic mean mod modulo avgs arithmetic means msum moving sum ceiling round up to integer prd product cor correlation prds products cos cosine ratios ratios cov covariance reciprocal reciprocal deltas differences scov sample covariance dev standard deviation sdev sample standard deviation div integer division signum sign ema exponential moving average sin sine exp ex sqrt square root floor round down to integer sum sum inv matrix inverse sums sums log natural logarithm svar sample variance lsq matrix divide tan tangent mavg moving average til natural numbers till max greatest var variance maxs maximums wavg weighted average mcount moving count wsum weighted sum mdev moving deviation xbar round down med median xexp xy min least xlog base-x logarithm of y QSQL query templates¶ delete delete rows or columns from a table exec return columns from a table, possibly with new columns select return part of a table, possibly with new columns update add rows or columns to a table Search¶ bin, binr binary search distinct unique items of a list ? Find find x in y in which items of x are items of y within whether x are items of list y Selection¶ except exclude items of one list or dictionary from another first first item of a list or first entry of a dictionary . Index select item at depth from a list or entries from a dictionary @ Index At select items from a list or entries from a dictionary inter intersection of two lists or dictionaries last last item of a list or last entry of a dictionary next immediately following item/s prev immediately preceding item/s sublist sublist of a list union union of two lists or dictionaries where copies of indexes of a list, or keys or of a dictionary xprev nearby list items Sort¶ asc sort ascending desc sort descending group group a list by values iasc grade ascending idesc grade descending rank position in sorted list xgroup group table by values of selected column/s xrank group by value xasc sort table ascending xdesc sort table descending Duplicate dictionary keys or table column names cause unpredictable results from sorts, grades, and groups. Re-sorting compressed data on disk decompresses it. Strings¶ $ Pad pad with spaces like match pattern lower shift to lower case ltrim trim leading space md5 hash from string rtrim trim trailing space ss string search ssr string search and replace trim trim leading and trailing space upper shift to upper case Tables¶ cols column names ungroup normalize meta metadata xasc sort ascending xcol rename cols xdesc sort descending xcols re-order cols xgroup group by values in selected cols insert insert records xkey sset cols as primary keys upsert add/insert records xdesc sort descending ! 
Enkey, Unkey add/remove keys

WebSockets¶
kdb+ supports the WebSocket protocol since V3.0. WebSockets provide a protocol between a client and server which runs over a persistent TCP connection. The client-server connection can be kept open as long as needed and can be closed by either the client or the server. This open connection allows bi-directional, full-duplex messages to be sent over the single TCP socket connection. The connection allows data transfer in both directions, and both client and server can send messages simultaneously. WebSockets were designed to be implemented in web browsers and web servers, but they can be used by any client or server application. The ability for bi-directional real-time functionality means it provides a basis for creating real-time applications on both web and mobile platforms. All messages sent across a WebSocket connection are asynchronous.

WebSocket server¶
To enable kdb+ to accept WebSocket connections, simply start a q session listening on a port of your choice. The .z.ws function will be called by the server for every client message. To customise the kdb+ WebSocket server, define the .z.ws function with your chosen logic. Note that .z.w is used for obtaining the current connection handle, which represents the client connection when called within the .z.ws callback. .z.wo and .z.wc are used to define callback functions in the event of a client connection opening or closing respectively. These have no default action, and can be customised with user-required logic, e.g. for tracking connections:

q)activeWSConnections: ([] handle:(); connectTime:())
//x argument supplied to .z.wc & .z.wo is the connection handle
q).z.wo:{`activeWSConnections upsert (x;.z.t)}
q).z.wc:{ delete from `activeWSConnections where handle =x}
//websocket connects
q)activeWSConnections
handle connectTime
-------------------
548    13:15:24.737
//websocket disconnects
q)activeWSConnections
handle connectTime
------------------

The internal function -38! can also be used to view current WebSocket connections and connection handles.

Example¶
To start a q session listening on port 5000, which then handles any WebSocket requests by echoing whatever it receives:

q)\p 5000
q).z.ws:{neg[.z.w] x}

The handler {neg[.z.w]x} echoes the message back to the client. Download KxSystems/cookbook/ws.htm, a simple WebSocket client, and open it in a WebSocket-capable browser. You should see something like this: Now click connect and type e.g. 4+til 3 in the edit box. Hit Enter or click send. Note that it is echoed in the output text area.

The example can be enhanced further, to run any q code typed into the browser. In your q session, redefine .z.ws:

.z.ws:{neg[.z.w].Q.s value x}

Then try typing 4+til 3 in the edit box and click send. You will see a result this time. To catch any bad q code that is submitted, redo the definition of .z.ws to trap errors:

.z.ws:{neg[.z.w]@[.Q.s value@;x;{"`",x,"\n"}]}

Connection handles¶
Data should be sent async to a WebSocket connection. When no longer required, connection handles are closed using hclose .
As communication is async, if you wish to flush any pending data prior to close, see the following example where h is a connection handle:

q)neg[h][] / flush any pending data (blocks til all data sent)
q)hclose h / close handle

Authentication / Authorization¶
In order to initialize a WebSocket connection, a WebSocket ‘handshake’ must be successfully made between the client and server processes. First, the client sends an HTTP request to the server to upgrade from the HTTP protocol to the WebSocket protocol. Client HTTP requests can be authenticated/authorized using .z.ac. This allows kdb+ to be customized with a variety of mechanisms for securing HTTP requests, e.g. LDAP, OAuth2, OpenID Connect, etc.

WebSocket client¶
A WebSocket API exists for a number of languages (including a native JavaScript WebSocket API), and web browsers are often used as WebSocket clients. Since V3.2t 2014.07.26, q can also create a WebSocket connection, i.e. operate as a client as well as a server. The .z.ws function will be called by the client for every server message. .z.ws must be defined before opening a WebSocket. To open a client connection to a server, use the following syntax:

(`$":ws://host:port")"GET / HTTP/1.1\r\nHost: host:port\r\n\r\n"

If successful it will return a 2-item list of (handle;HTTP response), e.g.

(3i;"HTTP/1.1 101 Switching Protocols\r\nConnection: Upgrade\r\nUpgrade: websocket\r\nSec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk=\r\nSec-WebSocket-Extensions: permessage-deflate\r\n\r\n")

If the protocol upgrade from HTTP to WebSocket failed, it returns the 2-item list, with the handle as 0Ni , e.g.

(0Ni;"HTTP/1.1 400 Bad Request\r\nContent-Type: text/html; charset=UTF-8...")

Any other error is signalled as usual, e.g.

'www.nonexist.badcom: No route to host

To use SSL/TLS, kdb+ should first be configured to use SSL/TLS. For any request requiring SSL/TLS, replace ws://host:port with wss://host:port . An alternative is to use stunnel, and open from kdb+ to that stunnel with ws:// . Both client and server support permessage-deflate compression.

.z.wc is used to define callback functions in the event of a client connection closing. This has no default action, and can be customised with user-required logic. The callback function .z.wo is not used with client-initiated connections.

Example¶
Open 2 terminal windows, one for the WebSocket server, and one for the client. In the server q session, listen on a chosen port (e.g. 5000) and define a callback that replies with a string to the client:

q)\p 5000
q).z.ws:{neg[.z.w] "server replied with ",$[10=type x;x;raze string x];}

In the client q session, define a callback to echo incoming messages and connect to the server:

q).z.ws:{0N!"Client Received Msg:";0N!x;}
q)r:(`$":ws://127.0.0.1:5000")"GET / HTTP/1.1\r\nHost: 127.0.0.1:5000\r\n\r\n"
q)r
6i
"HTTP/1.1 101 Switching Protocols\r\nConnection: Upgrade\r\nUpgrade: websocket\r\nSec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk=\r\nSec-WebSocket-Extensions: permessage-deflate\r\n\r\n"

The client can then send a message to the server using:

q)neg[r[0]]"test" / a char vector
q)neg[r[0]]0x010203 / a byte vector

The client should then see the reply received from the server echoed to the terminal.

Connection handles¶
Use as per server connection handles.

Authentication¶
As the HTTP header can be customised as part of the connection string, various means of authentication can be implemented (e.g. adding a bearer token, cookie, etc).
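For example, a token could be added to the handshake request when opening the client connection. This is only a sketch: the URL and token below are placeholders, and the server must be configured (e.g. via .z.ac) to validate whatever header is sent.

q).z.ws:{0N!x} / callback must exist before opening the WebSocket
q)token:"my-token" / placeholder value
q)hs:"GET / HTTP/1.1\r\nHost: 127.0.0.1:5000\r\nAuthorization: Bearer ",token,"\r\n\r\n"
q)r:(`$":ws://127.0.0.1:5000")hs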
The client connection also allows username:password to be specified for basic access authentication, e.g.

q)`:ws://username:password@127.0.0.1:5001 "GET / HTTP/1.1\r\nHost: 127.0.0.1:5001\r\n\r\n"

JavaScript serialization¶
c.js provides functions serialize and deserialize to simplify IPC between the browser and a kdb+ server.

Example¶
An example, wslogin.htm, shows how to send a JavaScript dictionary to kdb+. It receives a dictionary and replies with a vector of strings to the browser (the dictionary's values). To decode a serialized string using q, use -9! and to encode, use -8! . To run this example:

- start the server listening on port 5000 and define the callback to deserialize the dictionary, and reply with the serialized dictionary values. Note: to handle both byte and char, check for the type of the input.

q)\p 5000
q).z.ws:{neg[.z.w] -8!value -9!x;}

- download wslogin.htm and c.js to the same location
- open wslogin.htm in your browser
- click the login button. This will cause the browser to serialise its data and send it to kdb+.

The kdb+ server will receive a byte vector of an encoded kdb+ dictionary. The kdb+ server will then deserialise the dictionary, and reply to the browser with the values found within the dictionary (for display in its text box). The dictionary has the following form:

`u`p!("user";"At0mbang.")

and therefore the server will reply with the serialised form of the dictionary values, e.g. ("user";"At0mbang."). This example works because the default .z.ws echoes the byte vector over the WebSocket.

Downloads: wslogin.htm, c.js

JSON¶
JSON can be parsed and generated using functions found within the .j namespace.

Compression¶
V3.2t 2014.05.08 added ‘permessage-deflate’ WebSockets compression. One way to observe whether compression is active on a connection is to observe the queued data. For example:

/ generate some compressible data
q)v:10000#.Q.a
/ queue 1000 msgs to an existing websocket handle and observe the queue
q)\ts do[1000;(-5)v];show sum each .z.W
5| 10004000
6| 0
14 20610976
/ now do same again, but this time with a handle which requested compression
q)\ts do[1000;(-6)v];show sum each .z.W
5| 0
6| 47022
94 4354944

Here we can see the uncompressed data was quicker to add to the queue, and consumed more memory overall. The compressed data took longer to queue, and in this case was 200× smaller. Compression speed and ratio achieved will depend on your data. In Chrome you can also observe the network handshake in View>Developer>Developer tools; a successful negotiation will have “Sec-WebSocket-Extensions:permessage-deflate” in the HTTP response header.

Since 4.1 2024.03.12, 4.0 2024.03.04 WebSocket compression is disabled if kdb+ receives the sec-websocket-protocol HTTP header with value kxnodeflate , for example client JavaScript:

ws=new WebSocket(url,"kxnodeflate");

Secure sockets: stunnel¶
Stunnel will provide secure sockets (TLS/SSL) using the OpenSSL library. Stunnel will take any WebSocket server, HTTP server, or similar and secure it – you get https:// and wss:// for free.

UTF-8 encoding¶
The WebSocket requires that text is UTF-8 encoded. If you try to send invalidly encoded text it will signal 'utf8 .
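A minimal round-trip of the .j JSON helpers and the -8!/-9! serialization primitives mentioned above (output shown is indicative):

q).j.j `a`b!(1 2;"text") / q object to JSON string
"{\"a\":[1,2],\"b\":\"text\"}"
q).j.k "{\"a\":[1,2],\"b\":\"text\"}" / JSON string back to a q dictionary
a| 1 2
b| "text"
q)-9!-8!`u`p!("user";"At0mbang.") / IPC-serialize a q object and decode it again
u| "user"
p| "At0mbang."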
Real-time Demo¶ This section will present a simple example in which some tables will be updated in the browser in real-time, as shown: The web page shows the last quote and trade values for each symbol, and gives the user the ability to filter the syms in view Setup¶ - Download files from https://github.com/kxcontrib/websocket/tree/master/AppendixB - Run q pubsub.q . It will create the q interface for the WebSocket connections and contains a simple pubsub mechanism to push data to clients when there are updates - Run q fh.q . This will generate dummy trade and quote data and push it to the pubsub process. The script can be edited to change the number of symbols and frequency of updates. - Open websockets.html in your browser. This will connect to kdb+ and display trade data in real-time, which can be filtered. Explanation¶ The idea behind the pubsub mechanism here is that a client will make subscriptions to specific functions and provide parameters that they should be executed with. The subscription messages we send to the server will be sent as query strings so our .z.ws message handler is defined to simply evaluate them. q).z.ws:{value x} Next, we initialize the trade and quote tables and upd function to mimic a simple Real-Time Subscriber, along with a further table called subs , which we will use to keep track of subscriptions. // subs table to keep track of current subscriptions q)subs:2!flip `handle`func`params`curData!"is**"$\:() The subs table will store the handle, function name and function parameters for each client. As we only want to send updates to a subscriber when something has changed, we store the current data held by each subscriber so that we can compare against it later. The functions that can be called and subscribed to by clients through the WebSocket should be defined as necessary. In this example, we have defined a simple function that will return a list of distinct syms that will be used to generate the filter checkboxes on the client and additional functions to display the last record for each sym in both the trade and quote tables. The aforementioned trade and quote table functions will also accept an argument by which to filter the data if it is present. //subscribe to something sub:{`subs upsert(.z.w;x;enlist y)} //publish data according to subs table pub:{ row:(0!subs)[x]; (neg row[`handle]) .j.j (value row[`func])[row[`params]] } // trigger refresh every 1000ms .z.ts:{pub each til count subs} \t 1000 The subfunction will handle new subscriptions by upserting the handle, function name and function parameters into the subs table. .z.wc will handle removing subscriptions from the table whenever a connection is dropped. The pub function is responsible for publishing data to the client. It takes an argument that refers to a row index in the subs table and uses it to get the subscriptions function, the parameters to use when calling that function and the handle that it will use in sending the result to the client. Before doing so, it will also use .j.j to parse the result into a JSON string. The client can then parse the JSON into a JavaScript object upon arrival as it did in the earlier example. The pub function itself will be called on a timer every second for each row in the subs table. One thing that is important to be consider whenever using WebSockets is that the JavaScript onmessage function needs a way in which to identify different responses from one another. 
Each different response could have a different data structure that will need to be handled differently. Perhaps some data should be used in populating charts while other data is used for updating a table. If an identifier is present, it can be used to ensure each response is handled accordingly. In this example, the response's func value acts as our identifier. We can look at the func value and from that determine which function should be called in order to handle the associated data.

ws.onmessage = function(e) {
  /*parse message from JSON String into Object*/
  var d = JSON.parse(e.data);
  /* depending on the message's func value, pass the result to the appropriate handler function */
  switch(d.func){
    case 'getSyms' : setSyms(d.result); break;
    case 'getQuotes' : setQuotes(d.result); break;
    case 'getTrades' : setTrades(d.result);
  }
};

The rest of the JavaScript code for the client has been seen in previous examples. The tables that update in the browser are simply being redrawn every time the client receives a new response for the appropriate table. The end result is a simplistic, interactive, real-time web application showing the latest trade and quote data for a range of symbols. Its intention is to help readers understand the basic concepts of kdb+ and WebSocket integration.

Q for Mortals §11.7.2 Basic WebSockets

//postconditions
`.finos.init.priv.dependency.edges insert flip flip(providesEscaped;funNameEscaped);
.finos.init.priv.dependency.nodes[providesEscaped]:flip flip(provides;`condition);
};

.finos.init.priv.dependency.addProviderDependency:{[name]
 provider: `$"provide_",string[name];
 $[provider in key .finos.init.priv.dependency.provideCount;
  .finos.init.priv.dependency.provideCount[provider]:.finos.init.priv.dependency.provideCount[provider]+1;
  [
   .finos.init.priv.dependency.provideCount[provider]:1;
   .finos.init.priv.dependency.nodes[.finos.init.priv.dependency.escapeDot[name]]:(name;`condition);
   .finos.init.priv.dependency.nodes[provider]:(`;`provider);
   `.finos.init.priv.dependency.edges insert (provider; .finos.init.priv.dependency.escapeDot[name]);
  ]
 ];
 };

.finos.init.priv.dependency.convertToDotFormat:{[]
 cmd:"digraph G {\n";
 cmd,:raze { " ",string[x`name]," [shape=ellipse, style=filled, color=palegreen, label=\"",string[x`label],"\"];\n" } each () xkey select from .finos.init.priv.dependency.nodes where nodeType=`function;
 cmd,:raze { " ",string[x`name]," [shape=diamond, style = filled, color=salmon2, label=\"",string[x`label],"\"];\n" } each () xkey select from .finos.init.priv.dependency.nodes where nodeType=`condition;
 cmd,:raze { " ",string[x`name]," [shape=septagon, label=\"",string[.finos.init.priv.dependency.provideCount[x`name]],"\"];\n" } each () xkey select from .finos.init.priv.dependency.nodes where nodeType=`provider;
 cmd,:raze { " ",string[x`from]," -> ",string[x`to],";\n" } each .finos.init.priv.dependency.edges;
 cmd,:"}\n";
 cmd};

.finos.init.saveDependencyToSvg:{[outputFile]
 if[not 10h = type outputFile; '".finos.init.saveDependencyToSvg expects a string as argument - for example \"inithook_graph.svg\""];
 inputFile: first system"mktemp";
 inputFileH:hsym `$inputFile;
 inputFileH 0: enlist .finos.init.priv.dependency.convertToDotFormat[];
 //should be safesystem
 res:@[{(1b;system x)};"dot -Tsvg ",inputFile," -o '",ssr[outputFile;"'";"'\\''"];(0b;)@];
 hdel inputFileH;
 if[not first res; '"failed to run graphviz, check stderr"];
 };

.finos.init.getExecTimeByFunction:{ `elapsedTime xasc .finos.init.priv.stat};

/*******************************************************************************
/* code that actually executes something
/*******************************************************************************

.finos.init.add[();`.finos.init.priv.start;`start];

================================================================================
FILE: kdb_q_misc_mline.q
SIZE: 620 characters
================================================================================

// .finos.mline[] allows multiple multi-line functions to be
// pasted into the console.
// Will read STDIN until it sees "done" on a line by itself.
// It will then evaluate everything and return you to the regular
// command prompt.
//
// Useful for tactically loading utility functions or testing out
// monkey patches.
//
// Note! This simple version does not play nice with \d .
//
.finos.mline:{
 r:();
 while[not "done"~line:read0 0
  // Put semicolons at beginning of lines that
  // are not continuations or starts of comments.
  ;r,:enlist$[line like"[ \t/]*";"";";"],line];
 value` sv r}

================================================================================
FILE: kdb_q_objutils_memusage.q
SIZE: 3,262 characters
================================================================================

// Functionality to return approx. memory size of kdb+ objects
// half size for 2.x
.finos.objutils.version:.5*1+3.0<=.z.K;
// set the pointer size based on architecture
.finos.objutils.ptrsize:$["32"~1_string .z.o;4;8];
.finos.objutils.attrsize:{.finos.objutils.version*
 // `u#2 4 5 unique 32*u
 $[`u=a:attr x;32*count distinct x;
 // `p#2 2 1 parted (8*u;32*u;8*u+1)
  `p=a;8+48*count distinct x;
  0]
 };
// (16 bytes + attribute overheads + raw size) to the nearest power of 2
.finos.objutils.calcsize:{[c;s;a] `long$2 xexp ceiling 2 xlog 16+a+s*c};
.finos.objutils.vectorsize:{.finos.objutils.calcsize[count x;.finos.objutils.typesize x;.finos.objutils.attrsize x]};
// raw size of atoms according to type, type 20h->76h have 4 bytes pointer size
.finos.objutils.typesize:{4^0N 1 16 0N 1 2 4 8 4 8 1 8 8 4 4 8 8 4 4 4 abs type x};
.finos.objutils.threshold:100000;
// pick samples randomly according to threshold and apply function
.finos.objutils.sampling:{[func;obj] $[.finos.objutils.threshold<c:count obj;func@.finos.objutils.threshold?obj;func obj]};
// scale sampling result back to total population
.finos.objutils.scaleSampling:{[func;obj]
 .finos.objutils.sampling[func;obj]*max(1;count[obj]%.finos.objutils.threshold)
 };
// return full variable names
.finos.objutils.varnames:{[ns;vartype;shortpath]
 vars:system vartype," ",string ns;
 `$$[shortpath and ns in `.`.q;"";(string ns),"."],/:string vars
 };
// return all non-single character namespaces in current process
.finos.objutils.getall:{a:(enlist enlist "."),".",/:string key `;`$(a where 2<count each a)};
.finos.objutils.objsize:{
 // count 0
 if[not count x;:0];
 // flatten table/dict into list of objects
 x:$[.Q.qt x;(key x;value x:flip 0!x);
  99h=type x;(key x;value x);
  x];
 // special case to handle `g# attr
 // raw list + hash
 if[`g=attr x;x:(`#x;group x)];
 // atom is
fixed at 16 bytes, GUID is 32 bytes $[0h>t:type x;$[-2h=t;32;16]; // list & enum list t within 1 76h;.finos.objutils.vectorsize x; // exit early for anything above 76h 76h<t;0; // complex = complex type in list, pointers + size of each objects // assume count>1000 has no attrbutes (i.e. table unlikely to have 1000 columns, list of strings unlikely to have attr for some objects only (d[0] within 1 76h)&1=count d:distinct (),t;.finos.objutils.calcsize[count x;.finos.objutils.ptrsize;0]+"j"$.finos.objutils.scaleSampling[{sum .finos.objutils.calcsize[count each x;.finos.objutils.typesize x 0;$[1000<count x;0;.finos.objutils.attrsize each x]]};x]; // other complex, pointers + size of each objects .finos.objutils.calcsize[count x;.finos.objutils.ptrsize;0]+"j"$.finos.objutils.scaleSampling[{[f;x]sum f each x}[.z.s];x]] }; // get sizes of all variables within a specified namespace (enter .finos.objutils.allsizes[`] to return all namespaces) .finos.objutils.allsizes:{ $[x~`.; vbl:key `.; [vbl:raze .finos.objutils.varnames[;;0b] .' .finos.objutils.getall[] cross "vb"; $[x<>`; [x:raze "*",string[x],".*";vbl:vbl where string[vbl] like x]; vbl:vbl,key `. ]]]; tab:update sizeMB:sizeMB%2 xexp 20 from update sizeMB:{.finos.objutils.objsize[value x]}each vbl from ([]vbl); `sizeMB xdesc tab }; ================================================================================ FILE: kdb_q_psutil_psutil.q SIZE: 1,406 characters ================================================================================ // only implemented for linux if[not(first string .z.o)in"l"; '`nyi; ] .finos.dep.include"../util/util.q" /// // Get memory information about a process, possibly including USS, PSS, and swap. // @param x pid // @return A dictionary of memory information about the process. .finos.psutil.memory_full_info:{ pagesize:"J"$first system"getconf PAGE_SIZE"; attrs:`vms`rss`shared`text`lib`data`dirty; memory_info:pagesize*first flip attrs!("JJJJJJJ";" ")0:.finos.util.read0f .Q.dd[`:/proc;x,`statm]; memory_info:`rss`vms xcols memory_info; smaps:.Q.dd[`:/proc]x,`smaps; has_smaps:not not type key smaps; memory_info_maps:$[has_smaps; [ r:.finos.util.read0f smaps; r:{y where x y}[{$[2=count t:":"vs x;any(first t)like/:("Private_*";"Pss";"Swap");0b]}']r; r:1024*sum each"J"${y group x}."S:\n"0:` sv -3_'r; r:{?[x;();();`uss`pss`swap!((sum;enlist,{x where x like"Private_*"}key x);`Pss;`Swap)]}r; r]; ()]; memory_info,memory_info_maps} /// // Get some memory statistic for a process as a fraction of total physical system memory. // The statistic should be one of the symbol keys of the dictionary returned by memory_full_info. // @param x statistic // @param y pid // @return A float of the memory statistic for the process divided by total physical system memory. .finos.psutil.memory_fraction:{.finos.psutil.memory_full_info[y][x]%last system"w"} ================================================================================ FILE: kdb_q_qclone_finos_clib.q SIZE: 1,983 characters ================================================================================ //### fork / waitpid used by qclone. // requires FFI // bind doesn't support null arg? // Just throw it away and pass dummy int. 
.finos.clib.fork:{[f;arg] f 0i}[.ffi.bind[`fork;enlist"i";"i"];]

/usr/include/sys/wait.h
.finos.clib.WAIT_ANY:-1i
// /usr/include/bits/waitflags.h
.finos.clib.WNOHANG:1i
/usr/include/asm-generic/errno-base.h
.finos.clib.ECHILD:10i /* No child processes */
.finos.clib.EFAULT:14i /* Bad address */

.finos.clib.waitpid:.ffi.bind[`waitpid;"iIi";"i"]

.finos.clib.testBitFlag:{[x;y] any(0b vs x)&0b vs y}
.finos.clib.bitAnd:{[x;y] 2 sv(0b vs x)&0b vs y}

.finos.clib.waitNohang:{[]
 // Storage for status.
 status_ints:enlist 0Ni;
 pid:.finos.clib.waitpid(.finos.clib.WAIT_ANY;status_ints;.finos.clib.WNOHANG);
 // Break out the bytes, little-endian.
 status_bytes:reverse 0x00 vs status_ints[0];
 .finos.log.debug"status_bytes=",-3!status_bytes;
 status0:status_bytes[0];
 wexitstatus:status_bytes[1];
 wtermsig:.finos.clib.bitAnd[status0;0x7f];
 wstopsig:wexitstatus;
 wifexited:0=wtermsig;
 wifsignaled:0<(1+wtermsig)div 2;
 wifstopped:status0=0x7f;
 wcoreflag:.finos.clib.bitAnd[status0;0x80];
 statusDict:`pid`exited`status`signaled`termsig`coredumped`stopped`stopsig!(
  pid
  ;wifexited
  ;wexitstatus
  ;wifsignaled
  ;wtermsig
  ;wcoreflag
  ;wifstopped
  ;wstopsig);
 statusDict}

//### write / close
// Close file descriptors that weren't created via hopen.
.finos.clib.close0:.ffi.bind[`close;enlist"i";"i"]
.finos.clib.close:{[x] .finos.clib.close0`int$x}

// Aggressive shutdown to avoid hanging due to libs that could deadlock.
.finos.clib.underscoreExit0:.ffi.bind[`$"_exit";enlist"i";"i"]
.finos.clib.underscoreExit:{[x] .finos.clib.underscoreExit0`int$x}

// enable / disable blocking I/O - NOP for now
.finos.clib.setBlocking:{[handle;onOff]}

.finos.clib.write0:.ffi.bind[`write;"igj";"i"]
.finos.clib.write:{[handle;buf] .finos.clib.write0(`int$handle;buf;count buf)}

================================================================================
FILE: kdb_q_qclone_finos_qclone.q
SIZE: 21,288 characters
================================================================================

// @private
// @kind function
// @category utility
// @desc Warning function
i.deprecatedWarning:"Deprecation Warning: function no longer supported as of",
  " version '"

// @private
// @kind function
// @category utility
// @desc Warning function
i.futureWarning:"Future Deprecation Warning: function will no longer be ",
  "callable after version '"

// @private
// @kind function
// @category utility
// @desc Give deprecation warning along with returning the result
// of the function
// @param func {string} Name of updated function
// @param warn {string} Warning message to use
// @param ver {string} Version of the update
// @param res {any} Result from the updated function
// @returns {any} Results from the function
i.depWarn :{[func;warn;ver;res]
  if[not i.ignoreWarning;
    depFunction:$[warn~"deprecatedWarning";{'x};-1];
    depFunction get[".ml.i.",warn],ver,"'. Please use '",func,"' instead."
]; res } // @private // @kind function // @category utility // @desc Run new function and warn user of deprecation of old function // @param dict {dictionary} Contains information pertaining to what the new // function name is along with warning error information needed // @returns {any} Results from the updated function i.depApply:{[dict] (i.depWarn . dict`function`warning`version)get[dict`function]:: } // @private // @kind function // @category utility // @desc Run new function and warn user of deprecation of old function // @param dict {dictionary} Contains information pertaining to what the new // function name is along with warning error information needed // @returns {any} Results from the updated function i.deprecWarning:{[nameKey;versionMap] mapping:versionMap nameKey; newNames:key mapping; newFunctions:i.depApply each value mapping; {@[x set y]}'[newNames;newFunctions]; }[;i.versionMap] ================================================================================ FILE: ml_ml_xval_init.q SIZE: 386 characters ================================================================================ // xval/init.q - Load cross validation library // Copyright (c) 2021 Kx Systems Inc // // These algorithms are used in machine learning to test how // robust or stable a model is to changes in the volume of data // or to the specific subsets of data used for model generation. .ml.loadfile`:xval/utils.q .ml.loadfile`:xval/xval.q .ml.loadfile`:util/utils.q .ml.i.deprecWarning`xval ================================================================================ FILE: ml_ml_xval_utils.q SIZE: 12,030 characters ================================================================================ // xval/utils.q - Cross validation utilities // Copyright (c) 2021 Kx Systems Inc // // Utilities for cross validation library \d .ml // Cross validation indexing // @private // @kind function // @category xvUtility // @desc Shuffle data point indices // @param data {any} Table, matrix or list // @return {long[]} Indices of data shuffled xv.i.shuffle:{[data] 0N?count data } // @private // @kind function // @category xvUtility // @desc Find indices required to split data into k-folds // @param k {int} Number of folds // @param data {any} Table, matrix or list // @return {long[][]} Indices required to split data into k sub-sets xv.i.splitIdx:{[k;data] (k;0N)#til count data } // @private // @kind function // @category xvUtility // @desc Find shuffled indices required to split data into k-folds // @param k {int} Number of folds // @param data {any} Table, matrix or list // @return {long[][]} Shuffled indices required to split data into k // sub-sets xv.i.shuffIdx:{[k;data] (k;0N)#xv.i.shuffle data } // @private // @kind function // @category xvUtility // @desc Split target data ensuring that the percentage of each class are // preserved in each fold // @param k {int} Number of folds // @param data {any} Table, matrix or list // @return {long[][]} Data split into k-folds with distinct values // appearing in each xv.i.stratIdx:{[k;data] // Find indices for each distinct group idx:group data; // Shuffle/split groups into folds with distinct groups present in each fold fold:(,'/)(k;0N)#/:value idx@'xv.i.shuffle each idx; // Shuffle each fold fold@'xv.i.shuffle each fold } // @private // @kind function // @category xvUtility // @desc Get training and testing indices for each fold // @param k {int} Number of folds // @return {long[][]} Training and testing indices for each fold xv.i.groupIdx:{[k] (0;k-1)_/:rotate[-1]\[til 
k] } // @private // @kind function // @category xvUtility // @desc Get training/testing indices for equi-distanced bins of data // across k-folds // @param k {int} Number of folds // @return {long[][]} Indices for equi-distanced bins of data based on k xv.i.tsRollsIdx:{[k] enlist@''0 1+/:til k-1 } // @private // @kind function // @category xvUtility // @desc Get training/testing indices for equi-distanced bins of data // across k-folds with increasing amounts of data added to the training set // at each stage // @param k {int} Number of folds // @return {long[][]} Indices for equi-distanced bins of data based on k xv.i.tsChainIdx:{[k] flip(til each j;enlist@'j:1+til k-1) } // @private // @kind function // @category xvUtility // @desc Creates projection contining data split according to k // in ((xtrain;ytrain);(xtest;ytest)) format for each fold // @param func1 {fn} Function to be applied to x data // @param func2 {fn} Function to be applied to k // @param k {int} Number of folds // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @return {fn} Projection of data split per fold xv.i.idx1:{[func1;func2;k;features;target] dataSplit:flip@'((features;target)@/:\:func1[k;target])@\:/:func2 k; {{raze@''y}[;x]}each dataSplit } // @private // @kind function // @category xvUtility // @desc Creates projection contining data split according to k // in ((xtrain;ytrain);(xtest;ytest)) format for each fold // @param func1 {fn} Function to be applied to x data // @param func2 {fn} Function to be applied to k // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @return {fn} Projection of data split per fold xv.i.idxR:{[func1;func2;k;n;features;target] n#enlist xv.i.idx1[func1;func2;k;features;target] } // @private // @kind function // @category xvUtility // @desc Creates projection contining data split according to k // in ((xtrain;ytrain);(xtest;ytest)) format for each fold // @param func1 {fn} Function to be applied to x data // @param func2 {fn} Function to be applied to k // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @return {fn} Projection of data split per fold xv.i.idxN:{[func1;func2;k;n;features;target] xv.i.idx1[func1;func2;;features;target]@'n#k } // @private // @kind function // @category xvUtility // @desc Apply funct to data split using specified indexing functions // @param idx {long[][]} Indicies to apply to data // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function which takes data as input // @return {any} Output of func with idx applied to data xv.i.applyIdx:{[idx;k;n;features;target;function] splitData:raze idx[k;n;features;target]; {[function;data]function data[]}[function]peach splitData } // Python utilities required for xval.q // @private // @kind function // @category xvUtility // @desc Convert q list to numpy array // @param x {any[]} q list to be converted // @return {<} embedPy object following numpy array conversion numpyArray:.p.import[`numpy]`:array // Hyperparameter search functionality // @private // @kind function // @category hyperparameterUtility // @desc Perform hyperparameter generation and cross validation // @param paramFunc {fn} Parameter function // 
@param xvalFunc {fn} Cross validation function // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param dataFunc {fn} Function which takes data as input // @param hyperparams {dictionary} Hyperparameters // @return {table} Cross validation scores for each hyperparameter set hp.i.xvScore:{[paramFunc;xvalFunc;k;n;features;target;dataFunc;hyperparams] // Generate hyperparameter sets hyperparams:paramFunc hyperparams; // Perform cross validation for each set hyperparams!(xvalFunc[k;n;features;target]dataFunc pykwargs@)@'hyperparams } // @private // @kind function // @category hyperparameterUtility // @desc Hyperparameter search with option to test final model // @param scoreFunc {fn} Scoring function // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param dataFunc {fn} Function which takes data as input // @param hyperparams {dictionary} Dictionary of hyperparameters // @param testType {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Either validation or testing results from // hyperparameter search with (full results;best set;testing score) hp.i.search:{[scoreFunc;k;n;features;target;dataFunc;hyperparams;testType] if[testType=0;:scoreFunc[k;n;features;target;dataFunc;hyperparams]]; dataShuffle:$[testType<0;xv.i.shuffle;til count@]target; i:(0,floor count[target]*1-abs testType)_dataShuffle; r:scoreFunc[k;n;features i 0;target i 0;dataFunc;hyperparams]; res:dataFunc[pykwargs pr:first key desc avg each r](features;target)@\:/:i; (r;pr;res) } // @private // @kind function // @category hyperparameterUtility // @desc Hyperparameter generation for .ml.gs // @param hyperparams {dictionary} Hyperparameters with all possible values // for a given parameter specified by the user, e.g. 
// pdict = `randomState`max_depth!(42 72 84;1 3 4 7) // @return {table} All possible hyperparameter sets hp.i.gsGen:{[hyperparams] key[hyperparams]!/:1_'(::)cross/value hyperparams } // @private // @kind function // @category hyperparameterUtility // @desc Hyperparameter generation for .ml.rs // @param params {dictionary} Parameters with form `randomState`n`typ`p where // randomState is the seed, n is the number of hyperparameters to generate // (must equal 2^n for sobol), typ is the type of search (random/sobol) and p // is a dictionary of hyperparameter spaces - see documentation for more info // @return {table} Hyperparameters hp.i.rsGen:{[params] // Set default number of trials if[(::)~n:params`n;n:16]; // Check sobol trials = 2^n if[(`sobol=params`typ)&k<>floor k:xlog[2]n; '"trials must equal 2^n for sobol search" ]; // Find numerical hyperparameter spaces num:where any`uniform`loguniform=\:first each p:params`p; // Set random seed system"S ",string$[(::)~params`randomState;42;params`random_state]; // Import sobol sequence generator and check requirements pySobol:.p.import[`sobol_seq;`:i4_sobol_generate;<]; genPts:$[`sobol~typ:params`typ; enlist each flip pySobol[count num;n]; `random~typ; n; '"hyperparam type not supported" ]; // Generate hyperparameters hyperparams:distinct flip hp.i.hpGen[typ;n]each p,:num!p[num],'genPts; // Take distinct sets if[n>dst:count hyperparams; -1"Distinct hp sets less than n - returning ",string[dst]," sets." ]; hyperparams } // @private // @kind function // @category hyperparameterUtility // @desc Random/sobol hyperparameter generation for .ml.rs // @param randomType {symbol} Type of random search, denoting the namespace // to use // @param n {long} Number of hyperparameter sets // @param params {dictionary} Parameters // @return {any} Hyperparameters hp.i.hpGen:{[randomType;n;params] // Split parameters params:@[;0;first](0;1)_params,(); targetType:params[1;2]; // Respective parameter generation $[(typ:params 0)~`boolean;n?0b; typ in`rand`symbol; n?(),params[1]0; typ~`uniform; targetType$hp.i.uniform[randomType]. params 1; typ~`loguniform; targetType$hp.i.logUniform[randomType]. params 1; '"please enter a valid type" ] }</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="35"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">A mountain tour of kdb+ and q¶ This is a mountain tour of q, the programming language built into kdb+. It takes the form of a one-page ‘ridge walk’ along the tops, with optional side descents to see more detail. Use it as a very fast start with kdb+, or for a quick overview of what it is like to work in q. Before you start Download, install and launch q. Experiment with the expressions as you read. Follow links to deeper treatments of topics. The q session¶ The q session is a REPL. It evaluates a q expression and prints the result. You can use it as a calculator. $ q KDB+ 3.7t 2020.03.05 Copyright (C) 1993-2020 Kx Systems m64/ 4()core 8192MB sjt ... q)2+2 3 4 4 5 6 q)acos -1 3.141593 End your session with the Terminate system command. q)\\ $ Databases¶ Tables¶ Tables are first-class objects in q. Load the Suppliers and Parts database. q)\l sp.q +`p`city!(`p$`p1`p2`p3`p4`p5`p6`p1`p2;`london`london`london`london`london`lon.. 
(`s#+(,`color)!,`s#`blue`green`red)!+(,`qty)!,900 1000 1200 +`s`p`qty!(`s$`s1`s1`s1`s2`s3`s4;`p$`p1`p4`p6`p2`p2`p4;300 200 100 400 200 300) q)\a `p`s`sp The \l system command loaded and ran the script sp.q . The script defined three tables, ran three queries against them and displayed the results. The \a system command listed the names of tables. q)sp / suppliers and parts s p qty --------- s1 p1 300 s1 p2 200 s1 p3 400 s1 p4 200 s4 p5 100 s1 p6 100 s2 p1 300 s2 p2 400 s3 p2 200 s4 p2 200 s4 p4 300 s1 p5 400 Save a table to a file of the same name. q)save `:path/to/sp / kdb+ format `:path/to/sp q)save `:path/to/sp.xls / Excel spreadsheet `:path/to/sp.xls Other save formats include CSV, plain text and XML. Large tables can be splayed (each column written as its own file) and partitioned into time periods. CSVs¶ Fetch a CSV example from this website. q)url:"https://code.kx.com/download/data/example.csv" q)count t:("SFI";enlist csv)0: system "curl -Ls ",url 10000 q)t id price qty -------------- kikb 36.05 90 hlfe 96.57 84 mcej 91.34 63 iemn 57.12 93 femn 63.64 54 engn 94.56 38 edhp 63.31 97 ggna 72.39 88 mjlg 12.04 58 fpjb 34.3 68 gfpl 25.34 45 jogj 78.67 2 gpna 23.08 39 njoh 91.46 64 aoap 48.38 49 bhan 63.2 82 enmc 70 40 niom 58.92 88 nblh 42.9 77 jdok 9.42 30 .. Above, we used system to get a result direct from the operating system, and passed the result to the Load CSV operator 0: to interpret as a table. Table t has 10,000 rows. (The interpreter displayed only the top of the table.) Save t as example.csv . q)`:example.csv set t `:example.csv Queries¶ If you know SQL you can work with q tables. q)select from sp where qty>200 s p qty --------- s1 p1 300 s1 p3 400 s2 p1 300 s2 p2 400 s4 p4 300 s1 p5 400 q)/ double all stocks of p2 q)update qty:qty*2 from sp where p=`p2 s p qty --------- s1 p1 300 s1 p2 400 s1 p3 400 s1 p4 200 s4 p5 100 s1 p6 100 s2 p1 300 s2 p2 800 s3 p2 400 s4 p2 400 s4 p4 300 s1 p5 400 In qSQL queries you can use q operators, keywords, and functions you define yourself. Language¶ Data¶ Q handles numerical data, including times, dates, and periods. Also booleans, characters, GUIDs, and immutable strings called symbols. Lists of the same datatype are called vectors and have simple representations. q)1 2 3*acos -1 / three floats 3.141593 6.283185 9.424778 q)count 2019.07.05 2019.09.15 2019.11.16 / three dates 3 q)count 08:30 12:45 17:15 / three times 3 q)count 22:45:53.600 22:45:53.601 22:45:53.602 / three timestamps 3 q)count "fox" / three characters 3 q)count ("quick";"brown";"fox") / three character lists 3 q)count each ("quick";"brown";"fox") 5 5 3 q)count `quick`brown`fox / three symbols 3 q)count each `quick`brown`foxes / symbols are 'atoms' 1 1 1 Lists¶ Anything can be an item in a list. q)count (42;"foxes";`screw`bolt;2020.09.15) 4 In the list above the first and last items, 42 and 2020.09.15 , are single values, known as atoms. The other items "foxes" and `screw`bolt are themselves lists. A list in which all items are atoms of the same datatype is a simple list or vector. Vectors are key to the high performance of kdb+ and have simple representations. q)count 3 1 4 5 / integers 4 q)count 3 1 4 5.9 / floats 4 q)count "jump" / characters 4 q)"jump"<"n" / four booleans 1010b q)count `cow`sheep`cat`dog / four symbols 4 q)2020.01.01+30 60 90 120 / four dates 2020.01.31 2020.03.01 2020.03.31 2020.04.30 q)12:00+30 60 90 120 / four times 12:30 13:00 13:30 14:00 Indexing is zero-origin. q)"abcdef"[3 4 0 5] "deaf" Brackets are not always needed. 
Indexing and function application have the same syntax. q)count[3 1 4 5] 4 q)count 3 1 4 5 4 q)"abcdef" 5 4 4 3 "feed" You can index a table. q)sp 0 2 / first and third rows s p qty --------- s1 p1 300 s1 p3 400 q)sp `s`p / two columns s1 s1 s1 s1 s4 s1 s2 s2 s3 s4 s4 s1 p1 p2 p3 p4 p5 p6 p1 p2 p2 p2 p4 p5 q)sp[`qty] / one column 300 200 400 200 100 100 300 400 200 200 300 400 q)sp[`qty]>200 / flags 101000110011b q)where sp[`qty]>200 / indexes 0 2 6 7 10 11 q)sp where sp[`qty]>200 / indexed rows s p qty --------- s1 p1 300 s1 p3 400 s2 p1 300 s2 p2 400 s4 p4 300 s1 p5 400 A table is a list of dictionaries. Dictionaries¶ Dictionaries are first-class objects. q)`item`qty`price!(`screw;500;1.95) / record of a sale item | `screw qty | 500 price| 1.95 q)pr:`screw`nail`bolt`nut!0.75 3 2.85 0.55 / price dictionary q)pr screw| 0.75 nail | 3 bolt | 2.85 nut | 0.55 q)pr `bolt`nail / indexing 2.85 3 q)pr>2 screw| 0 nail | 1 bolt | 1 nut | 0 q)pr*1+.05*pr>2 / 5% increase where price>2 screw| 0.75 nail | 3.15 bolt | 2.9925 nut | 0.55 A table is a list of dictionaries. q)sp 0 / first row s | `s$`s1 p | `p$`p1 qty| 300 Joining a dictionary to a table appends a tuple. q)sp,`s`p`qty!(`s5;`p3;159) s p qty --------- s1 p1 300 s1 p2 200 s1 p3 400 s1 p4 200 s4 p5 100 s1 p6 100 s2 p1 300 s2 p2 400 s3 p2 200 s4 p2 200 s4 p4 300 s1 p5 400 s5 p3 159 Functions¶ Function notation is simple. A function can have up to eight arguments. Unless explicitly named, the first three arguments are assumed to be x , y , and z . q){x*x}2 -1.5 17 4 2.25 289 q)el:{[e;a;v;c]"<",e," ",a,"=\"",v,"\">",c,"</",e,">"} q)el["a";"href";"https://example.com/";"link text"] "<a href=\"https://example.com/\">link text</a>" Iteration¶ Control structures such as do and while are rarely used for iteration. Much iteration is implicit in the operators. q)2 3 4 + 10 12 13 14 q)2 3 4 + 10 100 1000 12 103 1004 Most other iteration is handled by keywords, and special operators called iterators. q)count each ("quick";"brown";"fox") 5 5 3 q).h.htc[`p;"The quick brown fox"] / mark up "<p>The quick brown fox</p>" q)"The quick brown fox" {.h.htc[y;x]}/ `p`body`html "<html><body><p>The quick brown fox</p></body></html>" q)8 {x,sum -2#x}\1 1 / 10 Fibonacci numbers 1 1 1 1 2 1 1 2 3 1 1 2 3 5 1 1 2 3 5 8 1 1 2 3 5 8 13 1 1 2 3 5 8 13 21 1 1 2 3 5 8 13 21 34 1 1 2 3 5 8 13 21 34 55 Communication¶ Interprocess communication is ‘baked in’ to q. It requires no library code and is easy to set up. Watch two kdb+ processes communicating through TCP/IP. Client/server¶ Use two command shells for this. On the left, we have the server task; on the right, the client. KDB+ 3.7t 2020.01.22 … | KDB+ 3.7t 2020.01.22 m64/ 4()core 8192MB … | m64/ 4()core 8192MB … | q)\p 5432 | | q)h:hopen `::5432 | q)h"2+2" | 4 | | q)h "system\"l /Users/sjt/q/sp.q\"" q)+`p`city!(`p$`p1`p2`p3`p4`p5 | (`s#+(,`color)!,`s#`blue`green | +`s`p`qty!(`s$`s1`s1`s1`s2`s3` | | q)h "select from sp where s in `s2`s3" | s p qty | --------- | s2 p1 300 | s2 p2 400 | s3 p2 200 | q) On the left, the server task started listening on port 5432. The client task opened a socket to port 5432, getting a handle, which it dubbed h . The client task sent to the server the expression 2+2 to be evaluated, and received the value 4 in return. The client task told the server to load the Suppliers and Parts script. The server task session showed that script loaded. The client sent the server a qSQL query and got a table as a result. Asynchronous calls are only slightly more complicated. 
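A minimal sketch of an asynchronous call against the same server (assuming it is still listening on port 5432): sending on the negative handle queues the message and returns immediately, so any result has to be collected with a later synchronous call.
q)h:hopen `::5432   / open a handle as before
q)neg[h]"r:til 5"   / async send: returns immediately, evaluated on the server
q)h"r"              / a later sync call confirms the assignment happened
0 1 2 3 4
q)hclose h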
A production system requires code in the callbacks to secure communications, but you can see from the above that the basics are very simple. The baked-in interprocess communications make it simple to implement systems as tasks distributed over multiple machines.
Webserver¶ A q session can listen for HTTP requests and act as a webserver. The default callback composes a page for browsing tables in the session.
q)tables[] / Suppliers & Parts
`p`s`sp
q)\p 8090 / listen to port 8090
Browse to http://localhost:8090 .
Development¶
Scripts¶ Write and load scripts to define an application. Scripts are text files. The sp.q script defines the Suppliers & Parts database and runs some queries on it.
s:([s:`s1`s2`s3`s4`s5] name:`smith`jones`blake`clark`adams; status:20 10 30 20 30; city:`london`paris`paris`london`athens)
p:([p:`p1`p2`p3`p4`p5`p6] name:`nut`bolt`screw`screw`cam`cog; color:`red`green`blue`red`blue`red; weight:12 17 17 14 12 19; city:`london`paris`rome`london`paris`london)
sp:([] s:`s$`s1`s1`s1`s1`s4`s1`s2`s2`s3`s4`s4`s1; / fkey
 p:`p$`p1`p2`p3`p4`p5`p6`p1`p2`p2`p2`p4`p5; / fkey
 qty:300 200 400 200 100 100 300 400 200 200 300 400)
select distinct p,s.city from sp
select sum qty by p.color from sp
select from sp where s.city=p.city
In scripts, q expressions can be written across multiple lines.
IDE¶ KX Developer is a free interactive development environment (IDE) for q.
and ¶ Lesser of two values, logical AND. and is a multithreaded primitive. See Lesser.

/-these parameters are only used once their value has been set with values retrieved from the WDB.
writedownmode:idbdir:savedir:currentpartition:symfilepath:`;
symsize:partitionsize:0;

/-force loads sym file
loadsym:{[]
  .lg.o[`load;"loading the sym file"];
  @[load;symfilepath; {.lg.e[`load;"failed to load sym file: ",string[symfilepath]," error: ",x]}];
  symsize::hcount symfilepath;
  };

/-force loads IDB
loadidb:{[]
  .lg.o[`load;"loading the db"];
  @[system; "l ", 1_string idbdir; {.lg.e[`load;"failed to load IDB: ",string[idbdir]," error: ",x]}];
  partitionsize::count key idbdir;
  };

/- force loads the idb and the sym file
loaddb:{[]
  starttime:.proc.ct[];
  loadsym[];
  loadidb[];
  .lg.o[`load;"IDB load has been finished for partition: ",string[currentpartition],". Time taken(ms): ",string .proc.ct[]-starttime];
  };

/- sets current partition and force loads the idb and the sym file. Called by the WDB after EOD.
rollover:{[pt]
  currentpartition::pt;
  idbdir::.Q.dd[savedir; $[writedownmode~`default;`;currentpartition]];
  .lg.o[`rollover;"IDB folder has been set to: ",string[idbdir]];
  loaddb[];
  };

/- reloads the db. Called by wdb process midday/eod.
intradayreload:{[]
  starttime:.proc.ct[];
  if[symfilehaschanged[];loadsym[]];
  if[partitioncounthaschanged[];loadidb[]];
  clearrowcountcache[];
  .lg.o[`intradayreload;"IDB reload has been finished for partition: ",string[currentpartition],". Time taken(ms): ",string .proc.ct[]-starttime];
  };

/- checks if sym file has changed since last reload of the IDB. Records new sym size if changed.
symfilehaschanged:{[]
  $[symsize<>c:hcount symfilepath;[symsize::c; 1b];0b]
  };

/- checks if count of partitions has changed since last reload of the IDB. Records new partition count if changed.
/- the default writedown method doesn't need db reloading as no new directory is being created there.
/- First check is to ensure that a single intraday partition exists (so loadidb doesn't fail) partitioncounthaschanged:{[] if[(1j~partitionsize)&writedownmode~`default;:0b]; $[partitionsize<>c:count key idbdir;[partitionsize::c; 1b];0b] }; /- each time data gets appended to current partition we are invalidating the row count cache /- this makes sure running "count trade" queries will return correct row count clearrowcountcache:{.Q.pn:.Q.pt!(count .Q.pt)#()}; setparametersfromwdb:{[wdbHandle] .lg.o[`init;"querying WDB, HDB locations, current partition and writedown mode from WDB"]; params:@[wdbHandle; (each;value;`.wdb.savedir`.wdb.hdbdir`.wdb.currentpartition`.wdb.writedownmode); {.lg.e[`connection; "Failed to retrieve values from WDB."]; 'x}]; savedir::hsym params[0]; currentpartition::params[2]; symfilepath::.Q.dd[hsym params[1]; `sym]; writedownmode::params[3]; idbdir::.Q.dd[savedir; $[writedownmode~`default;`;currentpartition]]; .lg.o[`init;"Current settings: db folder: ",string[idbdir],", sym file: ",string[symfilepath],", writedownmode: ", string writedownmode]; }; init:{[] .lg.o[`init; "searching for servers"]; /- If no valid conneciton to wdb, reattempt .servers.startupdepcycles[`wdb;wdbconnsleepintv;wdbcheckcycles]; .lg.o[`init;"getting connection handle to the WDB"]; w:.servers.gethandlebytype[wdbtypes;`any]; /-exit if no valid handle if[0=count w; .lg.e[`connection;"no connection to the WDB could be established... failed to initialise."];:()]; .lg.o[`init;"found a WDB process"]; /-setting parameters in .idb namespace from WDB setparametersfromwdb[w]; .lg.o[`init;"loading the db and the sym file first time"]; loaddb[]; .lg.o[`init;"registering IDBs on WDB process..."]; /-send sync message to WDB to register the existing IDBs. @[w;(`.servers.registerfromdiscovery;`idb;0b);{.lg.e[`connection;"Failed to register IDB with WDB."];'x}]; .lg.o[`init; "Initialisation of the IDB is done."]; } \d . 
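/- Illustrative note (not part of TorQ): the row-count cache cleared above is .Q.pn,
/- a dictionary mapping each partitioned table to its cached per-partition counts.
/- Emptying it forces the next "count trade"-style query to re-read counts from disk, e.g.
/-   q)count trade                     / populates .Q.pn on first use
/-   q).Q.pn:.Q.pt!(count .Q.pt)#()    / invalidate, as clearrowcountcache does
/-   q)count trade                     / recomputed from the partitions on disk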
/- set the reload the function reload:.idb.intradayreload; /-Get the relevant IDB attributes .proc.getattributes:{`partition`tables!(.idb.currentpartition;tables[])}; .idb.init[]; /- helper function to support queries against the sym column maptoint:{[val] $[(abs type val) in 5 6 7h; /- if using an integer column, clamp value between 0 and max int (null maps to 0) 0| 2147483647& `long$ val; /- if using a symbol column, enumerate against the hdb sym file sym?`TORQNULLSYMBOL^val] }; ================================================================================ FILE: TorQ_code_processes_kill.q SIZE: 873 characters ================================================================================ killhandle:{@[x:neg x;"exit 0";()]; @[x;[];()]} // make the connections .servers.startup[] // if killnames is on the commandline, then only kill the servers with the specific names // need to make sure that for each name we retrieve the type that the servers is a part of as well $[`killnames in key .proc.params; [names:"S"$'.proc.params[`killnames]; .lg.o[`kill;"killing processes with names ",-3!names]; s:.servers.getservers[`procname;"S"$'.proc.params[`killnames];()!();1b;0b]]; s:.servers.getservers[`proctype;.servers.CONNECTIONS;()!();1b;0b]]; // exit if no connections if[0=count s; .lg.o[`kill;"Failed to find any valid connections"]; exit 0] // kill each connection {.lg.o[`kill;"Sending kill command to ",(string x`proctype)," process with name ",(string x`procname)," at hp ",string x`hpup]; killhandle x`w;}each s; .lg.o[`kill;"Exiting"] exit 0 ================================================================================ FILE: TorQ_code_processes_monitor.q SIZE: 4,730 characters ================================================================================ /TorQ Monitor Process //configurable parameters for check monitoring .monitor.configcsv:@[value;`.monitor.configcsv;first .proc.getconfigfile["monitorconfig.csv"]]; //name of config csv to load in .monitor.configstored:@[value;`.monitor.configstored;`]; //name of stored table for save and reload .monitor.runcheckinterval:@[value;`.monitor.runcheckinterval;0D00:00:05]; //interval to run checks .monitor.checkinginterval:@[value;`.monitor.checkinginterval;0D00:00:05]; //interval to make sure checks are not lagging .monitor.cleartrackinterval:@[value;`.monitor.cleartrackinterval;0D01:00:00]; //interval to check tracks are under certain age in checktracker .monitor.agecheck:@[value;`.monitor.agecheck;0D12:00:00]; //if check over agecheck, delete from tracker .monitor.lagtime:@[value;`.monitor.lagtime;0D00:01:00]; //if check has been running over this time, set to neg // set up the upd function to handle heartbeats upd:{[t;x] $[t=`heartbeat; [ // publish single heartbeat row to web pages .html.pub[`heartbeat;$[min (`warning`error in cols exec from x);x;[.hb.storeheartbeat[x];hb_x::x;select from .hb.hb where procname in x`procname]]]]; t=`logmsg; [ insert[`logmsg;x]; // publish single logmsg row to web page .html.pub[`logmsg;x]; // publish all lmchart data - DEV - could publish single cols and update svg internally .html.pub[`lmchart;lmchart[]]]; ()]} subscribedhandles:0 0Ni // subscribe to heartbeats and log messages on a handle subscribe:{[handle] subscribedhandles,::handle; @[handle;(`.ps.subscribe;`heartbeat;`);{.lg.e[`monitor;"failed to subscribe to heartbeat on handle ",(string x),": ",y]}[handle]]; @[handle;(`.ps.subscribe;`logmsg;`);{.lg.e[`monitor;"failed to subscribe to logmsg on handle ",(string x),": ",y]}[handle]]; } // if a handle is 
closed, remove it from the list .dotz.set[`.z.pc;{if[y;subscribedhandles::subscribedhandles except y]; x@y}@[value;.dotz.getcommand[`.z.pc];{{[x]}}]] // Make the connections and subscribe .servers.startup[] subscribe each (exec w from .servers.SERVERS) except subscribedhandles; // As new processes become available, try to connect .servers.addprocscustom:{[connectiontab;procs] .lg.o[`monitor;"received process update from discovery service for process of type "," " sv string procs,:()]; .servers.retry[]; subscribe each (exec w from .servers.SERVERS) except subscribedhandles; } .servers.connectcustom:{[connectiontab] .lg.o[`monitor;"created outgoing connections"]; subscribe each (exec w from connectiontab) except subscribedhandles; } // GUI /- Table data functions - Return unkeyed sorted tables hbdata:{0!`error`warning xdesc .hb.hb} lmdata:{0!`time xdesc -20 sublist logmsg} /- Chart data functions - Return unkeyed chart data lmchart:{0!select errcount:count i by 0D00:05 xbar time from logmsg where loglevel=`ERR} bucketlmchartdata:{[x] x:`minute$$[x=0;1;x];0!select errcount:count i by (0D00:00+x) xbar time from logmsg where loglevel=`ERR} /- Data functions - These are functions that are requested by the front end /- start is sent on each connection and refresh. Where there are more than one table it is wise to identify each one using a dictionary as shown start:{.html.wssub each `heartbeat`logmsg`lmchart; .html.dataformat["start";(`hbtable`lmtable`lmchart)!(hbdata[];lmdata[];lmchart[])]} bucketlmchart:{.html.dataformat["bucketlmchart";enlist bucketlmchartdata[x]]} monitorui:.html.readpagereplaceHP["index.html"] // initialise pubsub .html.init`heartbeat`logmsg`lmchart //function to iniitialise process check monitoring- checks for last saved config file initcheck:{ if[not readstoredconfig[.monitor.configstored]; readmonitoringconfig[.monitor.configcsv]]}; // specify .z.exit to save config // capture any prior definition .dotz.set[`.z.exit;{[x;y] saveconfig[.monitor.configstored;checkconfig];x@y}[@[value;.dotz.getcommand[`.z.exit];{{[x]}}]]] //initialise monitor checks initcheck[] //Timers .timer.repeat[.proc.cp[];0Wp;.monitor.runcheckinterval;(`runnow;`);"run the monitoring checks"] .timer.repeat[.proc.cp[];0Wp;.monitor.checkinginterval;(`checkruntime;.monitor.lagtime);"update status if running slow"] .timer.repeat[.proc.cp[];0Wp;.monitor.cleartrackinterval;(`cleartracker;.monitor.agecheck);"delete rows if over certain age"] ================================================================================ FILE: TorQ_code_processes_rdb.q SIZE: 13,403 characters ================================================================================ /TorQ rdb process - based on r.q from kdb+tick /http://code.kx.com/wsvn/code/kx/kdb+tick/ /-changes added /-Can specify the hdb directory rather than relying on the tickerplant /-default parameters \d .rdb</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="38"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Serialize a table as an object¶ The simplest way to serialize a table is as a single object. save and load ¶ Keywords save and load let you serialize and write any q object to a file of the same name in the working directory. That includes tables, and is the simplest way to persist one. 
q)cities:([]city:`Tokyo`Delhi`Shanghai;pop:37435191 29399141 26317104) q)key `:. / nothing in working directory `symbol$() q)save `cities `:cities q)key `:. / file in working directory ,`cities q)delete cities from `. / delete from memory `. q)cities 'cities [0] cities ^ q)load `cities / load from filesystem `cities q)cities city pop ----------------- Tokyo 37435191 Delhi 29399141 Shanghai 26317104 Perfect for casual use. For more organized writing and reading we need the keywords used to define save and load . set and get ¶ Keywords set and get differ from save and load : set is a binary; its left argument says where in the filesystem to writeget returns the table value rather than the name of the variable it has been assigned to Notice the similarity of reading a value from memory to reading it from the filesystem. q)get `:cities / from filesystem Tokyo | 37435191 Delhi | 29399141 Shanghai| 26317104 q)get `cities / from memory city pop ----------------- Tokyo 37435191 Delhi 29399141 Shanghai 26317104 q)`:foo/bar/bigcities set cities `:foo/bar/bigcities q)get `:foo/bar/bigcities city pop ----------------- Tokyo 37435191 Delhi 29399141 Shanghai 26317104 Enumerations and foreign keys¶ The city column is a symbol vector, that is, an enumeration. It is represented in memory as indexes into the sym table. Serialization and deserialization survives the session’s sym list. KDB+ 4.0 2020.10.02 Copyright (C) 1993-2020 Kx Systems m64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 EXPIRE .. q)get `:foo/bar/bigcities city pop ----------------- Tokyo 37435191 Delhi 29399141 Shanghai 26317104 Similarly for foreign keys, enumerated against another table. q)countries:([country:`China`India`Japan];cont:3#`Asia;code:86 91 81) q)cities:([] city:`Tokyo`Delhi`Shanghai; country: `countries$`Japan`India`China; pop:37435191 29399141 26317104) q)`:linked/countries`:linked/cities set'(countries;cities) `:linked/countries`:linked/cities q)\\ ❯ q KDB+ 4.0 2020.10.02 Copyright (C) 1993-2020 Kx Systems m64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 EXPIRE .. q)countries:get`:linked/countries q)cities:get`:linked/cities q)select city,pop,country.code from cities city pop code ---------------------- Tokyo 37435191 81 Delhi 29399141 91 Shanghai 26317104 86 Use cases¶ Serialization as an object suits a table that is - small relative to memory - frequently read - has most of its columns required by most queries Segmented databases¶ Partitioned tables can be distributed across multiple storage devices to - give them more space - support parallelization The root of a segmented database contains only the sym list and a file par.txt , which is used to unify the partitions of a database, presenting them as a single database for querying. par.txt ¶ File par.txt defines the top-level partitioning of the database into directories. Each row of par.txt is a directory path. Each such directory is itself partitioned in the usual way, typically by date. The directories should not be empty. DISK 0 DISK 1 DISK 2 db db db ├── par.txt ├── 2020.10.03 ├── 2020.10.04 └── sym │ ├── quotes │ ├── quotes │ │ ├── price │ │ ├── price │ │ ├── sym │ │ ├── sym │ │ └── time │ │ └── time │ └── trades │ └── trades │ ├── price │ ├── price │ ├── sym │ ├── sym │ ├── time │ ├── time │ └── vol │ └── vol ├── 2020.10.05 ├── 2020.10.06 │ ├── quotes │ ├── quotes .. .. par.txt for the above: /1/db /2/db Do not end the paths with a folder delimiter /0/db is good, but /0/db/ can be bad, depending on the filesystem. 
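If you are building such a root from q, par.txt is itself just a text file and can be written with File Text. A minimal sketch, assuming the segment directories /1/db and /2/db already exist and are populated:
q)`:db/par.txt 0: ("/1/db";"/2/db") / one segment path per line, no trailing /
`:db/par.txt
q)\l db / load the segmented root; .Q.par can then resolve where a given partition lives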
Using symlinks¶ Security related options such as reval , or the command line option -u 1 , restrict access to within the current directory. This can prevent users from accessing a segmented database when par.txt contains references to partitions that are situated outside the current directory. In order to provide access, symlinks can be used. An example of using symlinks is as follows: $ ln -s /db1 db1$ $ ln -s /db2 db2$ $ ls -l total 16 lrwxr-xr-x 1 user kx 6 18 Sep 12:30 db1$ -> /db1 lrwxr-xr-x 1 user kx 6 18 Sep 12:30 db2$ -> /db2 -rw-r--r-- 1 user kx 10 18 Sep 12:30 par.txt -rw-r--r-- 1 user kx 48 18 Sep 12:30 sym $ cat par.txt db1$ db2$ Using a trailing $ in the directory name ensures the originating symlinks are not picked up by kdb+, instead using the directory referenced. Multithreading¶ Segmentation is particularly useful in combination with multithreading. Starting kdb+ with secondary threads, with each partition in par.txt on a separate local disk, the partitions in par.txt are allocated to secondary threads on a round robin. That is, if kdb+ is started with n secondary threads, then partition p is assigned to secondary thread p mod n . This gives maximum parallelization for queries over date ranges. Each thread gets its own disk or disks, and there should be no disk contention, i.e. not more than one thread issuing commands to any one disk. Ideally there is one disk per thread. This works best where the disks have fully independent access paths CPU-disk controller-disk, but may be of little use with shared access due to disk contention, e.g. with SAN/RAID. For example, par.txt might be: /0/db /1/db /2/db /3/db with directories : ~$ ls /0/db 2019.06.01 2019.06.05 2019.06.11 ... ~$ ls /1/db 2019.06.02 2019.06.06 2019.06.12 ... ... Since 4.1 2025.01.17 queries on partitioned tables in segmented databases use secondary threads if available on each segment and partition. Previously parallelism was only at the segment level. Considerations¶ Partition data correctly: data for a particular date must reside in the partition for that date. The secondary/directory partitioning is for both read and write. The directories pointed to in par.txt may contain only appropriate database subdirectories. Any other content (file or directory) will give an error. The same subdirectory name may be in multiple par.txt partitions. For example, this would allow symbols to be split, as in A-M on /0/db , N-Z on /1/db . Aggregations are handled correctly, as long as data is properly split (not duplicated). Note that in this case, the same day would appear on multiple partitions. There was a 2-billion row limit prior to version 3 of kdb+, which could use this method as a work around. Multithreading primitives Multi-partitioned kdb+ databases Multithreading in kdb+ Q for Mortals §14.4 Segmented Tables Execute a q script as a shebang script¶ $ more ./test.q #!/usr/bin/env q 2+3 \\ $ chmod +x ./test.q $ ./test.q KDB+ 3.1 2013.11.20 Copyright (C) 1993-2013 Kx Systems l64/ ... 
5 Shebang</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="39"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// @kind function // @category tests // @fileoverview Ensure that a test that is expected to pass, // does so with an appropriate return // @param function {(func;proj)} The function or projection to be tested // @param data {any} The data to be applied to the function as an individual item for // unary functions or a list of variables for multivariant functions // @param applyType {boolean} Is the function to be applied unary(1b) or multivariant(0b) // @param expectedReturn {string} The data expected to be returned on // execution of the function with the supplied data // @return {boolean} Function returned the appropriate output (1b), function failed // or executed with incorrect output (0b) passingTest:{[function;data;applyType;expectedReturn] // Is function to be applied unary or multivariant applyType:$[applyType;@;.]; functionReturn:applyType[function;data]; expectedReturn~functionReturn } ================================================================================ FILE: ml_ml_examples_code_torch_torch.q SIZE: 869 characters ================================================================================ \d .torch // Example invocation of a torch model being fit using embedPy fitModel:{[xtrain;ytrain;model] optimArg:enlist[`lr]!enlist 0.9; optimizer:.p.import[`torch.optim][`:Adam][model[`:parameters][];pykwargs optimArg]; criterion:.p.import[`torch.nn][`:BCEWithLogitsLoss][]; dataX:.p.import[`torch][`:from_numpy][.p.import[`numpy][`:array][xtrain]][`:float][]; dataY:.p.import[`torch][`:from_numpy][.p.import[`numpy][`:array][ytrain]][`:float][]; tensorXY:.p.import[`torch.utils.data][`:TensorDataset][dataX;dataY]; modelValues:(count first xtrain;1b;0); modelArgs:`batch_size`shuffle`num_workers!$[.pykx.loaded;.pykx.topy each modelValues;modelValues]; dataLoader:.p.import[`torch.utils.data][`:DataLoader][tensorXY;pykwargs modelArgs]; nEpochs:10|`int$(count[xtrain]%1000); .p.get[`runmodel][model;optimizer;criterion;dataLoader;nEpochs] } ================================================================================ FILE: ml_ml_examples_q_deploy.q SIZE: 4,257 characters ================================================================================ \l init.q // Retrieve command line arguments and ensure a user is // cognizant that they will delete the current registry // if they invoke the example by accident cmdLine:.Q.opt .z.x if[not `run in key cmdLine; -1"This example will delete the registry", " in your current folder, use '-run' command line arg"; exit 1; ]; .[.ml.registry.delete.registry;(::;::);{}] // All models solving the clustering problem are associated with the // "cluster" experiment experiment:enlist[`experimentName]!enlist "cluster" // Generate and format the dataset skldata:.p.import`sklearn.datasets blobs:skldata[`:make_blobs;<] dset:blobs[`n_samples pykw 1000;`centers pykw 2;`random_state pykw 500] // Generate two separate Affinity Propagation models using the ML Toolkit qmdl :.ml.clust.ap.fit[flip dset 0;`nege2dist;0.8;min;::] qmdl2:.ml.clust.ap.fit[flip dset 0;`nege2dist;0.5;min;::] // Add the two q models to the KX_ML_REGISTRY .ml.registry.set.model[::;"cluster";qmdl 
;"qAPmodel";"q";enlist[`axis]!enlist 1b] .ml.registry.set.model[::;"cluster";qmdl2;"qAPmodel";"q";enlist[`axis]!enlist 1b] // Generate equivalent Affinity Propagation models using Scikit-Learn skmdl :.p.import[`sklearn.cluster][`:AffinityPropagation][`damping pykw 0.8][`:fit]dset 0 skmdl2:.p.import[`sklearn.cluster][`:AffinityPropagation][`damping pykw 0.5][`:fit]dset 0 // Add the two models to the KX_ML_REGISTRY with the second model version 2.0 not 1.1 .ml.registry.set.model[::;"cluster";skmdl ;"skAPmodel";"sklearn";::] .ml.registry.set.model[::;"cluster";skmdl2;"skAPmodel";"sklearn";enlist[`major]!enlist 1b] // Generate and fit two Keras models adding these to the registry if[@[{.p.import[x];1b};`keras;0b]; seq :.p.import[`keras.models][`:Sequential]; dense:.p.import[`keras.layers][`:Dense]; nparray:.p.import[`numpy]`:array; kerasModel:seq[]; kerasModel[`:add]dense[4;pykwargs `input_dim`activation!(2;`relu)]; kerasModel[`:add]dense[4;`activation pykw `relu]; kerasModel[`:add]dense[1;`activation pykw `sigmoid]; kerasModel[`:compile][pykwargs `loss`optimizer!`binary_crossentropy`adam]; kerasModel[`:fit][nparray dset 0;dset 1;pykwargs `epochs`verbose!200 0]; kerasModel2:seq[]; kerasModel2[`:add]dense[4;pykwargs `input_dim`activation!(2;`relu)]; kerasModel2[`:add]dense[4;`activation pykw `relu]; kerasModel2[`:add]dense[1;`activation pykw `sigmoid]; kerasModel2[`:compile][pykwargs `loss`optimizer!`mse`adam]; kerasModel2[`:fit][nparray dset 0;dset 1;pykwargs `epochs`verbose!10 0]; // Add the two models to the KX_ML_REGISTRY .ml.registry.set.model[::;"cluster";kerasModel ;"kerasModel";"keras";::]; .ml.registry.set.model[::;"cluster";kerasModel2;"kerasModel";"keras";::]; ]; // Generate and add two Python functions to the KX_ML_REGISTRY. // These are not associated with a named experiment or solve the problem that // the above do, they are purely for demonstration if[@[{.p.import x;1b};`statsmodels;0b]; pyModel :.p.import[`statsmodels.api][`:OLS]; pyModel2:.p.import[`statsmodels.api][`:WLS]; // Add the two functions to the KX_ML_REGISTRY. .ml.registry.set.model[::;::;pyModel ;"pythonModel";"python";::]; .ml.registry.set.model[::;::;pyModel2;"pythonModel";"python";::] ] // Online/out-of-core Models // Generate and add two q 'online' models to the KX_ML_REGISTRY. // These models contain an 'update' key which allows the models to // be updated as new data becomes available online1:.ml.online.clust.sequentialKMeans.fit[2 200#400?1f;`e2dist;3;::;::] online2:.ml.online.sgd.linearRegression.fit[100 2#400?1f;100?1f;1b;::] online3:.ml.online.sgd.logClassifier.fit[100 2#400?1f;100?0b;1b;::] .ml.registry.set.model[::;::;online1;"onlineCluster" ;"q";::] .ml.registry.set.model[::;::;online2;"onlineRegression";"q";::] .ml.registry.set.model[::;::;online3;"onlineClassifier";"q";::] // Generate and add two Python 'online' models to the KX_ML_REGISTRY. // These models must contain a 'partial_fit' method in order to be // considered suitable for retrieval as update functions sgdClass:.p.import[`sklearn.linear_model][`:SGDClassifier] sgdModel:sgdClass[pykwargs `max_iter`tol!(1000;0.003) ][`:fit] . 
dset 0 1 .ml.registry.set.model[::;::;sgdModel;"SklearnSGD";"sklearn";::] exit 0 ================================================================================ FILE: ml_ml_examples_q_registry.q SIZE: 2,675 characters ================================================================================ // Initialize all relevant functionality \l init.q // Set the screen width/lengths for better display \c 200 200 // Retrieve command line arguments and ensure a user is // cognizant that they will delete the current registry // if they invoke the example by accident cmdLine:.Q.opt .z.x if[not `run in key cmdLine; -1"This example will delete the registry", " in your current folder, use '-run' command line arg"; exit 1; ]; .[.ml.registry.delete.registry;(::;::);{}] -1"Generate a model registry and retrieve the 'modelStore'"; .ml.registry.new.registry[::;::]; .ml.registry.get.modelStore[::;::]; show modelStore; -1"\nAdd several 'basic q models' to the registry\n"; modelName:"basic-model" // Incrementing versions from '1.0' .ml.registry.set.model[::;{x} ;modelName;"q";::] .ml.registry.set.model[::;{x+1};modelName;"q";::] .ml.registry.set.model[::;{x+2};modelName;"q";::] // Set major version and increment from '2.0' .ml.registry.set.model[::;{x+3};modelName;"q";enlist[`major]!enlist 1b] .ml.registry.set.model[::;{x+4};modelName;"q";::] // Add another version of '1.x' .ml.registry.set.model[::;{x+5};modelName;"q";enlist[`majorVersion]!enlist 1] -1"Display the modelStore following model addition"; show modelStore; -1"\nAdd models associated with an experiment\n"; modelName:"new-model" experiment:enlist[`experimentName]!enlist "testExperiment" // Incrementing versions from '1.0' .ml.registry.set.model[::;{x} ;modelName;"q";experiment] .ml.registry.set.model[::;{x+1};modelName;"q";experiment,enlist[`major]!enlist 1b] .ml.registry.set.model[::;{x+2};modelName;"q";experiment] -1"Display the modelStore following experiment addition"; show modelStore; -1"\nRetrieve version 1.1 of the 'basic-model':\n"; .ml.registry.get.model[::;::;"basic-model";1 1]`model -1"\nRetrieve the most up to date model associated with the 'testExperiment':\n"; .ml.registry.get.model[::;"testExperiment";"new-model";::]`model -1"\nRetrieve the last model added to the registry:\n"; .ml.registry.get.model[::;::;::;::]`model -1"\nDelete the experiment from the registry"; .ml.registry.delete.experiment[::;"testExperiment"] -1"\nDisplay the modelStore following experiment deletion"; show modelStore -1"\nDelete version 1.3 of the 'basic-model'"; .ml.registry.delete.model[::;::;"basic-model";1 3]; -1"\nDisplay the modelStore following deletion of 1.3 of the 'basic-model'"; show modelStore -1"\nDelete all models associated with the 'basic-model'"; .ml.registry.delete.model[::;::;"basic-model";::] -1"\nDisplay the modelStore following deletion of 'basic-model'"; show modelStore // Delete the registry .ml.registry.delete.registry[::;::] exit 0 ================================================================================ FILE: ml_ml_fresh_extract.q SIZE: 3,084 characters ================================================================================ // fresh/extract.q - Extract features // Copyright (c) 2021 Kx Systems Inc // // Generate features based on params \d .ml // @kind table // @category fresh // @desc Table containing .ml.fresh.feat functions fresh.params:update pnum:{count 1_get[fresh.feat x]1}each f,pnames:count[i]#(), pvals:count[i]#()from([]f:1_key fresh.feat) fresh.params:1!`pnum xasc update valid:pnum=count each pnames from 
fresh.params // @kind function // @category fresh // @desc Load in hyperparameters for FRESH functions and add to // .ml.fresh.params table // @param filePath {string} File path within ML where hyperparameter JSON // file is // @return {::} Null on success with .ml.fresh.params updated fresh.loadparams:{[filePath] hyperparamFile:.ml.path,filePath; p:.j.k raze read0`$hyperparamFile; p:inter[kp:key p;exec f from fresh.params]#p; fresh.params[([]f:kp);`pnames]:key each vp:value p; fresh.params[([]f:kp);`pvals]:{(`$x`type)$x`value}each value each vp; fresh.params:update valid:pnum=count each pnames from fresh.params where f in kp; } // @kind function // @category fresh // @desc Add hyperparameter values to .ml.fresh.params fresh.loadparams"/fresh/hyperparameters.json";</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="40"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// Bespoke WDB config .merge.mergebybytelimit:0b // merge limit configuration - default is 0b row count limit 1b is byte size limit .merge.partlimit:1000 // limit the number of partitions in a chunk \d .wdb ignorelist:`heartbeat`logmsg // list of tables to ignore hdbtypes:`hdb // list of hdb types to look for and call in hdb reload rdbtypes:`rdb // list of rdb types to look for and call in rdb reload idbtypes:`idb // list of idb types to look for and call in rdb reload wdbtypes:() // wdb does not need to connect to itself gatewaytypes:`gateway // list of gateway types to inform at reload tickerplanttypes:`segmentedtickerplant // list of tickerplant types to try and make a connection to subtabs:` // list of tables to subscribe for (` for all) subsyms:` // list of syms to subscribe for (` for all) savedir:hsym`$getenv[`TORQHOME],"/wdbhdb" // location to save wdb data numrows:100000 // default number of rows numtab:`quote`trade!10000 50000 // specify number of rows per table replaynumrows:numrows // 0W for replaying all messages at once then flushing replaynumtab:numtab // enlist[`]!enlist 0W for replaying all messages at once then flushing mode:`save // the wdb process can operate in three modes // 1. saveandsort: the process will subscribe for data, // periodically write data to disk and at EOD it will flush // remaining data to disk before sorting it and informing // GWs, RDBs and HDBs etc... // 2. save: the process will subscribe for data, // periodically write data to disk and at EOD it will flush // remaining data to disk. It will then inform it's respective // sort mode process to sort the data // 3. sort: the process will wait to get a trigger from it's respective // save mode process. When this is triggered it will sort the // data on disk, apply attributes and the trigger a reload on the // rdb and hdb processes writedownmode:`default // the wdb process can periodically write data to disc and sort at EOD in two ways: // 1. default - the data is partitioned by [ partitiontype ] // at EOD the data will be sorted and given attributes according to sort.csv before being moved to hdb // 2. partbyattr - the data is partitioned by [ partitiontype ] and the column(s)assigned the parted attributed in sort.csv // at EOD the data will be merged from each partition before being moved to hdb // 3. 
partbyenum - the data is partitioned by [ partitiontype ] and a symbol or integer column with parted attribution assigned in sort.csv // at EOD the data will be merged from each partition before being moved to hdb</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="41"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">asc , iasc , xasc ¶ Sort and grade: ascending Q chooses from a variety of algorithms, depending on the type and data distribution. asc ¶ Ascending sort asc x asc[x] Where x is a: - vector, returns its items in ascending order of value, with the sorted attribute set, indicating the list is sorted; where the argument vector is found to be in ascending order already, it is assigned the sorted attribute - mixed list, returns the items sorted within datatype and with the sorted attribute set - dictionary, returns it sorted by the values - table, returns it sorted by the first non-key column and with - the sorted attribute set on that column if there is only one non-key column; otherwise - the parted attribute set The function is uniform. The sort is stable: it preserves order between equals. Vector¶ q)asc 2 1 3 4 2 1 2 `s#1 1 2 2 2 3 4 q)a:0 1 q)b:a q)asc b / result has sorted attribute `s#0 1 q)b / argument was already in ascending order `s#0 1 q)a / b was a shallow copy of a `s#0 1 Mixed list¶ In the example below, the boolean is returned first, then the sorted integers, the sorted characters, and then the date. q)asc (1;1b;"b";2009.01.01;"a";0) 1b 0 1 "a" "b" 2009.01.01 Note how the type numbers are used. q)asc(2f;3;4i;5h) 5h 4i 3 2f q){(asc;x iasc abs t)fby t:type each x}(2f;3;4i;5h) / compare asc 5h 4i 3 2f Dictionary¶ q)asc `a`b`c!2 1 3 b| 1 a| 2 c| 3 Table¶ q)/ simple table q)asc ([]a:3 4 1;b:`a`d`s) a b --- 1 s 3 a 4 d q)meta asc ([]a:3 4 1;b:`a`d`s) / sets parted attribute c| t f a -| ----- a| j p b| s q)meta asc([]a:3 4 1) / sets sorted attribute c| t f a -| ----- a| j s q)/ keyed table q)meta asc ([c1:`a`b] c2:2 1; c3:01b) / sets parted attribute c | t f a --| ----- c1| s c2| j p c3| b q)meta asc ([c1:`a`b] c2:2 1) / sets sorted attribute c | t f a --| ----- c1| s c2| j s domain: b g x h i j e f c s p m d z n u v t range: b g x h i j e f c s p m d z n u v t iasc ¶ Ascending grade iasc x iasc[x] Where x is a list or dictionary, returns the indexes needed to sort list x in ascending order. q)L:2 1 3 4 2 1 2 q)iasc L 1 5 0 4 6 2 3 q)L iasc L 1 1 2 2 2 3 4 q)(asc L)~L iasc L 1b q)iasc `a`c`b!1 2 3 `a`c`b Reverse a sort with iasc iasc : q)x:100?100 q)b:100?.Q.a q)c:b iasc x q)b~c iasc iasc x 1b domain: b g x h i j e f c s p m d z n u v t range: j j j j j j j j j j j j j j j j j j xasc ¶ Sort a table in ascending order of specified columns. x xasc y xasc[x;y] Where x is a symbol vector of column names defined in table y , which is passed by - value, returns - reference, updates y sorted in ascending order by x . The sort is by the first column specified, then by the second column within the first, and so on. The sorted attribute is set on the first column given (if possible). The sort is stable, i.e. it preserves order amongst equals. 
q)\l sp.q q)s s | name status city --| ------------------- s1| smith 20 london s2| jones 10 paris s3| blake 30 paris s4| clark 20 london s5| adams 30 athens q)`city xasc s / sort on city s | name status city --| ------------------- s5| adams 30 athens s1| smith 20 london s4| clark 20 london s2| jones 10 paris s3| blake 30 paris q)`city`name xasc s / sort on city, and name within city s | name status city --| ------------------- s5| adams 30 athens s4| clark 20 london s1| smith 20 london s3| blake 30 paris s2| jones 10 paris q)`status`city`name xasc s / sort on 3 columns, status first s | name status city --| ------------------- s2| jones 10 paris s4| clark 20 london s1| smith 20 london s5| adams 30 athens s3| blake 30 paris q)`status`city`name xasc `s / table given by reference, updated in place `s q)s s | name status city --| ------------------- s2| jones 10 paris s4| clark 20 london s1| smith 20 london s5| adams 30 athens s3| blake 30 paris q)meta s / status column has sorted attribute c | t f a ------| ----- s | s name | s status| i s city | s Duplicate column names xasc signals dup if it finds duplicate columns in the right argument. (Since V3.6 2019.02.19.) Sorting data on disk¶ xasc can sort data on disk directly, without loading the entire table into memory. q)t:([]b:`s`g`a`s`a;c:30 10 43 13 24;g:til 5) q)`:dat/t/ set .Q.en[`:dat]t / write splayed table `:dat/t/ q)\ls dat/t / splayed columns ,"b" ,"c" ,"g" q)`c xasc `:dat/t / sort table on disk by column c `:dat/t q)t / in-memory table is unsorted b c g ------ s 30 0 g 10 1 a 43 2 s 13 3 a 24 4 q)\l dat/t / load table from disk `t q)t / table is sorted b c g ------ g 10 1 s 13 3 a 24 4 s 30 0 a 43 2 Duplicate keys in a dictionary or duplicate column names in a table cause sorts and grades to return unpredictable results. attr , desc , idesc , xdesc , Set Attribute Dictionaries & tables, Metadata, Sorting Q for Mortals §8.8 Attributes asof ¶ As-of join t1 asof t2 asof[t1;t2] Where t1 is a tablet2 is a table or dictionary- the last key or column of t2 corresponds to a time column int1 returns the values from the last rows matching the rest of the keys and time ≤ the time in t2 . q)show trade asof`sym`time!(`IBM;09:30:00.0) price| 96.3e size | 200 stop | 0b corr | 0 cond | "T" ex | "D" q)show trade asof([]sym:`AAPL`IBM;ex:"TD";time:09:30:00.0) price size stop corr cond ------------------------- 78.14 100 0 0 T 96.3 200 0 0 T The following examples use the mas table from TAQ. 
q)`date xasc`mas / sort by date `mas q)show a!mas asof a:([]sym:`A`B`C`GOOG;date:1995.01.01) sym date | cusip name wi ex uot ---------------| -------------------------------------------------- A 1995.01.01| 049870207 ATTWOODS PLC ADS REP5 ORD/5PNC 0 N 100 B 1995.01.01| 067806109 BARNES GROUP INCORPORATED 0 N 100 C 1995.01.01| 171196108 CHRYSLER CORP 0 N 100 GOOG 1995.01.01| 0 q)show a!mas asof a:([]sym:`A`B`C`GOOG;date:2006.01.01) sym date | cusip name wi ex uot ---------------| --------------------------------------------- A 2006.01.01| 00846U101 AGILENT TECHNOLOGIES, INC 0 N 100 B 2006.01.01| 067806109 BARNES GROUP INCORPORATED 0 N 100 C 2006.01.01| 172967101 CITIGROUP 0 N 100 GOOG 2006.01.01| 38259P508 GOOGLE INC CLASS A 0 T 100 q)show a!mas asof a:([]sym:`A;date:1993.01.05 1996.05.23 2000.08.04) sym date | cusip name wi ex uot --------------| -------------------------------------------------- A 1993.01.05| 049870207 ATTWOODS PLC ADS REP5 ORD/5PNC 0 N 100 A 1996.05.23| 046298105 ASTRA AB CL-A ADS 1CL-ASEK2.50 0 N 100 A 2000.08.04| 00846U101 AGILENT TECHNOLOGIES INC 0 N 100 asof is a multithreaded primitive. aj , wj Joins Q for Mortals §9.9.8 As-of Joins Assign¶ Name a value; amend a named value Simple assign¶ x:y Where x is a name and y is a value, the value of y is associated with the name x . q)a:42 / assign q)a 42 q)a:3.14159 / amend The Equal operator = tests equality. It has nothing to do with naming or amending values. There is no need to declare the type of a variable. A variable acquires the type of the value assigned to it. (Known as dynamic typing.) q)type a:til 5 / integer vector 7h q)type a:3.14159 / float atom -9h Indexed assign¶ x[i]:y Where x is the name of a list, dictionary or tablei is a value that indexesx y is a scalar, or a list of the same length asi the value of y is assigned to x at indexes i . Indexed assignment cannot change the type of x . If x is a vector (has negative type) then (=). abs type each(x;y) must be true. Where x is a dictionary, assignment has upsert semantics. q)d:`tom`dick`harry!1 2 3 q)d[`dick`jane]:100 200 q)d tom | 1 dick | 100 harry| 3 jane | 200 Assign through operator¶ x op:y op:[x;y] x[i]op:y op:[x i;y] Where op is a binary operator with infix syntaxx is an applicable value (i.e. not an atom) in the left domain ofop i is a value that indexesx y is a value in the right domain ofop that conforms to eitheri orx x op y (orx[i]op y ) has the same type asx the value of x (or x[i] ) becomes x op y (or x[i]op y ). q)s:("the";"quick";"brown";"fox") q)s[1 2],:("er";"ish") q)s "the" "quicker" "brownish" "fox" Extend Assign-through-operator to derived functions, keywords and lambdas. q)s:("the";"quick";"brown";"fox") q)@[s;1 2;,;("er";"ish")] "the" "quicker" "brownish" "fox" Amend At is more general, and extends assignment-through-operator to derived functions, keywords and lambdas. If x is undefined, the identity element for op is used as a default. q)bar 'bar [0] bar ^ q)bar+:1 q)bar 1 Pattern match¶ See Pattern matching Syntax¶ An expression with an assignment on the left returns no value to the console. q)a:til 5 q) The value of an assignment is the value assigned. q)3+a:til 5 3 4 5 6 7 q)1+a[2]+:5 8 q)a 0 1 7 3 4 Amend, Amend At Q for Mortals §4.6.2 Simple q Amend attr ¶ Attributes of an object attr x attr[x] Where x is any object, returns its attributes as a symbol vector. 
The possible attributes are: | code | attribute | |---|---| | s | sorted | | u | unique (hash table) | | p | partitioned (grouped) | | g | true index (dynamic attribute): enables constant time update and access for real-time tables | A null symbol result ` means no attributes are set on x . q)attr 1 3 4 ` q)attr asc 1 3 4 `s q)attr ({x+y}) ` Set Attribute Metadata Q for Mortals §8.8 Attributes</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="42"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// @kind function // @category dataCheck // @desc Ensure that any non-default functions a user wishes to use // exist within the current process such that they are callable // @param config {dictionary} Information relating to the current run of AutoML // @return {::|err} Null on success, error if function invalid dataCheck.functions:{[config] // List of possible objects where user may input a custom function funcs2Search:`predictionFunction`trainTestSplit`significantFeatures, `scoringFunctionClassification`scoringFunctionRegression, `gridSearchFunction`randomSearchFunction`crossValidationFunction; funcs:raze config funcs2Search; // Ensure the custom inputs are suitably typed typeCheck:{$[not type[utils.qpyFuncSearch x]in(99h;100h;104h;105h);'err;0b]}; locs:@[typeCheck;;{[err]err;1b}]each funcs; if[0<cnt:sum locs; strFunc:{$[2>x;" ",raze[y]," is";"s ",sv[", ";y]," are"]}; functionList:strFunc[cnt]string funcs where locs; '`$"The function",/functionList," not defined in your process\n" ]; } // @kind function // @category dataCheck // @desc Ensure that NLP functionality is available // @param config {dictionary} Information relating to the current run of AutoML // @return {::|err} Null on success, error if requirements insufficient dataCheck.NLPLoad:{[config] if[not`nlp~config`featureExtractionType;:()]; if[not(0~checkimport 3)&(::)~@[{system"l ",x};"nlp/nlp.q";{0b}]; '"User attempting to run NLP models with insufficient requirements,", " see documentation" ]; if[(""~getenv`PYTHONHASHSEED)&utils.ignoreWarnings>0; config[`logFunc]utils.printWarnings`pythonHashSeed ]; } // @kind function // @category dataCheck // @desc Ensure data contains appropriate types for application of NLP // @param config {dictionary} Information relating to the current run of AutoML // @param features {table} Feature data as a table // @return {::|err} Null on success, error for inappropriate data dataCheck.NLPSchema:{[config;features] if[not`nlp~config`featureExtractionType;:()]; if[0~count .ml.i.findCols[features;"C"]; '`$"User wishing to apply nlp functionality must pass a table containing ", "a character column." 
]; } // @kind function // @category dataCheck // @desc Remove feature columns which do not conform to allowed schema // @param features {table} Feature data as a table // @param config {dictionary} Information relating to the current run of AutoML // @return {table} Feature dataset with inappropriate columns removed dataCheck.featureTypes:{[features;config] typ:config`featureExtractionType; $[typ in`tseries`normal; [fCols:.ml.i.findCols[features;"sfihjbepmdznuvt"]; tab:flip fCols!features fCols ]; typ=`fresh; // Ignore aggregating columns for FRESH as these can be of any type [apprCols:flip(aggCols:config[`aggregationColumns])_ flip features; cls:.ml.i.findCols[apprCols;"sfiehjb"]; // Restore aggregating columns tab:flip(aggCols!features aggCols,:()),cls!features cls; fCols:cols tab ]; typ=`nlp; [fCols:.ml.i.findCols[features;"sfihjbepmdznuvtC"]; tab:flip fCols!features fCols ]; '`$"This form of feature extraction is not currently supported" ]; dataCheck.i.errColumns[cols features;fCols;typ;config]; tab } // @kind function // @category dataCheck // @desc Ensure target data and final feature dataset are same length // @param features {table} Feature data as a table // @param target {number[]|symbol[]} Target data as a numeric/symbol vector // @param config {dictionary} Information relating to the current run of AutoML // @return {::|err} Null on success, error if mismatch in length dataCheck.length:{[features;target;config] typ:config`featureExtractionType; $[-11h=type typ; $[`fresh=typ; // Check that the number of unique aggregate equals the number of targets [aggcols:config`aggregationColumns; featAggCols:$[1=count aggcols;features aggcols;(,'/)features aggcols]; if[count[target]<>count distinct featAggCols; '`$"Target count must equal count of unique agg values for FRESH" ]; ]; typ in`normal`nlp; if[count[target]<>count features; '"Must have the same number of targets as values in table" ]; '"Input for typ must be a supported type" ]; '"Input for typ must be a supported symbol" ]; } // @kind function // @category dataCheck // @desc Ensure target data contains more than one unique value // @param target {(number[]|symbol[])} Target data as a numeric/symbol vector // @return {::|err} Null on success, error on unsuitable target dataCheck.target:{[target] if[1=count distinct target;'"Target must have more than one unique value"] } // @kind function // @category dataCheck // @desc Checks that the trainTestSplit size provided in config is a // floating value between 0 and 1 // @param config {dictionary} Information relating to the current run of AutoML // @return {::|err} Null on success, error on unsuitable target dataCheck.ttsSize:{[config] if[(sz<0.)|(sz>1.)|-9h<>type sz:config`testingSize; '"Testing size must be in range 0-1" ] } ================================================================================ FILE: ml_automl_code_nodes_dataCheck_init.q SIZE: 341 characters ================================================================================ // code/nodes/dataCheck/init.q - Load dataCheck node // Copyright (c) 2021 Kx Systems Inc // // Load code for dataCheck node \d .automl loadfile`:code/nodes/dataCheck/checkimport.p checkimport: .p.get[`checkimport;<] loadfile`:code/nodes/dataCheck/utils.q loadfile`:code/nodes/dataCheck/funcs.q loadfile`:code/nodes/dataCheck/dataCheck.q ================================================================================ FILE: ml_automl_code_nodes_dataCheck_utils.q SIZE: 9,809 characters 
================================================================================ // code/nodes/dataCheck/utils.q - Utilities for the dataCheck node // Copyright (c) 2021 Kx Systems Inc // // Utility functions specific the the dataCheck node implementation \d .automl // Error presentation // @kind function // @category dataCheckUtility // @desc Print to standard out flagging the removal of inappropriate // columns // @param clist {symbol[]} List of all columns in the dataset // @param slist {symbol[]} Sublist of columns appropriate for the use case // @param typ {symbol} Feature extraction type being implemented // @param config {dictionary} Configuration information assigned by the user // and related to the current run // @return {::|stdout} Generic null if all columns suitable, appropriate // print out in the case there are outstanding issues dataCheck.i.errColumns:{[clist;slist;typ;config] if[count[clist]<>count slist; errString:utils.printDict[`errColumns],string typ; removedCols:", "sv string clist where not clist in slist; config[`logFunc] errString,": ",removedCols ] } // Parameter retrieval functionality // @kind function // @category dataCheckUtility // @desc Retrieve default parameters and update with custom information // @param feat {table} The feature data as a table // @param config {dictionary} Configuration information assigned by the user // and related to the current run // @param default {dictionary} Default dictionary which may need to be updated // @param ptyp {symbol} Problem type being solved (`nlp/`normal/`fresh) // @return {dictionary} configuration dictionary modified with any custom // information dataCheck.i.getCustomConfig:{[feat;config;default;ptyp] dict:$[(typ:type config)in 10 -11 99h; [if[10h~typ; config:dataCheck.i.getData[config;ptyp] ]; if[-11h~typ; config:dataCheck.i.getData[;ptyp]$[":"~first config;1_;] config:string config ]; $[min key[config]in key default; default,config; '`$"Inappropriate key provided for configuration input" ] ]; not any config;d; '`$"config must be passed the identity `(::)`, a filepath to a ", "parameter flatfile or a dictionary with appropriate key/value pairs" ]; if[ptyp=`fresh; aggcols:dict`aggregationColumns; dict[`aggregationColumns]:$[100h~typAgg:type aggcols; aggcols feat; 11h~abs typAgg; aggcols; '`$"aggcols must be passed function or list of columns" ] ]; dict } // @kind function // @category dataCheckUtility // @desc Retrieve a json flatfile from disk // @param fileName {char[]} Name of the file from which the dictionary is // being extracted // @param ptype {symbol} The problem type being solved(`nlp`normal`fresh) // @return {dictionary} Configuration dictionary retrieved from a flatfile dataCheck.i.getData:{[fileName;ptype] customFile:cli.i.checkCustom fileName; customJson:.j.k raze read0 `$customFile; (,/)cli.i.parseParameters[customJson]each(`general;ptype) } // Save path generation functionality // @kind function // @category dataCheckUtility // @desc Create the folders that are required for the saving of the // config, models, images and reports // @param config {dictionary} Configuration information assigned by the user // and related to the current run // @return {dictionary} File paths relevant for saving reports/config etc to // file, both as full path format and truncated for use in outputs to // terminal dataCheck.i.pathConstruct:{[config] names:`config`models; if[config[`saveOption]=2;names:names,`images`report]; pname:$[`~config`savedModelName; dataCheck.i.dateTimePath; dataCheck.i.customPath ]config; 
paths:pname,/:string[names],\:"/"; dictNames:`$string[names],\:"SavePath"; (dictNames!paths),enlist[`mainSavePath]!enlist pname } // @kind function // @category dataCheckUtility // @desc Construct save path using date and time of the run // @param config {dictionary} Configuration information assigned by the user // and related to the current run // @return {string} Path constructed based on run date and time dataCheck.i.dateTimePath:{[config] date:string config`startDate; time:string config`startTime; dirString:"outputs/dateTimeModels/",date,"/run_",time,"/"; path,"/",dataCheck.i.dateTimeStr[dirString] } // @kind function // @category dataCheckUtility // @desc Construct save path using custom model name // @param config {dictionary} Configuration information assigned by the user // and related to the current run // @return {string} Path constructed based on user defined custom model name dataCheck.i.customPath:{[config] modelName:config[`savedModelName]; modelName:$[10h=type modelName; modelName; -11h=type modelName;string modelName; '"unsupported input type, model name must be a symbol atom or string" ]; config[`savedModelName]:modelName; path,"/outputs/namedModels/",modelName,"/" } // @kind function // @category dataCheckUtility // @desc Construct saved logged file path // @param config {dictionary} Configuration information assigned by the user // and related to the current run // @return {string} Path constructed to log file based on user defined paths dataCheck.i.logging:{[config] if[0~config`saveOption; if[`~config`loggingDir; -1"\nIf saveOption is 0 and loggingDir is not defined,", " logging is disabled.\n"; .automl.utils.printing:1b; .automl.utils.logging:0b; :config ] ]; if[10h<>type config`loggingDir;string config`loggingDir] printDir:$[`~config`loggingDir; config[`mainSavePath],"/log/"; [typeLogDir:type config`loggingDir; loggingDir:$[10h=typeLogDir;; -11h=typeLogDir;string; '"type must be a char array or symbol"]config`loggingDir; path,"/",loggingDir,"/" ] ]; if[`~config`loggingFile; date:string config`startDate; time:string config`startTime; logStr:"logFile_",date,"_",time,".txt"; config[`loggingFile]:dataCheck.i.dateTimeStr logStr ]; typeLoggingFile:type config[`loggingFile]; loggingFile:$[10h=typeLoggingFile;; -11h=typeLoggingFile;string; '"loggingFile input must be a char array or symbol"]config`loggingFile; config[`printFile]:printDir,loggingFile; config }</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="43"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">/ return weekdays from list of dates wday:{x where 1<x mod 7} / return a range of numbers between (s)tart and (e)nd / with specified (w)indow size rng:{[w;s;e]s+w*til ceiling(e-s)%w} / round y to nearest x rnd:{x*"j"$y%x} / generate (n) uniform random numbers between (s)tart and (e)nd randrng:{[n;s;e]s+n?e-s} / automatically set attributes on first column of (t)able sattr:{[t] c:first cols t; a:`g`u 1=n:count keys t; t:n!@[;c;a#]0!t; t} / rename columns of (t)able based on (d)ictionary mapcol:{[d;t](c^d c:cols t) xcol t} / sort dictionary (or keyed table) by key kasc:{$[`s=attr k:key x;x;(`s#k i)!value[x]i:iasc k]} / string implementation of pivot / pivot (c)olumns, (g)roup column, (d)ata column, (t)able pivots:{[c;g;d;t] s:"exec (`$exec string asc distinct 
",string[c]," from t)"; s,:"#(`$string ",string[c],")!",string d; s,:" by ", "," sv ":" sv'string flip 2#enlist g,(); s,:" from t"; p:eval @[parse s;1;:;t]; p} / parse-tree implementation of pivot / pivot (c)olumns, (g)roup column, (d)ata column, (t)able pivotp:{[c;g;d;t] u:`$string asc distinct t c; pf:{x#(`$string y)!z}; p:?[t;();g!g,:();(pf;`u;c;d)]; p} / q implementation of pivot / pivot (c)olumns, (g)roup column, (d)ata column, (t)able pivotq:{[c;g;d;t] u:`$string asc distinct t c; p:asc[key p]#p:group (g,())#t; p:u#/:(`$string t c)[p]!'t[d] p; p} / keyed-(t)able implementation of pivot / last column of key are pivot columns / remaing columns of key are group by columns / last column of table is data pivot:{[t] u:`$string asc distinct last f:flip key t; pf:{x#(`$string y)!z}; p:?[t;();g!g:-1_ k;(pf;`u;last k:key f;last key flip value t)]; p} / splay table to disk without enumerating sym columns splay:{@[x;`.d,c;:;enlist[c],y c:cols y]} / generate a list of nodes(files or variables) within tree node tree:{$[x~k:key x;x;11h=type k;raze (.z.s ` sv x,) each k;()]} / unenumerate any enumerated columns in table unenum:{@[x;where (type each flip x) within 20 76;get]} / bid-ask volume (example HDB query) / (t)rade table, (q)uote table, (d)a(t)e bav:{[t;q;dt] r:select id,time,tp,ts from t where date=dt; r:aj[`id`time;r] select id,time,bp,ap from q where date=dt; r:update bv:ts*tp<=bp,av:ts*tp>=ap from r; r:0!select date:dt,sum bv,sum av,tv:sum ts by id from r; r} ================================================================================ FILE: reQ_examples_aoc.q SIZE: 1,977 characters ================================================================================ /Advent of Code example for reQ library /Retrieve a private leaderboard or download daily challenge input / load reQ library \l req.q / load util funcs for examples \l examples/util.q \d .aoc cfg:.utl.cfg`aoc //get config details opts:.Q.def[`year`day`o!(`year$.z.D;`dd$.z.D;`$first system"pwd")] .Q.opt .z.x; //get cmd line params opts:string each opts; //string params int:.z.f like "*aoc.q"; //check if aoc.q on cmd line - if not, library funcs .req.addcookie["adventofcode.com";"session=",cfg`session]; board:{[y;b] /* get a leaderboard for a given year */ r:.req.g"http://adventofcode.com/",y,"/leaderboard/private/view/",b,".json"; //request board r:`name`local_score`stars`global_score`id`last_star_ts#/:value r`members; //pull out relevant fields :`local_score xdesc update name:("anon",/:id) from r where 10h<>type each name; //fix anon users, sort } day:{[y;d;o] /* get challenge input for a given day & save locally */ -1"Downloading input for ",y," day ",d," to: ",string o; //log day being dowloaded & output file r:.req.g"http://adventofcode.com/",y,"/day/",d,"/input"; //download input o 0: -1_"\n" vs r; //write to file } \d . / get leaderboard if requested if[.aoc.int&`board in key .aoc.opts; //do nothing if loaded as lib show .aoc.board . 
.aoc.opts`year`board; exit 0; ]; / otherwise download challenge input if aoc.q on cmd line if[.aoc.int; //do nothing if loaded as lib f:` sv hsym[`$.aoc.opts`o],`$"p",.aoc.opts`day; .aoc.day[.aoc.opts`year;.aoc.opts`day;f]; exit 0; ] ================================================================================ FILE: reQ_examples_github.q SIZE: 2,246 characters ================================================================================ /GitHub example for reQ library / load reQ library \l req.q / load util funcs for examples \l examples/util.q \d .gh cfg:.utl.cfg`github //get config details int:.z.f like "*github.q"; //check if github.q on cmd line - if not, library funcs url:"https://api.github.com/" //basic URL repo:{[u;r] r:.req.get[url,"repos/",u,"/",r;enlist[`Authorization]!enlist"token ",cfg`token]; //get repo r:`name`owner`html_url`description`size`stargazers_count`watchers_count#r; //take summary info :@[r;`owner;@[;`login]]; //return summary of repo details } createissue:{[u;r;title;body;labels] ul:url,"repos/",u,"/",r,"/issues"; //build URL hd:("Authorization";"Content_Type")!("token ",cfg`token;.req.ty`json); //build HTTP headers labels:$[-11=t:type labels;(),labels;10=t;enlist labels;labels]; //ensure list of syms/strings d:`title`body`labels!(title;body;labels); //build input object r:.req.post[ul;hd;.j.j d]; //perform API request :r`html_url; //return URL of new issue } auth:{[x] -1"Please enter GitHub username & password (will be transmitted over HTTPS)"; -1"WARNING: Username & password will display in plain text here:"; 1"Username: ";u:read0 0; 1"Password: ";p:read0 0; r:.req.post["https://",u,":",p,"@api.github.com/authorizations"; enlist["Content-Type"]!enlist .req.ty`json; .j.j `scopes`note!(enlist`public_repo;"reQ ",string .z.P) ]; :r`token; } user:{[u] .req.g url,"users/",u} orgs:{[u] .req.g url,"users/",u,"/orgs"} if[cfg[`token]like"{insert your token here}"; cfg[`token]:.gh.auth[]; .utl.writecfg[`github] cfg ]; \d . if[.gh.int&first .z.x[0] like "*/*"; show .gh.repo . "/" vs .z.x 0; exit 0; ]; if[.gh.int; show .gh.repo . 2#.z.x; exit 0; ]; ================================================================================ FILE: reQ_examples_jira.q SIZE: 1,874 characters ================================================================================ /JIRA example for reQ library / load reQ library \l req.q / load util funcs for examples \l examples/util.q \d .jira cfg:.utl.cfg`jira //get config details int:.z.f like "*jira.q"; //check if jira.q on cmd line - if not, library funcs url:.req.prot[cfg`url],cfg[`user],"@",.req.host[cfg`url],"/rest/api/2/"; //base URL to use</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="44"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Joins¶ Keyed: As of: ej equi aj aj0 as-of ij ijf inner ajf ajf0 lj ljf left asof simple as-of pj plus wj wj1 window uj ujf union upsert , join ^ coalesce A join combines data from two tables, or from a table and a dictionary. Some joins are keyed, in that columns in the first argument are matched with the key columns of the second argument. Some joins are as-of, where a time column in the first argument specifies corresponding intervals in a time column of the second argument. Such joins are not keyed. 
In each case, the result has the merge of columns from both arguments. Where necessary, rows are filled with nulls or zeroes. Keyed joins¶ ^ Coalesce- The Coalesce operator merges keyed tables ignoring nulls ej Equi join- Similar to ij , where the columns to be matched are given as a parameter. ij ijf Inner join- Joins on the key columns of the second table. The result has one row for each row of the first table that matches the key columns of the second table. , Join- The Join operator , joins tables and dictionaries as well as lists. For tablesx andy :x,y isx upsert y x,'y joins records to recordsx,\:y isx lj y lj ljf Left join- Outer join on the key columns of the second table. The result has one row for each row of the first table. Null values are used where a row of the first table has no match in the second table. This is now built-in to ,\: . (Reverse the arguments to make a right outer join.) pj Plus join- A variation on left join. For each matching row, values from the second table are added to the first table, instead of replacing values from the first table. uj ujf Union join- Uses all rows from both tables. If the second table is not keyed, the result is the catenation of the two tables. Otherwise, the result is the left join of the tables, catenated with the unmatched rows of the second table. upsert - Can be used to join two tables with matching columns (as well as add new records to a table). If the first table is keyed, any records that match on key are updated. The remaining records are appended. As-of joins¶ In each case, the time column in the first argument specifies [) intervals in the second argument. wj ,wj1 Window join- The most general forms of as-of join. Function parameters aggregate values in the time intervals of the second table. In wj , prevailing values on entry to each interval are considered. Inwj1 , only values occurring within each interval are considered. aj ,aj0 ,ajf ,ajf0 As-of join- Simpler window joins where only the last value in each interval is used. In the aj result, the time column is from the first table, while in theaj0 result, the time column is from the second table. asof - A simpler aj where all columns (or dictionary keys) of the second argument are used in the join. Implicit joins¶ A foreign key is made by enumerating over the column/s of a keyed table. Where a primary key table m has a key column k and a table d has a column c and foreign key linking to k , a left join is implicit in the query select m.k, c from d This generalizes to multiple foreign keys in d . Suppliers and parts database sp.q q)\l sp.q +`p`city!(`p$`p1`p2`p3`p4`p5`p6`p1`p2;`london`london`london`london`london`lon.. (`s#+(,`color)!,`s#`blue`green`red)!+(,`qty)!,900 1000 1200 +`s`p`qty!(`s$`s1`s1`s1`s2`s3`s4;`p$`p1`p4`p6`p2`p2`p4;300 200 100 400 200 300) q)select sname:s.name, qty from sp sname qty --------- smith 300 smith 200 smith 400 smith 200 clark 100 smith 100 jones 300 jones 400 blake 200 clark 200 clark 300 smith 400 Implicit joins extend to the situation in which the targeted keyed table itself has a foreign key to another keyed table. 
q)emaster:([eid:1001 1002 1003 1004 1005] currency:`gbp`eur`eur`gbp`eur) q)update eid:`emaster$1001 1002 1005 1004 1003 from `s `s q)select s.name, qty, s.eid.currency from sp name qty currency ------------------ smith 300 gbp smith 200 gbp smith 400 gbp smith 200 gbp clark 100 gbp smith 100 gbp jones 300 eur jones 400 eur blake 200 eur clark 200 gbp clark 300 gbp smith 400 gbp Q for Mortals §9.9.1 Implicit Joins Q for Mortals §9.9 Joins Listening port¶ Use the -p command-line option or the \p system command to tell kdb+ to listen to a port. The command-line option and the system command take the same parameters. \p [rp,][hostname:][portnumber|servicename] -p [rp,][hostname:](portnumber|servicename) Where portnumber is an integer or long infinityservicename is defined in/etc/services kdb+ will listen to portnumber or the port number of servicename on all interfaces, or on hostname only if specified. The port must be available and the process must have permission for the port. As of 4.1t 2022.11.01 (or 4.0 2022.10.26) a port range can be specified in place of a portnumber. The range of ports is inclusive and tried in a random order. A service name can be used instead of each port number. Using 0W to choose a free ephemeral port can be more efficient (where suitable). q)\p 80/85 q)\p 81 Where no parameter is specified in the system command, the listening port is reported. The default is 0 (no listening port). q)\p 0i Given a servicename, q will look up its port number in /etc/services . q)\p commplex-main / servicename q)\p 5000i If you know the process is for clients on the localhost only, choose localhost:port for maximum security. Preventing connections¶ To stop the process listening on a port at runtime, instruct it to listen on port 0: q)\p 0 By default, kdb+ won't listen to a port unless a port is specified. Load balancing¶ Optional parameter rp enables the use of the SO_REUSEPORT socket option, which is available in newer versions of many operating systems, including Linux (kernel version 3.9 and later). This socket option allows multiple sockets (kdb+ processes) to listen on the same IP address and port combination. The kernel then load-balances incoming connections across the processes. (Since V3.5.) Socket sharding with kdb+ and Linux Ephemeral port¶ A portnumber of 0W means pick a random available port within the range 32768–60999. q)\p 5010 / set port 5010 q)\p 5010 q)\p 0W / pick a random available port within the range 32768 - 60999 q)\p 45512 q)\p 0 / turn off listening port Port range¶ An inclusive range of ports can be used in place of a portnumber , to randomly use an available port within the given range (since V3.5/3.6 2023.03.13,V4.0 2022.10.26,V4.1 2022.11.01). A service name can be used instead of a port number within the range. Note that the ephemeral port option also provides the ability to choose from a range of ports. q)\p 2000/2010 / use a free port between 2000 and 2010 q)\p -2000/2010 / use a free port between 2000 and 2010 in multithreaded mode q)\p myhost:2000/2010 / use a free port between 2000 and 2010, using given hostname Multi-threaded input mode¶ A negative port sets a multi-threaded port and if used it must be the initial and only mode of operation, i.e. do not dynamically switch between positive port and negative port. When active, each IPC connection will create a new thread for its sole use. Each connection uses its own heap with a minimum of 64MB, the real amount depending on the working space required by the query being executed. 
\ts can be used to find the memory requirement of a query. It is designed for serving in-memory static data to an externally constrained number of clients. It is not intended for use as a gateway, or serving mutable data. Note that there are a number of restrictions in multithreaded mode: - queries are unable to update globals - .z.po is not called on connect - .z.pc is not called on disconnect - .z.W has a view on main thread sockets only - Cannot send async message - Views can be recalculated from the main thread only - Uncompressed pages will not be shared between threads (i.e. same situation as with starting a separate hdb for each request). The main thread is allowed to update globals. The main thread is responsible for reading from stdin (i.e. the console) and executing any loaded scripts on start-up. It also invokes .z.ts on timer expiry. Any connections made via IPC from the main thread, can be monitored for callbacks (for example via an async callback) which in turn can update globals. While the main thread is processing an update (for example, a timer firing or console input) none of the connection threads will be processing any input. Updates should not be frequent, as they wait for completion of exiting queries and block new queries (using multiple-read single-write lock), thus slowing processing speeds. If an attempt is made to update globals from threads other than main, a 'no update error is issued. Multithreaded input mode supports WebSockets and HTTP (but not TLS) since 4.1t 2021.03.30. TLS support available since 4.1t 2023.12.14. A custom .z.ph which does not update global state should be used with HTTP. The use of sockets from within those threads is allowed only for the one-shot sync request and HTTP client request (TLS/SSL support added in 4.1t 2023.11.10). These can be inefficient, as it opens, queries and closes each time. Erroneous socket usage is blocked and signals a nosocket error. In multithreaded input mode, the seed for the random-number generator used for threads other than the main thread is based on the socket descriptor for that connection; these threads are transient – destroyed when the socket is closed, and no context is carried over for new threads/connections. Unix domain socket¶ Setting the listening port with -p 5000 in addition to listening on TCP port 5000, also creates a UDS (Unix domain socket) on /tmp/kx.5000 . You can disable listening on the UDS, or change the default path from /tmp using environment variable QUDSPATH . q)/ disable listening on unix domain socket q)system"p 0";setenv[`QUDSPATH;""];system"p 6000" q)/ use /home/kdbuser as path q)system"p 0";setenv[`QUDSPATH;"/home/kdbuser"];system"p 6000" V3.5+ uses abstract namespace for Unix domain sockets on Linux to avoid file-permission issues in /tmp . N.B. hence V3.5 cannot connect to V3.4 using UDS. q)hopen`:unix://5000 On macOS: q)\p 5000 q)\ls /tmp/kx* "/tmp/kx.5000" q)system"p 0";setenv[`QUDSPATH;""];system"p 5000" q)\ls /tmp/kx* ls: /tmp/kx*: No such file or directory 'os q)system"p 0";setenv[`QUDSPATH;"/tmp/kxuds"];system"p 5000" 'cannot listen on uds /tmp/kxuds/kx.5000. OS reports: No such file or directory [0] system"p 0";setenv[`QUDSPATH;"/tmp/kxuds"];system"p 5000" ^ q)\mkdir /tmp/kxuds q)system"p 0";setenv[`QUDSPATH;"/tmp/kxuds"];system"p 5000" q)\ls /tmp/kxuds "kx.5000" Security¶ Once you open a port in q session, it is open to all connections, including HTTP requests. 
In a production environment secure any process with an open port.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="45"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Variadic syntax¶ An applicable value is variadic if its rank is not fixed. Lists and dictionaries of depth ≥2 and tables are variadic. q)m:4 5#"abcdefghijklmnopqrst" q)m[1 3] / unary "fghij" "pqrst" q)m[1 3;2 4] / binary "hj" "rt" q)t:([]name:`Tom`Dick`Harry;city:`London`Paris`Rome) q)t[`name] / unary `Tom`Dick`Harry q)t 1 / unary name| Dick city| Paris q)t[1;`city] / binary `Paris Some operators are variadic, for example Apply and Amend. Each Prior, Over and Scan applied to binary values derive variadic functions. q)+/[2 3 4] / unary 9 q)+/[1000000;2 3 4] / binary 1000009 q)-':[1952 1954 1960] / unary 1952 2 6 q)-':[1900;1952 1954 1960] / binary 52 2 6 Keywords defined from such extensions are also variadic. q)deltas / Subtract Each Prior -': q)deltas[15 27 93] / unary 15 12 66 q)deltas[10;15 27 93] / binary - unsupported 5 12 66 q)-':[10;15 27 93] / binary - supported 5 12 66 Projection¶ Variadic values do not project unless the omitted argument/s are specified as nulls in the argument list. To project a variadic value as a unary, use a 2-item argument list to resolve the binary form. q)g:+/[100;] / 2-item argument list resolves the binary form q)g 2 3 4 5 / the projection is unary 114 Unary forms of binary operators¶ Many binary operators are variadic: they have unary forms. The unary form can be selected with a suffixed colon. q)|[2;til 5] / binary: maximum 2 2 2 3 4 q)|:[til 5] / unary: reverse 4 3 2 1 0 Binary operators are infixes. Like an infix extension, the unary form can be parenthesized and applied prefix. q)2|til 5 / maximum 2 2 2 3 4 q)(|:)"zero" / reverse "orez" q)2#"zero" / take "ze" q)(#:)"zero" / count 4 Unary forms can also be applied by Apply At. q)|:["zero"] / bracket notation "orez" q)(|:)"zero" / prefix "orez" q)(|:)@"zero" / apply-at "orez" q)@[|:;"zero"] / apply-at "orez" Unary forms are poor q style The semantics of the unary and binary forms of an operator are not always closely related. For better legibility, q provides keywords for unary forms. Good q style prefers them. Write count "zero" , not (#:)"zero" . Auto Scaling for a kdb+ realtime database¶ Autoscaling the Real-time Database in the Cloud kxcontrib/cloud-autoscaling Cloud computing has fast become the new normal as more and more organizations are migrating their IT systems to the cloud. Big cloud platforms like Amazon Web Services, Google Cloud, and Microsoft Azure have made it reliable, secure, and most importantly cost-effective. The Infrastructure-as-a-Service (IaaS) model they have adopted has made it easier than ever before to provision computing resources. This model has been taken a step further with Auto Scaling technologies. Servers, storage, and networking resources can now be commissioned and decommissioned in an instant without any manual intervention. This elasticity is one of the key benefits of Cloud Computing. Customers can leverage this new technology to scale their infrastructure in order to meet system demands. As these technologies become more prevalent it will become important to start incorporating them into kdb+. 
This article explores how we can do this while focusing on scaling the random-access memory (RAM) needed for the real-time database (RDB). Auto Scaling is the act of monitoring the load on a system and dynamically acquiring or shutting down resources to match this load. Incorporating this technology into an application means we no longer need to provision one large computing resource whose capacity must forever meet the application’s demand. Instead we can use clusters of smaller resources and scale them in and out to follow the demand curve. Auto Scaling and kdb+¶ When it comes to databases there are three main types of computing resources that we can look to scale: - Storage - Compute - Random-access memory (RAM) Scaling storage for our kdb+ databases can be relatively simple in the cloud. As the database grows we can provision extra storage volumes for our instances, or increase the size of the ones currently in use. Reading and writing data are prime use cases for scaling compute power within a kdb+ application. Scaling compute for reading has been covered by Rebecca Kelly in her blog post KX in the Public Cloud: Autoscaling using kdb+. Here Rebecca demonstrates how to scale the number of historical database (HDB) servers to handle an increasing or decreasing number of queries. Dynamically scaling the compute needed for writing can be a bit more complicated. Given we want to maintain the data’s order, the entire stream of data for a given source must go through one point in the system to be timestamped. The same can be said for scaling the RAM needed for an RDB. For this use case the number of RDB servers will be increased throughout the day as more and more data is ingested by the tickerplant. The system must ensure that the data is not duplicated across these servers. Building a solution for this problem will be the objective of this article. Auto Scaling the RDB¶ By Auto Scaling the RDB we will improve both the cost-efficiency and the availability of our databases. Why use Auto Scaling¶ Let’s say on average we receive a total of 12GB of data which is distributed evenly throughout the day. For a regular kdb+ system we might provision one server with 16GB of RAM to allow for some contingency capacity. We then hope that the data volumes do not exceed that 16GB limit on a daily basis. In a scalable cluster we can begin the day with one small server (for this example a quarter of the size, 4GB). The RAM needed to hold real-time data in memory will grow throughout the day, as it does we can step up our capacity by launching more servers. Figure 1.1: Capacities of regular and scalable real-time databases Cost efficiency¶ In the cloud you pay only for what you use. So in a perfect system there should be no spare computing resources running idle accumulating costs. In periods of low demand like weekends or end-of-day (when the day’s data has been flushed from memory) we should have the ability to scale down. By ensuring this we can maintain the performance of a system at the lowest possible cost. Figure 1.2: Potential cost savings of a scalable RDB It is worth noting that the number of servers you provision will have no real bearing on the overall cost. For the most part, running one server with 16GB of RAM will cost the same as running four with 4GB. Below is an example of Amazon Web Service’s pricing for the varying sizes of its t3a instances. As you can see the price is largely proportional to the memory capacity of each instance. 
Figure 1.3: Amazon Web Services' t3a instance pricing Availability¶ Replacing one large server with a scalable cluster will make our system more reliable. By dynamically acquiring resources we can ensure that the load on our system never exceeds its capacity. Figure 1.4: Availability of a scalable RDB under high load This will safeguard against unexpected spikes in data volumes crippling our systems and we can stop guessing our capacity needs. When developing a new application there is no need to estimate how much memory the RDB is going to need throughout its lifetime. Even if the estimate turns out to be correct, we will still end up provisioning resources that will lie mostly idle during periods of low demand. Demand varies and so should our capacity. Distributing the day’s data among multiple smaller servers will also increase the system’s resiliency. One fault will no longer mean all of the day’s data is lost. The smaller RDBs will also be quicker to recover from a fault as they will only have to replay a portion of the tickerplant’s log.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="46"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Installing multiple versions of kdb+¶ For any version of q, the 64-bit and 32-bit interpreter binaries share the same q.k file, located in QHOME for that version. All versions share the same k4.lic or kc.lic license-key file. Arrange your files as in this example: $ tree q q ├── k4.lic ├── phrases.q ├── sp.q ├── trade.q ├── v3.5 │ ├── m32 │ │ └── q │ ├── m64 │ │ └── q │ └── q.k └── v4.0 ├── m64 │ └── q └── q.k In your profile export QLIC and define aliases as in this example: # versions of q export QLIC=~/q alias q='export QHOME=~/q/v4.0; rlwrap -r $QHOME/m64/q' alias q3.5='export QHOME=~/q/v3.5; rlwrap -r $QHOME/m64/q' alias q32='export QHOME=~/q/v3.5; rlwrap -r $QHOME/m32/q' In a command shell: $ q3.5 KDB+ 3.5 2019.05.15 Copyright (C) 1993-2019 Kx Systems m64/ 8()core 16384MB sjt max.local 127.0.0.1 EXPIRE 2020.08.01… q)\\ $ The 32-bit interpreter finds and reports the license-key file even though it will run without it. $ q32 KDB+ 3.6 2019.03.07 Copyright (C) 1993-2019 Kx Systems m32/ 8()core 16384MB sjt max.local 192.168.0.10 EXPIRE 2020.08.01… q)\pwd "/Users/sjt" q)\echo $QLIC "/Users/sjt/q" q)\echo $QHOME "/Users/sjt/q/v3.6" q)\l ../sp.q +`p`city!(`p$`p1`p2`p3`p4`p5`p6`p1`p2;`london`london`london`london`london`lon.. (`s#+(,`color)!,`s#`blue`green`red)!+(,`qty)!,900 1000 1200 +`s`p`qty!(`s$`s1`s1`s1`s2`s3`s4;`p$`p1`p4`p6`p2`p2`p4;300 200 100 400 200 300) q) Loading sp.q , a sibling of QHOME , requires the relative path specified.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="47"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">$ Tok¶ Interpret a string as a data value x$y $[x;y] Where y is a stringx is a non-positive short or upper-case char as below (or the null symbol as a synonym for"S" ) returns y as an atom value interpreted according to x . 
x values for Tok: q){([result:key'[x$\:()]];short:neg x;char:upper .Q.t x)}5h$where" "<>20#.Q.t result | short char ---------| ---------- boolean | -1 B guid | -2 G byte | -4 X short | -5 H int | -6 I long | -7 J real | -8 E float | -9 F char | -10 C symbol | -11 S timestamp| -12 P month | -13 M date | -14 D datetime | -15 Z timespan | -16 N minute | -17 U second | -18 V time | -19 T A left argument of 0h or "*" returns the y string unchanged. Where x is a positive or zero short, a lower-case char, "*" , or a non-null symbol, see Cast. q)"E"$"3.14" 3.14e q)-8h$"3.14" 3.14e q)"D"$"2000-12-12" 2000.12.12 q)"U"$"12:13:14" 12:13 q)"T"$"123456789" 12:34:56.789 q)"P"$"2015-10-28D03:55:58.6542" 2015.10.28D03:55:58.654200000 Outside of domain¶ Parsing values outside of the types domain returns null. q)"H"$"32768" 0Nh q)"I"$"2147483648" 0Ni q)"D"$"2147483648" 0Nd Changes since 4.1t 2021.09.03,4.0 2021.10.01 Short converts to 0Nh instead of ±0Wh Iteration¶ Tok is a near-atomic function. Implicit recursion stops at strings, not atoms. q)"BXH"$("42";"42";"42") 0b 0x42 42h q)("B";"XHI")$("42";("42";"42";"42")) 0b (0x42;42h;42i) q)"B"$" Y " 1b q)"B"$'" Y " 000100b Symbols¶ Use the null symbol as a shorthand left argument for "S" . q)"S"$"hello" `hello q)`$"hello" `hello Converting a string to a symbol removes leading and trailing blanks. q)`$" IBM " `IBM Truthy characters¶ Certain characters are recognized as boolean True: q)"B"$(" Y ";" N ") 10b q)" ",.Q.an " abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789" q)"B"$'" ",.Q.an 0000000000000000000010001100000000000000000000100011000100000000b q).Q.an where"B"$'.Q.an "txyTXY1" Contrast this with casting to boolean: q)"b"$" ",.Q.an 1111111111111111111111111111111111111111111111111111111111111111b IP address¶ q)"I"$"192.168.1.34" /an IP address as an int -1062731486i q)"NT"$\:"123456123987654" / since V3.4 0D12:34:56.123987654 12:34:56.123 Unix timestamps¶ (from seconds since Unix epoch), string with 9…11 digits: q)"P"$"10129708800" 2290.12.31D00:00:00.000000000 q)"P"$"00000000000" 1970.01.01D00:00:00.000000000 If these digits are followed by a . Tok will parse what follows . as parts of second, e.g. q)"P"$"10129708800.123456789" 2290.12.31D00:00:00.123456789 q)"P"$"00000000000.123456789" 1970.01.01D00:00:00.123456789 q)"PZ"$\:"20191122-11:11:11.123" 2019.11.22D11:11:11.123000000 2019.11.22T11:11:11.123 Date formats¶ "D"$ will Tok dates with varied formats: [yy]yymmdd ddMMM[yy]yy yyyy/[mm|MMM]/dd [mm|MMM]/dd/[yy]yy / \z 0 dd/[mm|MMM]/[yy]yy / \z 1 Command-line option -z (date format) System command \z (date format) Cast Overloads of $ .h.iso8601 ISO 8601 timestamp Casting Q for Mortals §7.3.3 Parsing Data from Strings trim , ltrim , rtrim ¶ Remove leading or trailing nulls from a list trim x trim[x] ltrim x ltrim[x] rtrim x rtrim[x] Where x is a vector or non-null atom, returns x without leading (ltrim ) or trailing (rtrim ) nulls or without either (trim ). q)trim " IBM " "IBM" q)trim 0N 0N 1 2 3 0N 0N 4 5 0N 0N 1 2 3 0N 0N 4 5 q)ltrim" IBM " "IBM " q)rtrim" IBM " " IBM" q)trim"a" "a" q)trim 42 42 Implicit iteration¶ trim , ltrim , and rtrim are string-atomic and apply to dictionaries and tables. 
q)trim(("fox";("jumps ";"over "));("a";"dog ")) "fox" ("jumps";"over") "a" "dog" q)ltrim`a`b!(("fox";("jumps ";"over "));("a";"dog ")) a| "fox" ("jumps ";"over ") b| "a" "dog " q)rtrim ([]a:("fox";("jumps ";"over "));b:("a";"dog ")) a b ---------------------- "fox" "a" ("jumps";"over") "dog" Domain and range¶ domain: b g x h i j e f c s p m d z n u v t range: b g x h i j e f c s p m d z n u v t type ¶ Type of an object type x type[x] Where x is any object, returns its type. The type is a short int: - zero for a general list - negative for atoms of basic datatypes - positive for everything else q)type 5 / integer atom -7h q)type 2 3 5 / integer vector 7h q)type (2 3 5;"hello") / general list 0h q)type () / general list 0h q)type each (2;3 5;"hello") / int atom; int vector; string -7 7 10h q)type (+) / function 102h q)type (0|+) / composition 105h uj , ujf ¶ Union join x uj y uj [x;y] x ujf y ujf[x;y] Where x and y are both keyed or both unkeyed tables, returns the union of the columns, filled with nulls where necessary: - if x andy have matching key column/s, then records iny update matching records inx - otherwise, y records are inserted. q)show s:([]a:1 2;b:2 3;c:5 7) a b c ----- 1 2 5 2 3 7 q)show t:([]a:1 2 3;b:2 3 7;c:10 20 30;d:"ABC") a b c d -------- 1 2 10 A 2 3 20 B 3 7 30 C q)s,t / tables do not conform for , 'mismatch q)s uj t / simple, so second table is inserted a b c d -------- 1 2 5 2 3 7 1 2 10 A 2 3 20 B 3 7 30 C q)(2!s) uj 2!t / keyed, so matching records are updated a b| c d ---| ---- 1 2| 10 A 2 3| 20 B 3 7| 30 C uj is a multithreaded primitive. uj generalizes the , Join operator. Changes in V3.0 The union join of two keyed tables is equivalent to a left join of the two tables with the catenation of unmatched rows from the second table. As a result a change in the behavior of lj causes a change in the behavior of uj : q)show x:([a:1 2]b:`x`y;c:10 20) a| b c -| ---- 1| x 10 2| y 20 q)show y:([a:1 2]b:``z;c:1 0N) a| b c -| --- 1| 1 2| z q)x uj y / kdb+ 3.0 a| b c -| --- 1| 1 2| z q)x uj y / kdb+ 2.8 a| b c -| ---- 1| x 1 2| z 20 Since 2017.04.10, the earlier version is available in all V3.5 and later versions as ujf . Joins Q for Mortals §9.9.7 Union Join ungroup ¶ ungroup x ungroup[x] Where x is a table, in which some cells are lists, but for any row, all lists are of the same length, returns the normalized table, with one row for each item of a lists. q)p:((enlist 2);5 7 11;13 17) q)r:((enlist"A");"CDE";"FG") q)show t:([]s:`a`b`c;p;q:10 20 30;r) s p q r ----------------- a ,2 10 ,"A" b 5 7 11 20 "CDE" c 13 17 30 "FG" q)ungroup t / flatten lists p and r s p q r --------- a 2 10 A b 5 20 C b 7 20 D b 11 20 E c 13 30 F c 17 30 G Typically used on the result of xgroup or select . q)\l sp.q q)show t:select p,qty by s from sp where qty>200 s | p qty --| ------------------------ s1| `p$`p1`p3`p5 300 400 400 s2| `p$`p1`p2 300 400 s4| `p$,`p4 ,300 q)ungroup t s p qty --------- s1 p1 300 s1 p3 400 s1 p5 400 s2 p1 300 s2 p2 400 s4 p4 300 ungroup is not the exact inverse of grouping Grouping sorts on the keys, so a subsequent ungroup returns the original records sorted by the grouped column/s. group , select , xgroup Q for Mortals §9.3.4.2 Grouping without Aggregation union ¶ Union of two lists x union y union[x;y] Where x and y are lists or atoms, returns a list of the distinct items of its combined arguments, i.e. distinct x,y . 
q)1 2 3 3 6 union 2 4 6 8 1 2 3 6 4 8 q)distinct 1 2 3 3 6, 2 4 6 8 / same as distinct on join 1 2 3 6 4 8 q)t0:([]x:2 3 5;y:"abc") q)t1:([]x:2 4;y:"ad") q)t0 union t1 / also on tables x y --- 2 a 3 b 5 c 4 d q)(distinct t0,t1)~t0 union t1 1b update ¶ Add or amend rows or columns of a table or entries in a dictionary update is a qSQL query template and varies from regular q syntax. For the Update operator ! , see Functional SQL Since 4.1t 2021.06.04 updates from splayed table and path@tablename now leverage peach to load columns (when running with secondary threads). q)update x:0 from get`:mysplay Syntax¶ update ps [by pb] from texp [where pw] From phrase¶ update will not modify a splayed table on disk. Select phrase¶ Names in the Select phrase refer to new or modified columns in the table expression. q)t:([] name:`tom`dick`harry; age:28 29 35) q)update eye:`blue`brown`green from t name age eye --------------- tom 28 blue dick 29 brown harry 35 green Where phrase¶ The Where phrase restricts the scope of updates. q)t:([] name:`tom`dick`harry; hair:`fair`dark`fair; eye:`green`brown`gray) q)t name hair eye ---------------- tom fair green dick dark brown harry fair gray q)update eye:`blue from t where hair=`fair name hair eye ---------------- tom fair blue dick dark brown harry fair blue New values must have the type of the column being amended. If the query adds a new column it will have values only as determined by the Where phrase. At other positions, it will have nulls of the column’s type. By phrase¶ The By phrase applies the update along groups. This is most useful with aggregate and uniform functions. With an aggregate function, the entire group gets the value of the aggregation on the group. q)update avg weight by city from p p | name color weight city --| ------------------------- p1| nut red 15 london p2| bolt green 14.5 paris p3| screw blue 17 rome p4| screw red 15 london p5| cam blue 14.5 paris p6| cog red 15 london A uniform function is applied along the group in place. This can be used, for example, to compute cumulative volume of orders. q)update cumqty:sums qty by s from sp s p qty cumqty --------------- 0 p1 300 300 0 p2 200 500 0 p3 400 900 0 p4 200 1100 3 p5 100 100 0 p6 100 1200 1 p1 300 300 1 p2 400 700 2 p2 200 200 3 p2 200 300 3 p4 300 600 0 p5 400 1600 Since 4.1 2024.04.29 throws type error if dictionary update contains by clause (previously ignored). Cond¶ Cond is not supported inside query templates: see qSQL. delete , exec , select qSQL, Functional SQL Q for Mortals §9.5 The update template</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="48"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">File compression¶ kdb+ can compress data as it is written to disk. Q operators and keywords read both compressed and uncompressed files. Write compressed files¶ Use set with a left argument that specifies the file or splay target, and the compression parameters. (For a splayed table, you can specify the compression of each column.) 
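The note above mentions that for a splayed table each column can be given its own compression. A minimal sketch is below; it assumes the dictionary form of the compression specification accepted by set (a null-symbol entry supplying the default parameters, named entries overriding individual columns), so confirm the exact form against the set reference for your kdb+ version.

q)t:([]a:asc 1000000?10;b:1000000?10;c:1000000?100f)
q)/ gzip level 6 as the default, heavier gzip for the sorted column a, IPC compression for c
q)(`:zsplay/t/;``a`c!((17;2;6);(17;2;9);(17;1;0))) set t
`:zsplay/t/
q)-21!`:zsplay/t/a    / statistics for each column file reflect its own settings
q)-21!`:zsplay/t/c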
q)`:a set 1000#enlist asc 1000?10 / uncompressed file `:a q)(`:za;17;2;9)set get`:a / compressed file `:za q)get[`:a]~get`:za 1b Using real NYSE trade data, we observed the gzip algorithm at level 9 compressing to 15% of original size, and the IPC compression algorithm compressing to 33% of original size. The compressed file allows random access to the data. Source and target file on the same drive might run slowly Compression reads from the source file, compresses the data and writes to the target file. The disk is likely receiving many seek requests. If you move the target file to a different physical disk, you will reduce the number of seeks needed. Cautions: - Do not use streaming compression with log files. After a crash, the log file would be unusable as it will be missing meta information from the end of the file. Streaming compression maintains the last block in memory and compresses/purges it as needed or latest on close of file handle. - When a nested data column file, e.g. name , is compressed, its companion filename# orname## is also compressed: do not try to compress it explicitly. - Use set and not gzip: they produce different results. Compression parameters¶ Compression is specified by three integers representing logical block size, algorithm, and compression level. - Logical block size - A power of 2 between 12 and 20: pageSize or allocation granularity to 1MB. - PageSize for AMD64 is 4kB, SPARC is 8kB. Windows seems to have a default allocation granularity of 64kB. Apple Silicon is 16kB. - When choosing the logical block size, consider the minimum of all the platforms that will access the files directly – otherwise you may encounter disk compression - bad logicalBlockSize . - This value affects both compression speed and compression ratio: larger blocks can be slower and better compressed. - Algorithm and compression level - Pick from: alg algorithm level since ---------------------------- 0 none 0 1 q IPC 0 2 gzip 0-9 3 snappy 0 V3.4 4 lz4hc 0-16† V3.6 5 zstd -7-22 V4.1 Level 0 for lz4hc default compression; level>16 behaves the same as 16 Algorithm is also used to specify the encryption algorithm which can be used with compression Selective compression¶ You can choose which files to compress, and which algorithm/level to use per file. Q operators read both compressed and uncompressed files. So files that do not compress well, or have an access pattern that does not perform well with compression, can be left uncompressed. Compression statistics¶ The -21! internal function returns a dictionary of compression statistics, or an empty dictionary if the file is not compressed. hcount returns the uncompressed file length. Compression by default¶ kdb+ can write compressed files by default. This is governed by the zip defaults .z.zd . Set this as an integer vector, e.g. .z.zd:17 2 6 and set will write files (with no extension) compressed in this way unless given different parameters. To disable compression by default, set .z.zd to 3#0 , or expunge it. .z.zd:3#0 / no compression \x .z.zd / no compression By default, .z.zd is undefined and q writes files uncompressed. Append to a compressed file or splay¶ q)(`:zippedTest;17;2;6) set 100000?10 `:zippedTest q)`:zippedTest upsert 100000?10 `:zippedTest q)-21!`:zippedTest compressedLength | 148946 uncompressedLength| 1600016 algorithm | 2i logicalBlockSize | 17i zipLevel | 6i Appending to files with an attribute (e.g. `p# on sym) causes the whole file to be read and rewritten. 
Appending to compressed enum files in V3.0 2012.05.17 Appending to compressed enum files was blocked in V3.0 2012.05.17 due to potential concurrency issues, hence these files should not be compressed. Decompression¶ Decompression is implicit: q operators and keywords read both compressed and uncompressed files. get`:compressedFile \x .z.zd / write uncompressed by default `:uncompressedFile set get `:compressedFile / store again decompressed Files are mapped or unmapped on demand during a query. Only the areas of the file that are touched are decompressed, i.e. kdb+ uses random access. Decompressed data is cached while a file is mapped. Columns are mapped for the duration of the select. For example, say you are querying by date and sum over a date-partitioned table, with each partition parted by sym. The query decompresses only the parts of the column data for the syms in the query predicate. Concurrently open files¶ The number of concurrently open files is limited by the environment/OS only (e.g. ulimit -n ). Prior to V3.2 V3.2+ uses two file descriptors per file: you might need to increase the ulimit -n value used in prior versions. Prior to V3.1 2013.02.21 no more than 4096 compressed files could be open concurrently. There is no practical internal limit on the number of uncompressed files. Memory allocation¶ kdb+ allocates enough memory to decompress the whole vector, regardless of how much it finally uses. This reservation is required as there is no backing store for the decompressed data, unlike with mapped files of uncompressed data, which can always read the pages from file again should they have been dropped. This is reservation only, and can be accommodated by increasing the swap space available: even though the swap should never actually be written to, the OS has to be assured that in the worst-case scenario of decompressing the data in full, it could swap it out if needed. If you experience wsfull even with sufficient swap space configured, check whether you have any soft/hard limits imposed with ulimit -v . Memory overcommit settings on Linux /proc/sys/vm/overcommit_memory and /proc/sys/vm/overcommit_ratio – these control how careful Linux is when allocating address space with respect to available physical memory plus swap. Performance¶ There are three key aspects of compression algorithms: - Compression ratio: This indicates how much the final data file size is reduced. A high compression ratio means smaller files and lower storage, I/O costs. If the column files are smaller, we can store more data on a storage of a given size. Similarly, more storage space costs more (especially in the cloud). Smaller files may reduce query execution time if the storage is slow because smaller files are faster read. You can check the compression ratio of a popular financial database in a case study. - Compression speed: This measures the time required to compress a file. Compression is typically CPU-intensive, so a high compression speed minimizes CPU usage and associated costs. High compression speed is good. The time to save a column file determines the upper bound of data ingestion. The faster we can save a file, the more a kdb+ system can ingest. In the kdb+ tick system, the RDB is unavailable for queries during write, meaning that write speed also affects system availability. - Decompression speed: This reflects the time taken to restore the original file from the compressed (encrypted) version. High decompression speed means faster queries. 
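The first of these aspects, compression ratio, can be read straight off the -21! statistics described earlier for files you already have on disk. A tiny helper along these lines (the name ratio is purely illustrative) returns null for uncompressed files, whose -21! result is an empty dictionary:

q)ratio:{d:-21!x; $[count d;d[`uncompressedLength]%d`compressedLength;0n]}
q)ratio `:cmp1            / e.g. the gzip copy written earlier; the figure is data-dependent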
There is no single best compression algorithm that outperforms all others in all aspects. You need to select compression (or avoid compression) based on your priorities: - Is achieving the fastest possible query execution more important to you, or do you prefer to minimize storage costs? - Does your kdb+ system handle a high volume of incoming data, requiring a reliable intraday write process to manage the data effectively? - Are you looking for a general solution that provides balanced performance across various aspects without excelling or underperforming in any particular area? A single thread with full use of a core can decompress approx 300MB/s, depending on data/algorithm and level. Benchmarking¶ It is difficult to estimate the impact of compression on performance. On the one hand, compression does trade CPU utilization for disk-space savings. And up to a point, if you’re willing to trade more CPU time, you can save more space. But by reducing the space used, you end up doing less disk I/O, which can improve overall performance if your workload is bandwidth-limited. The only way to know the real impact of compression on your disk utilization and system performance is to run your workload with different levels of compression and observe the results. Currently, ZFS compression probably has an edge over native kdb+ compression, due to keeping more decompressed data in cache, which is available to all processes. Perform your benchmarks on the same hardware setup as you would use for production and be aware of the disk cache – flush the cache before each test. The disk cache can be flushed on Linux using sync ; sudo echo 3 | sudo tee /proc/sys/vm/drop_caches and on macOS, the OS command purge can be used. Compression parameters¶ The logicalBlockSize represents how much data is taken as a compression unit, and consequently the minimum size of a block to decompress. E.g. using a logicalBlockSize of 128kB, a file of size 128000kB would be cut into 1000 blocks, and each block compressed independently of the others. Later, if a single byte is requested from that compressed file, a minimum of 128kB would be decompressed to access that byte. Fortunately, those types of access patterns are rare, and typically you would be extracting clumps of data that make a logical block size of 128kB quite reasonable. Experiment to discover what suits your data, hardware and access patterns best. Kernel settings¶ Tweaking the kernel settings on Linux may help – it really depends on the size and number of compressed files you have open at any time, and the access patterns used. For example, random access to a compressed file will use many more kernel resources than sequential access. Linux production notes/Compression Multithreading¶ Do not read or write a compressed file concurrently from multiple threads. However, multiple files can be read or written from their own threads concurrently (one file per thread). For example, a segmented historical database with secondary threads will be using the decompression in a multithreaded mode. Requirements¶ Compression libraries may already be installed on your system. kdb+ binds dynamically to the compression libraries when required. 64-bit and 32-bit kdb+ require corresponding 64-bit and 32-bit libs If in doubt, consult your system administrator for assistance. Gzip¶ Compression algorithm 2 uses Gzip. Source and algorithm details can be found here. 
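Before the per-algorithm requirements below, here is a very rough harness for the benchmarking advice above: write compressed and uncompressed copies of the same column, flush the disk cache as described, then time the same aggregation against each. Everything here (size, parameters, the aggregation) is a placeholder to be adapted to your own data and hardware.

q)n:10000000
q)`:benchu set x:n?100f                        / uncompressed copy
q)(`:benchc;17;2;6) set x                      / compressed copy, gzip level 6
q)/ flush the disk cache between runs (see the Linux/macOS commands above)
q)\ts sum get`:benchu
q)\ts sum get`:benchc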
The following libraries are required by kdb+: | Linux | macOS | Windows | |---|---|---| | libz.so.1 | libz.dylib (pre-installed) | zlibwapi.dll (32-bit and 64-bit versions available from WinImage) | Gzip has very good compression ratio and average compression/decompression speed. Avoid high compression levels (like 8 and 9) if write speed is important for you. Gzip with level 5 is a good general solution. Snappy¶ Compression algorithm 3 uses Snappy. Source and algorithm details can be found here. The following libraries are required by kdb+: | Linux | macOS | Windows | |---|---|---| | libsnappy.so.1 | libsnappy.dylib (available via package managers such as Homebrew or MacPorts) | snappy.dll | Snappy has excellent compression and decompression speed so it is a good choice if you optimize for query speed and ingestion times. Snappy falls behind the other compression solutions in compression ratio. LZ4¶ Compression algorithm 4 uses LZ4. Source and algorithm details can be found here. The following libraries are required by kdb+: | Linux | macOS | Windows | |---|---|---| | liblz4.so.1 | liblz4.dylib (available through package managers such as Homebrew or MacPorts) | liblz4.dll (build the liblz4-dll project on Windows as outlined in the README at GitHub) | Certain releases of lz4 do not function correctly within kdb+ Notably, lz4-1.7.5 does not compress, and lz4-1.8.0 appears to hang the process. kdb+ requires at least lz4-r129 . lz4-1.8.3 works. We recommend using the latest lz4 release available. LZ4 is great at decompression speed, but is average in compression ratio. The compression level has a significant impact on compression speed. Level 5 is a good choice if you aim fast queries and low storage costs. Avoid high compression levels (above 11). Zstd¶ Compression algorithm 5 uses zstd (Zstandard). Source and algorithm details can be found here. The following libraries are required by kdb+: | Linux | macOS | Windows | |---|---|---| | libzstd.so.1 | libzstd.1.dylib (available via package managers such as Homebrew or MacPorts) | libzstd.dll | Zstd is outstanding in compression ratio of low entropy columns. Use low compression level (like 1) if you optimize for compression (write) speed and increase level to achieve better compression ratio. Avoid high levels (above 14). Running kdb+ under Gdb¶ You should only ever need to run Gdb (the GNU debugger) if you are debugging your own custom shared libs loaded into kdb+. Gdb will intercept SIGSEGV which should be passed to q. To tell it to do so, issue the following command at the Gdb prompt (gdb) handle SIGSEGV nostop noprint set Compression in kdb+ Linux production notes: Huge Pages and Transparent Huge Pages Firewalling¶ Tips for securing your application Run kdb+ as a separate (non-root) user. If you need it to run on port 80, use authbind or iptables redirect. Do not allow that user to write to any directory or files. If you need file access, arbitrate it via IPC with another kdb+ process. Pay attention to how that process will return values via .z.pg or .z.ps or similar. Firewall all ports inbound and outbound except ones explicitly used. For any backend kdb+ processes, restrict them to localhost or a protected network (e.g. iptables --pol ipsec ) Set process limits with ulimit no larger than you need them. Restrict input by defining at least: .z.pc:{} .z.pg:{} .z.ph:{} .z.pi:{} .z.pm:{} .z.po:{} .z.pp:{} .z.pq:{} .z.ps:{} To allow certain IPC calls, implement only the ones you want. 
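One way to act on "implement only the ones you want" is to route every synchronous message through an explicit allowlist. The sketch below is illustrative only: the .perm namespace and the entrypoint names are invented, it deliberately accepts string queries only, and it combines the parse/type-check idea discussed next with reval.

.perm.allowed:`getQuote`getTrade                  / entrypoints clients may call (names are illustrative)
.z.pg:{[x]
  if[10h<>type x;'"string queries only"];         / this sketch accepts only string messages
  x:parse x;                                      / string -> parse tree
  if[not(first x)in .perm.allowed;'"unauthorized"];
  reval x}                                        / evaluate with restricted rights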
A denylist for functions is tricky because some otherwise useful functions may have a mode that accesses the disk which may cause information leak (e.g. key). It is much easier to use an allowlist. As IPC functions either receive a parse tree or a string (that you could parse yourself), check the type of the input e.g. x:$[10h=type x;parse x;x] If you use WebSockets, define: .z.wc:{a[.z.a]-:1} .z.wo:{$[2<;a[.z.a]+:1;hclose .z.w;1]} When handling untrusted input, consider designing your application to wrap public entrypoints with reval . Pay attention to the fact that each WebSocket client can open up a lot of connections (200 on Mozilla, 256 for Chrome), so limit using .z.a . Log connections and consider using fail2ban to block suspicious traffic. Callbacks, Using .z Permissions with kdb+ Q for Mortals: §11.6 Interprocess Communication</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="49"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">System commands¶ \a tables \s number of secondary threads \b views \S random seed \B pending views \t timer \c console size \T timeout \cd change directory \ts time and space \C HTTP size \u reload user password file \d directory \v variables \e error trap clients \w workspace \E TLS server mode \W week offset \f functions \x expunge \g garbage collection mode \z date parsing \l load file or directory \1 & \2 redirect \o offset from UTC \_ hide q code \p listening port \ terminate \P precision \ toggle q/k \r replication primary \\ quit \r rename System commands control the q environment. They have the form: \cmd [p] for some command cmd , and optional parameter list p . Commands with optional parameters that set values, will show the current values if the parameters are omitted. Some system commands have equivalent command-line parameters. An alternative method to executing system commands is to use the system keyword. This executes a string representation of a system command and returns its result. \a (tables)¶ List tables \a \a ns Lists tables in namespace ns – defaults to current namespace. q)\a `symbol$() q)aa:bb:23 q)\a `symbol$() q)tt:([]dd:12 34) q)\a ,`tt q).nn.vv:([]uu:12 45) q)\a ,`tt q)\a .n '.n q)\a .nn ,`vv q)\d .nn q.nn)\a ,`vv q.nn)vv uu -- 12 45 q.nn) \b (views)¶ List dependencies \b \b ns Lists dependencies (views) in namespace ns – defaults to current namespace. q)a::x+y q)b::x+1 q)\b `s#`a`b \B (pending views)¶ List pending dependencies \B \B ns Lists pending dependencies (views) in namespace ns , i.e. dependencies not yet referenced, or not referenced after their referents have changed. Defaults to current namespace. q)a::x+1 / a depends on x q)\B / the dependency is pending ,`a q)x:10 q)\B / still pending after x is defined ,`a q)a / use a 11 q)\B / no longer pending `symbol$() \c (console size)¶ Console maximum rows and columns \c \c size Where size is a pair of integers: rows and columns, these values determine when q truncates output with .. . The values are coerced to the range [10,2000]. The default values are as set by environment variables LINES and COLUMNS . 
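As noted in the introduction above, each of these commands can also be run programmatically with the system keyword, which returns the result as a q value rather than printing it. For example:

q)system"a"              / same as \a : tables in the current namespace
q)c:system"c"            / capture the console size as an int pair
q)system"c 25 120"       / and set it
q)system"cd"             / current directory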
If the environment variables are undefined, the defaults are V4.0 or less 25 80 V4.1+ dimensions of the command-shell window Environment variables LINES and COLUMNS See Bash documentation for shopt parameter checkwinsize to make sure they’re reset as needed. q)\c 45 160 q)\c 5 5 q)\c 10 10 q)til each 20+til 10 0 1 2 3.. 0 1 2 3.. 0 1 2 3.. 0 1 2 3.. 0 1 2 3.. 0 1 2 3.. 0 1 2 3.. .. \C (HTTP size)¶ HTTP display maximum rows and columns \C \C size Where size is a pair of integers: rows and columns, the values determine when q truncates output with .. . The default is 36 2000 ; values are coerced to the range [10,2000]. \cd (change directory)¶ Current directory \cd \cd fp Where fp is a filepath, sets the current directory. Creates the directory if it does not exist. q)\cd "/home/guest/q" q)\cd /home/guest/dev q)\cd "/home/guest/dev" q)\pwd "/home/guest/dev" \d (directory)¶ Current namespace \d \d ns Where ns is the name of a namespace, shows or sets the current namespace, also known as directory or context. The namespace can be empty, and a new namespace is created when an object is defined in it. The q session prompt indicates the current namespace. q)\d / default namespace `. q)\d .o / change to .o q.o)\f `Cols`Columns`FG`Fkey`Gkey`Key`Special.. q.o)\d . / return to default q)key` / lists namespaces other than .z `q`Q`o`h q)\d .s / change to non-existent namespace q.s)key` / not yet created `q`Q`o`h q.s)a:1 / create object, also creates namespace q.s)key` `q`Q`o`h`s Q for Mortals §12.7 Working in a Context \e (error trap clients)¶ Error trapping \e \e mode Governs error trapping for client requests. The default mode is 0 (off). | mode | behavior | |---|---| | 0 | When a client request has an error, by default the server clears the stack. Appropriate for production use as it enables the server to continue processing other client requests. | | 1 | The server suspends on an error, and does not process other requests until the stack is cleared. Appropriate for development: enables debugging on the server. | | 2 | Dumps stack to stderr for untrapped errors during request from a remote. (Since V3.5 2016.10.03) | \E (TLS server mode)¶ \E Displays TLS server mode as an int: 0i plain 1i plain and TLS 2i TLS only Command-line option -E to set the mode \f (functions)¶ List functions \f \f ns Where ns is the name of a namespace, lists functions in it; defaults to current namespace. q)f:g:h:{x+2*y} q)\f `f`g`h q)\f .h `cd`code`data`eb`ec`ed`es`estr`fram`ha`hb`hc`he`hn`hp`hr`ht`hta`htac`htc`html`http`hu`hu.. q){x where x like"ht??"}system"f .h" `htac`html`http \g (garbage collection mode)¶ \g / current garbage-collection mode \g mode / set garbage-collection mode Show or set garbage-collection mode. The default mode is 0 (deferred). Setting the garbage-collection mode will automatically call .Q.gc[] after setting the provided value. Q manages its own thread-local heap. Objects in q use reference counting. As soon as there are no references to an object, its memory is eligable to be returned to the heap. - 0 (deferred) - Returns memory to the thread-local heap. Will subsequently return memory to the OS when either .Q.gc[] is called or an allocation fails, hence has a performance advantage, but can be more difficult to dimension or manage memory requirements. - 1 (immediate) - As memory is returned to the thread-local heap, if the object is ≥64MB then the memory is returned to the OS instead. This has an associated performance overhead. 
As in deferred mode, memory used by the heap may subsequently be returned to the OS when either .Q.gc[] is called or an allocation fails. When q is denied additional address space by the OS, it invokes .Q.gc[] and retries the request to the OS. If the subsequent attempt fails, the request exits with 'wsfull . Notes on the allocator Q’s allocator bins objects in power-of-two size categories, from 16b (e.g. an atom) to 64MB. In this example, various vectors of longs (8 bytes per long) are created of different sizes using til . The memory used for the operation is shown via \ts . Note that more bytes are reported than just the pure vector size, due to other housekeeping such as the type information. q)\ts til 800 / 800*8=6400, needs a 2^13=8192 byte slab (too big for a 2^12=4096 byte slab) 0 8368 q)\ts til 1000 / 1000*8=8000, needs a 2^13=8192 byte slab (memory same as smaller vector above) 0 8368 q)\ts til 1200 / 1200*8=9600, cannot fit in a 2^13=8192 byte slab, needs 2^14=16384 byte slab 0 16560 If there is already a slab in the object category’s freelist, it is reused. If there are no available slabs, a larger slab is recursively split in two until the needed category size is reached. If there are no free slabs available, a new 64MB slab is requested from the system. When an object is de-allocated, its memory slab is returned to the corresponding category’s freelist. Allocations larger than 64MB are requested from the OS directly, and this is what -g 1 causes to be immediately returned. Larger allocations do not cause any fragmentation and, with -g 1 , are always returned immediately. It is the smaller allocations (<64MB), which typically represent the bulk of a process’s allocation workload, that can cause the heap to become fragmented. There are two primary cases of heap fragmentation: - split slab - Suppose that at some point q needed a 32MB allocation. It requested a new 64MB slab from the OS, split it in half, used and freed the object, and returned the two 32MB slabs to the freelist. Now if q needs to allocate 64MB, it will have to make another request to the OS. When .Q.gc is called (or an allocation fails), it attempts to coalesce the two 32MB slabs back into one 64MB slab, which can then be returned to the OS (or reused for larger allocations, if the resulting slab is <64MB). - leftover objects - If most of the objects allocated from a 64MB slab are freed but one remains, the slab still cannot be returned to the OS (or coalesced).
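To watch the split-slab case above in a live session, something along the following lines can be used (deferred mode assumed; the exact figures vary by version, platform and what the heap already contains):

q).Q.w[]`used`heap
q)a:til 4000000          / ~32MB vector: a fresh 64MB slab may be requested and split in half
q).Q.w[]`used`heap
q)a:0                    / variable reassigned, the 32MB slab goes back on the freelist
q).Q.w[]`used`heap       / heap unchanged
q).Q.gc[]                / coalesce the halves and return the 64MB slab to the OS
q).Q.w[]`used`heap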
The following example shows freeing an object ≥64MB in deferred mode, while inspecting memory usage via .Q.w[] : q).Q.w[]`used`heap / original memory used and memory reserved by kdb+ at time of test 371552 67108864 q)a:til 10000000 / need memory ≥64MB to store value q).Q.w[]`used`heap / heap (memory reserved by kdb+) has grown, and used memory grown from the heap has grown 134589328 201326592 q)a:1 / variable assigned different value, old value no longer used q).Q.w[]`used`heap / heap (memory reserved by kdb+) hasn't reduced as it is kept for future use, used memory has reduced 371616 201326592 q)a:til 10000000 / need memory ≥64MB to store value again q).Q.w[]`used`heap / heap memory (no increase) as memory used has been taken from the available heap 134589328 201326592 The same example will differ when using immediate mode, by returning memory to the OS (as the object free'd is greater than 64MB): q).Q.w[]`used`heap / original memory used and memory reserved by kdb+ at time of test 371648 67108864 q)a:til 10000000 / need memory ≥64MB to store value q).Q.w[]`used`heap / heap (memory reserved by kdb+) has grown, and used memory from the heap has grown 134589424 201326592 q)a:1 / variable assigned different value, old value no longer used q).Q.w[]`used`heap / heap (memory reserved by kdb+) has reduced, it has been returned to OS 371712 67108864 q)a:til 10000000 / need memory ≥64MB to store value again q).Q.w[]`used`heap / heap memory has increased (requested from OS) as memory used is more than whats available to use in heap 134589328 201326592 Immediate mode will not return the memory to the OS when several objects less than 64MB each are freed, even though their sum may be more than 64MB. In this situation, immediate and deferred mode operate identically by adding the freed memory to the heap for future use. The following examples shows this effect when running in immediate mode . No memory is returned to the OS on freeing the objects, and only when .Q.gc[] is run is the memory coalesced and freed. q).Q.w[]`used`heap / original memory used and memory reserved by kdb+ at time of test 371648 67108864 q)v:`a`b`c`d`e`f`g`h`i`j / create a list of 10 variable names to use q){set[x;til 1000000]} each v / create a global variable using each of the names in v, each containing 1000000 longs q).Q.w[]`used`heap / heap (memory reserved by kdb+) has grown, and used memory from the heap has grown 84258096 134217728 q)![`.;();0b;v] / delete all the variables and their contents q).Q.w[]`used`heap / used memory has been reduced, but none of the heap memory has returned to the OS 371824 134217728 q).Q.gc[] / running garbage collection freed over 64MB 67108864 Command-line option -g (garbage collection mode), Command-line parameter -w (workspace memory limit), System command \w (memory stats and workspace memory limit) Q for Mortals §13.1.10 Garbage Collection \g \l (load file or directory)¶ \l name \l . Where name is the name of a - q script, executes the script - serialized object, deserializes it into memory as variable name - directory of a splayed table, maps the table to variable name , without loading any columns into memory - directory and the value of one of the permitted partition types, the most recent partition directory is inspected for splayed directories and each such directory mapped into memory with the name of the splayed directory - directory containing a kdb+ database, recursively loads whatever it finds there: serialized objects, scripts, splayed tables, etc. 
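As a concrete illustration of the splayed-table case in the list above, the following writes a small splayed table and maps it back with \l . The paths are illustrative, and a table with symbol columns would additionally need its symbols enumerated before splaying.

q)`:db/t/ set ([]a:til 5;b:5?1.0)      / splay a small table under db/ (no symbol columns here)
q)\l db                                / map the database; db becomes the current directory
q)select from t where a>2              / columns are loaded on demand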
Current directory When a directory is opened, it becomes the current directory. Reload current directory You can reload the current database with \l . . This will ignore scripts and reload only data. Never mind the dollars If a file or directory under the path being loaded has a dollar-sign suffix then it is ignored. e.g. db/tickdata/myfile$ and db/tickdata/mydir$ would be ignored on \l db/tickdata or on \l . if db/tickdata is the current directory. q)\l sp.q / load sp.q script ... q)\a / tables defined in sp.q `p`s`sp q)\l db/tickdata / load the data found in db/tickdata q)\a / with tables quote and trade `p`quote`s`sp`trade If logging is enabled, the command checkpoints the .qdb file and empties the log file. Operating systems may create hidden files, such as DS_Store , that block \l on a directory. load , .Q.l (load) Logging Q for Mortals §10.3 Scripts, §13.2.6 Logging -l and -L \o (offset from UTC)¶ \o \o n Show or set the local time offset, as integer n hours from UTC, or as minutes if abs[n]>23 . The initial value of 0N means the machine’s offset is used. q)\o 0N q).z.p / UTC 2010.05.31D23:45:52.086467000 q).z.P / local time is UTC + 8 2010.06.01D07:45:53.830469000 q)\o -5 / set local time as UTC - 5 q).z.P 2010.05.31D18:45:58.470468000 q)\o 390 / set local time as UTC + 6:30 q).z.P 2010.06.01D06:16:06.603981000 This corresponds to the -o command line parameter. \p (listening port)¶ Show or set listening port \p [rp,][hostname:][portnumber|servicename] See Listening port for detail. hopen -p command-line option Multithreaded input mode, Changes in 3.5 Socket sharding with kdb+ and Linux \P (precision)¶ \P \P n Show or set display precision for floating-point numbers, i.e. the number of digits shown. The default value of n is 7 and possible values are integers in the range [0,17]. A value of 0 means use maximum precision. This is used when exporting to CSV files. q)\P / default 7i q)reciprocal 7 / 7 digits shown 0.1428571 q)123456789 / integers shown in full 123456789 q)123456789f / floats shown to 7 significant digits 1.234568e+08 q)\P 3 q)1%3 0.333 q)\P 10 q)1%3 0.3333333333 Use .Q.fmt and .q.f to format numbers to given width and precision q).Q.fmt[8;6]a / format to width 8, 6 decimal places "0.142857" q).Q.f[2;]each 9.996 34.3445 7817047037.90 / format to 2 decimal places "10.00" "34.34" "7817047037.90" .Q.f (precision format), .Q.fmt (precision format with length) Precision, -P command-line option, -27! precision format with IEEE754 rounding What Every Computer Scientist Should Know About Floating-Point Arithmetic \r (replication primary)¶ \r This should not be executed manually otherwise it can disrupt replication. It is executed automatically by the replicating process on the primary process, and returns the log file name and log file count. \r (rename)¶ \r src dst Rename file src to dst . It is equivalent to the Unix mv command, or the windows move command (except that it will not rename to a different disk drive). \s (number of secondary threads)¶ \s \s N Show or , where N is an integer, set the number of secondary threads available for parallel processing, within the limit set by the -s command-line option. N is an integer. Since V3.5 2017.05.02, secondary threads can be adjusted dynamically up to the maximum specified on the command line. A negative N indicates processes should be used, instead of threads. 
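A quick way to see the effect of secondary threads (the reference examples continue below) is to time the same work serially and with peach once they are enabled. This assumes q was started with, say, -s 4 ; any speed-up depends on core count and workload.

q)f:{sum exp x?1.0}
q)\t f each 8#1000000        / single-threaded
q)system"s 4"                / enable secondary threads, up to the -s limit
q)\t f peach 8#1000000       / the same work spread over secondary threads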
q)0N!("current secondary threads";system"s");system"s 4";0N!("current,max secondary threads";system"s";system"s 0N"); / q -s 8 ("current secondary threads";0i) ("current,max secondary threads";4i;8i) q)system"s 0" / disable secondary threads q)system"s 0N" / show max secondary threads 8i N parallel processing uses ------------------------------------ >0 N threads <0 processes with handles in .z.pd For processes: peach or': will call.z.pd for a list of handles to the processes, which must have been started previously- the absolute value of -N in the command line is ignored -s command-line option, Parallel processing \S (random seed)¶ \S \S n Where n is - omitted: display the last value to which the random seed was initialized 0N : display the current value of the random seed (since V3.6)- non-zero integer: re-initialize the seed to n Note that \S displays the last value to which the seed was initialized: it is not updated as the random-number generator (rng) is used. q)\S / default -314159i q)5?10 8 1 9 5 4 q)5?10 6 6 1 8 5 q)\S -314159 / restore default seed q)5?10 / same random numbers generated 8 1 9 5 4 q)\S / seed is not updated -314159 q)x:system "S 0N" / current value of seed q)r:10?10 q)system "S ",string x / re-initialize seed q)r~10?10 1b Allows user to save and restore state of the rng. (Since V3.6 2017.09.26.) q)x:system"S 0N";r:10?10;system"S ",string x;r~10?10 1b Thread-local Since V3.1 2013.08.19 random-number generation (rng) is thread-local. \S 1234 sets the seed for the rng for the main thread only. The rng in a secondary thread is assigned a seed based on the secondary thread number. In multithreaded input mode, the seed is based on the socket descriptor. Instances started on ports 20000 through 20099 (secondary threads, used with e.g. q -s -4 have the main thread’s default seed based on the port number. \t (timer)¶ \t / show timer interval \t N / set timer interval \t exp / time expression \t:n exp / time n repetitions of expression This command has two different uses, according to the parameter. If the parameter is omitted, it shows the number of milliseconds between timer ticks: 0 means the timer is off. N (integer)- Set the number of milliseconds between timer ticks. If 0, the timer is disabled, otherwise the timer is enabled and the first tick given. On each tick, the function assigned to .z.ts is executed. - This usage corresponds to the -t command-line option [:n] e (expression)- A q expression e (other than a single integer) is executed and the execution time shown in milliseconds. Since V3.0 2011.11.22, ifn is specified,e is executedn times. q)/Show or set timer ticks q)\t / default off 0 q).z.ts:{show`second$.z.N} q)\t 1000 / tick each second q)13:12:52 13:12:53 13:12:54 \t 0 / turn off q)/Time an expression q)\t log til 100000 / milliseconds for log of first 100000 numbers 3 q)\t:100 log til 100000 / timing for 100 repetitions 186 Actual timer tick frequency The actual timer tick frequency is determined by the timing granularity supported by the underlying operating system. This can be considerably different from a millisecond. \T (timeout)¶ \T \T n Show or set the client execution timeout, as n (integer) number of seconds a client call will execute before timing out. The default is 0: no timeout. Note this is in seconds, not milliseconds like \t . \ts (time and space)¶ \ts exp \ts:n exp Executes the expression exp and shows the execution time in milliseconds and the space used in bytes. 
(Since 3.1 2014.02.07) q)\ts log til 100000 7 2621568 q)\ts:10000 log til 1000 /same as \ts do[10000; log til 1000] 329 24672 \u (reload user password file)¶ \u When q is invoked with the -u parameter specifying a user password file, then \u will reload the password file. This allows updates to the password file while the server is running. \v (variables)¶ \v \v ns Lists the variables in namespace ns ; defaults to current namespace. q)a:1+b:2 q)\v `a`b q)\v .h `HOME`br`c0`c1`logo`sa`sb`sc`tx`ty q){x where x like"????"}system"v .h" `HOME`logo To expunge a from the default namespace delete a from `. Q for Mortals §12.5 Expunging from a Context \w (workspace)¶ \w / current memory usage \w 0|1 / internalized symbols \w n / set workspace memory limit With no parameter, returns current memory usage, as a list of 6 long integers. 0 number of bytes from the heap that are currently in use 1 heap size in bytes 2 maximum heap size so far 3 limit on thread heap size, from -w command-line option or \w system command 4 mapped bytes 5 physical memory q)\w 168144 67108864 67108864 0 0 8589934592 \w 0 and \w 1 return a pair of longs: 0 number of internalized symbols 1 corresponding memory usage q)\w 0 577 25436 The utility .Q.w formats all this information. Run-time increase Since 2017.11.06, \w allows the workspace limit to be increased at run-time, if it was initialized via the -w command-line option. For example \w 128 sets the limit to 128MB if the -w command line option was specified with a smaller value. The operation will return the current setting in bytes. If the system tries to allocate more memory than allowed, it signals -w abort and terminates with exit code 1. Specifying too large a number will fall back to the same behavior as \w 0 or \w 1 . q)\w 339168 67108864 67108864 104857600 0 8589934592 q)\w 0 651 28009 q)\w 128 134217728 q)\w 1000000000 1048576000000000 q)\w 1000000000000 651 28009 If the workspace limit has not been set by the command-line option -w , an error is signalled. q)\w 3 '-w init via cmd line Domain-local Since V4.0 2020.03.17 returns information for the current memory domain only. q)value each ("\\d .m";"\\w";"\\d .";"\\w") :: 353968 67108864 67108864 0 0 8589934592 :: 354032 67108864 67108864 0 0 8589934592 -w workspace command-line option, \g (garbage-collection mode) .m namespace (DAX-enabled filesystems) \W (week offset)¶ \W \W n Show or set the start-of-week offset n , where 0 is Saturday. The default is 2, i.e Monday. \x (expunge)¶ \x .z.p* By default, callbacks like .z.po are not defined in the session. After they have been assigned, you can restore the default using \x to delete the definition that was made. q).z.pi / default has no user defined function '.z.pi q).z.pi:{">",.Q.s value x} / assign function q)2+3 >5 q)\x .z.pi / restore default Works only for .z.p* variables defined in k before q.k is loaded For example, as .z.ph is defined in q.k , there is no default for it to be reset to. \z (date parsing)¶ \z \z 0|1 Show or set the format for "D"$ date parsing. B is 0 for mm/dd/yyyy and 1 for dd/mm/yyyy. q)\z 0 q)"D"$"06/01/2010" 2010.06.01 q)\z 1 q)"D"$"06/01/2010" 2010.01.06 \1 & \2 (redirect)¶ \1 filename \2 filename \1 and \2 let you redirect stdout and stderr to files from within the q session. The files and intermediate directories are created if necessary. ~/q$ rm -f t1.txt t2.txt ~/q$ l64/q KDB+ 4.0 2021.04.26 Copyright (C) 1993-2021 Kx Systems ... 
q)\1 t1.txt / stdout q)\2 t2.txt / stderr til 10 2 + "hello" \\ ~/q$ cat t1.txt / entry in stdout 0 1 2 3 4 5 6 7 8 9 ~/q$ cat t2.txt / entry in stderr q)q)'type On macOS and Linux \1 /dev/stdin returns output to the default. \_ (hide q code)¶ \_ / show client write access \_ scriptname / make runtime script This command has two different uses depending on whether a parameter is given. If no parameter, then \_ checks if client write-access is blocked. q)\_ 0b If a parameter is given, it should be a scriptname and \_ f.q makes a runtime script f.q_ . The q code loaded from a runtime script cannot be viewed or serialized. q)`:t1.q 0:enlist "a:123;f:{x+2*y}" q)\_ t1.q / create locked script `t1.q_ q)\l t1.q_ / can be loaded as usual q)a / definitions are correct 123 q)f[10;1 2 3] 12 14 16 q)f / q code is not displayed locked q)-8!f / or serialized 'type [0] -8!f ^ q)read0`:t1.q "a:123;f:{x+2*y}" q)read0`:t1.q_ / file contents are scrambled "'\374E\331\207'\262\355" "S\014%\210\0273\245" \ (terminate)¶ At the debugger’s q)) prompt clears one level from the execution stack and (eventually) returns to the interactive session. q)f:{g[]} q)g:{'`xyz} q)f[] {g[]} 'xyz @ {'`xyz} :: q))\ q) Without a suspension, \ toggles in an out of the k interpreter. If there is a suspension, this exits one level of the suspension. Otherwise, it toggles between q and k mode. (To switch languages from inside a suspension, type "\ ".) q){1+x}"hello" {1+x} 'type + 1 "hello" q))\ / clear suspension (only one level) q)\ / toggle to k mode \ (toggle q/k)¶ In the interactive session \ toggles between the q and k interpreters. q)\ \ !5 / this is k 0 1 2 3 4 \ q) The k programming language is exposed infrastructure. \\ (quit)¶ \\ - In the interactive session type \\ at the prompt to quit the session. - Inside a function, use value"\\\\" orexit 0 for the same result. Final comments The text following \\ and white space is ignored by q. This is often useful in scripts where \\ can be followed by comments or usage examples. Interrupt and terminate¶ Ctl-c signals an interrupt to the interpreter. Some operations are coded so tightly the interrupt might not be registered. Ctl-z will kill the q session. Nothing in memory is saved. OS commands¶ If an expression begins with \ but is not recognized as a system command, then it is executed as an OS command. Typos can get passed to the OS q)\ls / usual ls command "help.q" "k4.lic" "l64" "odbc.k" "profile.q" "q.k" ..</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="50"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ss , ssr ¶ String search – and replace ss ¶ String search x ss y ss[x;y] Where x is a stringy is a pattern as a string (no asterisk) returns an int vector of position/s within x of substrings that match pattern y . 
q)"We the people of the United States" ss "the" 3 17 q)s:"toronto ontario" q)s ss "ont" 3 8 q)s ss "[ir]o" 2 13 q)s ss "t?r" 0 10 ssr ¶ String search and replace ssr[x;y;z] Where x is a stringy is a pattern as a string (no asterisk)z is a string or a function returns x with each substring matching y replaced by: z ifz is a stringz[Y] wherez is a function andY is the matched substring q)s:"toronto ontario" q)ssr[s;"ont";"x"] / replace "ont" by "x" "torxo xario" q)ssr[s;"t?r";upper] / replace matches by their uppercase "TORonto onTARio" like Regular Expressions in q Strings Using regular expressions string ¶ Cast to string string x string[x] Returns x as a string. Applies to all datatypes. q)string `ibm "ibm" q)string 2 ,"2" q)string {x*x} "{x*x}" q)string (+/) "+/" Implicit iteration¶ string is an atomic function and iterates through dictionaries and tables. q)string (2 3;"abc") (,"2";,"3") (,"a";,"b";,"c") q)string "cat" / not the no-op you might expect ,"c" ,"a" ,"t" q)string `a`b`c!2002 2004 2010 a| "2002" b| "2004" c| "2010" q)string ([]a:1 2 3;b:`ibm`goog`aapl) a b ----------- ,"1" "ibm" ,"2" "goog" ,"3" "aapl" Domain and range¶ domain b g x h i j e f c s p m d z n u v t range c c c c c c c c c c c c c c c c c c Range: c .h namespace .Q.addr (IP/host as int), .Q.f (precision format), .Q.fmt (precision format with length) Q for Mortals §7.3.1 Data to Strings sublist ¶ Select a sublist of a list x sublist y sublist[x;y] Where x is an integer atom or pairy is a list returns a sublist of y . The result contains no more items than are available in y . Head or tail¶ Where x is an integer atom returns up to x items from the beginning of y if positive, or from the end if negative q)p:2 3 5 7 11 q)3 sublist p / 3 from the front 2 3 5 q)10 sublist p / only available values 2 3 5 7 11 q)2 sublist `a`b`c!(1 2 3;"xyz";2 3 5) / 2 keys from a dictionary a| 1 2 3 b| x y z q)-3 sublist sp / last 3 rows of a table s p qty ------- 3 1 200 3 3 300 0 4 400 Taking a sample from the beginning of string can go wrong if the string turns out to be shorter than the sample taken. q)10#"take me" "take metak" Instead, compose Pad with sublist . q){x$x sublist y}[10;]"take me" "take me " Slice¶ Where x is an integer pair returns up to x[1] items from y , starting at item x[0] . q)1 2 sublist p / 2 items starting from position 1 3 5 - Subtract¶ x-y -[x;y] Where x and y are numerics or temporals, returns their difference. q)3 4 5-2 1 2 3 q)2000.11.22 - 03:44:55.666 2000.11.21D20:15:04.334000000 - is a multithreaded primitive. Implicit iteration¶ Subtract is an atomic function. q)(10;20 30)-(2;3 4) 8 17 26 It applies to dictionaries and tables. q)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 -21 3;4 5 -6) q)d-1 a| 9 -22 2 b| 3 4 -7 q)d-`b`c!(10 20 30;1000*1 2 3) / upsert semantics a| 10 -21 3 b| -6 -15 -36 c| -1000 -2000 -3000 q)t-100 a b --------- -90 -96 -121 -95 -97 -106 q)k-k k | a b ---| --- abc| 0 0 def| 0 0 ghi| 0 0 Add is generally faster than Subtract. Range and domains¶ b g x h i j e f c s p m d z n u v t ---------------------------------------- b | i . i i i j e f i . p m d z n u v t g | . . . . . . . . . . . . . . . . . . x | i . i i i j e f i . p m d z n u v t h | i . i i i j e f i . p m d z n u v t i | i . i i i j e f i . p m d z n u v t j | j . j j j j e f j . p m d z n u v t e | e . e e e e e f e . p m d z n u v t f | f . f f f f f f f . f f z z f f f f c | . . . . . . . f . . p m d z n u v t s | . . . . . . . . . . . . . . . . . . p | p . p p p p p f p . n . . . p p p p m | m . m m m m m f m . 
. i . . p p p p d | d . d d d d d z d . . . i . p p p p z | z . z z z z z z z . . . . f p z z z n | n . n n n n n f n . p p p p n n n n u | u . u u u u u f u . p p p z n u v t v | v . v v v v v f v . p p p z n v v t t | t . t t t t t f t . p p p z n t t t Range: defijmnptuvz Add, deltas , differ , .Q.addmonths Datatypes, Mathematics How to handle temporal data in q Q for Mortals §4.9.2 Temporal Arithmetic sum , sums , msum , wsum ¶ Totals – simple, running, moving, and weighted sum ¶ Total sum x sum[x] Where x is - a simple numeric list, returns the sums of its items - an atom, returns x - a list of numeric lists, returns their sums - a dictionary with numeric values Nulls are treated as zeros. q)sum 7 / sum atom (returned unchanged) 7 q)sum 2 3 5 7 / sum list 17 q)sum 2 3 0N 7 / 0N is treated as 0 12 q)sum (1 2 3 4;2 3 5 7) / sum list of lists 3 5 8 11 / same as 1 2 3 4 + 2 3 5 7 q)sum `a`b`c!1 2 3 6 q)\l sp.q q)select sum qty by s from sp / use in select statement s | qty --| ---- s1| 1600 s2| 700 s3| 200 s4| 600 q)sum "abc" / type error if list is not numeric 'type q)sum (0n 8;8 0n) / n.b. sum list of vectors does not ignore nulls 0n 0n q)sum 0n 8 / the vector case was modified to match sql92 (ignore nulls) 8f q)sum each flip(0n 8;8 0n) /do this to fall back to vector case 8 8f sum is an aggregate function, equivalent to +/ . Floating-point addition is not associative Different results may be obtained by changing the order of the summation. ❯ q -s 4 KDB+ 4.0 2021.01.20 Copyright (C) 1993-2021 Kx Systems m64/ 12()core 65536MB sjt mackenzie.local 127.0.0.1 .. q)\s 0 q)a:100000000?1. q)\P 0 q)sum a 49999897.181930684 q)sum reverse a 49999897.181931004 The order of summation changes when the primitive is able to use threads. q)\s 4 q)sum a 49999897.181933172 sum is a multithreaded primitive. sums ¶ Running totals sums x sums[x] Where x is a numeric or temporal list, returns the cumulative sums of the items of x . The sum of an atom is itself. Nulls are treated as zeros. q)sums 7 / cumulative sum atom (returned unchanged) 7 q)sums 2 3 5 7 / cumulative sum list 2 5 10 17 q)sums 2 3 0N 7 / 0N is treated as 0 2 5 5 12 q)sums (1 2 3;2 3 5) / cumulative sum list of lists 1 2 3 / same as (1 2 3;1 2 3 + 2 3 5) 3 5 8 q)\l sp.q q)select sums qty by s from sp / use in select statement s | qty --| -------------------------- s1| 300 500 900 1100 1200 1600 s2| 300 700 s3| ,200 s4| 100 300 600 q)sums "abc" / type error if list is not numeric 'type sums is a uniform function, equivalent to +\ . msum ¶ Moving sums x msum y msum[x;y] Where x is a positive int atomy is a numeric list returns the x -item moving sums of y , with nulls replaced by zero. The first x items of the result are the sums of the terms so far, and thereafter the result is the moving sum. q)3 msum 1 2 3 5 7 11 1 3 6 10 15 23 q)3 msum 0N 2 3 5 0N 11 / nulls treated as zero 0 2 5 10 8 16 msum is a uniform function. wsum ¶ Weighted sum x wsum y wsum[x;y] Where x and y are numeric lists, returns the weighted sum of the products of x and y . When both x and y are integer lists, they are first converted to floats. q)2 3 4 wsum 1 2 4 / equivalent to sum 2 3 4 * 1 2 4f 24f q)2 wsum 1 2 4 / equivalent to sum 2 * 1 2 4 14 q)(1 2;3 4) wsum (500 400;300 200) 1400 1600 wsum is an aggregate function, equivalent to {sum x*y} . Implicit iteration¶ sum , sums , and msum apply to dictionaries and tables. wsum applies to dictionaries. 
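Before the reference examples below, a small illustration of how wsum and sums combine in practice: a volume-weighted average price per symbol, and its running equivalent. The trade table here is made up.

q)trade:([]sym:`a`a`b`b;price:10 12 100 104f;qty:100 300 20 80)
q)select vwap:qty wsum price%sum qty by sym from trade
sym| vwap
---| -----
a  | 11.5
b  | 103.2
q)update runvwap:(sums price*qty)%sums qty by sym from trade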
q)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 21 3;4 5 6) q)sum d 14 26 9 q)sum t a| 34 b| 15 q)sum k a| 34 b| 15 q)sums d a| 10 21 3 b| 14 26 9 q)2 msum t a b ----- 10 4 31 9 24 11 q)1 2 wsum d 18 31 15 Aggregating nulls¶ avg , min , max and sum are special: they ignore nulls, in order to be similar to SQL92. But for nested x these functions preserve the nulls. q)sum (1 2;0N 4) 0N 6 Domains and ranges¶ sum and sums domain: b g x h i j e f c s p m d z n u v t range: i . i i i j e f i . p m d z n u v t msum b g x h i j e f c s p m d z n u v t ---------------------------------------- b | i . i i i j e f . . n i i f n u v t g | . . . . . . . . . . . . . . . . . . x | i . i i i j e f . . n i i f n u v t h | i . i i i j e f . . n i i f n u v t i | i . i i i j e f . . n i i f n u v t j | i . i i i j e f . . n i i f n u v t e | . . . . . . . . . . . . . . . . . . f | . . . . . . . . . . . . . . . . . . c | . . . . . . . . . . . . . . . . . . s | . . . . . . . . . . . . . . . . . . p | . . . . . . . . . . . . . . . . . . m | . . . . . . . . . . . . . . . . . . d | . . . . . . . . . . . . . . . . . . z | . . . . . . . . . . . . . . . . . . n | . . . . . . . . . . . . . . . . . . u | . . . . . . . . . . . . . . . . . . v | . . . . . . . . . . . . . . . . . . t | . . . . . . . . . . . . . . . . . . Range: efijntuv wsum b g x h i j e f c s p m d z n u v t ---------------------------------------- b | i . i i i j e f . . p m d z n u v t g | . . . . . . . . . . . . . . . . . . x | i . i i i j e f . . p m d z n u v t h | i . i i i j e f . . p m d z n u v t i | i . i i i j e f . . p m d z n u v t j | j . j j j j e f . . p m d z n u v t e | e . e e e e e f . . p m d z n u v t f | f . f f f f f f f . f f z z f f f f c | . . . . . . . f . . p m d z n u v t s | . . . . . . . . . . . . . . . . . . p | p . p p p p p f p . . . . . . . . . m | m . m m m m m f m . . . . . . . . . d | d . d d d d d z d . . . . . . . . . z | z . z z z z z z z . . . . . . . . . n | n . n n n n n f n . . . . . . . . . u | u . u u u u u f u . . . . . . . . . v | v . v v v v v f v . . . . . . . . . t | t . t t t t t f t . . . . . . . . . Range: defijmnptuvz</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="51"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">/ - function for loading in config csv with multiple processes in one line duplicateconfig:{[t] update proc:raze[t `proc] from ((select from t)where count each t[`proc])}; / - end of default parameters /- called at every EOD by .u.end init:{ .lg.o[`init;"searching for servers"]; /- Open connection to discovery. Retry until connected to dqe. 
.servers.startupdependent[`dqedb; 30]; /- set timer to call EOD if[.dqe.utctime=1b;.eodtime.nextroll:.eodtime.getroll[`timestamp$.dqe.currentpartition]+(.z.T-.z.t)]; .timer.once[.eodtime.nextroll;(`.u.end;.dqe.getpartition[]);"Running EOD on Checker"]; /- add dqe functions to .api.detail .api.add .'value each .dqe.readdqeconfig[.dqe.detailcsv;"SB***"]; .dqe.compcounter[0N]:(0N;();()); configtable:([] action:`$(); params:(); proc:(); mode:`$(); starttime:`timespan$(); endtime:`timespan$(); period:`timespan$()) /- Set up configtable from csv `.dqe.configtable upsert .dqe.duplicateconfig[update ";"vs/:proc from (.dqe.readdqeconfig[.dqe.configcsv;"S**SNNN"])]; update checkid:til count .dqe.configtable from `.dqe.configtable; /- from timespan to timestamp update starttime:(`date$(.z.D,.z.d).dqe.utctime)+starttime from `.dqe.configtable; update endtime:?[0W=endtime;0Wp;(`date$(.z.D,.z.d).dqe.utctime)+endtime] from `.dqe.configtable; .dqe.loadtimer'[.dqe.configtable]; /- store i numbers of rows to be saved down to DB .dqe.tosavedown:()!(); .lg.o[`.dqc.init; "Starting EOD writedown."]; /- Checking if .eodtime.nextroll is correct if[((.z.P,.z.p).dqe.utctime)>.eodtime.nextroll:.eodtime.getroll[((.z.P,.z.p).dqe.utctime)];system"t 0";.lg.e[`init; "Next roll is in the past."]] st:.dqe.writedownperiod+exec min starttime from .dqe.configtable; et:.eodtime.nextroll-.dqe.writedownperiod; /- Log the start and end times. .lg.o[`.dqe.init; "Start time: ",(string st),". End time: ",string et]; .timer.repeat[st;et;.dqe.writedownperiod;(`.dqe.writedown;`);"Running periodic writedown for results"]; .timer.repeat[st;et;.dqe.writedownperiod;(`.dqe.writedownconfig;`);"Running periodic writedown for configtable"]; .lg.o[`init;"initialization completed"]; } writedown:{ if[0=count .dqe.tosavedown`.dqe.results;:()]; .dqe.savedata[.dqe.dqcdbdir;.dqe.getpartition[];.dqe.tosavedown`.dqe.results;`.dqe;`results]; /- get handles for DBs that need to reload hdbs:distinct raze exec w from .servers.SERVERS where proctype=`dqcdb; /- send message for DBs to reload .dqe.notifyhdb[.os.pth .dqe.dqcdbdir]'[hdbs]; } writedownconfig:{ if[0=count .dqe.tosavedown`.dqe.configtable;:()]; .dqe.savedata[.dqe.dqcdbdir;.dqe.getpartition[];.dqe.tosavedown`.dqe.configtable;`.dqe;`configtable]; /- get handles for DBsthat need to reload hdbs:distinct raze exec w from .servers.SERVERS where proctype=`dqcdb; /- send message for DB .dqe.notifyhdb[.os.pth .dqe.dqcdbdir]'[hdbs]; } /- checks for unfinished runs that match the new run dupchk:{[runtype;idnum;params;proc] if[params`comp;proc:params`compresproc]; if[`=proc;:()]; if[count select from .dqe.results where id=idnum,procschk=proc,chkstatus=`started; .dqe.updresultstab[runtype;idnum;0Np;0b;"error:fail to complete before next run";`failed;params;proc]]; } /- set initial values in results table initstatusupd:{[runtype;idnum;funct;params;rs] if[idnum in exec id from .dqe.compcounter;delete from `.dqe.compcounter where id=idnum;]; .lg.o[`initstatus;"setting up initial record(s) for id ",(string idnum)]; /- calls dupchk function to check if last runs chkstatus is still started .dqe.dupchk[runtype;idnum;params]'[rs]; vars:params`vars; updvars:(key params[`vars]) where (),10h=type each value params`vars; if[count updvars;vars[updvars]:`$params[`vars] updvars]; parprint:`$("," sv string (raze/) (),enlist each vars params`fnpar),$[params`comp;",comp(",(string params[`compproc]),",",(string params`compallow),")";""]; `.dqe.results insert 
(idnum;funct;parprint;rs[0];rs[1];.proc.cp[];0Np;0b;"";`started;runtype); } /- updates a check in the results table updresultstab:{[runtype;idnum;end;res;des;status;params;proc] if[1b=params`comp;proc:params`compresproc]; /- obtain count of checks that will be updated if[c:count s:exec i from .dqe.results where id=idnum, procschk=proc,chkstatus=`started; .lg.o[`updresultstab;raze "run check id ",(string idnum)," update in results table with check status ",string status]; `.dqe.results set update endtime:end,result:res,descp:enlist des,chkstatus:status,chkruntype:runtype from .dqe.results where id=idnum,procschk=proc,chkstatus=`started]; .dqe.tosavedown[`.dqe.results],:s; delete from `.dqe.compcounter where id=idnum; params:()!(); s2:exec i from .dqe.configtable where checkid=idnum; .dqe.tosavedown[`.dqe.configtable]:.dqe.tosavedown[`.dqe.configtable] union s2; .lg.o[`updresultstab;"Updated check id ",(string idnum)," in the results table with status ",string status]; } /- compares the third atom of results when comparison is on chkcompare:{[runtype;idnum;params] /- checks if all async check results have returned - if not, exit the function if[params[`compcount]<>(d:.dqe.compcounter idnum)`counter;:()]; .lg.o[`chkcompare;"comparison started with id ",string idnum]; /- obtain all the check returns a:d[`results] where not d[`procs]=params`compproc; procsforcomp:d[`procs] except params`compproc; /- obtain the check to compare the others to b:d[`results] where d[`procs]=params`compproc; /- if error in compare proc then fail check if[@[{all 0W=x};first b;0b]; .dqe.updresultstab[runtype;idnum;.proc.cp[];0b;"error: error on comparison process";`failed;params;`];:()]; errorprocs:d[`procs] where (),all each @[{0W=x};d`results;0b]; /- if error in all comparison procs then fail check if[(count errorprocs)= count d`results; .dqe.updresultstab[runtype;idnum;.proc.cp[];0b;"error: error with all comparison procs";`failed;params;`];:()]; matching:procsforcomp where all each params[`compallow] >= 100* abs -\:[a;first b]%\:first b; notmatching:procsforcomp except errorprocs,matching; .lg.o[`chkcompare;"comparison finished with id ",string idnum]; s:(string params[`compproc])," "; if[count errorprocs;s,:" | ";s,: raze"error ",("," sv string errorprocs)]; if[count notmatching;s,:" | ";s,:raze"no match ",("," sv string notmatching)]; if[count matching;s,:" | ";s,:raze"match ",("," sv string matching)]; .lg.o[`chkcompare;"Updating descp of compare process in the results table"]; resbool:not(count errorprocs)|count notmatching; .dqe.updresultstab[runtype;idnum;.proc.cp[];resbool;s;`complete;params;`]; } /- updates the results table with the check result postback:{[runtype;idnum;proc;params;result] .lg.o[`postback;"postback successful for id ",(string idnum)," from ",string proc]; /- if comparision, add to compcounter table if[params`comp; .dqe.compcounter[idnum]:( 1+0^.dqe.compcounter[idnum][`counter]; .dqe.compcounter[idnum][`procs],proc; /- join result to the list .dqe.compcounter[idnum][`results],$[3<count result;0W;last result])]; /- checks if error returned from server side; if[("e"=first result)&(not params`comp); .dqe.updresultstab[runtype;idnum;0Np;0b;result;`failed;params;proc]; :()]; /- in comparison run, check if all results have returned $[params`comp; .dqe.chkcompare[runtype;idnum;params]; .dqe.updresultstab[runtype;idnum;.proc.cp[];first result;result[1];`complete;params;proc]]; } /- sends the check function over async getresult:{[runtype;funct;params;idnum;proc;hand] .lg.o[`getresults;raze"Send 
function over to process: ",string proc]; fvars:params[`vars] params`fnpar; /- send function with variables down handle .async.postback[hand;(funct,$[10h=type fvars;enlist fvars;fvars]);.dqe.postback[runtype;idnum;proc;params]]; } /- sends check function to test processes runcheck:{[runtype;idnum;fn;params;rs] .lg.o[`runcheck;"Starting check run ",string idnum]; params[`fnpar]:(value value fn)[1]; temp:$[1=count params`fnpar;enlist params`fnpar;params[`fnpar]]!$[(10h=type params`vars)|(1=count params`vars);enlist params`vars;params`vars]; params[`vars]:temp; fncheck:` vs fn; /- run check to make sure passed in function exists if[not fncheck[2] in key value .Q.dd[`;fncheck 1]; .lg.e[`runcheck;"Function ",(string fn)," doesn't exist"]; :()]; /- set rs to a list rs:(),rs; /- h would be assigned to a dictionary with the process' procname, proctype, and handle h:.dqe.gethandles[rs]; /- r would be assigned to a list with two atoms, containing process' procname and proctype r:.dqe.fillprocname[rs;h]; .lg.o[`runcheck;"Checking if comparison check"]; if[not params`comp; .dqe.initstatusupd[runtype;idnum;fn;params]'[r]; .lg.o[`runcheck;"checking for processes that are not connectable"]; if[not any raze[r]in\:exec procname from .servers.SERVERS where .dotz.liveh w; .dqe.updresultstab[runtype;idnum;0Np;0b;"error:can't connect to process";`failed;params;`]; ]; /- checks if any procs didn't get handles procsdown:(h`procname) where 0N = h`w; if[count procsdown;.dqe.updresultstab[runtype;idnum;0Np;0b;"error:process is down or has lost its handle";`failed;params]'[procsdown]]; ]; if[params`comp; /- fail if comparison process is in list of processes to check against if[(params`compproc) in h`procname; .lg.e[`runcheck;"Can't compare process with itself"]; .dqe.updresultstab[runtype;idnum;0Np;0b;"error:compare process can't be compared with itself";`failed;params]'[h`procname]; :()]; params,:(enlist `compresproc)!enlist `$"," sv string h`procname; /- obtain handle for comparison process comph:.dqe.gethandles[params`compproc]; h:h,'comph; proccount:count h`procname; params,:(enlist `compcount)!enlist proccount; .lg.o[`runcheck;(string params`compcount)," process will be checked for this comparison"]; .dqe.initstatusupd[runtype;idnum;fn;params;(`$"," sv string r[;0]),params`compresproc];</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="52"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// @kind function // @category private // @fileoverview Get stored cookie(s) relevant to current query // @param q {dict} query object // @return {string} cookie(s) getcookies:{[q] h:q`host;p:q`path;pr:q`protocol; //extact necessary components h:".",h; //prevent bad tailmatching t:select from .cookie.jar where h like/:host,p like/:path,(expires>.z.t)|null expires; //select all cookies that apply if[not pr~"https://";t:delete from t where secure]; //delete HTTPS only cookies if not HTTPS request :"; "sv"="sv'flip value exec name,val from t; //compile cookies into string } // @kind function // @category private // @fileoverview Add stored cookie(s) relevant to current query // @param q {dict} query object // @return {dict} query objeect with added cookies addcookies:{[q] if[count c:getcookies[q`url];q[`headers;`Cookie]:c]; :q; } // @kind function // @category public // 
@fileoverview Read a Netscape/cURL format cookiejar // @param f {string|symbol|#hsym} filename // @return {table} cookie jar readjar:{[f] j:read0 .url.hsurl f; //get hsym of input file & read j:j where not ("#"=first'[j])|0=count'[j]; //remove comments & empty lines t:flip`host`tailmatch`path`secure`expires`name`val!("*S*SJ**";"\t")0:j; //convert to a table t:update host:{"*.",x}'[host] from t where tailmatch=`TRUE; //implement tailmatching t:update path:{x,"*"}'[path] from t; //implement path matching t:update secure:secure=`TRUE from t; //convert secure to boolean t:update expires:?[0=expires;0Nz;`datetime$`timestamp$1970.01.01D00+1e9*expires] from t; //calculate expiry :delete tailmatch from update httponly:0b,maxage:0Nj,samesite:` from t; //add extra fields for reQ cookiejar } // @kind function // @category public // @fileoverview Write a Netscape/cURL format cookiejar // @param f {string|symbol|#hsym} filename // @param j {table} cookie jar // @return {#hsym} cookie jar filename writejar:{[f;j] t :"# Netscape HTTP Cookie File\n"; //make file header (copy cURL) t,:"# https://curl.haxx.se/docs/http-cookies.html\n"; t,:"# This file was generated by reQ! Edit at your own risk.\n\n"; t,:"\n"sv 1_"\t"0:select //convert to tab delimited & drop headers {("."=first x)_x}'[except\:[host;"*"]], `FALSE`TRUE "*"=first'[host], except\:[path;"*"], `FALSE`TRUE secure, ?[null expires;0;`long$1e-9*(`timestamp$expires)-1970.01.01D00:00], //convert expires back to epoch time name, val from j; :.url.hsurl[f] 0: "\n"vs t; //write to file } \d . ================================================================================ FILE: reQ_req_doh_google.q SIZE: 1,182 characters ================================================================================ \d .doh ENABLED:1b; //enable by default url:"dns.google.com"; //URL for API cache:()!() //cache IP for URL cache[`$url]:url; //don't resolve the resolver resolve:{[url] /* take a URL, resolve URL to IP & return */ uo:.url.parse0[0b;url]; //parse to object if[(`$h:uo`host) in key cache; :.url.format @[uo;`host;:;cache`$h]; //return from cache if present ]; r:.j.k .req.get["https://dns.google.com/resolve?name=",h;()!()]; //request from Google API i:first r[`Answer][`data]; //get first record cache[`$h]:i; //cache resovled IP :.url.format @[uo;`host;:;i]; //return resolved URL } \d . ================================================================================ FILE: reQ_req_ext_os.q SIZE: 1,120 characters ================================================================================ / os.q taken from https://github.com/jonathonmcmurray/qutil_packages @ beaabdd \d .os es:$[.z.o like "w*";" 2>NUL";" 2>/dev/null"]; //error suppression dependent on os test:{[x] /* .os.test - test if a command works on current os */ :@[{system x;1b};x,es;0b]; //run with system & suppress error } home:hsym`$getenv$[.z.o like "w*";`USERPROFILE;`HOME] //get home dir depending on OS hfile:(` sv home,) //get file path relative to home dir read:{$[1=count a;first;]a:read0 x} //read text file, single string if one line write:{x 0:$[10=type y;enlist;]y} //write text file, list of strings or single hread:{read hfile x} //read file from home dir hwrite:{write[hfile x;y]} //write file in home dir \d . 
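A brief usage sketch for the .os helpers defined above (illustrative only and not part of the original package; the file name .reqtest is invented for the example):

/ write a two-line text file in the home directory, then read it back
.os.hwrite[`.reqtest;("first line";"second line")]   / writes $HOME/.reqtest (%USERPROFILE% on Windows)
.os.hread`.reqtest                                   / returns ("first line";"second line")
.os.test"ls"                                         / 1b if a shell command runs on this OS, 0b otherwise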
================================================================================ FILE: reQ_req_init.q SIZE: 247 characters ================================================================================ /package code if[.z.K<=3.1;.utl.pkg"json.q"]; //add JSON support for older q versions .utl.pkg"ext/os.q" .utl.pkg"url.q" .utl.pkg"cookie.q" .utl.pkg"b64.q" .utl.pkg"status.q" .utl.pkg"req.q" .utl.pkg"auth.q" .utl.pkg"multipart.q" ================================================================================ FILE: reQ_req_json.q SIZE: 927 characters ================================================================================ /downloaded from https://raw.githubusercontent.com/KxSystems/kdb/master/e/json.k \d .j /[]{} Cbg*xhijefcspmdznuvt k)q:"\"";s:{q,x,q};J:(($`0`1)!$`false`true;s;{$[#x;x;"null"]};s;{s@[x;&"."=8#x;:;"-"]};s)1 2 5 11 12 16h bin k)j:{$[10=abs t:@x;s@,/{$[x in r:"\t\n\r\"\\";"\\","tnr\"\\"r?x;x]}'x;99=t;"{",(","/:(j'!x),'":",'j'. x),"}";-1<t;"[",($[98=t;",\n ";","]/:.Q.fc[j']x),"]";J[-t]@$x]} /enclose k)e:{(*x),(","/:y),*|x};a:"\t\n\r\"\\";f:{$[x in a;"\\","tnr\"\\"a?x;x]} k)j:{$[10=abs t:@x;s$[|/x in a;,/f'x;x];99=t;e["{}"](j'!x),'":",'j'. x;-1<t;e["[]"].Q.fc[j']x;J[-t]@$x]} /disclose k)v:{=\~("\\"=-1_q,x)<q=x};d:{$[1<n:(s:+\v[x]*1 -1 1 -1"{}[]"?x)?0;1_'(0,&(v[x]&","=x)&1=n#s)_x:n#x;()]} k)c:{$["{"=*x;(`$c'n#'x)!c'(1+n:x?'":")_'x:d x;"["=*x;.Q.fc[c']d x;q=*x;$[1<+/v x;'`err;"",. x];"a">*x;"F"$x;"n"=*x;0n;"t"=*x]} k)k:{c x@&~v[x]&x in" \t\n\r"}; \ k j x:([]C:$`as`;b:01b;j:0N 2;z:0Nz,.z.z) k j x:"\"a \\" k"{},2]" ================================================================================ FILE: reQ_req_multipart.q SIZE: 2,153 characters ================================================================================ \d .req</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="53"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Comparing option pricing methods in q¶ In this paper, we compare the use of both Monte Carlo (MC) and Quasi-Monte Carlo (QMC) methods in the process of pricing European and Asian options. In doing so, we consider the use of two discretization schemes - standard discretization and Brownian-bridge construction. Results produced by the different methods are compared with the deterministic Black-Scholes price for each option type, using Global Sensitivity Analysis (SA). Note that the methods demonstrated below follow the work presented by S. Kucherenko et al. 2007. S. Kucherenko et al. 2007, “The Importance of Being Global – Application of Global Sensitivity Analysis in Monte Carlo Option Pricing”, Wilmott, pp. 82–91 Black-Scholes¶ The most common model used to calculate the price of options is Black-Scholes, where the formula for each market is derived from the Black-Scholes equation. In this paper, we look specifically at the Black-Scholes models for European and Asian call options. The standard Black-Scholes model for European options assumes a payoff based on the underlying price at exercise. The modified model for Asian options assumes a payoff based on the average underlying price over a predefined time period. In each case, the Black-Scholes model produces a closed-form solution with a deterministic result. 
For European call options, the price of the corresponding option at time \(t\), \(P(S_{t},t)\), is given by: Where \(T\) is the expiry, \(S_{t}\) is the price of the underlying asset at time \(t\), \(K\) is the strike price of the option, \(\sigma\) is the volatility and \(r\) is the interest rate. Note that the price is discounted by the dividends, \(q\), throughout. For Asian call options, we implement the same formula, using an adjusted \(S_{t}\), \(\sigma^{2}\) and drift rate, \(\mu\): Where \(n\) is the number of timesteps. Monte Carlo and Quasi-Monte Carlo simulations¶ Within the financial industry, there is a need to price complex financial instruments. Despite this need, there are a lack of analytical solutions to do so. MC methods are used with the financial industry to mimic the uncertainty associated with the underlying price of an instrument and to subsequently generate a value based on the possible underlying input values. One example of where MC is used in finance, is in evaluating an option on an equity. For each underlying asset, an MC simulation is used to create thousands of random price paths, with an associated payoff. The option price for each path is calculated by taking the average over the future payoffs and discounting them to the present. These models are based on pseudo-random numbers which, despite being commonly used, exhibit very slow convergence, with a rate of \(O(1/\sqrt{N})\) where \(N\) is the number of sampled points. To improve upon these models, QMC methods have been developed which use low-discrepancy sequences (LDS) to produce a rate of convergence ~ \(O(1/N)\). LDS are deterministic uniformly distributed sequences which are specifically designed to place sample points as uniformly as possible. Practical studies have shown that the most effective QMC method for application in financial engineering is based on Sobol' LDS. S. Kucherenko et al. 2001, “Construction and Comparison of High-Dimensional Sobol’ Generators”, Wilmott, Nov, pp. 64-79 broda.co.uk P. Jäckel 2001, Monte Carlo Methods In Finance, pp. 122. P. Glasserman 2003, Monte Carlo Methods in Financial Engineering, Springer. Wiener path¶ The starting point for asset price simulation is the construction of a Wiener path (or Brownian motion). Such paths are built from a set of independent Gaussian variates, using either standard discretization or Brownian-bridge construction. In the standard approximation, the Wiener path is found by taking the cumulative sum of the Gaussian variates. When constructing a Brownian bridge, the last step of the Wiener path is calculated first, followed by the mid-step, and then the space left between steps is bisected until all steps have been determined. An example of building up a Brownian bridge is shown in the diagram below, where we have a total of 14 timesteps (from 1 to 14). Note that we also have an additional timestep 0 , which is assumed to have a value of 0. The construction of a Brownian bridge over 14 steps. See Jäckel, 2001, op. cit. Both standard discretization and Brownian-bridge construction share the same variance and therefore the same resulting convergence when used with MC models. However, performance differs between the two when QMC methods are introduced, with faster convergence seen for Brownian-bridge construction. In order to showcase how performant the QMC simulation is when paired with Brownian-bridge construction, we use Global SA as outlined in S. Kucherenko et al. 2007. 
This method allows us to estimate the contribution of individual input parameters in the final variance of the output over a number of experiments. In each experiment, we: - Randomly generate \(n\) random numbers, either pseudo-random (MC) or Sobol’ sequence (QMC). (See also broda.co.uk) - Convert into a normal distribution. - Convert into a Wiener-path random walk using standard discretization or Brownian-bridge construction. - Convert into an asset-price path based on parameters: s : Asset price at \(t=0\)v : Volatilityr : Interest rateq : Dividendst : Expiry - Convert into an option price based on the option type and strike price, k . The prices produced are then averaged to find a final predicted price. Implementation¶ In the following sections, we compare the methods of option pricing mentioned above. The Black-Scholes price for each market is compared to an average price generated using the following combinations of simulation and discretization methods: - Pseudo-random number generation (MC) with standard discretization. - Sobol’ sequences (QMC) with standard discretization. - Sobol’ sequences (QMC) with Brownian-bridge construction. The Black-Scholes function for each market produces a closed-form solution with a deterministic result, while the MC/QMC functions perform a number of random experiments and return an average price, based on the option type and the strike price. Once both the Black-Scholes and MC/QMC prices have been calculated for each market, the root mean square error (RMSE) is calculated between the two. This is demonstrated in the final example below, where the process is repeated for an increasing number of paths, with resulting errors compared. The technical dependencies required for the below work are as follows: - Option Pricing kdb+/q library - embedPy - Sobol’ C++ library - SobolSeq1024 function provided in the Option Pricing kdb+/q library with max dimension of 1024. - matplotlib Utility functions For simplicity, utility functions are omitted from the code snippets below. These can be found within the Option Pricing library linked above. Load scripts¶ As mentioned previously, the implementations of option pricing methods outlined below are based on original C++ scripts used in S. Kucherenko et al. 2007. All code is contained within the option-pricing repository: Wrappers for the C++ pseudo-random and Sobol’ sequence number generators (see also broda.co.uk) are contained within rand.q , along with the cumulative and inverse cumulative normal distribution functions in norm.q . To run the below examples, q scripts are loaded including the C++ wrappers and graphics functions used throughout. \l code/q/rand.q \l code/q/norm.q \l notebooks/graphics/graphics.q Black-Scholes option pricing¶ The following functions provide q implementations of the Black-Scholes formula for each market, outlined above. They take in parameter dictionary pd as an argument, containing the parameters s , v , r , q , k , and t detailed in the previous section. Note that the Black-Scholes price of an Asian call option depends on the number of timesteps n , which must also be passed as an argument. 
/ European bsEuroCall:{[pd] / Calculate volatility*sqrt delta T coefficient coeff:(v:pd`v)*sqrt t:pd`t; / Calculate d1 d1:(log[pd[`s]%pd`k]+t*(pd[`r]-pd`q)+.5*v*v)%coeff; / Calculate d2 d2:d1-coeff; / Calculate the option price - P(S,t) (pd[`s]*exp[neg t*pd`q]*cnorm1 d1) - pd[`k]*exp[neg t*pd`r]*cnorm1 d2 } / Asian bsAsiaCall:{[n;pd] / Calculate adjusted drift rate adjmu:.5*((r:pd`r)-.5*v2:v*v:pd`v)*n1:1+1.%n; / Calculate adjusted volatility squared adjv2:(v2%3)*n1*1+.5%n; / Calculate adjusted price adjS :pd[`s]*exp(t:pd`t)*(hv2:.5*adjv2)+adjmu-r; / Calculate d1 d1:(log[adjS%k:pd`k]+t*(r-q:pd`q)+hv2)%rtv2:sqrt adjv2*t; / Calculate d2 d2:d1-rtv2; / Calculate the option price - P(S,t) (adjS*exp[neg q*t]*cnorm1 d1)-k*exp[neg r*t]*cnorm1 d2 } The outputs of these functions are demonstrated below for 512 timesteps. nsteps:512 / number of timesteps pd:`s`k`v`r`q`t!100 100 .2 .05 0 1 / parameter dictionary / Calculate BS price for European/Asian options "European Black Scholes Price: ",string bseuro:bsEuroCall pd "Asian Black Scholes Price: ",string bsasia:bsAsiaCall[nsteps]pd European Black Scholes Price: 10.45058 Asian Black Scholes Price: 5.556009 Monte Carlo and Quasi-Monte Carlo option pricing¶ Generate random numbers¶ The first stage in predicting an option price is to generate a set of random numbers using either MC or QMC methods. In the example below we generate 512 pseudo-random and Sobol’ sequence numbers, with results plotted for comparison. Random numbers are generated using the Mersenne Twister number generator which has one parameter, the number of steps. The Sobol’ sequence generator takes two arguments: - the index of the point (0 < i < 231 - 1) - the dimension of the Sobol’ sequence, i.e. the number of steps (0 < d < 1025). / Function to generate n random numbers in d dimensions rdmngen:{[n;d](d;n)#mtrand3 d*n} / Function to generate n sobol numbers in d dimensions sobngen:{[n;d]flip sobolrand[d]each 1+til n} / Generate n random and sobol numbers in 2D data:(rdmngen;sobngen).\:nsteps,2 subplot[data;("Random";"Sobol");2;2#`scatter] It is clear that the pseudo-random numbers are not evenly distributed, with points clustering together in some sections, while leaving large portions of white space in others. In contrast, the Sobol’ sequence plot exhibits a much more even distribution, with few points clumping together. Convert to a Gaussian distribution¶ The generated sequences are converted from a uniform distribution to a Gaussian distribution. Following this conversion, around 68% of the values lie within one standard deviation, while two standard deviations account for around 95% and three account for 99.7%. Gaussian distribution In the example below we convert the uniform generated Sobol’ sequence to a Gaussian distribution, using the inverse cumulative normal function, invcnorm . / Convert sobol sequence to normal distribution zsob:invcnorm each sob:last data subplot[(sob;zsob);("Sobol Uniform";"Sobol Gaussian");2;2#`scatter] The differences between the Gaussian distributions produced for random and Sobol’ sequences are best demonstrated for a small number of timesteps, e.g. 64. Below we plot the 1-D Gaussian distributions for both random and Sobol’ number generation across 64 timesteps. 
/ Returns 1D Gaussian distribution to plot gausscnv:{[g;n;d]first invcnorm each$[g~`rdm;rdmngen;sobngen][n;d] } / Calculates Gaussian variates for 64 steps, in 2 dimensions dist:gausscnv[;64;2]each`rdm`sob subplot[dist;("Random";"Sobol");2;2#`hist] As expected, the Sobol’ sequence exhibits a Gaussian curve with much better statistical properties than the random-number sequence. Convert into a Wiener-path random walk¶ The q code to build both a Brownian-bridge and Wiener-path random walk is shown below. Brownian bridge: /* n = number of timesteps /* dt = length of timesteps bbridge:{[n;dt] / create initial brownian bridge with indices for all timesteps bb:first flip(n-1).[util.initbb n]\(`bidx`ridx`lidx!3#n-1;((n-1)#0b),1b); / calculate weights and sigma value for each point in the path bb:update lwt:bidx-lidx,rwt:ridx-bidx,sigma:ridx-lidx from bb; bb:update lwt%sigma,rwt%sigma,sigma:sqrt dt*lwt*rwt%sigma from bb; / create a projection for weiner path creation containing new bbridge util.buildpath .[bb;(0;`sigma);:;sqrt n*dt] } Wiener path: / Performs cumulative sum / or inverse cumulative normal for random/sobol numbers /* u = gaussian variates /* d = dictionary containing bbridge and boolean for sobol/random numbers wpath:{[u;d]$[(::)~d`bb;sums;d`bb]invcnorm u } An example of how the Brownian bridge is built is shown below using bbdemo . The function outputs a table with n timesteps. / Demonstrates build up of bbridge indices bbdemo:{[n] / Create matrix showing steps taken / Step already taken = 1b, 0b otherwise x:1b,'enlist[n#0b], last flip(n-1).[util.initbb n]\(`bidx`ridx`lidx!3#n-1;((n-1)#0b),1b); / Print "X" where 1b, showing path taken flip(`$"i",'string til count x)!x:flip(" X")x } An example is shown below using 8 timesteps, showing the order in which steps are added to the path. Note that i0 was added here, where we assume that it has a value equal to 0. q)bbdemo 8 i0 i1 i2 i3 i4 i5 i6 i7 i8 -------------------------- X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X When recording the order of steps in the path, we take note of the left and right weights and indexes, and the corresponding sigma value for each step in the sequence. This is shown for 512 timesteps and 1 unit of time, with the sigma value for each index in the Brownian bridge subsequently plotted. q)dt:1 q)10#b:last value bbex:bbridge[nsteps;dt] bidx ridx lidx lwt rwt sigma ------------------------------- 511 511 511 22.62742 255 511 -1 0.5 0.5 11.31371 127 255 -1 0.5 0.5 8 383 511 255 0.5 0.5 8 63 127 -1 0.5 0.5 5.656854 191 255 127 0.5 0.5 5.656854 319 383 255 0.5 0.5 5.656854 447 511 383 0.5 0.5 5.656854 31 63 -1 0.5 0.5 4 95 127 63 0.5 0.5 4 Once the Brownian bridge has been initialized, it can be used to transform Gaussian variates into a Wiener-path random walk. Below, a Wiener path with 512 timesteps is constructed using a Sobol’ sequence (of length 512) and the Brownian bridge constructed previously. Note that the function wpath takes two arguments: - Sequence of generated numbers, Sobol’ or random. - Dictionary indicating whether to use standard discretization or Brownian-bridge construction, and whether to use Sobol’ sequences ( 1b ) or pseudo-random numbers (0b ). If using a Brownian bridge, the initial Brownian bridge must be passed in, if not use(::) . q)d:`bb`sobol!(bbex;1b) q)show w:wpath[sobolrand[nsteps;2];d] -0.4450092 0.06385387 -0.1017726 -1.221271 -0.9099617 -1.552524 -0.56.. 
q)plt[`:title]"Wiener path random walk"; q)plt[`:plot]w; q)plt[`:show][]; Convert into asset price path¶ At this point, the Wiener path is converted into an asset-price path using the methods outlined in S. Kucherenko et al. 2007, where the asset-price path is calculated as: Where \(S_0\) and \(S(t)\) are the asset prices at time \(0\) and \(t\) respectively, \(r\) is the interest rate, \(\sigma\) is the volatility and \(W(t)\) is a Wiener path up to time \(t\). This process can be done using the function spath , detailed below. /* u = gaussian variates /* n = number of timesteps /* d = dictionary with bbridge and boolean for random/sobol /* pd = dictionary of parameters s,v,r,t,q spath:{[u;n;d;pd] / Original asset price pd[`s]* / Wiener path*volatility*time for one timestep exp(wpath[u;d]*pd[`v]*sqrt dt)+ / Calculate sum of interest rate (discounted by dividends) / and half volitility squared for each timestep (1+til n)*(pd[`r]-pd[`q]+.5*v*v:pd`v)*dt:pd[`t]%n } Here we calculate six different asset-price paths and overplot them for comparison. We start by generating the Sobol’ sequences for 8 paths with 512 timesteps, incrementing the Sobol’ index each time. Brownian-bridge approximation is also used. -1"\nGenerated sequences: \n"; show u:sobolrand[nsteps;]each 2+til 8 plt[`:title]"Asset Price Path" plt[`:plot] each spath[;nsteps;d;pd]each u plt[`:show][] Generated sequences 0.25 0.75 0.25 0.75 0.25 0.75 0.25 0.25 0.75 0.75 .. 0.75 0.25 0.75 0.25 0.75 0.25 0.75 0.75 0.25 0.25 .. 0.375 0.625 0.125 0.875 0.875 0.125 0.625 0.125 0.875 0.625 .. 0.875 0.125 0.625 0.375 0.375 0.625 0.125 0.625 0.375 0.125 .. 0.125 0.375 0.375 0.125 0.625 0.875 0.875 0.375 0.125 0.375 .. 0.625 0.875 0.875 0.625 0.125 0.375 0.375 0.875 0.625 0.875 .. 0.3125 0.3125 0.6875 0.5625 0.1875 0.0625 0.9375 0.5625 0.0625 0.8125.. 0.8125 0.8125 0.1875 0.0625 0.6875 0.5625 0.4375 0.0625 0.5625 0.3125.. Convert into option price¶ Lastly, to find a single option price, an average is taken across the asset-price path for the MC/QMC method. This allows for comparison between the predicted price and the Black-Scholes equivalent. The formulae for both the European and Asian options are outlined in S. Kucherenko et al. 2007. For a European call option, the final MC/QMC price is calculated using: Where \(C\) is the final price of the call option, \(r\) is the interest rate, \(T\) is the length of timestep, \(N\) is the finite number of simulated price paths and \(K\) is the strike price. Note that \(max(S^{(i)}_{T}-K,0)\) represents the payoff for a call option. This has been translated into the below function: / MC/QMC Price of European Call Option /* m = number of paths /* u = sequence of random numbers /* d = dictionary with bbridge and boolean for random/sobol /* pd = dictionary of parameters s,v,r,t,q mcEuroCall:{[u;n;d;pd] exp[neg pd[`r]*pd`t]*avg 0 | (last each spath[;n;d;pd]each u)-pd`k } Similarly, for Asian call options, the below is used: Where we integrate over unit hypercube \(H^{n}\). In this case, the payoff for an geometric average Asian call option is calculated as the maximum between 0 and the geometric average of the underlying price, \(S(t)\), minus the strike price, \(K\). 
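Written out (the displayed equation from the original paper is not reproduced in this text; the following is a conventional statement consistent with the mcAsiaCall implementation below), the discounted estimate over \(N\) simulated paths is:

\[ C_{Asian} \approx e^{-rT}\frac{1}{N}\sum_{j=1}^{N}\max\Biggl(\biggl(\prod_{i=1}^{n}S_{j}(t_{i})\biggr)^{1/n}-K,\;0\Biggr) \]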
The final price for an Asian call option can therefore be determined generated by using the below function: / MC/QMC Price of Asian Call Option /* m = number of paths /* n = number of timesteps /* d = dictionary with bbridge and boolean for random/sobol /* pd = dictionary of parameters s,v,r,t,q mcAsiaCall:{[u;n;d;pd] exp[neg pd[`r]*pd`t]*avg 0 | (prd each xexp[;1%n]spath[;n;d;pd]each u)-pd`k } We also need a number-generator function for l trials, m paths and n steps which can be used with the Sobol’ or random-number generators. numgen:{[ng;l;m;n]ng@''$[ng~mtrand3;(l;first m)#n;(0N;m)#1+til l*m]} Here we demonstrate how to run these functions below for 512 timesteps, 256 paths and 5 trials. Sequences are generated for Sobol’ sequences using the above numgen function which will produce a sequence for each path and each trial. ntrials:5 npaths:256 "\nGenerated sequences:\n" 5#u:first numgen[sobolrand nsteps;ntrials;npaths;nsteps] "European Monte Carlo Price: ", string mcEuroCall[u;nsteps;`bb`sobol!(bbex;1b);pd] "Asian Monte Carlo Price: ", string mcAsiaCall[u;nsteps;`bb`sobol!(bbex;1b);pd] Generated sequences: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5.. 0.25 0.75 0.25 0.75 0.25 0.75 0.25 0.25 0.75 0.75 0.25 0.2.. 0.75 0.25 0.75 0.25 0.75 0.25 0.75 0.75 0.25 0.25 0.75 0.7.. 0.375 0.625 0.125 0.875 0.875 0.125 0.625 0.125 0.875 0.625 0.125 0.3.. 0.875 0.125 0.625 0.375 0.375 0.625 0.125 0.625 0.375 0.125 0.625 0.8.. European Monte Carlo Price: 10.28224 Asian Monte Carlo Price: 5.365942 Remembering that the Black-Scholes option prices for the same number of timesteps were: "European Black Scholes Price: ",string bseuro "Asian Black Scholes Price: ",string bsasia European Black Scholes Price: 10.45058 Asian Black Scholes Price: 5.556009 Example¶ In this section we deploy all the aforementioned techniques and compare the results. Multiple threads The example below can be run from the terminal across multiple threads using the following commands: q -s 8 q)\l op.q q)loadfile\`:init.q q)loadfile\`:code/q/run.q where we load in the functions contained within the Option Pricing library using the first two commands and run the example by loading in run.q . Parameters¶ As shown previously, a dictionary of parameters is created which contains the initial asset price s , volatility v , interest rate r , dividends q , expiry t and strike price k . q)show pd:`s`k`v`r`q`t!100 100 .2 .05 0 1 s| 100 k| 100 v| 0.2 r| 0.05 q| 0 t| 1 Additional parameters are also initialized for the number of paths (experiments), steps and trials. q)l:20 / Number of trials q)n:1024 / Number of steps q)show m:"j"$xexp[2;3+til 8] / Number of paths 8 16 32 64 128 256 512 1024 Given that the initial Brownian bridge is the same throughout, it is also initialized and passed in as an argument. q)10#last value bb:bbridge[n;1] bidx ridx lidx lwt rwt sigma ------------------------------- 1023 1023 1023 32 511 1023 -1 0.5 0.5 16 255 511 -1 0.5 0.5 11.31371 767 1023 511 0.5 0.5 11.31371 127 255 -1 0.5 0.5 8 383 511 255 0.5 0.5 8 639 767 511 0.5 0.5 8 895 1023 767 0.5 0.5 8 63 127 -1 0.5 0.5 5.656854 191 255 127 0.5 0.5 5.656854 Run experiments¶ The functions below calculate the RMSE between the Black-Scholes and MC/QMC prices for each market and each MC/QMC technique. Note that we reset the sobolrand index after each set of trials have been completed. 
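As a point of reference (this formula is implied rather than displayed in the original text), the error measure implemented by util.rmse in the code below is the root mean square deviation of the \(L\) trial prices \(C_{i}\) from the Black-Scholes reference price \(C_{BS}\):

\[ \mathrm{RMSE}=\sqrt{\frac{1}{L}\sum_{i=1}^{L}\bigl(C_{i}-C_{BS}\bigr)^{2}} \]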
/ Run all techniques for option pricing /* bb = initial brownian bridge /* pd = dictionary of parameters /* l = number of trials /* n = number of timesteps /* m = number of paths runall:{[bb;pd;l;n;m] / Start timer for European options st:.z.p; / Output column names 0N!util.rcol; / Run experiments for European options e:util.run[`euro;bsEuroCall pd;bb;pd;l;n]each m; / Output total time taken for European -1"European: time taken = ",string[.z.p-st],"\n"; / Start timer for Asian options st:.z.p; / Output column names 0N!util.rcol; / Run experiments for Asian options a:util.run[`asia;bsAsiaCall[n;pd];bb;pd;l;n]each m; / Output total time taken for Asian -1"Asian: time taken = ",string .z.p-st; / Return table with European and Asian prices and errors e,a } / Dictionary keys util.d:`bb`sobol! / Output column names util.rcol:`mkt`npaths`rmse_bb_sobol`rmse_std_sobol`rmse_std_rdm, `prx_bb_sobol`prx_std_sobol`prx_std_rdm`prx_bs / RMSE util.rmse:{sqrt avg x*x-:y} / Run each technique for a specific market /* mkt = market, European/Asian /* bs = Black-Scholes price util.run:{[mkt;bs;bb;pd;l;n;m] / Create project with correct MC function for each market mc:$[mkt~`asia;mcAsiaCall;mcEuroCall][;n;;pd]; / Generate MC option price and calculate error for bbridge and sobol ea:util.rmse[bs]a:mc[;util.d(bb;1b)]each sob:numgen[sobolrand n;l;m;n]; / Generate MC option price and calculate error for standard and sobol eb:util.rmse[bs]b:mc[;util.d(::;1b)]each sob; / Generate MC option price and calculate error for bbridge and random ec:util.rmse[bs]c:mc[;util.d(bb;0b)]each numgen[mtrand3;l;m;n]; / Return dictionary of results util.rcol!0N!(mkt;m;ea;eb;ec;last a;last b;last c;bs) } Compare results¶ At this stage it is possible to plot the results obtained for the option prices, RMSE and log RMSE values. q)r:runall[bb;pd;l;n;m] q)select from r where mkt=`euro / European RMSEs and prices mkt npaths rmse_bb_sobol rmse_std_sobol rmse_std_rdm prx_bb_sobol pr.. ---------------------------------------------------------------------.. euro 8 2.218523 3.328543 4.537168 13.86836 4... euro 16 1.345787 2.442911 3.505794 9.206011 12.. euro 32 0.6865024 1.623545 3.618555 9.879788 11.. euro 64 0.3774031 0.9891046 1.93851 10.90519 11.. euro 128 0.2089234 0.5977986 1.532864 10.34505 11.. euro 256 0.117329 0.3648233 0.9575077 10.52265 10.. euro 512 0.05984563 0.3127605 0.7618771 10.50504 9... euro 1024 0.03176637 0.2853521 0.5670563 10.43112 10.. q)/ Asian RMSEs and prices q)select from r where mkt=`asia mkt npaths rmse_bb_sobol rmse_std_sobol rmse_std_rdm prx_bb_sobol pr.. ---------------------------------------------------------------------.. asia 8 1.044296 2.126291 2.421675 6.461112 5... asia 16 0.6879741 1.37292 1.80831 4.369775 6... asia 32 0.3959254 0.90278 1.167337 5.445392 6... asia 64 0.2453828 0.4006613 0.8905137 5.641087 5... asia 128 0.1543742 0.3089822 0.5973851 5.473975 5... asia 256 0.0771557 0.2283313 0.4139539 5.590241 5... asia 512 0.03863931 0.1614974 0.3102061 5.576155 5... asia 1024 0.01975347 0.166499 0.1831748 5.544304 5... Option prices¶ The plot below shows the option prices produced for each number of paths, compared to the Black-Scholes equivalent (black-dashed line). It is clear that the Sobol-Brownian bridge method converges the fastest. q)prxerrplot[r;`prx] RMSE¶ We can also plot the RMSE produced by comparing the prices for each method as they converge to the relative Black-Scholes price. The expected result is again exhibited, where the Sobol-Brownian bridge method converges the fastest. 
q)prxerrplot[r;`rmse] Log RMSE¶ Lastly, we can look at the log RMSE plot as another means of comparison between the methods. Similarly, we see that the Sobol-Brownian bridge method (blue) exhibits superior performance. q)prxerrplot[r;`logrsme] Conclusion¶ In this paper we demonstrated that it is possible to calculate option prices using both Black-Scholes and Monte Carlo/Quasi-Monte Carlo methods in q. The Monte Carlo/Quasi-Monte Carlo methods deployed different implementations of both Wiener-path approximation and random-number generation. Looking at the results produced, it is clear that both the option price produced and the resulting RMSE/log RMSE converged fastest when compared with the Black-Scholes price for the Quasi-Monte Carlo approach, with Sobol’ sequence number generation and Brownian-bridge construction. Author¶ Deanna Morgan joined First Derivatives in June 2018 as a data scientist in the Capital Markets Training Program and currently works as a machine-learning engineer in London. Acknowledgements¶ I gratefully acknowledge Sergei Kucherenko for allowing us to create a version of the C++ Option Pricing library in q and for providing technical knowledge throughout the project. I would additionally like to acknowledge my colleagues in the KX Machine Learning team for their guidance in the technical aspects of this paper.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="54"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mod ¶ Modulus x mod y mod[x;y] Where x and y are numeric, returns the remainder of x%y . q)-3 -2 -1 0 1 2 3 4 mod 3 0 1 2 0 1 2 0 1 q)7 mod 2 3 4 1 1 3 q)-7 7 mod/:\:-2.5 -2 2 2.5 -2 -1 1 0.5 -0.5 -1 1 2 mod is a multithreaded primitive. Implicit iteration¶ mod is an atomic function. q)(10;20 30)mod(7 13;-12) 3 10 -4 -6 It applies to dictionaries and keyed tables. q)d mod 5 a| 0 4 3 b| 4 0 4 q)5 mod d a| 5 -16 2 b| 1 0 -1 q)k mod 5 k | a b ---| --- abc| 0 4 def| 4 0 ghi| 3 4 Domain and range¶ b g x h i j e f c s p m d z n u v t ---------------------------------------- b | i . i i i j e f . . p m d z n u v t g | . . . . . . . . . . . . . . . . . . x | i . i i i j e f . . p m d z n u v t h | i . i i i j e f . . p m d z n u v t i | i . i i i j e f . . p m d z n u v t j | j . j j j j e f . . p m d z n u v t e | f . f f f f f f f . f f z z f f f f f | f . f f f f f f f . f f z z f f f f c | . . . . . . . f . . p m d z n u v t s | . . . . . . . . . . . . . . . . . . p | n . n n n n n f n . . . . . . . . . m | i . i i i i i f i . . . . . . . . . d | i . i i i i i . i . . . . . . . . . z | f . f f f f f f f . . . . . . . . . n | n . n n n n n f n . . . . . . . . . u | u . u u u u u f u . . . . . . . . . v | v . v v v v v f v . . . . . . . . . t | t . t t t t t f t . . . . . . . . . Range: defijmnptuvz % Divide, div , reciprocal Mathematics Q for Mortals: §4.8.1 Integer Division div and Modulus mod * Multiply¶ x*y *[x;y] Where x and y are conformable numerics or temporals, returns their product. q)3 4 5*2.2 6.6 8.8 11 q)1.1*`a`b`c!5 10 20 a| 5.5 b| 11 c| 22 q)t:([]price:10 20 30;qty:200 150 17) q)t*\:1.15 1 /raise all prices 15% price qty --------- 11.5 200 23 150 34.5 17 q)update price:price*1+.15*qty<50 from t /raise prices 15% where stock<50 price qty --------- 10 200 20 150 34.5 17 * is a multithreaded primitive. 
Implicit iteration¶ Multiply is an atomic function. q)(10;20 30)*(2;3 4) 20 60 120 It applies to dictionaries and tables. q)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 -21 3;4 5 -6) q)d*2 a| 20 -42 6 b| 8 10 -12 q)d*`b`c!(10 20 30;1000*1 2 3) / upsert semantics a| 10 -21 3 b| 40 100 -180 c| 1000 2000 3000 q)t*100 a b ---------- 1000 400 -2100 500 300 -600 q)k*k k | a b ---| ------ abc| 100 16 def| 441 25 ghi| 9 36 Range and domains¶ b g x h i j e f c s p m d z n u v t ---------------------------------------- b | i . i i i j e f . . p m d z n u v t g | . . . . . . . . . . . . . . . . . . x | i . i i i j e f . . p m d z n u v t h | i . i i i j e f . . p m d z n u v t i | i . i i i j e f . . p m d z n u v t j | j . j j j j e f . . p m d z n u v t e | e . e e e e e f . . p m d z n u v t f | f . f f f f f f f . f f z z f f f f c | . . . . . . . f . . p m d z n u v t s | . . . . . . . . . . . . . . . . . . p | p . p p p p p f p . . . . . . . . . m | m . m m m m m f m . . . . . . . . . d | d . d d d d d z d . . . . . . . . . z | z . z z z z z z z . . . . . . . . . n | n . n n n n n f n . . . . . . . . . u | u . u u u u u f u . . . . . . . . . v | v . v v v v v f v . . . . . . . . . t | t . t t t t t f t . . . . . . . . . Range: defijmnptuvz neg ¶ Negate neg x neg[x] Returns the negation of boolean or numeric x . A null has no sign, so is its own negation. q)neg -1 0 1 2 1 0 -1 -2 q)neg 01001b 0 -1 0 0 -1i q)neg (0W;-0w;0N) / infinities and a null -0W 0w 0N q)neg 2000.01.01 2012.01.01 / negates the underlying data value 2000.01.01 1988.01.01 An atomic function. neg is a multithreaded primitive. Domain and range¶ domain b g x h i j e f c s p m d z n u v t range i . i h i j e f i . p m d z n u v t Range: ihjefpmdznuvt not , Subtract Mathematics Q for Mortals §4.3.2 Not Zero not Q for Mortals §4.9.2 Temporal Arithmetic next , prev , xprev ¶ Immediate or near neighbors What if just under this layer of life you could find the old one, moving forward just the same, and just above, what’s yet to come — Emily Berry, Unexhausted Time next ¶ Next item/s in a list next x next[x] Where x is a list, for each item in x , returns the next item. For the last item, it returns a null if the list is a vector, otherwise an empty list () . q)next 2 3 5 7 11 3 5 7 11 0N q)next (1 2;"abc";`ibm) "abc" `ibm `int$() Duration of a quote: q)update (next time)-time by sym from quote next is a uniform function. prev ¶ Immediately preceding item/s in a list prev x prev[x] Where x is a list, for each item, returns the previous item. For the first item, it returns a null if the list is vector, otherwise an empty list () . q)prev 2 3 5 7 11 0N 2 3 5 7 q)prev (1 2;"abc";`ibm) `int$() 1 2 "abc" Shift the times in a table: q)update time:prev time by sym from t prev is a uniform function. xprev ¶ Nearby items in a list x xprev y xprev[x;y] Where x is a long atom and y is a list, returns for each item of y the item x indices before it. The first x items of the result are null, empty or blank as appropriate. There is no xnext function. Fortunately xprev with a negative number on the left can achieve this. q)2 xprev 2 7 5 3 11 0N 0N 2 7 5 q)-2 xprev 2 7 5 3 11 5 3 11 0N 0N q)1 xprev "abcde" " abcd" xprev is a right-uniform function. not ¶ Not zero not x not[x] Returns 0b where x not equal to zero, and 1b otherwise. Applies to all data types except symbol, and to items of lists, dictionary values and table columns, referring to the underlying data value. Nulls and infinities never equal zero. 
q)not -1 0 1 2 0100b q)not "abc","c"$0 0001b q)not `a`b!(-1 0 2;"abc","c"$0) a| 010b b| 0001b q)not 2000.01.01 2020.06.30 10b q)not 00:00:00 1b q)not 12:00:00.000000000 0b q)not (0W;-0w;0N) 000b An atomic function. not is a multithreaded primitive. neg Logic Q for Mortals §4.3.2 Not Zero not <> Not Equal¶ x<>y <>[x;y] This atomic binary operator returns 1b where (items of) x are less than y . q)(3;"a")<>(2 3 4;"abc") 101b 011b Equal = Comparison Q for Mortals: §4.3.1 Equality = and Disequality <> <> Not Equal¶x<>y <>[x;y] This atomic binary operator returns 1b where (items of) x are less than y . q)(3;"a")<>(2 3 4;"abc") 101b 011b Equal = Comparison Q for Mortals: §4.3.1 Equality = and Disequality <> null ¶ Is null null x null[x] Returns 1b where x is null. Applies to all data types except enums, and to items of lists, dict values and table columns. null is an atomic function. q)null 0 0n 0w 1 0n 01001b q)where all null ([] c1:`a`b`c; c2:0n 0n 0n; c3:10 0N 30) ,`c2 Enums always show as non-null. q)a:``a q)`=`a$` / non-enumerated and enumerated null symbol show as equivalent 1b q)null` / null symbol behaves as expected 1b q)null`a$` / enumeration of null symbol does not 0b The intention was not to have nulls in the enums. That value is used to indicate out of range. (Think of them as a way to represent foreign keys.) To test for an enumeration backed by a null symbol, one can use the equality test – but at the cost of CPU cycles: q)a:10000000?`8 q)v:`a$a q)\ts null v 18 16777344 q)\ts `=v 66 268435648 null is a multithreaded primitive.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="55"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// Set the timer to 200ms if not set already if[not system"t"; system"t 200"]]; if[@[value;`.proc.lowerpowermode;0b]; if[.timer.enabled; // Set the timer to 1000ms if lowpowermode system"t 1000"]]; \ f:{0N!`firing;x+1} f1:{0N!`firing;system"sleep ",string x} repeat[.proc.cp[];.proc.cp[]+0D00:01;0D00:00:15;(f1;2);"test timer"] rep[.proc.cp[];.proc.cp[]+0D00:01;0D00:00:15;(f1;3);0h;"test timer";1b] rep[.proc.cp[];.proc.cp[]+0D00:01;0D00:00:15;(f1;4);1h;"test timer";1b] once[.proc.cp[]+0D00:00:10;(`.timer.f;2);"test once"] .dotz.set[`.z.ts;run] \t 500 ================================================================================ FILE: TorQ_code_common_timezone.q SIZE: 1,203 characters ================================================================================ // taken from http://code.kx.com/wiki/Cookbook/Timezones \d .tz default:@[value;`default;`$"Europe/London"] // Load the timezone info from the config directory t:@[get;hsym`$tzfile;{.lg.e[`init;"failed to load timezone table from ",x," ",y]}[tzfile:string first .proc.getconfigfile["tzinfo"]]] // local from GMT lg:{[tz;z] $[0>type z;first;(::)]@exec gmtDateTime+adjustment from aj[`timezoneID`gmtDateTime;([]timezoneID:tz;gmtDateTime:z,());select timezoneID,gmtDateTime,adjustment from t]}; // GMT from local gl:{[tz;z] $[0>type z;first;(::)]@exec localDateTime-adjustment from aj[`timezoneID`localDateTime;([]timezoneID:tz;localDateTime:z,());select timezoneID,localDateTime,adjustment from t]}; // timezone switch // d = destination time zone // s = source timezone // z = time ttz:{[d;s;z]lg[d;gl[s;z]]} // default from GMT dg:lg[default] // GMT from default 
gd:gl[default] \ \d . / To recreate tzinfo from tzinfo.csv t:("SPJ";enlist ",")0:`:tzinfo.csv; t: delete offset from update adjustment:`timespan$1000000000*offset from t; t: update localDateTime:gmtDateTime+adjustment from t; t: `gmtDateTime xasc t; t: update `g#timezoneID from t; `:tzinfo set t; / save file for easy distribution ================================================================================ FILE: TorQ_code_common_tplogutils.q SIZE: 3,435 characters ================================================================================ / - functions for checking and repairing (if required) a tickerplant log file \d .tplog HEADER: 8 # -8!(`upd;`trade;()); / - header to build deserialisable msg UPDMSG: `char$10 # 8 _ -8!(`upd;`trade;()); / - first part of tp update msg CHUNK: 10 * 1024 * 1024; / - size of default chunk to read (10MB) MAXCHUNK: 8 * CHUNK; / - don't let single read exceed this check: {[logfile;lastmsgtoreplay] / - logfile (symbol) is the handle to the logsfile / - lastmsgtoreplay (long) is index position of the last message to be replayed from the log .lg.o[`tplog.check;"Checking ",string[logfile]," . Index of last message to replay is : ",string lastmsgtoreplay]; / - check if the logfile is corrupt loginfo: -11!(-2;logfile); .lg.o[`tplog.check;"Finished running check on log file. Result is : ",.Q.s1 loginfo]; :$[ 1 = count loginfo; / - the log file is good so return the good log file handle [.lg.o[`tplog.check;"The logfile is not corrupt"];logfile]; / - elseif the number of messages to be replayed is lower than the number of good messages then don't bother repairing the log loginfo[0] <= lastmsgtoreplay + 1; [.lg.o[`tplog.check;"The logfile is corrupt but the number of messages to replay (",string[lastmsgtoreplay + 1],") is less than the number of messages (",string[loginfo 0],")that can be read from the log"];logfile]; / - else run the repair function and return out the handle for the "good" log [.lg.o[`tplog.check;"The logfile is corrupt, attempting to write a good log"];repair[logfile]] ] }; repair: {[logfile] / - append ".good" to the "good" log file goodlog: `$ string[logfile],".good"; .lg.o[`tplog.repair;"Writing good log to ",string goodlog]; / - create file and open handle to it goodlogh: hopen goodlog set (); / - loop through the file in chunks .lg.o[`tplog.repair;"Starting to loop through the log file - ",string logfile]; repairover[logfile;goodlogh] over `start`size!(0j;CHUNK); .lg.o[`tplog.repair;"Finished looping through the log file - ",string logfile]; / - return goodlog goodlog }; repairover: {[logfile;goodlogh;d] / - logfile (symbol) is the handle to the logsfile / - goodlogh (int) is the handle to the "good" log file / - d (dictionary) has two keys start and size, the point to start reading from and size of chunk to read .lg.o[`tplog.repairover;"Reading logfile with an offset of : ",string[d`start]," bytes and a chunk of size : ",string[d`size]," bytes"]; x:read1 logfile,d`start`size; / - read <size> bytes from <start> u: ss[`char$x;UPDMSG]; / - find the start points of upd messages if[not count u; / - nothing in this block if[hcount[logfile] <= sum d`start`size;:d]; / - EOF - we're done :@[d;`start;+;d`size]]; / - move on <size> bytes m: u _ x; / - split bytes into msgs mz: 0x0 vs' `int$ 8 + ms: count each m; / - message sizes as bytes hd: @[HEADER;7 6 5 4;:;] each mz; / - set msg size at correct part of hdr g: @[(1b;)@-9!;;(0b;)@] each hd,'m; / - try and deserialize each msg goodlogh g[;1] where k:g[;0]; / - write good msgs to the "good" log 
if[not any k; / - saw msg(s) but couldn't read if[MAXCHUNK <= d`size; / - read as much as we dare, give up :@[d;`start`size;:;(sum d`start`size;CHUNK)]]; :@[d;`size;*;2]]; / - read a bigger chunk ns: d[`start] + sums[ms] last where k; / - move to the end of the last good msg :@[d;`start`size;:;(ns;CHUNK)]; }; ================================================================================ FILE: TorQ_code_common_u.q SIZE: 1,131 characters ================================================================================ /2016.07.22 torq edit - added broadcast /2008.09.09 .k -> .q /2006.05.08 add \d .u broadcast:@[value;`broadcast;1b]; // broadcast publishing is on by default. Availble in kdb version 3.4 or later. init:{w::t!(count t::tables`.)#()} del:{w[x]_:w[x;;0]?y}; .dotz.set[`.z.pc;{del[;x]each t}]; sel:{$[`~y;x;select from x where sym in y]} pub:{[t;x]{[t;x;w]if[count x:sel[x]w 1;(neg first w)(`upd;t;x)]}[t;x]each w t} add:{$[(count w x)>i:w[x;;0]?.z.w;.[`.u.w;(x;i;1);union;y];w[x],:enlist(.z.w;y)];(x;$[99=type v:value x;sel[v]y;@[0#v;`sym;`g#]])} sub:{if[x~`;:sub[;y]each t];if[not x in t;'x];del[x].z.w;add[x;y]} end:{(neg union/[w[;;0]])@\:(`.u.end;x)} // broadcasting. will override .u.pub with -25! if[broadcast and .z.K>=3.4; // group subscribers by their sym subscription pub_broadcast:{[t;x] subgroups:flip (w[t;;0]@/:value g;key g:group w[t;;1]); {[t;x;w] if[count x:sel[x]w 1;-25!(w 0;(`upd;t;x))] }[t;x] each subgroups}; // store the old definition pub_default:pub; // override .u.pub pub:pub_broadcast; ]; ================================================================================ FILE: TorQ_code_dataaccess_checkinputs.q SIZE: 7,886 characters ================================================================================ \d .dataaccess // checkinputs is the main function called when running a query - it checks: // (i) input format // (ii) whether any parameter pairs clash // (iii) parameter specific checks // The input dictionary accumulates some additional table information/inferred info checkinputs:{[dict] if[not in[`checksperformed;key dict];dict:.checkinputs.checkinputs dict]; dict:checktablename dict; if[in[`columns;key dict];.dataaccess.checkcolumns[dict`tablename;dict`columns;`columns];dict:rdbdate[dict;`columns]]; if[in[`timecolumn;key dict];dict:.dataaccess.checktimecolumn[dict];dict:rdbdate[dict;`timecolumn]]; dict:filldefaulttimecolumn dict; if[in[`instrumentcolumn ;key dict];.dataaccess.checkcolumns[dict`tablename;dict`instrumentcolumn;`instrumentcolumn ]]; if[in[`aggregations;key dict];.dataaccess.checkaggregations dict;dict:rdbdate[dict;`aggregations]]; if[in[`filters;key dict];.dataaccess.checkcolumns[dict`tablename;key dict`filters;`filters]]; if[in[`grouping;key dict];.dataaccess.checkcolumns[dict`tablename;dict`grouping;`grouping];dict:rdbdate[dict;`grouping]]; if[in[`timebar;key dict];.dataaccess.checktimebar dict;dict:rdbdate[dict;`timebar]]; if[in[`freeformwhere;key dict];.dataaccess.checkfreeformwhere dict;dict:freeformrdbdate[dict;`freeformwhere]]; if[in[`freeformby;key dict];.dataaccess.checkfreeformby dict;dict:freeformrdbdate[dict;`freeformby]]; if[in[`freeformcolumn;key dict];.dataaccess.checkfreeformcolumns dict;dict:freeformrdbdate[dict;`freeformcolumn]]; if[in[`sqlquery;key dict];'`$.checkinputs.formatstring[.schema.errors[`sqlquery;`errormessage];.proc.proctype]]; if[in[`firstlastsort;key dict];'`$.checkinputs.formatstring[.schema.errors[`firstlastsort;`errormessage];.proc.proctype]]; :dict; }; // function to check the validity of tablenames 
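// - the tablename must appear in .checkinputs.tablepropertiesconfig for this proctype (or ` / `all)
// - table properties are then joined onto the request dictionary and start/end times added under metainfo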
checktablename:{[dict] if[not dict[`tablename]in exec tablename from .checkinputs.tablepropertiesconfig where proctype in (.proc.proctype,`,`all); '`$.checkinputs.formatstring[.schema.errors[`tableexists;`errormessage];dict]]; dict:.checkinputs.jointableproperties dict; :update metainfo:(metainfo,`starttime`endtime!(starttime;endtime))from dict; }; //check that time column is of the correct type checktimecolumn:{[dict] .dataaccess.checkcolumns[dict`tablename;dict`timecolumn;`timecolumn]; if[dict[`timecolumn]~`date;:dict]; if[not first (exec t from meta dict`tablename where c=(dict[`timecolumn])) in "pzd";'`$.checkinputs.formatstring["Parameter:`timecolumn - column:{column} in table:{table} is of type:{type}, validtypes:-12 -14 -15h";`column`table`type!(dict`timecolumn;dict`tablename;(type( exec from dict`tablename)dict`timecolumn))]]; :dict; }; // function to fill in default columns to reduce the amount of information a user has to // fill in filldefaulttimecolumn:{[dict] if[not `timecolumn in key dict; :@[dict;`timecolumn;:;.checkinputs.getdefaulttime dict]]; :dict; }; // function to check the validity of columns with respect to the chosen tablename // parameter checkcolumns:{[table;columns;parameter] if[not all(`~columns)& parameter~`columns; columns,:(); avblecols:`i`date,cols table; if[any not in[columns;avblecols]; badcol:columns where not in[columns;avblecols]; '`$.checkinputs.formatstring[.schema.errors[`checkcolumns;`errormessage];`badcol`tab`parameter!(badcol;table;parameter)]]];};</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="56"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">The q language¶ Q is the programming system for working with kdb+. This corresponds to SQL for traditional databases, but unlike SQL, q is a powerful programming language in its own right. Q is an interpreted language. Q expressions can be entered and executed in the q console, or loaded from a q script, which is a text file with extension .q . You need at least some familiarity with q to use kdb+. Try following the examples here in the q console interface. The following pages will also be useful: Loading q¶ You load q by changing to the main q directory, then running the q executable. Note that you should not just click the q executable from the file explorer – this will load q but start in the wrong directory. It is best to create a start-up batch file or script to do this, and there are examples in the q/start directory, see q.bat (Windows), q.sh (Linux) and q.app (macOS). For example, the Windows q.bat is: c: cd \q w32\q.exe %* In Linux/macOS, it is best to call the q executable under rlwrap to support line recall and edit. The Linux q.sh script is: #!/bin/bash cd ~/q rlwrap l32/q "$@" First steps¶ Once q is loaded, you can enter expressions for execution: q)2 + 3 5 q)2 + 3 4 7 5 6 9 You can confirm that you are in the QHOME directory by calling a directory list command, e.g. q)\ls *.q ... "sp.q" ... q)\dir *.q ... "sp.q" ... Command-line options Command-line options e.g. q profile.q -p 5001 - loads script profile.q at startup. This can in turn load other scripts. - sets listening port to 5001 At any prompt, enter \\ to exit q. Console modes¶ The usual prompt is q) . 
Sometimes a different prompt is given; you need to understand why this is, and how to return to the standard prompt. - If a function is suspended, then the prompt has two or more ) . In this case, enter a single\ to remove one level of suspension, and repeat until the prompt becomesq) . For example:q)f:{2+x} / define function f q)f `sym / function call fails with symbol argument {2+x} / and is left suspended 'type + 2 `sym q))\ / prompt becomes q)). Enter \ to return to usual prompt q) - If there is no suspension, then a single \ will toggle between q and k modes:q)count each (1 2;"abc") / q expression for length of each list item 2 3 q)\ / toggle to k mode #:'(1 2;"abc") / equivalent k expression 2 3 \ / toggle back to q mode q) - If you change namespace, then the prompt includes the namespace. q)\d .h / change to .h namespace q.h)\d . / change back to default namespace q) Basics: System command \d Error messages¶ Error messages are terse. The format is a single quote, followed by error text: q)1 2 + 10 20 30 / cannot add 2 numbers to 3 numbers 'length q)2 + "hello" / cannot add number to character 'type Basics: Errors Introductory examples¶ To gain experience with the language, enter the following examples and explain the results. Also experiment with similar expressions. q)x:2 5 4 7 5 q)x 2 5 4 7 5 q)count x 5 q)8 # x 2 5 4 7 5 2 5 4 q)2 3 # x 2 5 4 7 5 2 q)sum x 23 q)sums x 2 7 11 18 23 q)distinct x 2 5 4 7 q)reverse x 5 7 4 5 2 q)x within 4 10 01111b q)x where x within 4 10 5 4 7 5 q)y:(x;"abc") / list of lists q)y 2 5 4 7 5 "abc" q)count y 2 q)count each y 5 3 The following is a function definition, where x represents the argument: q)f:{2 + 3 * x} q)f 5 17 q)f til 5 2 5 8 11 14 Q makes essential use of a symbol datatype: q)a:`toronto / symbol q)b:"toronto" / character string q)count a 1 q)count b 7 q)a="o" `type q)b="o" 0101001b q)a~b / a is not the same as b 0b q)a~`$b / `$b converts b to symbol 1b Data structures¶ Q basic data structures are atoms (singletons) and lists. Other data structures like dictionaries and tables are built from lists. For example, a simple table is just a list of column names associated with a list of corresponding column values, each of which is a list. q)item:`nut / atom (singleton) q)items:`nut`bolt`cam`cog / list q)sales: 6 8 0 3 / list q)prices: 10 20 15 20 / list q)(items;sales;prices) / list of lists nut bolt cam cog 6 8 0 3 10 20 15 20 q)dict:`items`sales`prices!(items;sales;prices) / dictionary q)dict items | nut bolt cam cog sales | 6 8 0 3 prices| 10 20 15 20 q)tab:([]items;sales;prices) / table q)tab items sales prices ------------------ nut 6 10 bolt 8 20 cam 0 15 cog 3 20 Note that a table is a flip (transpose) of a dictionary: q)flip dict items sales prices ------------------ nut 6 10 bolt 8 20 cam 0 15 cog 3 20 The table created above is an ordinary variable in the q workspace, and could be written to disk. In general, you create tables in memory and then write to disk. 
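As a minimal illustration (the file name `:tab below is arbitrary), such a table can be written to and read back from disk with set and get:

q)`:tab set tab    / serialize the table to a file named tab in the current directory
`:tab
q)tab~get `:tab    / read it back; identical to the in-memory original
1b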
Since it is a table, you can use SQL-like query expressions on it: q)select from tab where prices < 20 items sales prices ------------------ nut 6 10 cam 0 15 Since it is an ordinary variable, you can also index it and do other typical data manipulations: q)tab 1 3 / index rows 1 and 3 items sales prices ------------------ bolt 8 20 cog 3 20 q)tab `sales / index column sales 6 8 0 3 q)tab, tab / join two copies items sales prices ------------------ nut 6 10 bolt 8 20 cam 0 15 cog 3 20 nut 6 10 bolt 8 20 cam 0 15 cog 3 20 A keyed table has one or more columns as keys: q)1!tab / keyed table items| sales prices -----| ------------ nut | 6 10 bolt | 8 20 cam | 0 15 cog | 3 20 Functions, operators, keywords, iterators¶ All functions take arguments on their right in brackets. Operators can also take arguments on left and right, as in 2+2 (infix syntax). Iterators take value arguments on their left (postfix syntax) and return derived functions. q)sales * prices / operator: * 60 160 0 60 q)sum sales * prices / keyword: sum 280 q)sumamt:{sum x*y} / define lambda: sumamt q)sumamt[sales;prices] 280 q)(sum sales*prices) % sum sales / calculate weighted average 16.47059 q)sales wavg prices / keyword: wavg 16.47059 q)sales , prices / operator: , join lists 6 8 0 3 10 20 15 20 q)sales ,' prices / iterator: ' join lists in pairs 6 10 8 20 0 15 3 20 Functions can apply to dictionaries and tables: q)-2 # tab items sales prices ------------------ cam 0 15 cog 3 20 Functions can be used within queries: q)select items,sales,prices,amount:sales*prices from tab items sales prices amount ------------------------- nut 6 10 60 bolt 8 20 160 cam 0 15 0 cog 3 20 60 Scripts¶ A q script is a plain text file with extension .q , which contains q expressions that are executed when loaded. For example, load the script KxSystems/kdb/sp.q and display the s table that it defines: q)\l sp.q / load script q)s / display table s s | name status city --| ------------------- s1| smith 20 london s2| jones 10 paris s3| blake 30 paris s4| clark 20 london s5| adams 30 athens Within a script, a line that contains a single / starts a comment block. A line with a single \ ends the comment block, or if none, exits the script. A script can contain multi-line definitions. Any line that is indented is taken to be a continuation of the previous line. Blank lines, superfluous blanks, and lines that are comments (begin with / ) are ignored in determining this. For example, if a script has contents: a:1 2 / this is a comment line 3 + 4 b:"abc" Then loading this script would define a and b as: q)a 5 6 7 / i.e. 1 2 3 + 4 q)b "abc" Multi-line function definitions In scripts, indentation allows function definitions to span multiple lines. fn:{[x,y] a:x*2.5; b:x+til floor y; a & b } The convention entails that in a multi-line definition the closing brace must also be indented. It is less likely to get misplaced if suffixed to the last line. Q queries¶ Q queries are similar to SQL, though often much simpler. Loading the script KxSystems/kdb/sp.q to populate tables s ,p and sp we can show some query examples: \l sp.q q)select from p where weight=17 p | name color weight city --| ------------------------ p2| bolt green 17 paris p3| screw blue 17 rome SQL statements can be entered, if prefixed with s) . 
q)s)select * from p where color in (red,green) / SQL query p | name color weight city --| ------------------------- p1| nut red 12 london p2| bolt green 17 paris p4| screw red 14 london p6| cog red 19 london The q equivalent would be: q)select from p where color in `red`green Similarly, compare: q)select distinct p,s.city from sp s)select distinct sp.p,s.city from sp,s where sp.s=s.s and q)select from sp where s.city=p.city s)select sp.s,sp.p,sp.qty from s,p,sp where sp.s=s.s and sp.p=p.p and p.city=s.city Note that the dot notation in q automatically references the appropriate table. Q results can have lists in the rows. q)select qty by s from sp s | qty --| ----------------------- s1| 300 200 400 200 100 400 s2| 300 400 s3| ,200 s4| 100 200 300 ungroup will flatten the result. q)ungroup select qty by s from sp s qty ------ s1 300 s1 200 s1 400 s1 200 ... Calculations can be performed on the intermediate results. q)select countqty:count qty,sumqty:sum qty by p from sp p | countqty sumqty --| --------------- p1| 2 600 p2| 4 1000 p3| 1 400 p4| 2 500 p5| 2 500 p6| 1 100 About this site¶ This site is the official documentation for kdb+ and the q programming language. It reflects the work of the KX community since 1993, has many authors, and continues to evolve. Search¶ The Search Box on this site is customized for the q language. Some examples: Operator glyphs $ ^ . <> /: ': and their names dollar bang at Operator names Drop roll Enum Extend Keywords xbar like ajf0 uj Namespace objects .z.pd .Q.dpfts System commands \d \ts \_ \\ Command-line options -b -p Internal functions -11! Popular queries types datatypes Queries not matched by the Search Box are handled by Google Search. Install man.q to open the Reference direct from the q session. GitHub¶ Truncated GitHub URLs are prefixed with the GitHub icon and omit the https://github.com/ prefix. For example, read KxSystems/kdb as https://github.com/KxSystems/kdb . Contribute¶ A finished work is exactly that, requires resurrection. — John Cage, “Lecture on Nothing”, 1949 The repository for this site is KxSystems/docs. The contribution model is GitHub and Forking Workflow. To contribute, submit a pull request. The repo includes a style guide for contributors. We gratefully acknowledge pull requests from Alexander Belopolsky James Hanna Aleks Bunin Jason Quinn Angus Wilson kylenarocroc Andrew in New York Letian Wang Alex Shroyer Mohammad Noor Bob Herrmann Peter Storeng Chris Shucksmith Sean Keevey Cillian Reilly Sean O’Hagan Conor McCarthy Rian Ó Cuinneagáin David Crossey Rikesh David Lu Thomas Smyth David Z. Han Sergey Vidyuk Deanna Morgan Simon Shanks Diane O’Donoghue Simon Watson Esperanza Lopez Aguilera Vincent Bernardoff Geo Carncross William Da Silva Ian O’Dwyer License¶ This work is licensed under a Creative Commons Attribution 4.0 International License. Images¶ This site includes images for which KX holds neither copyright nor permission. These images serve as links to their original sites. We understand this to be fair use. 
If you are a copyright holder and object to this use, please write to <a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="ef8b808c9caf8497c18c8082">[email protected]</a>. Terminology¶ In 2018 and 2019 we made changes to the terminology used to describe the q language. Citations¶ How to cite the q programming language: Bibtex format @misc{OMS, author= {{Kx Systems, Inc.}}, year = {2020}, title = {Documentation for kdb+ and q}, note = {\url{https://code.kx.com/q/ref/}, Last accessed on 2020-04-15}, } - Chicago style - “Reference Card.” Documentation for kdb and q . Kx Systems, Inc. Accessed April 15, 2020. https://code.kx.com/q/ref/. - Harvard style - Documentation for kdb+ and q. 2020. Reference Card. [online] Available at: https://code.kx.com/q/ref/ [Accessed 15 April 2020]. - Vancouver style - Reference Card [Internet]. Documentation for kdb and q. Kx Systems, Inc.; 2020 [cited 2020Apr15]. Available from: https://code.kx.com/q/ref/ Citation Machine for other citation styles Wiki¶ The KX wiki was the primary documentation for q and kdb+ until January 2017. The content, which runs on Mediawiki, has been archived on GitHub. Performance of Intel Optane persistent memory¶ Key findings Use of Intel® Optane™ persistent memory (PMem) as a block-storage device with KX Streaming Analytics delivers 4× to 12x improved analytics performance compared to high-performance NVMe storage – similar performance to DRAM for query workloads. For key data-processing workloads, we found DRAM requirements were significantly reduced. PMem lets organizations support more demanding analytic workloads on more data with less infrastructure. Setup and evaluation¶ Hardware setup¶ We configured two systems: | configuration | baseline without Optane | with Optane | |---|---|---| | Server and operating system | Supermicro 2029U-TN24R4T Centos 8 | | | RAM | 768 GB RAM (2666 MHz) LRDIMMs | | | CPU | 36 physical cores: 2 × Intel® Xeon® Gold 6240L Gen.2 2.6 GHz CPU Hyper-threading turned on | | | Optane Persistent Memory | n/a | 12 DIMMs × 512 GB NMA1XXD512GPS | | Log | RAID 50 data volume 24× NVMe P4510 NVME | Intel Optane² persistent memory 3 TB EXT4³ DAX | | Intraday Database ≤ 24 Hrs | 48 TB RAIDIX ERA¹ Raid Software, XFS 6 RAID 5 Groups, 64 chunk | Intel Optane persistent memory 3 TB EXT4 DAX | | Historical Database > 24 Hrs | RAID 50 data volume Same as baseline configuration | Environment setup and testing approach¶ We configured a KX Streaming Analytics system operating in a high-availability (HA) cluster, processing and analyzing semiconductor manufacturing data as follows. We ran tests on both configurations for ingestion, processing, and analytics. Tests were run with the same data and durations. 
Publishing and ingestion¶ - Publish and ingest over 2.25M sensor readings in 894 messages per second, 2.5 TB per day - Ingest sensor trace, aggregation, event, and IoT data using four publishing clients from a semiconductor front-end processing environment Analytics¶ - 81 queries per second spanning real-time data, intraday data (< 24 hours), and historical data - 100 queries at a time targeting the real-time database (DRAM), intra-day database (on Intel Optane PMem), and historical database (on NVMe storage) - Single-threaded calculation and aggregation tests targeted at the in-memory database and intra-day database Data processing¶ - Perform a data-intensive process, entailing reading and writing all of the data ingested for the day High availability and replication¶ - System ran 24×7 with real-time replication to secondary node - Logged all data ingested, to support data protection and recovery - Data fed to two nodes, mediated to ensure no data loss in event of disruption to the primary system Data model and ingestion¶ The data-model workload involved multiple tables representing reference or master data, sensor reading, event and aggregation data. This relational model is used in fulfilling streaming analytics and queries spanning real-time and historical data. KX ingests raw data streams, processes and persists data into the following structure. For efficient queries and analytics, KX batches and stores data for similar time ranges together, using one or more sensor or streaming data loaders. The tables and fields used in our configuration are illustrated below. Test results¶ Reading and writing to disk¶ We used the kdb+ nano I/O benchmark for reading and writing data to a file system backed by block storage. The nano benchmark calculates basic raw I/O capability of non-volatile storage, as measured by kdb+. Note the cache is cleared after each test, unless otherwise specified. Bypassing page cache With most block storage devices, data is read into page cache to be used by the application. However, reads and writes to Intel Optane persistent memory configured as block storage bypass page cache (in DRAM). This improves overall system performances and lowers demand on the Linux kernel for moving data in/out of page cache and for overall memory management. Read performance (Intel Optane persistent memory as block device vs NVMe storage): - 2× to 9× faster reading data from 36 different files in parallel - Comparable to retrieving data from page cache (near DRAM performance) - 41× better for reading a file in a single thread. 
Write performance: - 42% slower than NVMe devices, due to striping only across 6 DIMM devices vs 24 NVME drives - Similar single-threaded write performance across the two configurations | before | after | comparison4 | |||| |---|---|---|---|---|---|---| | NVMe | PMem | PMem vs NVMe | |||| | Threads | 1 | 36 | 1 | 36 | 1 | 36 | | Total Write Rate (sync) | 1,256 | 5,112 | 1,137 | 2,952 | 0.91 | 0.58 | | Total create list rate | 3,297 | 40,284 | 4,059 | 30,240 | 1.23 | 0.75 | | Streaming Read (mapped) | 1,501 | 12,702 | 61,670 | 118,502 | 41.08 | 9.33 | | Walking List Rate | 2,139 | 9,269 | 3,557 | 28,657 | 1.66 | 3.09 | | Streaming ReRead (mapped) Rate (from DRAM for NVMe) | 35,434 | 499,842 | 101,415 | 479,194 | 2.86 | 0.96 | | random1m | 828 | 12,050 | 1,762 | 24,700 | 2.13 | 2.05 | | random64k | 627 | 8,631 | 1,905 | 36,970 | 3.04 | 4.28 | | random1mu | 607 | 10,216 | 1,099 | 14,679 | 1.81 | 1.44 | | random64ku | 489 | 6,618 | 1,065 | 8,786 | 2.18 | 1.33 | Query performance¶ We tested query performance by targeting data that would be cached in DRAM, on Intel Optane PMem, and NVMe drives, with parallel execution of each query using multiple threads where possible. Each query involved retrieving trace data with a range of parameters including equipment, chamber, lot, process plan, recipe, sequence, part, sensor, time range, columns of data requested. The parameters were randomized for time range of 10 minutes. Query response times using Intel Optane persistent memory were comparable to DRAM and 3.8× to 12× faster than NVMe. | QUERY PROCESSES | COMPARISONS | |||| |---|---|---|---|---|---| | DRAM RDB 2 | PMem IDB 8 | NVMe HDB 8 | PMem vs DRAM4 | PMem vs NVMe | | | 1 query at a time | ||||| | Mean response time (ms) | 23 | 26 | 319 | 1.17 | 12.10 | | Mean payload size (KB) | 778 | 778 | 668 | 1 | 1 | | 100 queries at a time | ||||| | Mean response time (ms) | 100 | 82 | 310 | 0.82 | 3.77 | | Mean payload size (KB) | 440 | 440 | 525 | 1 | 1 | Two real-time database query processes were configured matching typical configurations, with each process maintaining a copy of the recent data in DRAM. (Additional real-time processes could be added to improve performance with higher query volumes at the cost of additional DRAM.) Data-processing performance¶ KX Streaming Analytics enables organizations to develop and execute data and storage I/O intensive processes. We compare the performance of a mix of PMem with NVMe storage to NVMe-only storage configuration when reading significant volume of data from the intraday database and persisting it to the historical database on NVME storage. By reading data from PMem and writing to NVMe-backed storage, Optane cut data processing time by 1.67× and reduced the RAM required by 37%. 
|  | before | after |  |
|---|---|---|---|
|  | NVMe only, no PMem | PMem & NVMe | PMem vs NVMe only4 |
| Data processed (GB) | 2,200 | 3,140 | 1.43 |
| Processing Time (minutes) | 24.97 | 21.40 | 0.86 |
| Processing time GB/s | 1.47 | 2.45 | 1.67 |
| Max DRAM Utilisation5 | 56% | 35% | 0.63 |

Summary results¶ Analytics¶ - Performed within 10% of DRAM for queries involving table joins - Performed 4× to 12× faster than the 24-drive NVMe RAID configuration - DRAM performed 3× to 10× faster when performing single-threaded calculations and aggregations on data Data processing and I/O operations¶ - Processed 1.6× more data per second than NVMe-only storage where data was read from PMem and written to NVMe storage - 2× to 10× faster reading data from files in parallel - Speed of reading data similar to page cache (DRAM) - Single-threaded file-write performance within 10% in both configurations - Multithreaded file-write performance 42% slower Infrastructure resources¶ - Required 37% less RAM to complete key I/O-intensive data processing - Required no page cache for querying or retrieving data stored in PMem Business benefits¶ - Collect and process more data with higher-velocity sensors and assets - Accelerate analytics and queries on recent data by 4× to 12× - Reduce infrastructure cost by running with fewer servers and less DRAM to support data processing and analytic workloads - Align infrastructure more closely to the value of data by establishing a storage tier between DRAM and NVMe- or SSD-backed performance block storage Organizations should consider Intel Optane persistent memory where there is a need to accelerate analytic performance beyond what is available with NVMe or SSD storage. Notes¶ - We used software RAID from RAIDIX to deliver lower latency and higher throughput for reads and writes over and above VROC and MDRAID. KX Streaming Analytics platform raises its performance with RAIDIX ERA - Intel Optane persistent memory configured in App Direct Mode as an EXT4 volume on a single block device. - In our testing we found EXT4 performed significantly better than XFS, with EXT4 performing 1.5× to 12× better than XFS - Higher is better. Factor of 1 = same performance. Factor of 2 = 200% faster than comparator. - Maximum DRAM utilization as measured by the operating system during the process; it is primarily a function of the amount of data that needed to be maintained in RAM for query access. The faster the process completes, the less RAM is required on the system. // @private // // @overview // Set a Keras model within the ML Registry // // @param registryPath {string} Full/relative path to the model registry // @param model {any} `(<|foreign)` The Keras object to be saved as a h5 file. 
// @param modelInfo {dict} Information relating to the model which is // being saved, this includes version, experiment and model names // // @return {null} registry.util.set.kerasModel:{[registryPath;model;modelInfo] $[99h=type model; [{[registryPath;modelInfo;sym;model] mlops.check.keras[model;0b]; modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model]; registry.util.set.write[model[`:save];modelPath,"/",string[sym],"/mdl.h5"]; }[registryPath;modelInfo]'[key model;value model]; ]; [ mlops.check.keras[model;0b]; modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model]; registry.util.set.write[model[`:save];modelPath,"/mdl.h5"]; ] ] } // @private // // @overview // Set a Torch model within the ML Registry // // @param registryPath {string} Full/relative path to the model registry // @param model {any} `(<|foreign)` The Torch object to be saved as a h5 file. // @param modelInfo {dict} Information relating to the model which is // being saved, this includes version, experiment and model names // // @return {null} registry.util.set.torchModel:{[registryPath;model;modelInfo] $[99h=type model; [{[registryPath;modelInfo;sym;model] mlops.check.torch[model;0b]; modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model]; registry.util.set.write[{.p.import[`torch][`:save][x;y]}[model];modelPath,"/",string[sym],"/mdl.pt"]; }[registryPath;modelInfo]'[key model;value model]; ]; [ mlops.check.torch[model;0b]; modelPath:registry.util.path.modelFolder[registryPath;modelInfo;`model]; registry.util.set.write[{.p.import[`torch][`:save][x;pydstr y]}[model];modelPath,"/mdl.pt"]; ] ] } // @private // // @overview // Add a code file with extension '*.p','*.py','*.q' to a specific // model such that the code can be loaded on retrieval of the model. // This is required to facilitate comprehensive support for PyTorch // models being persisted and usable. // // @param files {symbol|symbol[]} The absolute/relative path to a file or // list of files that are to be added to the registry associated with a // model. These must be '*.p', '*.q' or '*.py' // @param registryPath {string} Full/relative path to the model registry // @param modelInfo {dict} Information relating to the model which is // being saved, this includes version, experiment and model names // // @return {null} registry.util.set.code:{[files;registryPath;modelInfo] if[(11h<>abs type files)|all null files;:(::)]; files:registry.util.check.code[files]; if[0~count files;:(::)]; codePath:registry.util.path.modelFolder[registryPath;modelInfo;`code]; registry.util.copy.file[;hsym`$codePath]each files; } // @private // // @overview // Add a requirements file associated with a model to the versioned model // folder this can be either a 'pip freeze` of the current environment, // a user supplied list of requirements which can be pip installed or the // path to an existing requirements.txt file which can be used. // // 'pip freeze' is only suitable for users running within venvs and as such // is not supported within environments which are not inferred to be venvs as // running within 'well' established environments can cause irreconcilable // requirements. 
// // @param folderPath {string|null} A folder path indicating the location // the registry containing the model which is to be populated with a requirements // file // @param config Configuration provided by the user to // customize the experiment // // @return {null} registry.util.set.requirements:{[config] requirement:config[`requirements]; $[0b~requirement; :(::); 1b~requirement; registry.util.requirements.pipfreeze config; -11h=type requirement; registry.util.requirements.copyfile config; 0h=type requirement; registry.util.requirements.list config; logging.error"requirements config key must be a boolean, symbol or list of strings" ]; } // @private // // @overview // Set the parameters to a json file // // @param paramPath {string} The path to the parameter file // @param params {dict|table|string} The parameters to save to file // // @return {null} registry.util.set.params:{[paramPath;params] (hsym `$paramPath) 0: enlist .j.j params } // @private // // @overview // Set a metric associated with a model to a supported cloud // vendor or on-prem. This is a wrapper function used to facilitate // protected execution. // // @param storage {symbol} Type of registry storage - local or cloud // @param experimentName {string|null} The name of an experiment // @param modelName {string|null} The name of the model to be retrieved // @param version {long[]|null} The specific version of a named model // @param metricName {string} The name of the metric to be persisted // @param metricValue {float} The value of the metric to be persisted // // @return {null} registry.util.set.metric:{[storage;experimentName;modelName;version;config;metricName;metricValue] modelDetails:registry.util.search.model[experimentName;modelName;version;config]; if[not count modelDetails; logging.error"No model meeting your provided conditions was available" ]; // Construct the path to metric folder containing the config to be updated config,:flip modelDetails; metricPath:registry.util.path.modelFolder[config`registryPath;config;`metrics]; fileExists:`metric in key hsym`$metricPath; if[not fileExists;registry.util.create.modelMetric[metricPath]]; registry.set.modelMetric[metricName;metricValue;metricPath]; if[`local<>storage; registry.cloud.update.publish config ]; } // @private // // @overview // Set JSON file for specified object // // @param config {dict} Information relating to the model // being saved, this includes version, experiment and model names // @param jsonTyp {symbol} `registry.util.create` function to call // @param jsonStr {string} Name of JSON file // @param args {any} Arguments to apply to `registry.util.create` function. // // @return {null} registry.util.set.json:{[config;jsonTyp;jsonStr;args] jsonConfig:registry.util.create[jsonTyp]. 
args; if[not(::)~jsonConfig; (hsym `$config[`versionPath],"/config/",jsonStr,".json") 0: enlist .j.j jsonConfig ]; } // @private // // @overview // Set Python library and q/Python language versions with persisted models // // @param modelType {string} User provided model type defining is the model was "q"/"sklearn" etc // @param config Information relating to the model // being saved, this includes version, experiment and model names along with // path information relating to the saved location of model // // @return {null} registry.util.set.version:{[modelType;config] // Information about Python/q version used in model saving versionFile:config[`versionPath],"/.version.info"; // Define q version used when persisting the model versionInfo:enlist[`q_version]!enlist "Version: ",string[.z.K]," | Release Date: ",string .z.k; // Add model type to version info versionInfo,:enlist[`model_type]!enlist modelType; // If the model isn't q save version of Python used if[`q<>`$modelType;versionInfo,:enlist[`python_version]!enlist .p.import[`sys;`:version]`]; // Information about the Python library version used in the process of generating the model if[(`$modelType) in `sklearn`keras`torch`xgboost`pyspark; versionInfo,:enlist[`python_library_version]!enlist pygetver modelType; ]; // dont allow same model with different versions of q/python $[count key hsym `$versionFile; $[(.j.k raze read0 hsym `$versionFile)~.j.k raze .j.j versionInfo; (hsym `$versionFile) 0: enlist .j.j versionInfo; '"Error writing same model with two environments see .version.info file" ]; (hsym `$versionFile) 0: enlist .j.j versionInfo]; } ================================================================================ FILE: ml_ml_registry_q_main_utils_update.q SIZE: 2,570 characters ================================================================================ // update.q - Functionality for updating information related to the registry // Copyright (c) 2021 Kx Systems Inc // // @overview // Utilities for updating registry information // // @category Model-Registry // @subcategory Utilities // // @end \d .ml // @private // // @overview // Update the configuration supplied by a user such to include // all relevant information for the saving of a model and its // associated configuration // // @param modelName {string} The name to be associated with the model // @param modelType {string} The type of model that is being saved, namely // "q"|"sklearn"|"keras" // @param config {dict} Configuration information provided by the user // // @return {dict} Default configuration defined by // '.ml.registry.config.model' updated with user supplied information registry.util.update.config:{[modelName;modelType;config] config:registry.config.model,config; config[`experimentName]:registry.util.check.experiment config`experimentName; config,:`modelName`modelType!(modelName;modelType); registry.util.check.modelType config; config,:`registrationTime`uniqueID!(enlist .z.p;-1?0Ng); registry.util.search.version config } // @private // // @overview // Check folder paths, storage type and configuration and prepare the // ML Registry for publishing to the appropriate vendor // // @param folderPath {string|null} A folder path indicating the location // of the registry or generic null if in the current directory // @param experimentName {string|null} The name of an experiment from which // to retrieve a model, if no modelName is provided the newest model // within this experiment will be used. 
If neither modelName nor // experimentName is defined the newest model within the // "unnamedExperiments" section is chosen // @param modelName {string|null} The name of the model to be retrieved // in the case this is null, the newest model associated with the // experiment is retrieved // @param version {long[]|null} The specific version of a named model to retrieve // in the case that this is null the newest model is retrieved (major;minor) // @param config {dict|null} Configuration information provided by the user // // @return {dict} Updated configuration information registry.util.update.checkPrep:{[folderPath;experimentName;modelName;version;config] config,:registry.util.check.config[folderPath;config]; if[`local<>storage:config`storage;storage:`cloud]; prepParams:(folderPath;experimentName;modelName;version;config); registry[storage;`update;`prep]. prepParams } ================================================================================ FILE: ml_ml_registry_tests_scripts_monitorUtils.q SIZE: 504 characters ================================================================================ monitorCols:`nulls`infinity`schema`latency`psi`csi`supervised; monitorFeatureChecks:{[k;r] all(type[r]~99h; count[r]~7; cols[r]~k; value[r]~1111110b ) }[monitorCols] monitorValueChecks:{[k;r] all(type[r]~99h; count[r]~7; cols[r]~k; key[r`nulls]~enlist`x; key[r`infinity]~`negInfReplace`posInfReplace; key[r`latency]~`avg`std; key[r`csi]~enlist`x; r[`schema]~enlist[`x]!enlist(),"f"; r[`supervised]~() ) }[monitorCols] ================================================================================ FILE: ml_ml_stats_init.q SIZE: 131 characters ================================================================================ // stats/init.q - Load stats library // Copyright (c) 2021 Kx Systems Inc .ml.loadfile`:stats/utils.q .ml.loadfile`:stats/stats.q ================================================================================ FILE: ml_ml_stats_stats.q SIZE: 6,255 characters ================================================================================ // stats/stats.q - Statistical tools // Copyright (c) 2021 Kx Systems Inc // // This statistical library contains functionality ranging from // descriptive statistical methods to gain more insight into a // user's data, to linear regression estimation methods to investigate // unknown parameters in a model. Includes OLS, WLS, describe, // and percentile \d .ml C# client for kdb+¶ A kdb+ interface for the C# programming language is documented and available to download from https://github.com/KxSystems/csharpkdb. The interface permits connecting C# and kdb+ processes via IPC. Working with Microsoft Excel™¶ Interfacing via HTTP and CSV files¶ Assume that a kdb+ server process is listening on port 5001. Then an HTTP client can send a request that will return a CSV file. For instance, you can type this URL into a browser http://localhost:5001/q.csv?select from trade where i < 10 to get the first 10 trades. 
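For illustration, the serving side could be prepared with a sketch like the following; the trade table here is a stand-in filled with random data (column names chosen to match the sample output below), and the port could equally be set at startup with q -p 5001:
q)trade:([] stock:100?`goog`amzn`msft`ibm`intel`amd; price:100?100f; amount:100*1+100?30; time:100?23:59:59.999)
q)\p 5001      / listen on port 5001 so the URL above reaches this process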
Depending on your browser settings, the result will be opened directly in Excel, saved to a CSV file, etc. The resulting CSV file would look something like this: stock,price,amount,time goog,75.43086,1800,05:21:48.815 amzn,96.28739,1400,03:46:53.366 goog,4.82224,2700,19:21:25.970 amd,34.25556,2400,16:00:29.397 msft,79.84078,1800,10:46:41.918 ibm,85.37164,1700,08:51:43.909 intel,60.03132,1900,08:17:48.629 amd,48.66041,2200,00:59:15.559 ibm,97.46072,1000,00:50:52.943 ibm,7.951954,1200,20:21:11.319 Alternatively a command-line HTTP client, such as wget , can also be used: wget -O output.csv "http://localhost:5001/q.csv?select from trade where i < 10" This saves the result of the query to the file output.csv, which can be loaded into Excel later. Table result The result must be a table, so that it can be converted to a CSV file. For instance, the following is invalid: wget -O output.csv "http://localhost:5001/q.csv?first trade" because the result is a dictionary. We need 1 # trade . Notice that the # symbol cannot be written literally in a URL. wget -O output.csv "http://localhost:5001/q.csv?1 %23 trade" Interfacing via CSV files¶ CSV files can also be generated by a q process, without using HTTP. For instance, the result of the previous query can be saved into a table and then to a file: q)output: select from trade where i < 10 q)save `:output.csv Excel automation add-ins¶ With Automation add-ins for Excel, you can use a C# function in a cell formula. This function can communicate with a kdb+ server process. Writing Custom Excel Worksheet Functions in C# Real-time interface via Excel RTD¶ It is possible to have Excel display changing data dynamically using the RTD (real-time data) functionality. Charles Skelton has developed a RTD server for q. This server is a .NET application, and it communicates directly with a q Ticker Plant, or a chained ticker plant. The RTD feature allows real-time data coming in from the ticker plant to be displayed in Excel. The schema can be customized according to whatever table names and column names are present in the ticker plant. Downloading¶ The RTD server can be downloaded from CharlesSkelton/excelrtd To install, run the setup.exe program and follow the instructions. You will need the Microsoft .NET Runtime installed. Configuration¶ Change directory to the location where you installed the program. At that location you will see the file K4RtdServer.dll.config , which is an XML file that contains config information for the server. Change the host and port keys in this file to connect to your ticker plant or chained ticker plant. Chained ticker plants are recommended as they provide some level of protection to your primary ticker plant. The name key indicates the logical name of the plant, and is referenced from within the Excel worksheet. For troubleshooting, a log can be activated – the log directory is specified in the config file under the logdir key. The program will create log files in the format logdir/log\_hhmmssfff.txt . Several levels of tracing are available: off error warning info verbose The RTD Server can also “fill” data on your behalf – should a null value be received from your ticker plant, the RTD server will use the last non-null value received for that cell instead. Example RTD file¶ The distribution contains an example Excel file that works with the default schema for demo trade and quote schema shipped with q. 
The format for requesting data from the RTD Server is =RTD("K4RtdServer",,"plantname","tablename","column","symbol") The RTD server can also store the recent history of a cell, and this can be made available by using an index into the history as an additional parameter to the RTD call, e.g. =RTD("K4RtdServer",,"plantname","tablename","column","symbol",1) will get the previous value of the cell. This can be useful for conditional formatting or perhaps triggering some other calculation. Other cells can be dependent on cells using the RTD function, as can series in charts. Adjusting the update rate for Excel¶ To set the engine to handle a larger volume of updates, in Excel complete the following: - In Excel, go to the Visual Basic Editor, by pressing Alt+F11 or clicking Tools > Macro > Visual Basic Editor - In the Immediate window (press Ctrl+G or click View > Immediate Window), type: Application.RTD.ThrottleInterval = 1000 ? Application.RTD.ThrottleInterval FFI interface for kdb+¶ FFI (foreign function interface) is a mechanism by which a program written in one programming language can call routines or make use of services written in another. The FFI interface is an extension to kdb+ for loading and calling dynamic libraries using pure q. The main purpose of the library is to build stable interfaces on top of external libraries, or to interact with the operating system from q. No compiler toolchain or writing C/C++ code is required to use this library. The FFI interface is documented and available to download from https://github.com/KxSystems/ffi/ GPUs¶ This is a quick example of calling CUDA code from q. It’s quite trivial to call out to the code. To set the scene (and hopefully experts will forgive the simplifications) CUDA is a variant on C that is used to write general-purpose programs that execute on NVIDIA graphics cards. Data is copied to the card, the computation executed, and the results copied back. It is important that - there is significant computation work to be performed on the card – ideally this entirely dominates the execution time - there is enough parallelism in the computation to keep the hardware resources of the card/s busy - that the data set fit in the limited memory of the cards On to a simple example of a function that takes an array of reals and squares it. Here we use single-precision floating point, however double can be used as well on later-model cards. Here is the annotated code: // Include the cuda header and the k.h interface. #include <cuda.h> #include"k.h" // Export the function we will load into kdb+ extern "C" K gpu_square(K x); // Define the "Kernel" that executes on the CUDA device in parallel __global__ void square_array(float *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<N) a[idx] = a[idx] * a[idx]; } // A function to use from kdb+ to square a vector of reals by // - allocating space on the graphics card // - copying the data over from the K object // - doing the work // - copy back and overwrite the K object data K gpu_square(K x) { // Pointers to host & device arrays float *host_memory = (float*) &(kF(x)[0]), *device_memory; // Allocate memory on the device for the data and copy it to the GPU size_t size = xn * sizeof(float); cudaMalloc((void **)&device_memory, size); cudaMemcpy(device_memory, host_memory, size, cudaMemcpyHostToDevice); // Do the computation on the card int block_size = 4; int n_blocks = xn/block_size + (xn%block_size == 0 ? 
0:1); square_array <<< n_blocks, block_size >>> (device_memory, xn); // Copy back the data, overwriting the input, // free the memory we allocated on the graphics card cudaMemcpy(host_memory, device_memory, size, cudaMemcpyDeviceToHost); cudaFree(device_memory); return 0; } Then we write test.q . square:`cudalib 2:(`gpu_square;1) numbers: "e"$til 10 square[numbers] numbers \\ Here’s a sample execution on 64-bit Linux with an NVIDIA GTX 8800. $ q test.q KDB+ 2.4 2008.09.02 Copyright (C) 1993-2008 Kx Systems l64/ ... 0 1 4 9 16 25 36 49 64 81e To give a feel for real use cases, a Libor Monte-Carlo portfolio computation runs in about 26 seconds on a single core of an x86 machine, and in 0.2 seconds on the graphics card. Some companies are releasing commercial code, such as swaption volatility calculations, as libraries that use GPUs under the covers. Java client for kdb+¶ A kdb+ interface for the Java programming language is documented and available to download from https://github.com/KxSystems/javakdb. The interface comprises the following features: - query kdb+ - subscribe to a kdb+ publisher - publish to a kdb+ consumer - serialize/deserialize kdb+ formatted data - act as a server for a kdb+ instance \d .gw // if error & sync message, throws an error. Else passes result as normal // status - 1b=success, 0b=error. sync - 1b=sync, 0b=async formatresponse:{[status;sync;result]$[not[status]and sync;'result;result]}; synccallsallowed:0b // whether synchronous calls are allowed querykeeptime:0D00:30 // the time to keep queries in the errorprefix:"error: " // the prefix for clients to look for in error strings clearinactivetime:0D01:00 // the time to keep inactive handle data \d .kxdash enabled:0b // Functionality for parsing and handling kx dashboard queries - disabled by default \d .proc loadprocesscode:1b // whether to load the process specific code defined at ${KDBCODE}/{process type} // Server connection details \d .servers CONNECTIONS:`rdb`hdb`idb // list of connections to make at start up RETRY:0D00:01 // period on which to retry dead connections. 
If 0, no reconnection attempts \d .aqrest loadexecute:0b // Whether to reset .aqrest.execute ================================================================================ FILE: TorQ_config_settings_hdb.q SIZE: 303 characters ================================================================================ // Bespoke HDB config \d .proc loadprocesscode:1b // whether to load the process specific code defined at ${KDBCODE}/{process type} // Server connection details \d .servers CONNECTIONS:() // list of connections to make at start up STARTUP:1b // create connections ================================================================================ FILE: TorQ_config_settings_housekeeping.q SIZE: 136 characters ================================================================================ //housekeeping config \d .hk inputcsv:first .proc.getconfigfile["housekeeping.csv"] runtimes:02:00:00 runnow:0b \d .win version:`w10 ================================================================================ FILE: TorQ_config_settings_idb.q SIZE: 520 characters ================================================================================ // Bespoke IDB config \d .idb wdbtypes:`wdb; // Server connection details \d .servers CONNECTIONS:`wdb // list of connections to make at start up STARTUP:1b // create connections \d .proc loadprocesscode:0b // whether to load the process specific code defined at ${KDBCODE}/{process type} ================================================================================ FILE: TorQ_config_settings_monitor.q SIZE: 1,371 characters ================================================================================ // Default configuration for the monitor process \d .monitor configcsv: first .proc.getconfigfile["monitorconfig.csv"]; //filepath to checkmonitor config csv file configstored:`; //filepath to checkmonitor flat table file runcheckinterval:0D00:00:05; //interval to run checks checkinginterval:0D00:00:07; //interval to identify that checks are not lagging cleartrackinterval:0D01:00:00; //interval to check tracks are under certain age in checktracker agecheck:0D12:00:00; //if check over agecheck, delete from tracker lagtime:0D00:01:00; //if check has been running over this time, set to neg //Enable loading \d .proc loadprocesscode:1b; //whether to load process specific code defined at ${KDBCODE}/{process type} // Server connection details \d .servers CONNECTIONS:`ALL // list of connections to make at start up ================================================================================ FILE: TorQ_config_settings_rdb.q SIZE: 3,042 characters ================================================================================ // Bespoke RDB config \d .rdb ignorelist:`heartbeat`logmsg //list of tables to ignore when saving to disk hdbtypes:`hdb //list of hdb types to look for and call in hdb reload hdbnames:() //list of hdb names to search for and call in hdb reload tickerplanttypes:`segmentedtickerplant //list of tickerplant types to try and make a connection to gatewaytypes:`gateway //list of gateway types to try and make a connection to checktpperiod:0D00:00:05 //how often to check for tickerplant connection onlyclearsaved:0b //if true, eod writedown will only clear tables which have been successfully saved to disk subscribeto:` //a list of tables to subscribe to, default (`) means all tables subscribesyms:` //a list of syms to subscribe for, (`) means all syms savetables:1b //if true tables will be saved at end of day, if false tables wil not be saved, only wiped 
garbagecollect:1b //if true .Q.gc will be called after each writedown - tradeoff: latency vs memory usage upd:insert //value of upd hdbdir:`:hdb //the location of the hdb directory replaylog:1b //replay the tickerplant log file schema:1b //retrieve the schema from the tickerplant tpconnsleepintv:10 //number of seconds between attempts to connect to the tp gc:1b //if true .Q.gc will be called after each writedown - tradeoff: latency vs memory usage sortcsv:hsym first .proc.getconfigfile["sort.csv"] //location of csv file reloadenabled:0b //if true, the RDB will not save when .u.end is called but //will clear it's data using reload function (called by the WDB) parvaluesrc:`log //where to source the rdb partition value, can be log (from tp log file name), //tab (from the the first value in the time column of the table that is subscribed for) //anything else will return a null date which is will be filled by pardefault pardefault:.z.D //if the src defined in parvaluesrc returns null, use this default date instead tpcheckcycles:0W //specify the number of times the process will check for an available tickerplant subfiltered:0b //allows subscription filters to be loaded and applied in the rdb connectonstart:1b //rdb connects to tickerplant as soon as it is started \d .proc loadprocesscode:1b // whether to load the process specific code defined at ${KDBCODE}/{process type} // Server connection details \d .servers CONNECTIONS:`hdb // list of connections to make at start up STARTUP:1b // create connections ================================================================================ FILE: TorQ_config_settings_reporter.q SIZE: 375 characters ================================================================================ /- Reporter config \d .rp inputcsv:first .proc.getconfigfile["reporter.csv"]; /- Location of report configuration csv file flushqueryloginterval:1D00:00:00; /- How often to flush the report query log data writetostdout:1b; /- whether to write query log info to standard out as well \d .servers CONNECTIONS:`gateway`rdb`hdb /- create connections to all processes ================================================================================ FILE: TorQ_config_settings_segmentedchainedtickerplant.q SIZE: 1,666 characters ================================================================================ \d . 
createlogs:0b; // create an stp log file (off in SCTP as createlogs does not control SCTP logging) \d .sctp chainedtp:1b; // switches between STP and SCTP codebases loggingmode:`none; // [none|create|parent] tickerplantname:`stp1; // list of tickerplant names to try and make a connection to tpconnsleep:@[value;`tpconnsleep;10]; // number of seconds between attempts to connect to source tickerplant tpcheckcycles:@[value;`tpcheckcycles;0W]; // number of times the process will check for an available tickerplant subscribeto:`; // list of tables to subscribe to subscribesyms:`; // list of syms to subscribe to replay:0b; // replay the tickerplant log file schema:1b; // retrieve schema from tickerplant \d .stplg multilog:`tabperiod; // [tabperiod|none|periodic|tabular|custom] multilogperiod:0D01; errmode:1b; batchmode:`defaultbatch; // [autobatch|defaultbatch|immediate] customcsv:hsym first .proc.getconfigfile["stpcustom.csv"]; replayperiod:`day // [period|day|prior] \d .proc loadcommoncode:1b; loadprocesscode:1b; \d .timer enabled:1b; // enable timer h:-2 / handle to print log lvl:2 / log level unit:"BKMGTP" / memory unit character mult:5 (1024*)\ 1 / memory multiplier / build memory string mem:{@[string"i"$(3#x)%mult m;2;,;unit m:mult bin x 2]} / build log header hdr:{string[(.z.D;.z.T)],mem system "w"} / build log message msg:{if[x<=lvl;h " " sv hdr[],(y;$[10h=type z;z;-3!z])]} / user level functions to log messages err:msg[0;"[E]"] wrn:msg[1;"[W]"] inf:msg[2;"[I]"] dbg:msg[3;"[D]"] trc:msg[4;"[T]"] ================================================================================ FILE: qtips_md.q SIZE: 1,325 characters ================================================================================ / empty tables ref:.util.sattr 1!flip `id`px`ts`qs`vol`rfr!"jffjff"$\:() prices:.util.sattr flip `id`px`time!"jfp"$\:() price:.util.sattr 1!prices trades:.util.sattr flip `id`ts`tp`time!"jjfp"$\:() trade:.util.sattr 1!trades quotes:.util.sattr flip `id`bs`bp`ap`as`time!"jjffjp"$\:() quote:.util.sattr 1!quotes \d .md / update the current price for id updp:{[id;tm] .log.dbg "updating price for ", string id; p:`price id; r:`ref id; z:.stat.norminv rand 1f; f:.stat.gbm[r `vol;r `rfr;(tm-p `time)%365D06;z]; p:`id`px`time!(id;f*p `px;tm); `price`prices upsert\: p; } / update the current quote for id updq:{[id;tm] .log.dbg "updating quote for ", string id; px:`price[id;`px]; r:`ref id; q:`id`time!(id;tm); q,:`bp`ap!.sim.tickrnd[r `ts] px; q,:`bs`as!1+2?r `qs; `quote`quotes upsert\: q; } / update the current trade price for id updt:{[id;tm] if[not id in key `quote;:(::)]; .log.dbg "updating trade for ", string id; q:`quote id; t:`id`time!(id;tm); t,:`ts`tp!.sim.trd[rand 0b;rand 1f] . 
q `bs`bp`ap`as; `trade`trades upsert\: t; } / dump all md.q tables in partitioned database format dump:{[db;tm] dt:"d"$tm; .log.inf "dumping tables in ", 1_ string ` sv db,`$string dt; 0!/:`price`quote`trade; .Q.dpft[db;dt;`id] each`price`quote`trade`prices`quotes`trades; 1!/:`price`quote`trade; } ================================================================================ FILE: qtips_net.q SIZE: 496 characters ================================================================================ / table to hold active and inactive connection information handle:.util.sattr 1!flip `h`active`user`host`address`time!"ibss*p"$\:() / record new client connection .z.po:{[h]`handle upsert (h;1b;.z.u;.Q.host .z.a;"i"$0x0 vs .z.a;.z.P);} .z.po 0i / simulate opening of 0 / mark client connection as inactive .z.pc:{[h]`handle upsert `h`active`time!(h;0b;.z.P);} / modify log header to include user, handle and host \d .log hdr:{string[(.z.D;.z.T;.z.u;.z.w;`handle . (.z.w;`host))],mem get"\\w"} ================================================================================ FILE: qtips_opt.q SIZE: 526 characters ================================================================================ \d .opt / empty getopt configuration config:1#flip `opt`def`doc!"s**"$\:() / parse x according to (c)onfig and list of (h)syms getopt:{[c;h;x] p:(!). c`opt`def; p:.Q.def[p] .Q.opt x; p:@[p;h;hsym]; p} / wrap a list of (s)trings (l)eft and (r)ight text wrap:{[l;r;s](max count each s)$s:l,/:s,\:r} / print usage according to (c)onfig and (f)ile usage:{[c;f] u:enlist "usage: q ",(string f)," [option]..."; a:wrap[(7#" "),"-";" "] string c `opt; a:a,'wrap["<";"> "] c `doc; a:a,'wrap["(";")"] -3!'c `def; u,:a; u} ================================================================================ FILE: qtips_prof.q SIZE: 1,068 characters ================================================================================ / empty events table prof.events:flip `id`pid`func`time!"jjsn"$\:() / view of profile statistics report prof.rpt::.prof.stats prof.events \d .prof pid:id:0 / record timing of a (f)unction with (n)ame when called with (a)rgs time:{[n;f;a] s:.z.p; id:.prof.id+:1; pid:.prof.pid; .prof.pid:id; r:f . 
a; .prof.pid:pid; `prof.events upsert (id;pid;n;.z.p-s); r} / instrument function (n)ame instr:{[n] m:get f:get n; system "d .",string first m 3; n set (')[.prof.time[n;f];enlist]; system "d ."; n} / generate list of directories dirs:{(` sv x,) each key[x] except `q`Q`h`j`o`prof} / generate list of profileable functions lambdas:{x where 100h=(type get@) each x} / instrument all functions instrall:{instr each lambdas raze .util.tree each `.,dirs`} / generate profile statistics report given an (e)vents table stats:{[e] c:select sum time,nc:count i by id:pid from e; e:e pj update neg time from c; s:select sum time*1e-6,n:count i,avg nc by func from e; s:update timepc:time%n from s; s:`pct xdesc update pct:100f*time%sum time from s; s} ================================================================================ FILE: qtips_qtips.q SIZE: 116 characters ================================================================================ \l util.q \l stat.q \l sim.q \l timer.q \l log.q \l md.q \l opt.q \l net.q \l hist.q \l deriv.q \l prof.q \l hist.q ================================================================================ FILE: qtips_sim.q SIZE: 1,253 characters ================================================================================ \d .sim / generate simulated security paths / (s)igma, (r)ate, (t)ime path:{[s;r;t] z:.stat.norminv count[t]?1f; p:prds .stat.gbm[s;r;deltas[first t;t]] z; p} / generate price path / security (id), (S)pot, (s)igma, (r)ate, (d)ate/(t)i(m)e genp:{[id;S;s;r;dtm] t:abs type dtm; tm:("np" t in 12 14 15h)$dtm; p:S*path[s;r;tm%365D06]; c:`id,`time`date[t=14h],`price; p:flip c!(id;dtm;p); p} / round price to nearest tick (up and down) tickrnd:{if[99h=type x;x@:y];(y;x+y:x*floor y%x)} / randomly delay a timeseries delay:{abs[type x]$x+next deltas[x]*count[x]?1f} / randomly throw away elements of list filter:{y asc (neg"j"$x*n)?n:count y} / generate bid/ask quotes / (t)ick (s)ize, (q)uote (s)ize, (p)rice path genq:{[ts;qs;p] q:p,'flip `bp`ap!tickrnd[ts] p `price; q:q,'flip `bs`as!1+count[p]?/:2#qs; q:`id`time`bs`bp`ap`as#q; q} / generate trade event / (b)id/ask flag, pct:percent fills / (b)id (s)ize, (b)id (p)rice, (a)sk (p)rice, (a)sk (s)izie trd:{[b;pct;bs;bp;ap;as](ceiling pct*?[b;bs;as];?[b;bp;ap])} / generate trade event / (q)uote table and (pct) fill rate gent:{[pct;q] q:filter[pct] raze (-1_@[;`time;delay] q@) each group q `id; t:q,' flip `ts`tp!trd[n?0b;(n:count q)?1f] . q `bs`bp`ap`as; t:`id`time`ts`tp#t; t} ================================================================================ FILE: qtips_stat.q SIZE: 2,011 characters ================================================================================ \d .stat / percentile pctile:{[p;x]x iasc[x] -1+ceiling p*count x} / 12 uniforms u12:{-6f+sum x cut (12*x)?1f} skew:{avg[x*x2]%sqrt m2*m2*m2:avg x2:x*x-:avg x} kurt:{-3f+avg[x2*x2]%x*x:avg x2:x*x-:avg x} / box-muller bm:{ if[count[x] mod 2;'`length]; x:2 0N#x; r:sqrt -2f*log first x; theta:2f*acos[-1f]*last x; x: r*cos theta; x,:r*sin theta; x} / geometric brownian motion / (s)igma, (r)ate, (t)ime, z:uniform random / user multiplies by (S)pot gbm:{[s;r;t;z]exp (t*r-.5*s*s)+z*s*sqrt t} / inter quartile range iqr:{(-) . 
pctile[.75 .25;x]} / auto correlation ac:{x%first x:x{(y#x)$neg[y]#x}/:c-til c:count x-:avg x} / horner's method / x:coefficients, y:data horner:{{z+y*x}[y]/[x]} / exponentially weighted moving average / x:decay rate, y:data ewma:{first[y](1f-x)\x*y} / central region - normal inverse cnorminv:{ a:-25.44106049637 41.39119773534 -18.61500062529 2.50662823884; b: 3.13082909833 -21.06224101826 23.08336743743 -8.47351093090 1; x*:horner[a;s]%horner[b] s:x*x-:.5; x} / tail region - normal inverse tnorminv:{ a:0.0000003960315187 0.0000002888167364 0.0000321767881768 0.0003951896511919 0.0038405729373609 0.0276438810333863 0.1607979714918209 0.9761690190917186 0.3374754822726147; x:horner[a] log neg log 1f-x; x} / beasley-springer-moro normal inverse approximation norminv:{ i:x<.5; x:?[i;1f-x;x]; x:?[x<.92;cnorminv x;tnorminv x]; x:?[i;neg x;x]; x} / open high low close ohlc:{`o`h`l`c!(first;max;min;last)@\:x} / count, min, max, median, standard deviation of x summary:{`n`mn`mx`md`dv!(count;min;max;med;sdev)@\:x} / error function erf:{ a: 1.061405429 -1.453152027 1.421413741 -0.284496736 0.254829592; t:1f%1f+0.3275911*abs x; t:1f-t*horner[a;t]*exp neg x*x; x:t*1 -1f x<0f; x} / cumulative normal cnorm:{.5*1f+erf x%sqrt 2f} / newton-raphson / (e)rror tolerance, (f)unction nr:{[e;f;x]$[e>abs d:first[r]%last r:f x;x;x-d]} / function inversion / (r)oot-finding (f)unction, (f)unction invert:{[rf;f;y;x]rf[(neg y;0f)+f@]x} ================================================================================ FILE: qtips_timer.q SIZE: 788 characters ================================================================================ / timer jobs timer.job:flip `name`func`time!"s*p"$\:() timer.job,:(`;();0Wp) \d .timer / merge record(y) into table(x) in reverse chronological order merge:`time xdesc upsert / add new timer (f)unction with (n)ame and (t)i(m)e into (t)able add:{[t;n;f;tm] r:(n;f;gtime tm); t:merge[t;$[0h>type tm;r;reverse flip r]]; t} / run timer job at (i)ndex from (t)able and current time tm run:{[t;i;tm] j:t i; t:.[t;();_;i]; r:value (f:j `func),ltime tm; if[not null r;t:merge[t;(j `name;f;tm+r)]]; t} / scan timer (t)able for runable jobs loop:{[t;tm] while[tm>=last tms:t `time;t:run[t;-1+count tms;tm]]; t} / helper function to generate repeating jobs / (d)elay, (e)nd (t)ime, (f)unction, tm:current time until:{[d;et;f;tm]if[tm<et;@[value;f,tm;0N!];:d]} .z.ts:loop`timer.job ================================================================================ FILE: qtips_util.q SIZE: 2,417 characters ================================================================================ \d .util / import designated function or entire directory use:{system["d"] upsert $[99h=type v:get x;v;(-1#` vs x)!1#v]} Multi-partitioned kdb+ databases: an equity options case study¶ kdb+ is well suited to managing massive datasets and offers an unrivalled performance advantage when it comes to processing and analyzing data. This is a case study highlighting some of the key points we have found with regard to the storage and maintenance of financial equity options data in kdb+. We also provide some examples of possible ways to design and query these large databases efficiently. 
Overview of the dataset¶ The equity-options data universe is one of the largest financial datasets generated, and more often than not the most challenging dataset for trading firms to manage and extract value from in a timely manner. Hundreds of gigabytes of trade and quote records are published daily from the equity options feeds, with recent daily row count volumes for Q1 of 2012 having an average and maximum count close to 4 billion and 6 billion rows respectively. These numbers represent a relief of sorts from the peaks of 2011 where we saw a maximum daily row count of over 13 billion as highlighted in the chart below. The dataset we use for our examples in this paper includes level 1 trade and quote data for financial options contracts, the associated reference data and the corresponding equity level 1 trades and quotes for the same time period. All data has been sourced from tickdata.com. The sample dataset has a maximum of approximately 200 million rows for a single date and includes data for 10 underlying securities. The full universe that our clients load is typically much larger, covering thousands of underlying securities. For some of these securities, for example AAPL, the ratio of the number of option quotes to underlying quotes can be in excess of 60:1 on some dates. It is thus inevitable that when storing equity options data for a large universe of underlying securities, the number of rows per partition will regularly exceed 2 billion. When kdb+ 3.0 was released in 2011 it removed the limit to the number of rows that can be stored in a single partition. This gives us two options for storing massive tables, either storing each day of data in a single partition, or storing each day of data in multiple partitions. In this paper we cover the use of a multi partitioned database, as whilst kdb+ 3.0 allows the very straight forward option of having a single partition for each date, there are still potential advantages to the approach of storing data across multiple partitions for a single date. Firstly, when saving data, multiple partitions can be written concurrently, potentially reducing the time required to load data from flat files or persist it from memory. Secondly, when running queries against the database, data from multiple partitions can be read in parallel using secondary threads. In the same way as queries on a database with a single partition per date can read data for multiple days in parallel, now data for the same date can be read in parallel. A third advantage related to database maintenance is that since the size of the individual column data files is reduced, the memory required to apply a sort to the data on disk will be reduced. kdb+ provides a simple method to store the data in multiple partitions for each date by using the par.txt file. When attempting to read data from a database like this a large number of queries will behave exactly as they would in a database with only one partition per date. However there are some cases in which it’s necessary to rework things a little, most notably in the case of as-of joins. This is covered below. Options schema¶ Sorting and indexing the options data is straightforward. If we have the data sorted by underlying security, option contract and finally timestamp we can apply the parted attribute to both underlying security and option contract, allowing us to filter quickly on either column. 
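An illustrative sketch of that sort-and-attribute step for an in-memory day of option quotes follows; the table and column names used (QUOTE, underlyingSym, sym, time) are assumptions that follow the schema described above:
q)`underlyingSym`sym`time xasc `QUOTE                            / sort by underlying, contract, then timestamp
`QUOTE
q)update underlyingSym:`p#underlyingSym, sym:`p#sym from `QUOTE  / parted attribute on both columns
`QUOTE
q)attr each QUOTE `underlyingSym`sym                             / confirm both columns now carry `p
`p`p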
More interesting is the way in which we choose to store the underlying security market data so that we can link it to the options data efficiently. The simplest way of accomplishing this would be to store the prevailing value of each underlying alongside the option trades and quote data. These extra columns would either be provided in the source data (as was the case in our sample dataset) or could be pre-calculated with an as-of join and then stored. The advantage of this method is that we do not need to store any additional underlying market data and there is no overhead when performing lookups to find the prevailing underlying quote or last trade for an option quote or trade. However there are two obvious downsides to using this approach. The first is that it does not offer any flexibility with regard to how the market data for an underlying security maps to the corresponding option data, e.g. if we wanted to do a window join to look at multiple underlying quotes surrounding each option quote. The second is that a significant amount of extra storage space will be required when the data is de-normalized in this way compared to storing the underlying market data in separate tables and doing joins on demand. Even with a frugal schema for the underlying data, this might add 40-50 bytes of storage for each record (depending on whether condition code and exchange fields can be stored as single characters or symbols are required for example). Given that there will be billions of option quotes per day, this can add hundreds of gigabytes to the daily storage requirements. It is worth noting that this may not be as large a problem as it first appears given the possibilities for data compression. A second possibility is to store underlying market data as completely separate tables and do any joins we require between the two datasets on a purely ad-hoc basis. This option offers the lightest storage requirements and also gives us full flexibility in how we do the joins. It does, however, come with the cost of extra processing time when searching for the underlying market data related to the option data at query time. Examples of how this is done can be found in the queries section below. A third option, a combination of the first two, would be to save the option and underlying data as separate tables, but to compute row indices in the underlying market data tables and store them in the option tables as link columns. This requires less space (a single integer column per link to each underlying table) than storing full underlying trade/quote information along with each option table row, avoids having to find the correct underlying trade/quote at query time and also gives flexibility by having all the underlying data available for ad-hoc joins. Using the third option requires us to ensure that the underlying and option data for the same securities always reside in the same partition. We can achieve this as part of the load process outlined below. Loading and saving data¶ Here we assume some familiarity with loading large data files by splitting the file and loading in chunks using .Q.fs and .Q.fsn . Striping data over multiple partitions per date¶ Firstly we demonstrate how to save a chunk of loaded and parsed data into a database with multiple partitions per date. In our case we will split the data alphabetically by underlying symbol into groups as follows: ABC, DEF, GHI, JKL, MNO, PQR, STU and VWXYZ We should have a par.txt file containing paths to directories for each of these symbol groups. 
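Creating the stripe directories and the par.txt that names them is a one-off setup step. A minimal sketch, assuming the segment directories live under /data and the database root (the directory containing par.txt) is /data/db; both paths are purely illustrative:

segs:"/data/",/:string til 8           / one directory per symbol group above
system each "mkdir -p ",/:segs         / create them (requires OS shell access)
`:/data/db/par.txt 0: segs             / par.txt in the database root lists the segments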
The contents of our par.txt file are thus as follows: /data/0 /data/1 /data/2 /data/3 /data/4 /data/5 /data/6 /data/7 Before demonstrating how we can stripe the data over these directories, it is worth noting that if future data volumes increase and we wish to partition the data into a greater number of stripes, we can do this by adding new directories to par.txt without the need to go back and repartition old data. We would only need to create empty tables for each existing date for each partitioned table in our db. With regard to saving a chunk of data, there are many ways to do this but we provide an example below where we read in a list of available partitions and create some helper functions to allow us to easily partition and save the data. In the code below the directory is assumed to be a global variable (DIR ) giving the path to our par.txt file in q format (symbol with a leading colon). // A dictionary mapping alphabetical group to the directory // in the database we wish to save to dirs:`ABC`DEF`GHI`JKL`MNO`PQR`STU`VWXYZ!hsym each`$read0 ` sv DIR,`par.txt // A function which will return a list of partitions // to which each of a list of symbols should be saved. getpart:.Q.fu {[symlist] key[dirs]0 3 6 9 12 15 18 21 bin .Q.A?first each string symlist,()} saveonepart:{[dt;tablename;data;part2save] (` sv dirs[part2save],(`$string dt),tablename,`)set .Q.en[DIR] delete part from select from data where part=part2save} We could use the following snippet to save some quote data stored in a variable CHUNK inside a function called by .Q.fs or .Q.fsn . DATE is assumed to be a global variable here representing the date for which we are loading data. It is elementary to modify this to include the possibility of data for more than one date in a particular file load. This is excluded here for the sake of simplicity. { … CHUNK:update part:getpart underlyingSym from CHUNK; saveonepart[DATE;`QUOTE;CHUNK]each distinct exec part from CHUNK; … } By implementing the previously-defined getpart function to generate the partition to save down to based on either the underlyingSym column for options data or the sym column for underlying market data, we can be sure that related options and underlying data will be stored in the same partitions. Once the data has been loaded and saved we will often need to apply an attribute to one or more of the columns of the data. This is a fairly easy step as we just need to apply the attribute to a table spread over multiple directories instead of one. In our example of option quotes we would like to apply the partitioned attribute (`p# ) to both the sym and underlyingSym columns. We can do this as follows: // function applies p# attribute to sym and underlyingSym columns // of the quote table for the specified date and directory addphashes:{[dt;dir] {[dt;dir;f]@[` sv dir,(`$string dt),`QUOTE;f;`p#]}[dt;dir] each `sym`underlyingSym} This may be called after loading all the data in our script: addphashes[DATE]each value dirs; Adding links to market data¶ If we wish to store links to the underlying data within the option data, a sensible time to generate and store these links would be just after we have loaded, sorted and added attributes to the data. We can do this as part of a loader script with the following code. Here we just create one link stored in the QUOTE table to the corresponding quote in the EQUOTE (underlying quote) table: dirs: `$read0 ` sv DIR,`par.txt addlinks:{[dt;dir] dir:` sv dir,`$string dt; // compute links as an as-of join. 
inds:select ind: x from aj[`sym`timestamp; select sym:underlyingSym,timestamp from dir`QUOTE; select sym,timestamp,i from dir`EQUOTE]; // save the links (` sv dir,`QUOTE`underlying)set `EQUOTE!exec ind from inds; // update the metadata of the QUOTE table u set distinct get[u:` sv dir,`QUOTE`.d],`underlying} Again we should use this for each partition for the date we have just loaded. addlinks[DATE]each value dirs; Example queries¶ In all of the examples below variables in caps are used instead of specific values. Raw options quote data retrieval with underlying quote¶ In the simplest case where we have the prevailing underlying quote stored alongside the option quote as columns lastbidPrice and lastaskPrice our query is: select sym, timestamp, bidPrice, askPrice, lastBidPrice, lastAskPrice from QUOTE where date=DATE, sym=SYM, time within (STARTTIME;ENDTIME) For the dataset where we have links to the prevailing underlying quote stored we can use: select sym, timestamp, bidPrice, askPrice, underlying.bid, underlying.ask from QUOTE where date=DATE, sym=SYM, time within (STARTTIME;ENDTIME) In the case where we have the options data and underlying data stored in separate tables, we could usually use an as-of join. However, since our database has multiple partitions per date, specifying the right-most argument of the as-of join in the usual way does not behave in the same manner as in the case of databases with a single partition per date. For example, if we look at a standard query to get the prevailing quote as of each trade we encounter some problems. aj[`sym`time; select price from TRADE where date=SOMEDATE, sym in SYMLIST; select sym, time, bid, ask from quote where date=SOMEDATE] In the case of a database with a single partition per date, this as-of join does not read the entire sym, time, bid and ask columns into memory before performing the lookup, rather it searches for the correct rows from a memory map of the quote table. In a database with multiple partitions per date, the following part of the previous query proves problematic: select sym, time, bid, ask from EQUOTE where date=SOMEDATE This now causes all of the data to be mapped and read into memory so that the as-of join can be performed. Not only is this undesirable due to the extra I/O required but because multiple memory-mapped partitions are being collapsed into one in memory table, it also has the detrimental side effect of removing the partitioned attribute normally found on the sym column since rows with the same symbol could occur in multiple partitions. This is not something that should actually occur in this database but since it is possible, kdb+ will defensively remove the partitioned attribute. In addition, because part of the reason we chose a database with multiple partitions for each date was to ensure no single partition was bigger than 2 billion rows, we may even hit a limit error when trying to do as-of joins against large tables. (This could only occur when using a version of kdb+ older than 3.0). The result of the above is that not only would a join take longer because more data is being read from the disk but also the join itself will be performed much more slowly. A particular technique to work around this would be to create our own function which will perform joins against each memory-mapped partition for a certain date without reading the full table into memory, then aggregate the results from each partition. 
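The loss of the parted attribute is easy to see on toy in-memory lists (values made up): `p# can be applied to each chunk on its own, but not to their concatenation, so kdb+ has to drop it. The general-purpose per-partition join described next avoids collapsing the partitions in the first place.

q)attr a:`p#`aaa`aaa`bbb
`p
q)attr b:`p#`aaa`ccc`ccc
`p
q)attr a,b        / `aaa is no longer contiguous, so the attribute is gone
`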
In the function below the parameters are specified as follows:

| parameter | denotes |
|---|---|
| c | A list of column names upon which to perform the as-of join, just as we would have with a regular aj |
| t1 | The first table, just as in the case of a regular aj |
| t2n | The second table name, a symbol giving the name of a partitioned table. (To do an as-of join on a non-partitioned table, use a regular aj.) |
| t2d | The date upon which to do the join. (Do one date at a time if there are multiple dates.) |
| t2c | A list of column expressions for the selection from the partitioned table, e.g. (`Ticker;`Timestamp;`Trade;`TradeSize;(log;`TradeSize)) – usually some subset of the column names in the table |
| t2cn | A list of column aliases in the partitioned table e.g. `Ticker`Timestamp`price`size`lnprice |

ajparted:{[c;t1;t2n;t2d;t2c;t2cn]
 if[not all c in t2cn;'`missingcols];
 / we want just one row per row in the input, so put a row id as a key
 / and just fill values in the input table with
 / rows that are found for the second table
 / build a table with the right schema to start
 t1:`rid xkey update rid:`s#i from aj[c;t1;?[t2n;enlist(<;`date;first date);0b;t2cn!t2c]];
 / do aj's on each partition of the second table with the first table,
 / only return rows that we have values for,
 / then upsert these to the keyed t1
 delete rid from 0!t1,/
  {[c;t1;t2n;t2d;t2c;t2cn;x]
   inds@:vinds:where not null inds:(c#M:?[(` sv`$string x,t2d)t2n;();0b;t2cn!t2c])bin (c#value t1);
   1!(0!t1)[vinds],'M inds}[c;t1;t2n;t2d;t2c;t2cn] peach distinct .Q.pd}

Returning to our original example query of getting the prevailing underlying quote information for a particular set of option quotes, we can use the following call to the ajparted function:

t1: select sym,timestamp,bidPrice,askPrice from QUOTE where date=SOMEDATE,sym in SYMLIST,timestamp within (STARTTIME;ENDTIME)

ajparted[`underlyingSym`timestamp;t1;`EQUOTE;SOMEDATE;
 `sym`timestamp`bid`ask;`underlyingSym`timestamp`bid`ask]

Snapshot of option chain¶

This is an example where we have an underlying symbol and want to get a snapshot of the quotes available for all options at a given time. Firstly, we will need to query the security master table (called mas in our case) for all option data available on the given date for this underlying, and then find the last quote available for each option at, or prior to, the given time.

The available option contracts may be queried from a security master table, mas, using the following:

optsyms:select sym from mas where date=OURDATE, underlyingSym=OURSYM

Now that we have all of the options contracts we require, the question becomes how to query the QUOTE table to get the available quote for each option at the given time. One way to do this would be to write a query to extract the last quote prior to the time in question for each option:

select last bid, last ask by sym from QUOTE where date=OURDATE, sym in optsyms, time<=OURTIME

However, this is another place where we would normally use an as-of join, since it allows us to efficiently search for the last record prior to the time in question rather than scan through all of the records for each symbol to see if the timestamp constraint is fulfilled. We can use the same function from the previous example to do an as-of join here.
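The probe table (the "rack") for that join is simply the Cartesian product of the contract symbols and the snapshot time, which is exactly what cross produces. A toy illustration with made-up values:

q)([]sym:`a`b)cross([]n:1 2)
sym n
-----
a   1
a   2
b   1
b   2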
Firstly, we use the cross function to create a table of symbol and time pairs in order to carry out the as-of join: rack:optsyms cross ([]timestamp:1#OURTIME) Now we can use ajparted to find the correct rows from the QUOTE table ajparted[`sym`timestamp;rack;`QUOTE;OURDATE; `sym`timestamp`bid`ask;`sym`timestamp`bid`ask] In the previous example we were able to avoid an ad-hoc as-of join provided we had underlying data (or pre-calculated links to it) stored alongside the options quote data. In this case however, we will need to use an as-of join regardless of how the data is stored. Building a minutely time series of at-the-money option contracts¶ In our final example, we demonstrate the creation of a minutely time series based on the idea of a generic option contract. In this case we choose a condition specifying this contract as the next expiring call contract nearest to at-the-money. Initially, we create a time series of minutely bars for the underlying symbol so we can determine which actual option contract we should use at each point. Given we are working with relatively low-volume trade data, we do this with a regular grouping and aggregation query. Note that we could also use ajparted with a rack of symbol and time pairs as in the above example. bars:select last price by time.minute from ETRADE where date=ourdate, sym=OURSYM We now need to find the particular contracts that are available on our chosen date and the one that is closest to at-the-money at each point in our series. We can wrap up the logic for choosing a contract into a function with parameters for date, underlying security, contract type (call or put) and price: closest2atm:{[d;s;t;p] / A list of all the nearest expiring contracts on this date cands:`strike xasc select sym,strike from mas where date=d, underlyingSym=s, typ=t, expir=(min;expir)fby underlyingSym; / A list of all strike prices with midpoints between, / we can then use bin to find the contract with a strike price to our prices searchlist:1_raze{avg[x,y],x}':[cands`strike]; inds:searchlist bin p; / Any odd indices in inds mean price is closer to strike above / add one to these and divide everything by 2 to give the indices into cands inds[where 1=inds mod 2]+:1; inds:inds div 2; / return the list of at-the-money symbols cands[`sym]inds} update sym:closest2atm[OURDATE;OURSYM;"C";price] from `bars Finally we query the closing bid and ask for each of these bars: ajparted[ `sym`timestamp; select sym, minute, timestamp:`timespan$minute, price from bars; `QUOTE; OURDATE; `sym`timestamp`bid`ask; `sym`timestamp`bid`ask] Compression¶ The use of compression for all or part of the dataset here is outside the scope of this white paper, however, this is undoubtedly an important feature to consider using for options data. Knowledge Base: File compression Conclusion¶ Storing and querying options data can present challenges due to the volume of data involved. While the introduction of kdb+ 3.0 lets clients easily handle the ever-increasing data volumes by removing the per-partition limit of 2 billion rows, there are still several potential benefits to storing the tick data in multiple partitions per date as outlined in Overview of the dataset. As we have seen however, in choosing this approach it will be necessary to write some queries in a different way to achieve the best performance. 
Regardless of whether the data is stored in a single partition per date or in a multi-partitioned manner, the choice of schema will still impact the storage space required for the data and queries against the database. Here we have presented several different schema choices and examples of several simple queries implemented for each schema.

Author¶

James Hanna has helped design and develop kdb+ implementations and proof of concepts for more than 40 customers. Based in New York, James is a Technical Architect for KX, a high-performance data-management, event-processing and trading platform.

// log stubs
.finos.log.critical:{-1"CRITICAL: ",x;}
.finos.log.error :{-1"ERROR: " ,x;}
.finos.log.warning :{-1"WARNING: " ,x;}
.finos.log.info :{-1"INFO: " ,x;}
.finos.log.debug :{-1"DEBUG: " ,x;}

.finos.util.shr :{0b sv x xprev 0b vs y} / right shift
.finos.util.xor :{0b sv (<>/) 0b vs'(x;y)} / XOR
.finos.util.land:{0b sv (&). 0b vs'(x;y)} / AND
.finos.util.lnot:{0b sv not 0b vs x} / NOT
.finos.util.crc32:{.finos.util.lnot(.finos.util.lnot"i"$x){.finos.util.xor[.finos.util.shr[8]y]x .finos.util.xor[.finos.util.land[y]255i]0x0 sv 0x000000,"x"$z}[{8{$[x mod 2i;.finos.util.xor -306674912i;::].finos.util.shr[1]x}/x}each"i"$til 256]/y}

// Run and log garbage collection.
.finos.util.free:{[].finos.log.debug"freed ",(string .Q.gc[])," bytes";}

// Date from year/month/day.
.finos.util.ymd:{"D"$"."sv"0"^-4 -2 -2$string(x;y;z)}'

// Convert epoch seconds to (global) timestamp.
// @param x number or number vector
// @return timestamp or timestamp vector
.finos.util.timestamp_from_epoch:{"p"$("j"$1970.01.01D)+1000000000*x}

// Attempt to execute a monadic function.
// Can be replaced with {(1b;x y)} for debugging.
// @param x monadic function
// @param y arg
// @return pair: (1b;result) or (0b;error)
.finos.util.try:{@[(1b;)x@;y;(0b;)]}

// Print progress, with peach and try-catch.
// The weight function is used to measure progress more accurately when
// different arguments will take significantly different amounts of time.
// When this is not the case, pass a constant function (e.g. {1}).
// E.g. to (re/de)compress files, set/unset .z.zd and pass x as hcount, y
// as {x set get x}, and z as the files.
// @param x monadic function: weight (e.g. hcount, {1}, etc.)
// @param y monadic function
// @param z list: args for y
// @return dict: z!@[(1b;)y@;;(0b;)]peach z
.finos.util.progress:{
 f:{[s;f;a;w;i]
  eta:{x+(abs type e)$(e:y-x)%z};
  dll:{" "sv(key x){": "sv(string x;$[10<>type y;string;]y)}'get x};
  progper:{
   paren:{"(",x,")"};
   prog:{"/"sv(neg count string y)$string(x;y)};
   per:{.Q.fmt[6;2;100*x],"%"};
   " "sv(prog[x;y];paren per x%y)};
  .finos.log.debug dll`now`position`work`elapsed`eta!(
   p;
   progper[i+1;count a];
   progper[w i;last w];
   p-s;
   eta[s;p:.z.P;(w i)%last w]
   );
  .finos.util.try[f]a i};
 z!f[.z.P;y;z;w:sums x peach z]peach til count z}

================================================================================
FILE: kdb_qdoc_src_qdoc_example_dba.q
SIZE: 4,484 characters
================================================================================

//DBA (kdb+ datastore) utilities to manage partitioned and splayed tables.
//Originally based on open source utils from code.kx.com /// //Get all the partitions in a HDB. //Support all valid types. //@param path to database (hsym) //@return empty list if no partitions are found .finos.dba.getParts0:{[db] if[()~l:key db;'(string db),": No such file or directory"]; f:{[db] d:key db;d@where d like "[0-9]*"}; r:$[`par.txt in key db; raze f'[hsym each `$read0` sv db,`par.txt]; f db]; if[0=count r;:r]; c:.finos.dba.partCastChar first r; c$string r} /// //Get all the partitions in a HDB. //Support all valid types. .finos.dba.getParts:{[db] r:.finos.dba.getParts0 db; if[0=count r;'"No partitions found at: ",string db]; r} /// //Load a splayed table as a dictionary. //Normally, `:partitionDir@`tableName would work, but for directories // that have dotfiles that are lexicographically earlier than .d, // assumptions are violated and q fails to load the table. //Also, tables with uneven column lengths can't be flipped. //@return table .finos.dba.loadSplayedTableDict:{[tableHsym] .finos.tc2.argMatch[.z.s;enlist tableHsym;enlist `:/path/to/table]; files:key tableHsym; / Get filenames in tableDir. if[not `.d in files; '`notTableDir]; / Ensure there's a .d file. colNames:tableHsym`.d; / Read the .d file. /Return map-on-demand or map-immediate depending on trailing "/". colVals:$["/"~last string tableHsym; x; @[tableHsym@;;()]each colNames]; / Return () if cannot read col. colNames!colVals} /// //Matching column lists like wantTypes~haveTypes yields false positives // for empty tables that have compound columns since the empty column // would have a type of " " rather than "C". //@param wantTypes List of char. //@param haveTypes List of char. //@param cnt Number of rows in the table. //@return True if wantTypes~haveTypes or all " " columns are compound columns in wantTypes. .finos.dba.colTypeMatcher:{[wantTypes;haveTypes;cnt] matched:wantTypes~haveTypes; if[matched|0<cnt; :matched]; if[count[wantTypes]<>count haveTypes; :0b]; haveTypesBlank:haveTypes=" "; blankPos:where haveTypesBlank; / Positions with relaxed matching. nonblankPos:where not haveTypesBlank; / Positions with exact matching. (wantTypes[nonblankPos]~haveTypes[nonblankPos]) & all wantTypes[blankPos] in .Q.A} /// //"Denumerate" (i.e. resolve enumerations in) object x (recurse if necessary) //@param x Object to process (simple type, list, dict or table) //@return "Denumerated" object. 
.finos.dba.priv.help,: enlist".finos.dba.denum[list/table/dict]"; .finos.dba.denum:{[x] $[0=t:abs type x; .z.s'[x]; t<20; x; / built-in-types t<=77; $[-11h=type key enumName:key x; value x; / enumeration '"sym list not loaded: ",string enumName]; t<98; .z.s'[x]; / compound list t=98; @[x;cols x;.z.s]; / table t=99; !/[.z.s'[(key;get)@\:x]]; / dict '`unknownType]} /// //Section: Datastore generation functions //Examples: //splay trade table, enumerate all symbol cols to `sym and use `sym column as parted column //> splayToPartition[`:/d/d1/data;2009.01.01;`sym;`trade] //splay trade table as `prints, enumerate all symbol cols to `sym and use `sym column as parted column //> splayDataToPartition[`:/d/d1/data;2009.01.01;`sym;`prints;trade] //splay trade table, enumerate named `foo`bar symbol columns individually all others against //`sym, sort by `foo and apply parted attribute, do not reorder columns //> splayToPartition[`:/d/d1/data;2009.01.01;(`foo`bar;`foo;`);`trade] //snapshot and copy symbol files and repoint all files to a new a new dated directory //copies `:/d/d1/data/sym to `:/d/d1/snapshot/sym.2009.01.01 (today's date) and //creates a symlink from `:/d/d1/data/sym to the snapshot //> snapshotSym[`:/d/d1/data;`:/d/d1/snapshot] .finos.dba.priv.help,: "--- Datastore production functions ---"; /// //Load (reload) all sym files //@param db DB root path (hsym) //@return list of sym variables .finos.dba.loadSyms:{[db] db:.finos.dba.pathAsHsym db; /find all sym files s:f where (f:key db) like string[.finos.dba.priv.SYMPREFIX],"*"; /load all sym files s set'get each ` sv'db,'s} ================================================================================ FILE: kdb_qdoc_src_qtabledoc_example_bovespa_schema.q SIZE: 668 characters ================================================================================ // @table cqs_bbo // @owner brunk // @src rtdev feed journals at /path/to/feed/YYYY/MM/cqs_mdelta.* // @desc Per-exchange best bid and offer details for listed underliers. // @note Table is a merged view of redundant a-side and b-side (per-line) journal files. // @seealso cqs_nbbo, cts_prints // @col cqsID Stock ticker // @col line Cqs line id // @col seq Per-line sequence number // @col exchangeTime Exchange time // @col exch Exchange // @col bp Bid price // @col bs Bid size // @col ap Ask price // @col as Ask size // @col qc Quote condition // @col rcvTimeA A-side receive time. May be null // @col rcvTimeB A-side receive time. 
May be null

t:([]c1:();c2:())

================================================================================
FILE: kdb_tests_dep_a_a1_module.q
SIZE: 161 characters
================================================================================

if[not a2Loaded; '"a2 not loaded but is a dependency of this module"];
-1"this is a/a1";
a1Loaded:1b;
.finos.dep.include"a1s1.q";
.finos.dep.loadScript"a1s2.q";

================================================================================
FILE: kdb_tests_dep_test1.q
SIZE: 1,175 characters
================================================================================

.test.root:.finos.dep.cutPath[.finos.dep.currentFile[]]0;
.finos.dep.resolvers[`test]:{
 if[x[`name]~"a/a5"; '"sorry, wrong resolver"];
 root:.test.root,"/",x[`name];`projectRoot`scriptPath`libPath!(root;"";"")};
.finos.dep.resolvers[`test2]:{root:.test.root,"/test2/",x[`name];`projectRoot`scriptPath`libPath!(root;"";"")};
.finos.dep.regModule["a/a1";"1.0";system["cd"],"/a/a1";"";""];
.finos.dep.loadModule"a/a1";
if[not a1Loaded; '"a1 not loaded"];
if[not a2Loaded; '"a2 not loaded"];
if[not a1s1Loaded; '"a1s1 not loaded"];
if[not a1s2Loaded; '"a1s2 not loaded"];
.finos.dep.loadScriptIn["a/a1";"a1s3.q"];
if[not a1s3Loaded; '"a1s3 not loaded"];
.finos.dep.loadFromRecord`name`version`resolver!("a/a3";"0.0";`test);
if[not a3Loaded; '"a3 not loaded"];
.finos.dep.loadFromRecord`name`version`resolver!("a/a4";"0.0";"test");
if[not a4Loaded; '"a4 not loaded"];
a5Loaded:0b;
.finos.dep.loadFromRecord`name`version`resolver`override!("a/a5";"0.0";"test2";1b);
if[a5Loaded; '"a5 loaded when it shouldn't have"];
.finos.dep.loadFromRecord`name`version`resolver!("a/a5";"0.0";"test");
if[not a5Loaded; '"a5 not loaded"];
if[a5Loc<>2; '"a5 loaded from wrong location"];

================================================================================
FILE: kdb_tests_inithook_inithook1.q
SIZE: 989 characters
================================================================================

\l timer/timer.q
\l inithook/inithook.q

//Asynchronous inithook example.
//Suppose we want to connect to two different services and run some code when both connections succeed.
//In this case we can use two separate inithook symbols to indicate which connection is done and then
//have an inithook depending on both so that it only runs when both provide calls are done.

tpConnected:{
 -1"TP connected";
 .finos.init.provide`tpConnected;
 };

Summarize and say¶

Analyze a dictionary of results; map between dictionaries

A lambda for the Look & Say sequence composes with desc and the Do iterator to produce the Summarize & Say sequence. A million integers in string form hashes with group and desc to a dictionary of 8002 unique seeds. A dictionary of sequences, keyed by the unique seeds: composition count distinct@ measures the iterations needed to converge; max finds the slowest sequences. Reverse lookup on a dictionary finds their unique seeds; indexing the sequence and seed dictionaries with them finds the slowest sequences and all the seeds that produce them.

Seven code lines, no loops, no counters, no control structures.
Find, for Summarize & Say sequences, all the seed values up to a million that take the most iterations to converge

See Rosetta Code for the task details. Note that in this context a series has converged when a value in the series is repeated.

Start with the simple Look and Say sequence.

q)ls:{raze(string 1_ deltas d,count x),'x d:where differ x} / look & say
q)ls string 0
"10"
q)10 ls\string 0
,"0"
"10"
"1110"
"3110"
"132110"
"1113122110"
"311311222110"
"13211321322110"
"1113122113121113222110"
"31131122211311123113322110"
"132113213221133112132123222110"

The Look and Say sequence (or “Morris sequence”) grows indefinitely. The Summarize and Say variant converges.

q)sumsay:ls desc@ / summarize & say
q)15 sumsay\string 0
,"0"
"10"
"1110"
"3110"
"132110"
"13123110"
"23124110"
"1413223110"
"1423224110"
"2413323110"
"1433223110"
"1433223110"
"1433223110"
"1433223110"
"1433223110"
"1433223110"

Unique seeds¶

Because sumsay sorts its argument digits, variations in order produce the same sequence.

q)sumsay each string`123`321
"131211"
"131211"

So it is not necessary to construct a sequence for every one of a million seeds.

q)seeds:group desc each string til 1000000

A dictionary. Its key is the unique seeds; its value their permutations.

q)seeds string`21`9721`66633
12 21
1279 1297 1729 1792 1927 1972 2179 2197 2719 2791 2917 2971 7129 7192 7219 72..
33666 36366 36636 36663 63366 63636 63663 66336 66363 66633

Sequences¶

Construct a 30-term sequence for each seed.

seq:(key seeds)!30 sumsay\'key seeds

A dictionary of seeds and their sequences.

Iterator syntax

An iterator is a unary operator with postfix syntax. It takes a single argument, on its left. Above, sumsay is the argument of \ (the Scan form of) the Do iterator. Applying Do to sumsay derives the function sumsay\ . We earlier saw sumsay\ applied as a binary, with infix syntax and 15 as a left argument. Here the left argument is 30: thirty successive applications of sumsay .

With no right argument, 30 sumsay\ is a projection of sumsay\ on its left argument 30, forming a unary that will apply sumsay to its argument 30 times. That projection 30 sumsay\ is the left argument of the Each operator ' . The unary derived function 30 sumsay\' applies 30 sumsay\ to each item of its argument.

Bonus points

The repetition in (key seeds)!30 sumsay\'key seeds can be removed by another iterator.

The expression n f/x applies f successively n times to x . Its Scan form n f\x does the same, but returns the result of each application. The result has n+1 items, corresponding to til n+1 applications. The first item of the result corresponds to 0 applications; i.e. the original argument. 0 f/ and 0 f\ are identity functions for any f .

Which means… 1 f\x <=> (x;f x) .

q)1 reverse\1011001b
1011001b
1001101b

The Apply operator applies a function to a list of its arguments. So for some f we can define

q)(~) . 1 reverse\ "madamimadam"
1b

In our case f is {30 sumsay\'x} .

seq:(!) . 1{30 sumsay\'x}\key seeds

Convergence¶

Count the iterations to convergence: how many does the slowest take?

q)max its:(count distinct@)each seq
21

Above, (count distinct@) is a composition, equivalent to {count distinct x}

Selecting seeds¶

What unique seeds had sequences that took 21 steps to converge? its was defined by applying (count distinct@)each to seq , so its is a dictionary with the same keys, and

q)where its=max its
"9900"

is a dictionary reverse lookup, returning the keys with values of 21. Just that one, then.
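The same reverse-lookup idiom on a toy dictionary, with made-up values, for illustration:

q)d:`a`b`c!2 7 7
q)where d=max d
`b`c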
But that unique seed corresponds to several seeds:

q)raze seeds where its=max its
9009 9090 9900

Above, the reverse dictionary lookup, then the results looked up in dictionary seeds .

Script¶

Put it all together.

ls:{raze(string 1_ deltas d,count x),'x d:where differ x} / look & say
sumsay:ls desc@ / summarize & say
seeds:group desc each string til 1000000 / seeds for million integers
seq:(key seeds)!30 sumsay\'key seeds / sequences for unique seeds
top:max its:(count distinct@)each seq / count iterations
/ report results
rpt:{1 x,": ",y,"\n\n";}
rpt["Seeds"]" "sv string raze seeds where its=top / all forms of top seed/s
rpt["Iterations"]string top
rpt["Sequence"]"\n\n","\n"sv raze seq where its=top

Output:

Seeds: 9009 9090 9900

Iterations: 21

Sequence:

9900
2920
192210
19222110
19323110
1923123110
1923224110
191413323110
191433125110
19151423125110
19251413226110
1916151413325110
1916251423127110
191716151413326110
191726151423128110
19181716151413327110
19182716151423129110
29181716151413328110
19281716151423228110
19281716151413427110
19182716152413228110
19281716151413427110
19182716152413228110
19281716151413427110
19182716152413228110
19281716151413427110
19182716152413228110
19281716151413427110
19182716152413228110
19281716151413427110
19182716152413228110

Review¶

Defined function ls for the Look & Say sequence; sumsay for the Summarize & Say sequence is just the composition ls desc@ .

Took a million integers as strings and used group to hash them into an 8002-entry dictionary keyed by their digits sorted in descending order.

Made a dictionary of 30-item sequences for the unique seeds, using the Do and Each iterators.

Used composition count distinct@ and each to count the number of iterations required in each sequence before it converged, and aggregator max to measure the longest.

Used reverse lookup on the dictionary of iterations to find the unique seeds for the slowest-converging sequences, then mapped them to the corresponding original seeds and to the sequences themselves. (Turns out there is just the one.)

// @kind function
// @category preprocessing
// @desc Transform a list of integers based on a previously generated
// label encoding
// @param data {int[]} Data to be reverted to original representation
// @param map {dictionary} Maps true representation to associated integer or
// the return from .ml.labelEncode.fit
// @return {symbol[]} Integer values of `data` replaced by their appropriate
// 'true' representation.
Values that do not appear in the mapping supplied // by `map` are returned as null values applyLabelEncode:{[data;map] if[99h<>type map;'"Input must be a dictionary"]; $[`modelInfo`transform~key map;map[`modelInfo]?;map?]data } // @kind function // @category preprocessing // @desc Break specified time columns into constituent components // @param tab {table} Contains time columns // @param timeCols {symbol[]} Columns to apply encoding to, if set to :: // all columns with date/time types will be encoded // @return {dictionary} All time or date types broken into labeled versions // of their constituent components timeSplit:{[tab;timeCols] if[(::)~timeCols;timeCols:i.findCols[tab;"dmntvupz"]]; timeDict:i.timeDict/:[tab]timeCols,:(); flip(timeCols _ flip tab),raze timeDict } ================================================================================ FILE: ml_ml_util_utilities.q SIZE: 6,275 characters ================================================================================ // util/utilities.q - Utilities library // Copyright (c) 2021 Kx Systems Inc // // Includes range, arange, combs, eye, iMax, iMin, // linearSpace, shape, trainTestSplit, tab2df, // df2tabTimezone, df2tab \d .ml // @kind function // @category utilities // @desc Range of values // @param array {number[]} A numerical array // @returns {float} Range of its values range:{[array] max[array]-min array } // @kind function // @category utilities // @desc Evenly-spaced values // @param start {number} Start of the interval (inclusive) // @param end {number} End of the interval (non-inclusive) // @param step {number} Spacing between values // @return {number[]} A vector of evenly-spaced values between start and end // in steps of length `step` arange:{[start;end;step] start+step*til 0|ceiling(end-start)%step } // @kind function // @category utilities // @desc Unique combinations of a vector or matrix // @param n {int} Number of values required for combinations // @param degree {int} Degree of the combinations to be produced // @return {int[]} Unique combinations of values from the data combs:{[n;degree] flip(degree-1)i.combFunc[n]/enlist til n } // @kind function // @category utilities // @desc Create identity matrix // @param n {int} Width/height of identity matrix // @return {int[]} Identity matrix of height/width n eye:{[n] @[n#0.;;:;1.]each til n } // @kind function // @category utilities // @desc Index of the first occurance of the maximum value in a list // @param array {number[]} Array of values // @return {number} The index of the maximum element of the array iMax:{[array] array?max array } // @kind function // @category utilities // @desc Index of minimum element of a list // @param array {number[]} Array of values // @return {number} The index of the minimum element of the array iMin:{[array] array?min array } // @kind function // @category utilities // @desc Create an array of evenly-spaced values // @param start {number} Start of the interval (inclusive) // @param end {number} End of the interval (non-inclusive) // @param n {int} How many spaces are to be created // @return {number[]} A vector of `n` evenly-spaced values between // start and end linearSpace:{[start;end;n] start+til[n]*(end-start)%n-1 } // @kind function // @category utilities // @desc Shape of a matrix // @param matrix {number[]} Matrix of values // @return {number[]} Its shape as a list of dimensions shape:{[matrix] -1_count each first scan matrix } // @kind function // @category utilities // @desc Split data into training and test sets // 
@param data {any[]} Matrix of input values // @param target {any[]} A vector of target values the same count as data // @param size {float[]} Percentage size of the testing set // @return {dictionary} Contains the data matrix and target split into a // training and testing set trainTestSplit:{[data;target;size] dictKeys:`xtrain`ytrain`xtest`ytest; n:count data; split:(0,floor n*1-size)_neg[n]?n; dictVals:raze(data;target)@\:/:split; dictKeys!dictVals } // @kind function // @category utilities // @desc Convert q table to Pandas dataframe // @param tab {table} A q table // @return {<} a Pandas dataframe tab2df:{ if[.pykx.loaded;:.pykx.eval["lambda x:x"].pykx.topd x]; keyTab:keys x; c:cols x:0!x; c1:i.findCols[x;"bxhijef"]; df:i.pandasDF[{$[count y;y!x y;()!()]}[x;c1]]; cls:c except c1; // Early exit if only numeric columns existed if[0=count cls;:df]; updTab:@[flip x;i.findCols[x;"c"];enlist each]; // Convert temporal columns to timestamps and // assign as datetime64[ns] columns timeCols:i.findCols[x;"pmdznuvt"]; timeTab:?[updTab;();0b;timeCols!timeCols]; timeTab:@[timeTab;timeCols;{("p"$@[4#+["d"$0];-16+type x]x)-"p"$1970.01m}]; convfn:{x[`:assign][z pykw i.npArray[y z;"datetime64[ns]"]]}; df:convfn/[df;count[timeCols]#enlist timeTab;timeCols]; // Convert symbols to strings (char vector conversions are faster) // otherwise assign the underlying datatype convfn:{x[=;z;enlist $[11h=type dat:y z;string dat;dat]]}[df;updTab]; convfn each cls except timeCols; // Reorder the columns based on initial input df:df[`:reindex][`columns pykw c]; // Index the table if originally keyed $[count keyTab; df[`:set_index]keyTab; df ] } // @kind function // @category utilities // @desc Convert a pandas dataframe containing datetime timezones and // datetime objects (datetime.datetime, datetime.time) to a q table // @param tab {<} An embedPy representation of a Pandas dataframe // @param local {boolean} Indicates if timezone objects are to be converted // to local time (1b) or UTC (0b) // @param qObj {boolean} Indicates if python datetime.date/datetime.time // objects are returned as q (1b) or foreign objects (0b) // @return {<} a q table df2tabTimezone:{[tab;local;qObj] index:$[enlist[::]~tab[`:index.names]`;0;tab[`:index.nlevels]`]; tab:$[index;tab[`:reset_index][];tab]; numpyCols:csym tab[`:columns.to_numpy][]`; if[`index in numpyCols;numpyCols:numpyCols except`index;index-:1]; dataArgs:enlist[`exclude]!enlist`float32`datetime`datetimetz`timedelta; dict:tab[`:select_dtypes][pykwargs dataArgs][`:to_dict;`list]`; dateTimeData:tab[`:select_dtypes][`include pykw`datetime]; dict,:i.dateConvert dateTimeData; timeDeltaData:tab[`:select_dtypes][`include pykw`timedelta]; dict,:i.dateDict[timeDeltaData]+"n"$0; timezoneData:tab[`:select_dtypes][`include pykw`datetimetz]; dict,:i.timezoneConvert[timezoneData;local]; float32Data:tab[`:select_dtypes][`include pykw`float32][`:to_dict;`list]`; dict,:i.float32Convert[float32Data;local]; // Check if the first value in columns are foreign foreign:where 112h=type each first each value dict; if[0<count foreign; dictKeys:key[dict]foreign; dictVals:i.dateTimeConvert[;qObj] each dict dictKeys; dict,:dictKeys!dictVals ]; tbl:flip numpyCols#dict; / convert syms to strings (to match embedpy behaviour) index!@[tbl;exec c from meta tbl where t="s";string] } // @kind function // @category utilities // @desc Convert pandas dataframe to q table // @param tab {<} An embedPy representation of a Pandas dataframe // @return {<} a q table df2tab:df2tabTimezone[;0b;0b] 
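A few quick sanity checks of the helpers above; this assumes the file has been loaded so the definitions live in the .ml namespace, and the expected results (shown as comments) follow directly from the definitions:

.ml.range 3 1 4 1 5                       / 4
.ml.arange[0;10;2]                        / 0 2 4 6 8
.ml.linearSpace[0;1;5]                    / 0 0.25 0.5 0.75 1
.ml.shape(1 2 3;4 5 6)                    / 2 3
.ml.iMax 2 7 1 7                          / 1
key .ml.trainTestSplit[til 10;til 10;.2]  / `xtrain`ytrain`xtest`ytest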
================================================================================ FILE: ml_ml_util_utils.q SIZE: 18,636 characters ================================================================================ // util/utils.q - Utility functions // Copyright (c) 2021 Kx Systems Inc // // General utility functions for the ML Toolkit \d .ml // @kind function // @category utilitiesUtility // @desc Unique combinations of a vector or matrix // @param n {int} Number of values required for combinations // @param vals {int[]} Indices involved in the combination // @return {int[]} Unique combinations of values from the data i.combFunc:{[n;vals] j@:i:where 0<>k:n-j:1+last vals; sumVals:-1_sums@[(1+sum k i)#1;0,sums k i;:;(j,0)-0,-1+j+k i]; (vals@\:where k),enlist sumVals } // @private // @kind function // @category utilitiesUtility // @desc Convert python float32 function to produce correct precision // Note check for x~()!() which is required in cases where underlying // representation is float32 for dates/times // @param data {float[]} Floating point data from the dataFrame // @param local {boolean} Indicates if timezone objects are to be converted // to local time (1b) or UTC (0b) // @return {float[]} Python float32 objects converted to correct precision // in kdb i.float32Convert:{[data;local] $[(local~0b)|data~()!(); data; ?[0.000001>data;"F"$string data;0.000001*floor 0.5+data*1000000] ] } // @private // @kind function // @category utilitiesUtility // @desc Convert datetime.timezone types to kdb+ date/time // @param tab {<} Contains columns with datetime timezone objects // @param local {boolean} Indicates if timezone objects are to be converted // to local time (1b) or UTC (0b) // @return {dictionary} Datetime objects are converted to kdb date/time // objects i.timezoneConvert:{[tab;local] $[local~0b; i.dateConvert tab; "P"$neg[6]_/:'cstring tab[`:astype;`str][`:to_dict;<;`list] ] } // @private // @kind function // @category utilitiesUtility // @desc Convert datetime/datetimetz objects to kdb timestamp // @param dataFrame {<} Pandas dataFrame containing datetime data // @return {dictionary} Datetime objects are converted to timestamps in kdb i.dateConvert:{[dataFrame] nullCols:where any each dataFrame[`:isnull;::][`:to_dict;<;`list]; $[count nullCols; [npCols:csym dataFrame[`:columns.to_numpy][]`; dropCols:dataFrame[`:drop;npCols except nullCols;`axis pykw 1]; nullData:"P"$cstring dropCols[`:astype;`str][`:to_dict;<;`list]; nonNullData:i.dateDict dataFrame[`:drop;nullCols;`axis pykw 1]; nullData,nonNullData+1970.01.01D0 ]; i.dateDict[dataFrame]+1970.01.01D0 ] } // @private // @kind function // @category utilitiesUtility // @desc Convert datetime data to integer representation // @param data {<} Pandas dataframe object containing timedelta objects // @return {dictionary} Datetime objects are converted to integer values i.dateDict:{[data] data[`:astype;`int64][`:to_dict;<;`list] } // @private // @kind function // @category utilitiesUtility // @desc Convert datetime.date/time objects to kdb+ date/time // @param dateTime {<} Python datetime object // @param qObj {boolean} Indicates if python datetime.date/datetime.time // objects // are returned as q (1b) or foreign objects (0b) // @return {datetime;<} kdb date/time format or embedpy object i.dateTimeConvert:{[dateTime;qObj] $[qObj~0b; dateTime; [firstVal:.p.wrap first dateTime; // Convert datetime.time/date to iso string format and convert to kdb+ // otherwise return foreign $[i.isInstance[firstVal;i.dateTime`:time]; i.isoFormat["N"]each 
dateTime;
      i.isInstance[firstVal;i.dateTime`:date];
      i.isoFormat["D"]each dateTime;
      dateTime
      ]
    ]
  ]
  }

// @private
// @kind function
// @category utilitiesUtility
// @desc Cast python datetime object to a kdb datatype
// @param cast {string} Data type in which python object will be cast to
// @param dateTime {<} Python datetime object
// @return {any} Python datetime object casted to kdb datatype
i.isoFormat:{[cast;dateTime]
  cast$.p.wrap[dateTime][`:isoformat][]`
  }

Timezones (TZ) and Daylight Savings Time (DST)¶

Q has two built-in functions ltime and gtime which can be used to get the UTC time or local time according to the TZ shell environment setting.

One solution for more comprehensive timezone calculations is to have a table that contains the timezones, their UTC offsets, and the datetime of any DST changes. e.g.

timezoneID    gmtDateTime                   gmtOffset            localDateTime
----------------------------------------------------------------------------------------------
Europe/Zurich 2006.03.26D01:00:00.000000000 0D02:00:00.000000000 2006.03.26D03:00:00.000000000
Europe/Zurich 2006.10.29D01:00:00.000000000 0D01:00:00.000000000 2006.10.29D02:00:00.000000000
Europe/Zurich 2007.03.25D01:00:00.000000000 0D02:00:00.000000000 2007.03.25D03:00:00.000000000
Europe/Zurich 2007.10.28D01:00:00.000000000 0D01:00:00.000000000 2007.10.28D02:00:00.000000000
Europe/Zurich 2008.03.30D01:00:00.000000000 0D02:00:00.000000000 2008.03.30D03:00:00.000000000
Europe/Zurich 2008.10.26D01:00:00.000000000 0D01:00:00.000000000 2008.10.26D02:00:00.000000000
Europe/Zurich 2009.03.29D01:00:00.000000000 0D02:00:00.000000000 2009.03.29D03:00:00.000000000
Europe/Zurich 2009.10.25D01:00:00.000000000 0D01:00:00.000000000 2009.10.25D02:00:00.000000000
Europe/Zurich 2010.03.28D01:00:00.000000000 0D02:00:00.000000000 2010.03.28D03:00:00.000000000
Europe/Zurich 2010.10.31D01:00:00.000000000 0D01:00:00.000000000 2010.10.31D02:00:00.000000000
Europe/Zurich 2011.03.27D01:00:00.000000000 0D02:00:00.000000000 2011.03.27D03:00:00.000000000
Europe/Zurich 2011.10.30D01:00:00.000000000 0D01:00:00.000000000 2011.10.30D02:00:00.000000000
Europe/Zurich 2012.03.25D01:00:00.000000000 0D02:00:00.000000000 2012.03.25D03:00:00.000000000

and then, using three functions, where t is the timezone table:

lg:{[tz;z] exec gmtDateTime+gmtOffset from aj[`timezoneID`gmtDateTime;([]timezoneID:tz;gmtDateTime:z);t]};
gl:{[tz;z] exec localDateTime-gmtOffset from aj[`timezoneID`localDateTime;([]timezoneID:tz;localDateTime:z);t]};
ttz:{[d;s;z]lg[d;gl[s;z]]}

one can transform between local time and UTC and vice-versa, for any specified timezone.
q)lg[enlist `$"Europe/Zurich";enlist 2010.03.28D01:00:00.000] ,2010.03.28D03:00:00.000000000 q)gl[enlist `$"Europe/Zurich";enlist 2010.03.28D03:00:00.000] ,2010.03.28D01:00:00.000000000 and local times between timezones q)show ttz[enlist `$"America/New_York";enlist `$"Europe/Zurich";enlist 2010.03.28D03:00:00.000] ,2010.03.27D21:00:00.000000000 q)show ttz[enlist `$"America/New_York";enlist `$"Europe/Zurich";enlist .z.P] ,2010.01.20D07:00:08.088411000 Generating Reference Data¶ Via TimeZoneDB¶ TimeZoneDB provides a .csv file generated from IANA tz database, which can be downloaded from https://timezonedb.com/download. Please check any current license details from https://timezonedb.com; The time_zone.csv can be loaded as follows: q)t:flip `timezoneID`gmtDateTime`gmtOffset`dst!("S JIB";csv)0:`:time_zone.csv q)delete from `t where gmtDateTime>=10170056837; / remove any unix timestamps greater than our max timestamp q)update gmtDateTime:12h$-946684800000000000+gmtDateTime*1000000000 from `t; / change datatype timestamp q)update gmtOffset:16h$gmtOffset*1000000000 from `t; / change datatype to timespan q)update localDateTime:gmtDateTime+gmtOffset from `t; / create localtime when change occurred q)`gmtDateTime xasc `t; q)update `g#timezoneID from `t; Via Java util¶ The timezone information can be generated using a brute-force approach in Java, and written to a CSV file using: KxSystems/cookbook/timezones/WriteTzInfo.java Date Period The above Java code creates times between years 1900 and 2100, and can be edited for different date periods Import into kdb+ and save to a binary file using q)t:("SPJ";enlist ",")0:`:tzinfo.csv; q)update gmtOffset:`timespan$1000000000*gmtOffset from `t; q)update localDateTime:gmtDateTime+gmtOffset from `t; q)`gmtDateTime xasc `t; q)update `g#timezoneID from `t; q)`:tzinfo set t; / save file for easy distribution A previously generated CSV can be found at: KxSystems/cookbook/timezones/tzinfo.zip – zipped tzinfo.csv Via Unix zdump¶ Alternatively you can use the zdump Unix command. Valid timezones supported by the system can be found in /usr/share/zoneinfo/ e.g. q)system"zdump -v Africa/Cairo" "Africa/Cairo Fri Dec 13 20:45:52 1901 UTC = Fri Dec 13 22:45:52 1901 EET isdst=0" "Africa/Cairo Sat Dec 14 20:45:52 1901 UTC = Sat Dec 14 22:45:52 1901 EET isdst=0" "Africa/Cairo Sun Jul 14 21:59:59 1940 UTC = Sun Jul 14 23:59:59 1940 EET isdst=0" "Africa/Cairo Sun Jul 14 22:00:00 1940 UTC = Mon Jul 15 01:00:00 1940 EEST isdst=1" ... for example, to load a table based on info from Africa/Cairo : t:([] timezoneID:(); gmtDateTime:(); gmtOffset:(); localDateTime:(); abbr:(); dst:()); mon:`Jan`Feb`Mar`Apr`May`Jun`Jul`Aug`Sep`Oct`Nov`Dec!("01";"02";"03";"04";"05";"06";"07";"08";"09";"10";"11";"12") uptz:{[x;y] prepend:{if[1=count x;:"0",x];x}; x:" " vs ssr[x;" ";" "]; t1:12h$value "" sv (x[5];enlist".";mon`$x[2];enlist".";prepend[x[3]];enlist"D";x[4];".000000000"); t2:12h$value "" sv (x[12];enlist".";mon`$x[9];enlist".";prepend[x[10]];enlist"D";x[11];".000000000"); y upsert (`$x[0];t1;t2-t1;t2;`$x[13];1h$parse @["=" vs x[14];1]); }; poptz:{[x;y]uptz[;`t] each system "zdump -v ",x;}; poptz["Africa/Cairo";`t]; Unicode¶ Unicode text can be stored in symbol, byte and character datatypes. Since the data is simply a sequence of bytes, any Unicode format can be stored. However, it is best to use an encoding such as UTF-8 or GBK that extends 7-bit ASCII, i.e. a single byte in the range 00 –7f means the same thing in ASCII. 
kdb+ will load a script with such encoding, but it will not load other formats. Note that if using these encodings, avoid having a byte-order-mark prefix on the data.

The q language itself uses only 7-bit ASCII. For example, the statement 2+3 should be given as the three decimal bytes 50 43 51, as in:

q)`char$50 43 51
"2+3"
q)value `char$50 43 51
5

Fixed-width Unicode formats cannot be used, since for example, in UTF-16, 2+3 would be the six decimal bytes 50 0 43 0 51 0, and q does not recognize this:

q)value `char$50 0 43 0 51 0
'char

The display console should have the matching code page set or you will not be able to view the data correctly. e.g. if you store in UTF-8 format, ensure that your code page for the display is also UTF-8.

Table and column names should be plain ASCII. For example, the following has Chinese characters in symbol and character columns:

sym:`apples`bananas`oranges
name:(`$"蘋果";`$"香蕉";`$"橙")
text:("每日一蘋果, 醫生遠離我";"香蕉船是一道可口的甜品";"從佛羅里達州來的鮮橙很甜美")
t:([]sym;name;text)

You can work with this table as usual, but note that the q console displays the text entries as their octal character numbers:

q)select sym,name from t
sym     name
--------------
apples  蘋果
bananas 香蕉
oranges 橙
q)select from t where name=`$"香蕉"
sym     name text                                       ..
----------------------------------------------------------..
bananas 香蕉 "\351\246\231\350\225\211\350\210\271\346\..

Display with -1 to show formatted text:

q)-1 text 0;
每日一蘋果, 醫生遠離我

Example assignments using the C interface:

int main(){
  int c=khp("localhost",5001);
  k(c,"set",ks("a"),kp("香蕉"),(K)0);
  k(c,"set",ks("b"),kp("\351\246\231\350\225\211"),(K)0);
  close(c);
}

// @private
//
// @overview
// Delete all folders relating to an experiment or to 1/all versions of a model
//
// @param config {dict} Configuration information provided by the user
// @param objectType {symbol} ``` `experiment `allModels or `modelVersion```
//
// @return {null}
registry.util.delete.object:{[config;objectType]
  // Required variables
  folderPath:config`folderPath;
  experimentName:config`experimentName;
  modelName:config`modelName;
  version:config`version;
  // Generate modelStore and object paths based on objectType
  paths:registry.util.getObjectPaths
    [folderPath;objectType;experimentName;modelName;version;config];
  modelStorePath:paths`modelStorePath;
  checkPath:objectPath:paths`objectPath;
  objectString:1_string objectPath;
  // Check if object exists before attempting to delete
  if["*"~last objectString;checkPath:hsym`$-1_objectString];
  if[emptyPath:()~key checkPath;
    logging.info"No artifacts created for ",objectString,". Unable to delete."
]; // Where clause relative to each object type objectCondition:registry.util.delete.where [experimentName;modelName;version;objectType]; whereClause:enlist(not;objectCondition); // Update the modelStore with remaining models newModels:?[modelStorePath;whereClause;0b;()]; modelStorePath set newModels; // Delete relevant folders if[not emptyPath; logging.info"Removing all contents of ",objectString; registry.util.delete.folder objectPath ]; // Load new modelStore load modelStorePath; } // @private // // @overview // Functional where clause required to delete objects from the modelStore // // @param experimentName {string} Name of experiment // @param modelName {string} Name of model // @param version {long[]} Model version number (major;minor) // @param objectType {symbol} ``` `experiment `allModels or `modelVersion``` // // @return {(fn;symbol;symbol)} Where clause in functional form registry.util.delete.where:{[experimentName;modelName;version;objectType] $[objectType~`allModels; (like;`modelName;modelName); objectType~`modelVersion; (&;(like;`modelName;modelName);({{x~y}[y]'[x]};`version;version)); (like;`experimentName;experimentName) ] } ================================================================================ FILE: ml_ml_registry_q_main_utils_get.q SIZE: 10,695 characters ================================================================================ // get.q - Utilties relating to retrieval of objects from the registry // Copyright (c) 2021 Kx Systems Inc // // @overview // Utilities for object retrieval within the registry // // @category Model-Registry // @subcategory Utilities // // @end \d .ml // @private // // @overview // Retrieve a model from the registry, this is a wrapped version of // this functionality to facilitate protected execution in the case // that issues arise with retrieval and loading of a model from // cloud providers or an on-prem location // // @param storage {symbol} The form of storage from which the model is // being retrieved // @param experimentName {string|null} The name of an experiment from which // to retrieve a model, if no modelName is provided the newest model // within this experiment will be used. 
If neither modelName or // experimentName are defined the newest model within the // "unnamedExperiments" section is chosen // @param modelName {string|null} The name of the model to be retrieved // in the case this is null, the newest model associated with the // experiment is retrieved // @param version {long[]|null} The specific version of a named model to retrieve // in the case that this is null the newest model is retrieved (major;minor) // @param config {dict} Configuration containing information surrounding // the location of the registry and associated files // @param optionalKey {sym} Optional symbol for loading model // // @return {dict} The model and information related to the // generation of the model registry.util.get.model:{[storage;experimentName;modelName;version;config;optionalKey] // Retrieve the model from the store meeting the user specified conditions modelDetails:registry.util.search.model[experimentName;modelName;version;config]; if[not count modelDetails; logging.error"No model meeting your provided conditions was available" ]; // Construct the path to model folder containing the model to be retrieved config,:flip modelDetails; configPath:registry.util.path.modelFolder[config`registryPath;config;::]; modelPath:registry.util.path.modelFolder[config`registryPath;config;`model]; codePath:registry.util.path.modelFolder[config`registryPath;config;`code]; registry.util.load.code codePath; func:{[k;configPath;modelDetails;modelPath;config;storage] $[k~(::); modelConfig:configPath,"/config/modelInfo.json"; modelConfig:configPath,"/config/",string[k],"/modelInfo.json" ]; modelInfo:.j.k raze read0 hsym`$modelConfig; // Retrieve the model based on the form of saved model modelType:first`$modelDetails`modelType; modelPath,:$[k~(::);"";string[k],"/"],$[modelType~`q; "mdl"; modelType~`keras; "mdl.h5"; modelType~`torch; "mdl.pt"; modelType~`pyspark; "mdl.model"; "mdl.pkl" ]; model:mlops.get[modelType] $[modelType in `q;modelPath;pydstr modelPath]; if[registry.config.commandLine`deployType; axis:modelInfo[`modelInformation;`axis]; model:mlops.wrap[`python;model;axis]; ]; returnInfo:`modelInfo`model!(modelInfo;model); returnInfo }[;configPath;modelDetails;modelPath;config;storage]; if[b:()~key hsym `$configPath,"/config/modelInfo.json"; k:key hsym `$configPath,"/config"]; r:$[b;$[optionalKey~(::);k!func'[k];func optionalKey];func[::]]; if[`local<>storage;registry.util.delete.folder config`folderPath]; r } // @private // // @overview // Retrieve metrics from the registry, this is a wrapped version of this // functionality to facilitate protected execution in the case that issues // arise with retrieval or loading of metrics from cloud providers or // an on-prem location // // @param storage {symbol} The form of storage from which the model is // being retrieved // @param experimentName {string|null} The name of an experiment from which // to retrieve a model, if no modelName is provided the newest model // within this experiment will be used. 
If neither modelName or // experimentName are defined the newest model within the // "unnamedExperiments" section is chosen // @param modelName {string|null} The name of the model to be retrieved // in the case this is null, the newest model associated with the // experiment is retrieved // @param version {long[]|null} The specific version of a named model to retrieve // in the case that this is null the newest model is retrieved (major;minor) // @param config {dictionary} Configuration containing information surrounding // the location of the registry and associated files // @param param {null|dict|symbol} Search parameters for the retrieval // of metrics // // @return {table} The metric table for a specific model, which may // potentially be filtered registry.util.get.metric:{[storage;experimentName;modelName;version;config;param] modelDetails:registry.util.search.model[experimentName;modelName;version;config]; if[not count modelDetails; logging.error"No model meeting your provided conditions was available" ]; // Construct the path to model folder containing the model to be retrieved config,:flip modelDetails; metricPath:registry.util.path.modelFolder[config`registryPath;config;`metrics]; metricPath:metricPath,"metric"; metric:1_get hsym`$metricPath; returnInfo:registry.util.search.metric[metric;param]; if[`local<>storage;registry.util.delete.folder config`folderPath]; returnInfo } // @private // // @overview // Retrieve parameters from the registry, this is a wrapped version of this // functionality to facilitate protected execution in the case that issues // arise with retrieval or loading of metrics from cloud providers or // an on-prem location // // @param storage {symbol} The form of storage from which the model is // being retrieved // @param experimentName {string|null} The name of an experiment from which // to retrieve a model, if no modelName is provided the newest model // within this experiment will be used. If neither modelName or // experimentName are defined the newest model within the // "unnamedExperiments" section is chosen // @param modelName {string|null} The name of the model to be retrieved // in the case this is null, the newest model associated with the // experiment is retrieved // @param version {long[]|null} The specific version of a named model to retrieve // in the case that this is null the newest model is retrieved (major;minor) // @param config {dictionary} Configuration containing information surrounding // the location of the registry and associated files // @param paramName {symbol|string} The name of the parameter to retrieve // // @return {string|dict|table|float} The value of the parameter associated // with a named parameter saved for the model. 
registry.util.get.params:{[storage;experimentName;modelName;version;config;paramName] modelDetails:registry.util.search.model[experimentName;modelName;version;config]; if[not count modelDetails; logging.error"No model meeting your provided conditions was available" ]; // Construct the path to model folder containing the model to be retrieved config,:flip modelDetails; paramPath:registry.util.path.modelFolder[config`registryPath;config;`params]; paramName:$[-11h=type paramName; string paramName; 10h=type paramName; paramName; logging.error"ParamName must be of type string or symbol" ]; paramPath,:paramName,".json"; returnInfo:registry.util.search.params[paramPath]; if[`local<>storage;registry.util.delete.folder config`folderPath]; returnInfo } registry.util.get.version:{[storage;experimentName;modelName;version;config;param] modelDetails:registry.util.search.model[experimentName;modelName;version;config]; if[not count modelDetails; logging.error"No model meeting your provided conditions was available" ]; config,:flip modelDetails; rootPath:registry.util.path.modelFolder[config`registryPath;config;::]; versionInfo:@[read0;hsym `$rootPath,"/.version.info";{'"Version information not found for model"}]; .j.k raze versionInfo }; // @private // // @overview // Retrieve a q/python/sklearn/keras model or parameters/metrics related to a // specific model from the registry. // // @todo // Add type checking for modelName/experimentName/version // // @param cli {dict} Command line arguments as passed to the system on // initialisation, this defines how the fundamental interactions of // the interface are expected to operate. // @param folderPath {dict|string|null} Registry location. // 1. Can be a dictionary containing the vendor and location as a string, e.g.: // - enlist[`local]!enlist"myReg" // - enlist[`aws]!enlist"s3://ml-reg-test" // 2. A string indicating the local path // 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON // @param experimentName {string|null} The name of an experiment from which // to retrieve a model, if no modelName is provided the newest model // within this experiment will be used. 
If neither modelName or // experimentName are defined the newest model within the // "unnamedExperiments" section is chosen // @param modelName {string|null} The name of the model to be retrieved // in the case this is null, the newest model associated with the // experiment is retrieved // @param version {long[]|null} The specific version of a named model to retrieve // in the case that this is null the newest model is retrieved (major;minor) // @param param {null|dict|symbol|string} Parameter required for parameter/ // metric retrieval // in the case when this is a string, it is converted to a symbol // // @return {dict} The model and information related to the // generation of the model registry.util.get.object:{[typ;folderPath;experimentName;modelName;version;param] if[(typ~`metric)&abs[type param] in 10 11h; param:enlist[`metricName]!enlist $[10h=abs[type param];`$;]param ]; config:registry.util.check.config[folderPath;()!()]; if[not`local~storage:config`storage;storage:`cloud]; // Locate/retrieve the registry locally or from the cloud config:$[storage~`local; registry.local.util.check.registry config; [checkFunction:registry.cloud.util.check.model; checkFunction[experimentName;modelName;version;config`folderPath;config] ] ]; getParams:$[(typ~`model)&param~(::); (storage;experimentName;modelName;version;config;::); (storage;experimentName;modelName;version;config;param) ]; .[registry.util.get typ; getParams; {[x;y;z] $[`local~x;;registry.util.delete.folder]y; 'z }[storage;config`folderPath] ] } ================================================================================ FILE: ml_ml_registry_q_main_utils_init.q SIZE: 846 characters ================================================================================ // init.q - Initialise main q utilities for the model registry // Copyright (c) 2021 Kx Systems Inc // // Utilities relating to all basic interactions with the registry \d .ml

Circles in a Circle, 1923 Everything begins with a dot. — W.W. Kandinsky . Apply, Index, Trap @ Apply At, Index At, Trap At¶ - Apply a function to a list of arguments - Get items at depth in a list - Trap errors | rank | syntax | function semantics | list semantics | |---|---|---|---| | 2 | v . vx .[v;vx] | Apply Apply v to list vx of arguments | Index Get item/s vx at depth from v | | 2 | u @ ux @[u;ux] | Apply At Apply unary u to argument ux | Index At Get items ux from u | | 3 | .[g;gx;e] | Trap Try g . gx ; catch with e | | | 3 | @[f;fx;e] | Trap At Try f@fx ; catch with e | | Where e is an expression, typically a function; f is a unary function and fx in its domain; g is a function of rank \(n\) and gx an atom or list of count \(n\) with items in the domains of g; v is a value of rank \(n\) (or a handle to one) and vx a list of count \(n\) with items in the domains of v; u is a unary value (or a handle to one) and ux in its domain. Amend, Amend At¶ For the ternary and quaternary forms .[d; i; u] @[d; i; u] .[d; i; v; vy] @[d; i; v; vy] where d is a list or dictionary, or a handle to a list, dictionary or datafile; i indexes d as d .
i or d @ i (must be a list for Amend); u is a unary with d in its domain; v is a binary with d and vy in its left and right domains; see Amend and Amend At. Apply, Index¶ v . vx evaluates value v on the \(n\) arguments listed in vx . q)add / addition 'table' 0 1 2 3 1 2 3 4 2 3 4 5 3 4 5 6 q)add . 2 3 / add[2;3] (Index) 5 q)(+) . 2 3 / +[2;3] (Apply) 5 q).[+;2 3] 5 q).[add;2 3] 5 If v has rank \(n\), then vx has \(n\) items and v is evaluated as: v[vx[0]; vx[1]; …; vx[-1+count vx]] If v has rank 2, then vx has 2 items and v is applied to the first argument vx[0] and the second argument vx[1] . v[vx[0];vx[1]] Variadic operators Most binary operators such as Add have deprecated unary forms and are thus actually variadic. Where v is such a variadic operator, parenthesize it to provide it as the left argument of Apply. q).[+;2 2] 4 q)(+) . 2 2 4 If v has rank 1, then vx has one item and v is applied to the argument vx[0] . v[vx[0]] Q for Mortals §6.5.3 Indexing at Depth Nullaries¶ Nullaries (functions of rank 0) are handled differently. The pattern above suggests that the empty list () would be the argument list to nullary v , but Apply for nullary v is denoted by v . enlist[::] , i.e. the right argument is the enlisted null. For example: q)a: 2 3 q)b: 10 20 q){a + b} . enlist[::] 12 23 Index¶ d . i returns an item from list or dictionary d as specified by successive items in list i . Since 4.1t 2022.03.25, d can be a persisted table. The result is found in d at depth count i as follows. The list i is a list of successive indexes into d . i[0] must be in the domain of d@ . It selects an item of d , which is then indexed by i[1] , and so on. ( (d@i[0]) @ i[1] ) @ i[2] … q)d ((1 2 3;4 5 6 7) ;(8 9;10;11 12) ;(13 14;15 16 17 18;19 20)) q)d . enlist 1 / select item 1, i.e. d@1 8 9 10 11 12 q)d . 1 2 / select item 2 of item 1 11 12 q)d . 1 2 0 / select item 0 of item 2 of item 1 11 A right argument of enlist[::] selects the entire left argument. q)d . enlist[::] (1 2 3;4 5 6 7) (8 9;10;11 12) (13 14;15 16 17 18;19 20) Index At¶ The selections at each level are individual applications of Index At: first, item d@i[0] is selected, then (d@i[0])@i[1] , then ((d@i[0])@ i[1])@ i[2] , and so on. These expressions can be rewritten using Over applied to Index At; the first is d@/i[0] , the second is d@/i[0 1] , and the third is d@/i[0 1 2] . In general, for a vector i of any count, d . i is identical to d@/i . q)((d @ 1) @ 2) @ 0 / selection in terms of a series of @s 11 q)d @/ 1 2 0 / selection in terms of @-Over 11 Cross sections¶ Index is cross-sectional when the items of i are lists. That is, items-at-depth in d are indexed for paths made up of all combinations of atoms of i[0] and atoms of i[1] and atoms of i[2] , and so on to the last item of i . The simplest case of cross-sectional index occurs when the items of i are vectors. For example, d .(2 0;0 1) selects items 0 and 1 from both items 2 and 0: q)d . (2 0; 0 1) 13 14 15 16 17 18 1 2 3 4 5 6 7 q)count each d . (2 0; 0 1) 2 2 Note that items appear in the result in the same order as the indexes appear in i . The first item of i selects two items of d , as in d@i[0] . The second item of i selects two items from each of the two items just selected, as in (d@i[0])@'i[1] . Had there been a third vector item in i , say of count 5, then that item would select five items from each of the four items-at-depth 1 just selected, as in ((d@i[0])@'i[1])@''i[2] , and so on.
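For illustration (an addition, not part of the original page), assuming a hypothetical 3×4 matrix m, cross-sectional indexing with vector items picks out a submatrix:
q)m:3 4#til 12
q)m . (0 2;1 3)    / rows 0 and 2, columns 1 and 3
1 3
9 11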
When the items of i are vectors the result is rectangular to at least depth count i , depending on the regularity of d , and the k th item of its shape vector is (count each i)[k] for every k less than count i . That is, the first count i items of the shape of the result are count each i . More general cross-sectional indexing occurs when the items of i are rectangular lists, not just vectors, but the situation is much like the simpler case of vector items. Nulls in i ¶ Nulls in i mean “select all”: if i[0] is null, then continue on with d and the rest of i , i.e. 1_i ; if i[1] is null, then for every selection made through i[0] , continue on with that selection and the rest of i , i.e. 2_i ; and so on. For example, d .(::;0) means that the 0th item of every item of d is selected. q)d (1 2 3;4 5 6 7) (8 9;10;11 12) (13 14;15 16 17 18;19 20) q)d . (::;0) 1 2 3 8 9 13 14 Another example, this time with i[1] equal to null: q)d . (0 2;::;1 0) (2 1;5 4) (14 13;16 15;20 19) Note that d .(::;0) is the same as d .(0 1 2;0) , but in the last example, there is no value that can be substituted for null in (0 2;;1 0) to get the same result, because when item 0 of d is selected, null acts like 0 1 , but when item 2 of d is selected, it acts like 0 1 2 . The general case of a non-negative integer list i ¶ In the general case, when the items of i are non-negative integer atoms or lists, or null, the structure of the result can be thought of as cascading structures of the items of i . That is, with nulls aside, the result is structurally like i[0] , except that wherever there is an atom in i[0] , the result is structurally like i[1] , except that wherever there is an atom in i[1] , the result is structurally like i[2] , and so on. The general case of Index can be defined recursively in terms of Index At by partitioning the list i into its first item and the rest: Index:{[d;F;R] $[ F~::; Index[d; first R; 1 _ R]; 0=count R; d @ F; 0>type F; Index[d @ F; first R; 1 _ R]; Index[d;; R]'F ]} That is, d . i is Index[d;first i;1_i] . To work through the definition, start with F as the first item of i and R as the remainder. At each step in the recursion: - if F is null then select all of d and continue on, with the first item of the remainder R as the new F and the remainder of R as the new remainder; - otherwise, if the remainder is the empty vector apply Index At (the right argument F is now the last item of i ), and we are done; - otherwise, if F is an atom, apply Index At to select that item of d and continue on in the same way as when F is null; - otherwise, apply Index with fixed arguments d and R , but independently to the items of the list F . Dictionaries and symbolic indexing¶ If i is a symbol atom then d must be a dictionary or handle of a directory on the K-tree, and d . i selects the value of the entry named in i . For example, if: dir:`a`b!(2 3 4;"abcdefg") then `dir . enlist`b is "abcdefg" and `dir . (`b;1 3 5) is "bdf" . If i is a list whose items are non-negative integer atoms and symbol atoms, then just like the non-negative integer vector case, d . i is a single item at depth count i in d . The difference is that wherever a symbol appears in i , say as the kth item, the selection up to the kth item must produce a dictionary or a handle of a directory. Selection by the kth item is the value of an entry in that dictionary or directory, and further selections go on from there. For example: q)(1;`a`b!(2 3 4;10 20 30 40)) .
(1; `b; 2) 30 As we have seen above for the general case, every atom in the k th item of i must be a valid index of all items at depth k selected by d . k # i . Moreover, symbols can only select from dictionaries and directories, and integers cannot. Consequently, if the k th item of i contains a symbol atom, then all items selected by d . k # i must be dictionaries or handles of directories, and therefore all atoms in the k th item of i must be symbols. It follows that each item of i must be made up entirely of non-negative integer atoms, or entirely of symbol atoms, and if the k th item of i is made up of symbols, then all items at depth k in d selected by the first k items of i must be dictionaries. Note that if d is either a dictionary or handle to a directory then d . enlist key d is a list of values of all the entries. Step dictionaries¶ Where d is a dictionary, d@i or d[i] or d i returns for each item of i that is outside the domain of d a null of the same type as the values. q)d:`cat`cow`dog`sheep!`chat`vache`chien`mouton q)d cat | chat cow | vache dog | chien sheep| mouton q)d `sheep`snake`cat`ant `mouton``chat` q) q)e:(10*til 10)!til 10 q)e 0 | 0 10| 1 20| 2 30| 3 40| 4 50| 5 60| 6 70| 7 80| 8 90| 9 q)e 80 35 20 -10 8 0N 2 0N A step dictionary has the sorted attribute set. Its keys are a sorted vector. Where s is a step dictionary, and i[k] are the items of i that are outside the domain of d , the value/s for d@i@k are the values for the highest keys that are lower than i k . q)d:`cat`cow`dog`sheep!`chat`vache`chien`mouton q)ds:`s#d q)ds~d 1b q)ds `sheep`snake`cat`ant `mouton`mouton`chat` q) q)es:`s#e q)es~e 1b q)es 80 35 20 -10 8 3 2 0N Set Attribute Step Dictionaries Apply At, Index At¶ @ is syntactic sugar for the case where u is a unary and ux a 1-item list. u@ux is always equivalent to u . enlist ux . Brackets are syntactic sugar The brackets of an argument list are also syntactic sugar. Nothing can be expressed with brackets that cannot also be expressed using . . You can use the derived function @\: to apply a list of unary values to the same argument. q){`o`h`l`c!(first;max;min;last)@\:x}1 2 3 4 22 / open, high, low, close o| 1 h| 22 l| 1 c| 22 Composition¶ A sequence of unaries u , v , w … can be composed with Apply At as u@v@w@ . All but the last @ may be elided: u v w@ . q)tc:til count@ / indexes of a list q)tc "abc" "0 1 2" The last value in the sequence can have higher rank if projected as a unary by Apply. q)di:reciprocal(%). / divide into q)di 2 3 / divide 2 into 3 1.5 Trap¶ In the ternary, if evaluation of the function fails, the expression is evaluated. (Compare try/catch in some other languages.) q).[+;"ab";`ouch] `ouch If the expression is a function, it is evaluated on the text of the signalled error. q).[+;"ab";{"Wrong ",x}] "Wrong type" For a successful evaluation, the ternary returns the same result as the binary. q).[+;2 3;{"Wrong ",x}] 5 Trap At¶ @[f;fx;e] is equivalent to .[f;enlist fx;e] . Use Trap At as a simpler form of Trap, for unary values. .Q.trp (extend trap at) Limit of the trap¶ Trap catches only errors signalled in the applications of f or g . Errors in the evaluation of fx or gx themselves are not caught. q)@[2+;"42";`err] `err q)@[2+;"42"+3;`err] 'type [0] @[2+;"42"+3;`err] ^ When e is not a function¶ If e is a function it will be evaluated only if f or g fails. It will however be parsed before any of the other expressions are evaluated.
q)@[2+;"42";{)}] ') [0] @[2+;"42";{)}] ^ If e is any other kind of expression it will always be evaluated – and first, in the usual right-to-left sequence. In this respect Trap and Trap At are unlike try/catch in other languages. q)@[string;42;a:100] / expression not a function "42" q)a // but a was assigned anyway 100 q)@[string;42;{b::99}] / expression is a function "42" q)b // not evaluated 'b [0] b ^ For most purposes, you will want e to be a function. Q for Mortals §10.1.8 Protected Evaluation Errors signalled¶ index: an atom in vx or ux is not an index to an item-at-depth in d; rank: the count of vx is greater than the rank of v; type: v or u is a symbol atom, but not a handle to a value; type: an atom of vx or ux is not an integer, symbol or null

Connection handles¶ kdb+ communicates with the console, stdout, stderr, file system, and other processes through connection handles. There are three permanent system handles: 0 console 1 stdout 2 stderr File and process handles are created by hopen and destroyed by hclose . Write¶ Syntax: h x neg[h] x where h is a handle, writes x to its target as described below and returns itself. A handle is an int atom but is variadic. Syntactically, it can be an int atom or a unary function. q)1 / one is one 1 q)1 "abc\n" / or stdout abc 1 A handle is an applicable value. It (and its negation) can be applied to an argument and iterated. Console¶ Where h is 0 and x is a string or parse tree, evaluates x in the main thread and returns the result. q)0 "1 \"hello\"" /string hello1 q)0 (+;2;2) /parse tree 4 File, stdout, stderr¶ Where h is stdout, stderr, or a file handle: h x appends string x to the file; neg[h] x, where x is a string, appends x,"\n", and where x is a list of strings, appends x,'"\n", to the file. q)a:1 "quick brown fox\n" quick brown fox q)a 1 q)a:-1 ("quick";"brown";"fox") quick brown fox q)a -1 q)f:`:tmp.txt q)hopen f 3i q)3 "quick brown fox" 3 q)-3 ("quick";"brown";"fox") -3 q)hclose 3 q)read0 f "quick brown foxquick" "brown" "fox" q)\ls data ls: data: No such file or directory 'os [0] \ls data ^ q)h:hopen `:data/new q)h /handle is an integer 3i q)type h /atom -6h q)h "now is the time" /but can be applied as a unary 3i q)/and iterated q)h each (" for all good men";" to come to the aid of the party") 3 3i q)hclose h q)read0 `:data/new /hopen created file path "now is the time for all good men to come to the aid of the party" Process¶ h x sends string x as a sync request (get); neg[h] x sends string x as an async request (set) Read¶ Console¶ Reading from the console with read0 permits interactive input. q)s:{1 x;read0 0}"Next track: " Next track: Bewlay Brothers q)s "Bewlay Brothers"

deltas ¶ Differences between adjacent list items deltas x deltas[x] Where x is a numeric or temporal vector, returns differences between consecutive pairs of its items.
q)deltas 1 4 9 16 1 3 5 7 In a query to get price movements: update diff:deltas price by sym from trade With signum to count the number of up/down/same ticks: q)select count i by signum deltas price from trade price| x -----| ---- -1 | 247 0 | 3 1 | 252 domain: b g x h i j e f c s p m d z n u v t range: i . i i i j e f . . n i i f n u v t First predecessor¶ The predecessor of the first item is 0. q)deltas 2000 2005 2007 2012 2020 2000 5 2 5 8 It may be more convenient to have 0 as the first item of the result. q)deltas0:{first[x]-':x} q)deltas0 2000 2005 2007 2012 2020 0 5 2 5 8 Subtract Each Prior The derived function -': (Subtract Each Prior) used to define deltas is variadic and can be applied as either a unary or a binary. However, deltas is supported only as a unary function. For binary application, use the derived function. desc , idesc , xdesc ¶ Sort and grade: descending Q chooses from a variety of algorithms, depending on the type and data distribution. desc ¶ Descending sort desc x desc[x] Returns x sorted into descending order. The function is uniform. The sort is stable: it preserves order between equals. Where x is a - vector, it is returned sorted - mixed list, the result is sorted within datatype - dictionary, returns it sorted by the values - table, returns it sorted by the first non-key column and with the sorted attribute set on that column Unlike asc , which sets the parted attribute where there are other non-key columns, desc sets only the sorted attribute. q)desc 2 1 3 4 2 1 2 / vector 4 3 2 2 2 1 1 q)desc (1;1b;"b";2009.01.01;"a";0) / mixed list 2009.01.01 "b" "a" 1 0 q)desc `a`b`c!2 1 3 / dictionary c| 3 a| 2 b| 1 q)desc([]a:3 4 1;b:`a`d`s) / table a b --- 4 d 3 a 1 s q)meta desc([]a:3 4 1;b:`a`d`s) c| t f a -| ----- a| j b| s domain: b g x h i j e f c s p m d z n u v t range: b g x h i j e f c s p m d z n u v t idesc ¶ Descending grade idesc x idesc[x] Where x is a list or dictionary, returns the indices needed to sort list it in descending order. q)L:2 1 3 4 2 1 2 q)idesc L 3 2 0 4 6 1 5 q)L idesc L 4 3 2 2 2 1 1 q)(desc L)~L idesc L 1b q)idesc `a`c`b!1 2 3 `b`c`a domain: b g x h i j e f c s p m d z n u v t range: j j j j j j j j j j j j j j j j j j xdesc ¶ Sorts a table in descending order of specified columns. The sort is by the first column specified, then by the second column within the first, and so on. x xdesc y xdesc[x;y] Where x is a symbol vector of column names defined in y , which is passed by - value, returns - reference, updates y sorted in descending order by x . The sorted attribute is not set. The sort is stable, i.e. it preserves order amongst equals. q)\l sp.q q)s s | name status city --| ------------------- s1| smith 20 london s2| jones 10 paris s3| blake 30 paris s4| clark 20 london s5| adams 30 athens q)`city xdesc s / sort descending by city s | name status city --| ------------------- s2| jones 10 paris s3| blake 30 paris s1| smith 20 london s4| clark 20 london s5| adams 30 athens q)meta `city xdesc s / `s# attribute not set c | t f a ------| ----- s | s name | s status| i city | s Duplicate column names xdesc signals dup if it finds duplicate columns in the right argument. (Since V3.6 2019.02.19.) Sorting data on disk¶ xdesc can sort data on disk directly, without loading the entire table into memory: see xasc . Duplicate keys in a dictionary or duplicate column names in a table will cause sorts and grades to return unpredictable results. 
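As a supplementary sketch (added here, not from the original page), sorting the s table from sp.q above on two columns applies the second sort within groups of the first; console alignment is approximate:
q)`city`status xdesc s    / city descending, then status descending within city
s | name  status city
--| -------------------
s3| blake 30     paris
s2| jones 10     paris
s1| smith 20     london
s4| clark 20     london
s5| adams 30     athens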
asc , iasc , xasc , attr , Set Attribute Dictionaries & tables, Metadata, Sorting Q for Mortals §8.8 Attributes dev , mdev , sdev ¶ Deviations dev ¶ Standard deviation dev x dev[x] Where x is a numeric list, returns its standard deviation (as the square root of the variance). Applies to all numeric data types and signals an error with temporal types, char and sym. q)dev 10 343 232 55 134.3484 dev is an aggregate function, equivalent to {sqrt var x} . domain: b g x h i j e f c s p m d z n u v t range: f . f f f f f f f . f f f f f f f f Since 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists. q)M:get`:m77 set m:(2 3;4 0N;1 7) q)dev m 1.247219 2 q)dev M 1.247219 2 q)T:get`:tab/ set t:flip`a`b!flip m q)dev t a| 1.247219 b| 2 q)dev T a| 1.247219 b| 2 dev is a multithreaded primitive. mdev ¶ Moving deviations x mdev y mdev[x;y] Where x is a positive int atomy is a numeric list returns the floating-point x -item moving deviations of y , with any nulls after the first item replaced by zero. The first x items of the result are the deviations of the terms so far, and thereafter the result is the moving deviation. q)2 mdev 1 2 3 5 7 10 0 0.5 0.5 1 1 1.5 q)5 mdev 1 2 3 5 7 10 0 0.5 0.8164966 1.47902 2.154066 2.87054 q)5 mdev 0N 2 0N 5 7 0N / nulls after the first are replaced by 0 0n 0 0 1.5 2.054805 2.054805 q)t b c ---- 1 45 2 46 3 47 q)2 mdev t b c ------- 0 0 0.5 0.5 0.5 0.5 mdev is a uniform function. Domain and range: b g x h i j e f c s p m d z n u v t ---------------------------------------- b | f . f f f f f f f . f f f f f f f f g | . . . . . . . . . . . . . . . . . . x | f . f f f f f f f . f f f f f f f f h | f . f f f f f f f . f f f f f f f f i | f . f f f f f f f . f f f f f f f f j | f . f f f f f f f . f f f f f f f f e | . . . . . . . . . . . . . . . . . . f | . . . . . . . . . . . . . . . . . . c | . . . . . . . . . . . . . . . . . . s | . . . . . . . . . . . . . . . . . . p | . . . . . . . . . . . . . . . . . . m | . . . . . . . . . . . . . . . . . . d | . . . . . . . . . . . . . . . . . . z | . . . . . . . . . . . . . . . . . . n | . . . . . . . . . . . . . . . . . . u | . . . . . . . . . . . . . . . . . . v | . . . . . . . . . . . . . . . . . . t | . . . . . . . . . . . . . . . . . . Range: f Implicit iteration¶ mdev applies to dictionaries and tables. q)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 21 3;4 5 6) q)2 mdev d a| 0 0 0 b| 3 8 1.5 q)2 mdev t a b ------- 0 0 5.5 0.5 9 0.5 q)2 mdev k k | a b ---| ------- abc| 0 0 def| 5.5 0.5 ghi| 9 0.5 sdev ¶ Sample standard deviation sdev x sdev[x] Where x is a numeric list, returns its sample standard deviation as the square root of the sample variance. q)sdev 10 343 232 55 155.1322 sdev is an aggregate function, equivalent to {sqrt var[x]*count[x]%-1+count x} . domain: b g x h i j e f c s p m d z n u v t range: f . f f f f f f f . f f f f f f f f Since 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists. q)M:get`:m77 set m:(2 3;4 0N;1 7) q)sdev m 1.527525 2.828427 q)sdev M 1.527525 2.828427 q)T:get`:tab/ set t:flip`a`b!flip m q)sdev t a| 1.527525 b| 2.828427 q)sdev T a| 1.527525 b| 2.828427 sdev is a multithreaded primitive. var , svar Mathematics Sliding windows Standard deviation, Variance Standard deviation ! 
Dict¶ Make a dictionary or keyed table; remove a key from a table x!y ![x;y] Where x andy are same-length lists, returns a dictionary in whichx is the key andy is the valuey is a simple table andx is a member of1_til count y , returns a keyed table with the firstx columns as its keyy is a table andx is 0, returns a simple table; i.e. removes the key Dictionary keys should be distinct (i.e. {x~distinct x}key dict) but no error is signalled if that is not so. Items of x and y can be of any datatype, including dictionaries and tables. q)`a`b`c!1 2 3 a| 1 b| 2 c| 3 q)show kt:2!([]name:`Tom`Jo`Tom; city:`NYC`LA`Lagos; eye:`green`blue`brown; sex:`m`f`m) name city | eye sex ----------| --------- Tom NYC | green m Jo LA | blue f Tom Lagos| brown m q)show ku:([]name:`Tom`Jo`Tom; city:`NYC`LA`Lagos)!([]eye:`green`blue`brown; sex:`m`f`m) name city | eye sex ----------| --------- Tom NYC | green m Jo LA | blue f Tom Lagos| brown m q)kt~ku 1b q)0!kt name city eye sex -------------------- Tom NYC green m Jo LA blue f Tom Lagos brown m Dict is a uniform function on its right domain. Errors¶ | error | cause | |---|---| | length | x and y are not same-length lists | | length | x is not in 1_ til count y | | type | y is not a simple table | key , value Dictionaries & tables Q for Mortals §5 Dictionaries differ ¶ Find where list items change value differ x differ[x] Returns a boolean list indicating where consecutive pairs of items in x differ. It applies to all data types. It is a uniform function. The first item of the result is always 1b : r[i]=1b for i=0 r[i]=not A[i]~A[i-1] otherwise q)differ`IBM`IBM`MSFT`CSCO`CSCO 10110b q)differ 1 3 3 4 5 6 6 1101110b Split a table with multiple dates into a list of tables with distinct dates. q)d:2009.10.01+asc 100?30 q)s:100?`IBM`MSFT`CSCO q)t:([]date:d;sym:s;price:100?100f;size:100?1000) q)i:where differ t[`date] / indices where dates differ q)tlist:i _ t / list of tables with one date per table q)tlist 0 date sym price size ----------------------------- 2009.10.01 IBM 37.95179 710 2009.10.01 CSCO 52.908 594 2009.10.01 MSFT 32.87258 250 2009.10.01 CSCO 75.15704 592 q)tlist 1 date sym price size ---------------------------- 2009.10.02 MSFT 18.9035 26 2009.10.02 CSCO 12.7531 760 domain: b g x h i j e f c s p m d z n u v t range: b b b b b b b b b b b b b b b b b b differ is a multithreaded primitive. Binary use deprecated As of V3.6 the keyword is variadic. Binary application is deprecated and may disappear in future versions. The keyword cannot be applied infix. For a binary version, use Match Each Prior: ~:' . Basics: Comparison ! Display¶ Write to console and return 0N!x ![0N;x] Returns x after printing its unformatted text representation to the console. q)2+0N!3 3 5 Useful for debugging, or avoiding formatting that obscures the data’s structure. show Debugging distinct ¶ Unique items of a list distinct x distinct[x] Where x is a list returns the distinct (unique) items of x in the order of their first occurrence. The result does not have the unique attribute set. q)distinct 2 3 7 3 5 3 2 3 7 5 Returns the distinct rows of a table. q)distinct flip `a`b`c!(1 2 1;2 3 2;"aba") a b c ----- 1 2 a 2 3 b It does not use comparison tolerance q)\P 14 q)distinct 2 + 0f,10 xexp -13 2 2.0000000000001 distinct is a multithreaded primitive. 
Errors¶ | error | cause | |---|---| | type | x is an atom |

The .Q namespace¶ Tools General Datatype addmonths btoa b64 encode dd join symbols j10 encode binhex f precision format j12 encode base 36 fc parallel on cut ty type ff append columns x10 decode binhex fmt precision format x12 decode base 36 ft apply simple fu apply unique Database gc garbage collect chk fill HDB gz GZip dpft dpfts save table id sanitize dpt dpts save table unsorted qt is table dsftg load process save res keywords en enumerate varchar cols s plain text ens enumerate against domain s1 string representation fk foreign key sha1 SHA-1 encode hdpf save tables V table to dict l load v value ld load and group view subview li load partitions lo load without Constants M chunk size A a an alphabets qp is partitioned b6 bicameral alphanums qt is table n nA nums & alphanums Partitioned database state Debug/Profile bv build vp bt backtrace bvi build incremental vp prf0 code profiler cn count partitioned table sbt string backtrace D partitions trp extend trap at ind partitioned index trpd extend trap MAP maps partitions ts time and space par locate partition PD partition locations Environment pd modified partition locns K k version pf partition field w memory stats pn partition counts pt partitioned tables Environment (Command-line) PV partition values def command defaults pv modified partition values opt command parameters qp is partitioned x non-command parameters vp missing partitions IPC Segmented database state addr IP/host as int P segments fps fpn pipe streaming u date based fs fsn file streaming hg HTTP get File I/O host IP to hostname Cf create empty nested char file hp HTTP post Xf create file Functions defined in q.k are loaded as part of the ‘bootstrap’ of kdb+. Some are exposed in the default namespace as the q language. Others are documented here as utility functions in the .Q namespace. The .Q namespace is reserved for use by KX, as are all single-letter namespaces. Consider all undocumented functions in the namespace as exposed infrastructure – and do not use them. In non-partitioned databases the partitioned database state variables remain undefined. A (upper-case alphabet)¶ a (lower-case alphabet)¶ an (all alphanumerics)¶ .Q.A / upper-case alphabet .Q.a / lower-case alphabet .Q.an / all alphanumerics Strings: upper-case Roman alphabet (.Q.A ), lower-case Roman alphabet (.Q.a ), and all alphanums (.Q.an ). q).Q.A "ABCDEFGHIJKLMNOPQRSTUVWXYZ" q).Q.a "abcdefghijklmnopqrstuvwxyz" q).Q.an "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789" addmonths ¶ .Q.addmonths[x;y] Where x is a date and y is an int, returns x plus y months. q).Q.addmonths[2007.10.16;6 7] 2008.04.16 2008.05.16 If the date x is near the end of the month and (x.month + y )’s month has fewer days than x.month , the result may spill over to the following month.
q).Q.addmonths[2006.10.29;4] 2007.03.01 Mathematics with temporals How to handle temporal data in q addr (IP/host as int)¶ .Q.addr x Where x is a hostname or IP address as a symbol atom, returns the IP address as an integer (same format as .z.a ) q).Q.addr`localhost 2130706433i q).Q.host .Q.addr`localhost `localhost q).Q.addr`localhost 2130706433i q)256 vs .Q.addr`localhost 127 0 0 1 b6 (bicameral-alphanums)¶ .Q.b6 Returns upper- and lower-case alphabet and numerics. q).Q.b6 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" Used for binhex encoding and decoding. bt (backtrace)¶ .Q.bt[] Dumps the backtrace to stdout at any point during execution or debug. q)f:{{.Q.bt[];x*2}x+1} q)f 4 [2] f@:{.Q.bt[];x*2} ^ [1] f:{{.Q.bt[];x*2}x+1} ^ [0] f 4 ^ 10 q)g:{a:x*2;a+y} q)g[3;"hello"] 'type [1] g:{a:x*2;a+y} ^ q)).Q.bt[] >>[1] g:{a:x*2;a+y} ^ [0] g[3;"hello"] ^ >> marks the current stack frame. (Since V4.0 2020.03.23.) The debugger itself occupies a stack frame, but its source is hidden. (Since V3.5 2017.03.15.) btoa (b64 encode)¶ .Q.btoa x q).Q.btoa"Hello World!" "SGVsbG8gV29ybGQh" Since V3.6 2018.05.18. bv (build vp)¶ .Q.bv[] .Q.bv[`] In partitioned DBs, construct the dictionary .Q.vp of table schemas for tables with missing partitions. Optionally allow tables to be missing from partitions, by scanning partitions for missing tables and taking the tables’ prototypes from the last partition. After loading/re-loading from the filesystem, invoke .Q.bv[] to (re)populate .Q.vt /.Q.vp , which are used inside .Q.p1 during the partitioned select .Q.ps . (Since V2.8 2012.01.20, modified V3.0 2012.01.26) If your table exists at least in the latest partition (so there is a prototype for the schema), you could use .Q.bv[] to create empty tables on the fly at run-time without having to create those empties on disk. .Q.bv[`] (with argument) will use prototype from first partition instead of last. (Since V3.2 2014.08.22.) Some admins prefer to see errors instead of auto-manufactured empties for missing data, which is why .Q.bv is not the default behavior. q)n:100 q)t:([]time:.z.T+til n;sym:n?`2;num:n) q).Q.dpft[`:.;;`sym;`t]each 2010.01.01+til 5 `t`t`t`t`t q)tt:t q).Q.dpft[`:.;;`sym;`tt]last 2010.01.01+til 5 `tt q)\l . q)tt +`sym`time`num!`tt q)@[get;"select from tt";-2@]; / error ./2010.01.01/tt/sym: No such file or directory q).Q.bv[] q).Q.vp tt| +`date`sym`time`num!(`date$();`sym$();`time$();`long$()) q)@[get;"select from tt";-2@]; / no error bvi (build incremental vp)¶ It offers the same functionality as .Q.bv , but scans only new partitions loaded in the hdb since the last time .Q.bv or .Q.bvi was run. Since v4.1 2024.09.13. Cf (create empty nested char file)¶ Deprecated Deprecated since 4.1t 2022.03.25. Using resulting files could return file format errors since 3.6. .Q.Cf x A projection of .Q.Xf : i.e. .Q.Xf[`char;] chk (fill HDB)¶ .Q.chk x Where x is a HDB as a filepath, fills tables missing from partitions using the most recent partition containing the table as a template, and reports which partitions (but not which tables) it is fixing. q).Q.chk[`:hdb] () () ,`:/db/2009.01.04 ,`:/db/2009.01.03 Q must have write permission for the HDB area to create missing tables If it signals an error similar to './2010.01.05/tablename/.d: No such file or directory check the process has write permissions for that filesystem. Q for Mortals §14.5.2 .Q.chk cn (count partitioned table)¶ .Q.cn x Where x is a partitioned table, passed by value, returns its count. Populates .Q.pn cache. 
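For illustration only (added, not from the original page), assuming a partitioned trade table such as the one saved with .Q.dpft further below is loaded:
q).Q.cn trade    / count the partitioned table, populating the cache
q).Q.pn`trade    / per-partition row counts are now cached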
D (partitions)¶ .Q.D In segmented DBs, contains a list of the partitions – conformant to .Q.P – that are present in each segment. .Q.P!.Q.D can be used to create a dictionary of partition-to-segment information. q).Q.P `:../segments/1`:../segments/2`:../segments/3`:../segments/4 q).Q.D 2010.05.26 2010.05.31 ,2010.05.27 2010.05.28 2010.05.30 2010.05.29 2010.05.30 q).Q.P!.Q.D :../segments/1| 2010.05.26 2010.05.31 :../segments/2| ,2010.05.27 :../segments/3| 2010.05.28 2010.05.30 :../segments/4| 2010.05.29 2010.05.30 dd (join symbols)¶ .Q.dd[x;y] Shorthand for ` sv x,`$string y . Useful for creating filepaths, suffixed stock symbols, etc. q).Q.dd[`:dir]`file `:dir/file q){x .Q.dd'key x}`:dir `:dir/file1`:dir/file2 q).Q.dd[`AAPL]"O" `AAPL.O q)update sym:esym .Q.dd'ex from([]esym:`AAPL`IBM;ex:"ON") esym ex sym -------------- AAPL O AAPL.O IBM N IBM.N def (command defaults)¶ Default values and type checks for command-line arguments parsed with .Q.opt .Q.def[x;y] Where x is a dictionary of default parameter names and values, and y is the output of .Q.opt . Types are inferred from the default values provided, which must be an atom type. $ q -abc 123 -xyz 321 q).Q.def[`abc`xyz`efg!(1;2.;`a)].Q.opt .z.x abc| 123 xyz| 321f efg| `a If a command-line value cannot be converted to the data type of the default value, a null is produced $ q -param1 11 -param2 2000.01.01 -param3 wrong q).Q.def[`param1`param2`param3!(1;1999.01.01;23.1)].Q.opt .z.x param1| 11 param2| 2000.01.01 param3| 0n .z.x (argv), .z.X (raw command line), .z.f (file), .z.q (quiet mode), .Q.opt (command parameters), .Q.x (non-command parameters) dpft (save table)¶ dpfts (save table with symtable)¶ dpt (save table unsorted)¶ dpts (save table unsorted with symtable)¶ .Q.dpft[d;p;f;t] .Q.dpfts[d;p;f;t;s] .Q.dpt[d;p;t] .Q.dpts[d;p;t;s] Where d is a directory handlep is a partition of a databasef a field of the table (required to be present in table since 4.1t 2021.09.03) named byt belowt , the name (as a symbol) of a simple table whose columns are vectors or compound listss is the handle of a symtable saves t splayed to partition p . The table cannot be keyed. This would signal an 'unmappable error if there are columns which are not vectors or simple nested columns (e.g. char vectors for each row). It also rearranges the columns of the table so that the column specified by f is second in the table (the first column in the table will be the virtual column determined by the partitioning e.g. date). Returns the table name if successful. q)trade:([]sym:10?`a`b`c;time:.z.T+10*til 10;price:50f+10?50f;size:100*1+10?10) q).Q.dpft[`:db;2007.07.23;`sym;`trade] `trade q)delete trade from `. `. 
q)trade 'trade q)\l db q)trade date sym time price size ----------------------------------------- 2007.07.23 a 11:36:27.972 76.37383 1000 2007.07.23 a 11:36:27.982 77.17908 200 2007.07.23 a 11:36:28.022 75.33075 700 2007.07.23 a 11:36:28.042 58.64531 200 2007.07.23 b 11:36:28.002 87.46781 800 2007.07.23 b 11:36:28.012 85.55088 400 2007.07.23 c 11:36:27.952 78.63043 200 2007.07.23 c 11:36:27.962 90.50059 400 2007.07.23 c 11:36:27.992 73.05742 600 2007.07.23 c 11:36:28.032 90.12859 600 If you are getting an 'unmappable error, you can identify the offending columns and tables: / create 2 example tables q)t:([]a:til 2;b:2#enlist (til 1;10)) / bad table, b is unmappable q)t1:([]a:til 2;b:2#til 1) / good table, b is mappable q)helper:{$[(type x)or not count x;1;t:type first x;all t=type each x;0]}; q)select from (raze {([]table:enlist x;columns:enlist where not helper each flip .Q.en[`:.]`. x)} each tables[]) where 0<count each columns table columns ------------- t b .Q.dpfts allows the enum domain to be specified. Since V3.6 (2018.04.13) q)show t:([]a:10?`a`b`c;b:10?10) a b --- c 8 a 1 b 9 b 5 c 4 a 6 b 6 c 1 b 8 c 5 q).Q.dpfts[`:db;2007.07.23;`a;`t;`mysym] `t q)mysym `c`a`b dsftg (load process save)¶ .Q.dsftg[d;s;f;t;g] Where d is(dst;part;table) wheretable hasM rowss is(src;offset;length) f is fields as a symbol vectort is(types;widths) g is a unary post-processing function loops .Q.M&1000000 rows at a time. .Q.M (chunk size) For example, loading TAQ DVD: q)d:(`:/dst/taq;2000.10.02;`trade) q)s:(`:/src/taq;19;0) / nonpositive length from end q)f:`time`price`size`stop`corr`cond`ex q)t:("iiihhc c";4 4 4 2 2 1 1 1) q)g:{x[`stop]=:240h;@[x;`price;%;1e4]} q).Q.dsftg[d;s;f;t;g] en (enumerate varchar cols)¶ ens (enumerate against domain)¶ .Q.en[dir;table] .Q.ens[dir;table;name] Where dir is a symbol handle to a folder or generic null (:: )table is a tablename is a symbol atom naming a sym file indir When dir is a symbol handle, the function - creates if necessary the folder dir - gets sym fromdir if it exists - enumerates against in-memory sym using the symbols intable - writes sym to file indir - returns table with columns enumerated (for.Q.ens , againstname )Locking ensures two processes do not write to the sym file at the same time The following example uses .Q.en to enumerate to both the in-memory and disksym domain, while saving the table output usingset : Providing a new or updated table against an existingq)t1:([]col1:`a`b`c;col2:1 2 3) q)`:/tmp/db/t1/ set .Q.en[`:/tmp/db;t1]; q)sym / contents of in-memory sym populated from symbols in table `a`b`c q)get `:/tmp/db/sym / on-disk sym same as in-memory sym `a`b`c q)get `:/tmp/db/t1/col1 / col1 enumerated against sym domain `sym$`a`b`c sym domain will read the existing on-disk sym domain before updating. Both the in-memory and on-disk version are updated to reflect the new state. 
Continuing with the same example shows the existingsym domain being altered:q)t2:([]col1:`a`d`e;col2:1 2 3) q)`:/tmp/db/t2/ set .Q.en[`:/tmp/db;t2]; / enumerate additional table against existing sym domain q)sym / in-memory sym now contains additional symbols `a`b`c`d`e q)get `:/tmp/db/sym / on-disk sym same as in-memory sym `a`b`c`d`e When dir is a generic null (since 4.1 2025.01.17), the function - does not read/write/lock the sym file - enumerates against in-memory sym using the symbols intable , for exampleq)t1:([]a:`a`b`c;b:1 2 3) q).Q.en[::;t1]; q)sym `a`b`c q)t2:([]a:`a`d`e;b:1 2 3) q).Q.en[::;t2]; q)sym `a`b`c`d`e on-disk sym files should be kept in sync with in-memory enum domain .Q.ens allows enumeration against domains (and therefore filenames) other than sym . q)([]sym:`mysym$`a`b`c)~.Q.ens[`:db;([]sym:`a`b`c);`mysym] Tables splayed across a directory must be fully enumerated and not keyed. The solution is to enumerate columns of type varchar before saving the table splayed. dsave , Enum Extend, save Enumerating symbol columns in a table Splayed tables Data-management techniques Working with sym files Q for Mortals §14.2.8 Working with sym files f (precision format)¶ .Q.f[x;y] Where x is an int atomy is a numeric atom returns y as a string formatted as a float to x decimal places. Because of the limits of precision in a double, for y above 1e13 or the limit set by \P , formats in scientific notation. q)\P 0 q).Q.f[2;]each 9.996 34.3445 7817047037.90 781704703567.90 -.02 9.996 -0.0001 "10.00" "34.34" "7817047037.90" "781704703567.90" "-0.02" "10.00" "-0.00" The 1e13 limit is dependent on x . The maximum then becomes y*10 xexp x and that value must be less than 1e17 – otherwise you'll see sci notation or overflow. q)10 xlog 0Wj-1 18.964889726830812 .Q.fmt (precision format with length), -27!(x;y) (IEEE754 precision format) \P (precision) fc (parallel on cut)¶ .Q.fc[x;y] Where x is is a unary atomic functiony is a list returns the result of evaluating f vec – using multiple threads if possible. (Since V2.6) q -s 8 q)f:{2 xexp x} q)vec:til 100000 q)\t f vec 12 q)\t .Q.fc[f]vec 6 In this case the overhead of creating threads in peach significantly outweighs the computational benefit of parallel execution. q)\t f peach vec 45 ff (append columns)¶ .Q.ff[x;y] Where x is table to modifyy is a table of columns to add tox and set to null returns x , with all new columns in y , with values in new columns set to null of the appropriate type. If there is a common column in x and y , the column from x is kept (i.e. it will not null any columns that exist in x ). 
q)src:0N!flip`sym`time`price`size!10?'(`3;.z.t;1000f;10000) sym time price size ------------------------------ mil 10:30:32.148 470.7883 6360 igf 00:28:17.727 634.6716 7885 kao 06:52:34.397 967.2398 4503 baf 10:07:47.382 230.6385 4204 kfh 00:45:40.134 949.975 6210 jec 05:12:49.761 439.081 8740 kfm 16:31:50.104 575.9051 8732 lkk 04:54:11.685 591.9004 4756 kfi 13:01:04.698 848.1567 3998 fgl 05:18:45.828 389.056 9342 q).Q.ff[src] enlist `sym`ratioA`ratioB!3#1 sym time price size ratioA ratioB -------------------------------------------- mil 10:30:32.148 470.7883 6360 igf 00:28:17.727 634.6716 7885 kao 06:52:34.397 967.2398 4503 baf 10:07:47.382 230.6385 4204 kfh 00:45:40.134 949.975 6210 jec 05:12:49.761 439.081 8740 kfm 16:31:50.104 575.9051 8732 lkk 04:54:11.685 591.9004 4756 kfi 13:01:04.698 848.1567 3998 fgl 05:18:45.828 389.056 9342 fk (foreign key)¶ .Q.fk x Where x is a table column, returns ` if the column is not a foreign key or `tab if the column is a foreign key into tab . fmt (precision format)¶ .Q.fmt[x;y;z] Where x andy are integer atomsz is a numeric atom returns z as a string of length x , formatted to y decimal places. q).Q.fmt[6;2]each 1 234 " 1.00" "234.00" To format the decimal data in a column to 2 decimal places, change it to string. q)fix:{.Q.fmt'[x+1+count each string floor y;x;y]} q)fix[2]1.2 123 1.23445 -1234578.5522 "1.20" "123.00" "1.23" "-1234578.55" Also handy for columns: q)align:{neg[max count each x]$x} q)align fix[2]1.2 123 1.23445 -1234578.5522 " 1.20" " 123.00" " 1.23" "-1234578.55" Example: persist a table with float values to file as character strings of length 9, e.g. 34.3 to " 34.3" Keep as much precision as possible, i.e. persist 343434.3576 as "343434.36" . q)fmt:{.Q.fmt[x;(count 2_string y-i)&x-1+count string i:"i"$y]y} q)fmt[9] each 34.4 343434.358 " 34.4" "343434.36" .Q.f (precision format), -27!(x;y) (IEEE754 precision format) \P (precision) fpn (pipe streaming)¶ fps (pipe streaming)¶ .Q.fs for pipes .Q.fps[x;y] .Q.fpn[x;y;z] Where x is a unary functiony is a filepath to a fifo (named pipe)z is an integer (Since V3.4) Reads z -sized lumps of complete "\n" delimited records from a pipe and applies a function to each record. This enables you to implement a streaming algorithm for various purposes such as converting a large compressed CSV file into an on-disk kdb+ database without holding the data in memory all at once or using disk space required for the uncompressed file. Streaming data from named pipes .Q.fps is a projection of .Q.fpn with the chunk size set to 131000 bytes. fs (file streaming)¶ fsn (file streaming)¶ .Q.fs[x;y] .Q.fsn[x;y;z] Where x is a unary functiony is a filepathz is an integer loops over file y , grabs z -sized lumps of complete "\n" delimited records, applies x to each record, and returns the size of the file as given by hcount . This enables you to implement a streaming algorithm for various purposes such as converting a large CSV file into an on-disk kdb+ database without holding the data in memory all at once. .Q.fsn is almost identical to .Q.fs but takes an extra argument z , the size in bytes that chunks will be read in. This is particularly useful for balancing load speed and RAM usage. .Q.fs is a projection of .Q.fsn with the chunk size set to 131000 bytes. For example, assume that the file potamus.csv contains the following: Take, a, hippo, to, lunch, today, -1, 1941-12-07 A, man, a, plan, a, hippopotamus, 42, 1952-02-23 If you call .Q.fs on this file with the function 0N! 
, you get the following list of rows: q).Q.fs[0N!]`:potamus.csv ("Take, a, hippo, to, lunch, today, -1, 1941-12-07";"A, man, a,.. 120 .Q.fs can also be used to read the contents of the file into a list of columns. q).Q.fs[{0N!("SSSSSSID";",")0:x}]`:potamus.csv (`Take`A;`a`man;`hippo`a;`to`plan;`lunch`a;`today`hippopotamus;-1 42i;1941.12.. 120 ft (apply simple)¶ .Q.ft[x;y] Where y is a keyed tablex is a unary functionx[t] in whicht is a simple table returns a table with at least as many key columns as t . As an example, note that you can index into a simple table with row indices, but not into a keyed table – for that you should use a select statement. To illustrate the method, the following example shows an indexing function being applied to a keyed table named sp (script sp.q is used to populate the table). q)\l sp.q q)sp 2 3 / index simple table with integer list argument s p qty --------- s1 p3 400 s1 p4 200 q)s 2 3 / index keyed table fails 'length Now create an indexing function, and wrap it in .Q.ft . This works on both types of table: q).Q.ft[{x 2 3};s] s | name status city --| ------------------- s3| blake 30 paris s4| clark 20 london Equivalent select statement: q)select from s where i in 2 3 s | name status city --| ------------------- s3| blake 30 paris s4| clark 20 london fu (apply unique)¶ .Q.fu[x;y] Where x is a unary function and y is - a list, returns x[y] after evaluatingx only on distinct items ofy - not a list, returns x[y] q)vec:100000 ? 30 / long vector with few different values q)f:{exp x*x} / e raised to x*x q)\t:1000 r1:f vec 745 q)\t:1000 r2:.Q.fu[f;vec] 271 q)r1~r2 1b Not suitable for all unary functions .Q.fu applies x to the distinct items of y . Where for any index i , the result of x y i depends on no other item of y , then .Q.fu works as intended. Where this is not so, the result is unlikely to be expected or useful. To explore this, study .Q.fu[avg;] (4 3#12?100)10?4 . gc (garbage collect)¶ .Q.gc[] Run garbage-collection and returns the amount of memory that was returned to the OS. It attempts to coalesce pieces of the heap into their original allocation units and returns any units ≥64MB to the OS. Refer to \g (garbage collection mode) for details on how memory is created on the heap. When secondary threads are configured and .Q.gc[] is invoked in the main thread, .Q.gc[] is automatically invoked in each secondary thread. If the call is instigated in a secondary thread, it affects that thread’s local heap only. Example of garbage collection in the default deferred mode, using .Q.w[] to view memory stats: q)a:til 10000000 / create an object that is ≥64MB q).Q.w[] / view current heap size and how many bytes used of the heap (all objects plus previously allocated object) used| 134589136 heap| 201326592 peak| 201326592 wmax| 0 mmap| 0 mphy| 17179869184 syms| 689 symw| 37406 q).Q.gc[] / garbage collection doesnt return any memory to OS 0 q)delete a from `. / delete the original object, placing it on the heap `. q).Q.w[] / used memory has decreased, heap remains the same used| 371376 heap| 201326592 peak| 201326592 wmax| 0 mmap| 0 mphy| 17179869184 syms| 690 symw| 37436 q).Q.gc[] / garbage collection has returned 134217728 to the OS from the heap 134217728 q).Q.w[] / heap size has reduced, while used memory remains the same used| 371376 heap| 67108864 peak| 201326592 wmax| 0 mmap| 0 mphy| 17179869184 syms| 690 symw| 37436 Depending on your data, memory can become fragmented and therefore difficult to release back to the OS. 
The following demonstrates an example: q).Q.w[] / initial memory stats used| 371360 heap| 67108864 peak| 67108864 wmax| 0 mmap| 0 mphy| 17179869184 syms| 689 symw| 37406 q)v:{(10#"a";10000#"b")}each til 1000000; / create 1000000 rows, each containing 2 elements of 10 chars and 10000 chars q).Q.w[] / both heap and used memory has grown used| 16456760016 heap| 16508780544 peak| 16508780544 wmax| 0 mmap| 0 mphy| 17179869184 syms| 689 symw| 37406 q).Q.gc[] / garbage collection has found no slab of contiguous unused memory of ≥64MB to free 0 q)v:v[;0] / change v to 1000000 rows, each only containing the 1st element of 10 chars (2nd element removed) q).Q.w[] / used memory has decreased, heap remains the same used| 40760016 heap| 16508780544 peak| 16508780544 wmax| 0 mmap| 0 mphy| 17179869184 syms| 690 symw| 37436 q).Q.gc[] / garbage collection has found no contiguous unused memory of ≥64MB to free 0 q)v:-8!v / convert v into its serialised form, return vector used by v to heap q).Q.gc[] / garbage collection now found unused contiguous memory slab ≥64MB to return to OS 16374562816 q)v:-9!v / convert serialised form of v back to its original state q).Q.w[] / used memory remains the same as before, but heap has reduced used| 40760016 heap| 134217728 peak| 16508780544 wmax| 0 mmap| 0 mphy| 17179869184 syms| 690 symw| 37436 If you have nested data, e.g. columns of char vectors, or much grouping, you may be fragmenting memory. Since V3.3 2015.08.23 (Linux only) unused pages in the heap are dropped from RSS during .Q.gc[] . Since 4.1t 2022.07.01, .Q.gc[0] can be used to perform a subset of operations performed by .Q.gc[] (i.e. only return unused blocks >= 64MB to os). This has the advantage of running return faster than .Q.gc[] , but with the disadvantage of not defragmenting unused memory blocks of a smaller size (therefore may not free as much unused memory). .Q.w (memory stats), \g (garbage collection mode), \w (workspace) gz (GZip)¶ .Q.gz[::] / zlib loaded? .Q.gz cbv / unzipped .Q.gz (cl;cbv) / zipped Where cbv is a char vector (or byte vector since 4.1t 2021.09.03,4.0 2021.10.01)cl is compression level [1-9] as a long returns, for - the general null, a boolean atom as whether Zlib is loaded cbv , the inflated (unzipped) vector- a 2-list, the deflated (zipped) vector since V4.0 2020.04.16. q).Q.gz{0N!count x;x}[.Q.gz(9;10000#"helloworld")] 66 "helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhellow.. -18!x (ipc compress bytes) hdpf (save tables)¶ .Q.hdpf[historicalport;directory;partition;`p#field] The function: - saves all tables to disk, by calling .Q.dpft (saves as splayed tables to a partition) - clears in-memory tables - sends reload message to HDB, by opening a temporary connection and sending \l . hg (HTTP get)¶ .Q.hg x Where x is a URL as a symbol atom or (since V3.6 2018.02.10) a string, returns a string for the result of an HTTP[S] GET query. (Since V3.4) q).Q.hg`:http://www.google.com q)count a:.Q.hg`:http:///www.google.com 212 q)show a "<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>4.. q).Q.hg ":http://username:<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="e898899b9b9f879a8ca89f9f9fc68f87878f848dc68b8785">[email protected]</a>" If you have configured SSL/TLS, HTTPS can also be used. 
q).Q.hg ":https://www.google.com" .Q.hg will utilize proxy settings from the environment, lower-case versions taking precedence: | environment variable | use | |---|---| http_proxy , HTTP_PROXY | The URL of the HTTP proxy to use | no_proxy , NO_PROXY | Comma-separated list of domains for which to disable use of proxy | N.B. HTTPS is not supported across proxies which require CONNECT . Since 4.0 2019.10.22, gzip compression is supported. Requests include the HTTP header "Accept-Encoding: gzip". The server then decides whether to gzip the returned payload, which is uncompressed prior to .Q.hg returning. host (IP to hostname)¶ .Q.host x Where x is an IP address as an int atom, returns its hostname as a symbol atom. q).Q.host .Q.addr`localhost `localhost q).Q.addr`localhost 2130706433i q)"I"$"104.130.139.23" 1753385751i q).Q.host "I"$"104.130.139.23" `netbox.com q).Q.addr `netbox.com 1753385751i .Q.addr (IP/host as int), $ tok (IP address as int) hp (HTTP post)¶ .Q.hp[x;y;z] Where x is a URL as a symbol handle or string (since V3.6 2018.02.10)y is a MIME type as a stringz is the POST query as a string Returns a string for the result of an HTTP[S] POST query. (Since V3.4) Uses proxy settings (if defined) and compression handling, as described in hg (HTTP get). q).Q.hp["http://google.com";.h.ty`json]"my question" "<!DOCTYPE html>\n<html lang=en>\n <meta charset=utf-8>\n <meta name=viewpo.. id (sanitize)¶ .Q.id x Where x is - a symbol atom, returns x with items sanitized to valid q namesq).Q.id each `$("ab";"a/b";"two words";"2drifters";"2+2") `ab`ab`twowords`a2drifters`a22 - a table, returns x with column names sanitized by removing characters that interfere withselect/exec/update and adding"1" to column names which clash with commands in the.q namespace. Updated in V3.2 to include.Q.res for checking collisions.q).Q.id flip (5#.Q.res)!(5#()) in1 within1 like1 bin1 binr1 ---------------------------- q).Q.id flip(`$("a";"a/b"))!2#() a ab ---- - a dictionary (since v4.1 2024.09.13), supports the same rules as table aboveq).Q.id (5#.Q.res)!(5#()) abs1 | acos1| asin1| atan1| avg1 | Since 4.1t 2022.03.25,4.0 2022.10.26 produces a symbol a when the input contains a single character that is not in .Q.an (it previously produced an empty sym) e.g. q).Q.id`$"+" a / previous version returned ` Table processing also has additional logic to cater for duplicate column names (names are now appended with 1,2,etc. when matched against previous columns) after applying previously defined rules e.g. q)cols .Q.id(`$("count+";"count*";"count1"))xcol([]1 2;3 4;5 6) `count1`count11`count12 / previous version returned `count1`count1`count1 q)cols .Q.id(`$("aa";"=";"+"))xcol([]1 2;3 4;5 6) `aa`a`a1 / previous version returned `aa`1`1 Since 4.1t 2022.11.01,4.0 2022.10.26, the same rule is applied when the provided name begins with either an underscore or a numerical character. Previously, it could produce an invalid column name. q).Q.id`$"_" `a_ q)cols .Q.id(`$("3aa";"_aa";"_aa"))xcol([]1 2;3 4;5 6) `a3aa`a_aa`a_aa1 ind (partitioned index)¶ .Q.ind[x;y] Where x is a partitioned tabley is a long int vector of row indexes intox returns rows y from x . When picking individual records from an in-memory table you can simply use the special virtual field i : select from table where i<100 But you cannot do that directly for a partitioned table. .Q.ind comes to the rescue here, it takes a table and indexes into the table – and returns the appropriate rows. 
.Q.ind[trade;2 3] A more elaborate example that selects all the rows from a date: q)t:select count i by date from trade q)count .Q.ind[trade;(exec first sum x from t where date<2010.01.07)+til first exec x from t where date=2010.01.07] 28160313 / show that this matches the full select for that date q)(select from trade where date=2010.01.07)~.Q.ind[trade;(exec first sum x from t where date<2010.01.07)+til first exec x from t where date=2010.01.07] 1b Continuous row intervals If you are selecting a continuous row interval, for example if iterating over all rows in a partition, instead of using .Q.ind you might as well use ```q q)select from trade where date=2010.01.07,i within(start;start+chunkSize) ```` j10 (encode binhex)¶ x10 (decode binhex)¶ j12 (encode base-36)¶ x12 (decode base-36)¶ .Q.j10 s .Q.j12 s .Q.x10 s .Q.x12 s Where s is a string, these functions return s encoded (j10 , j12 ) or decoded (x10 , x12 ) against restricted alphabets: …10 en/decodes against the alphabet.Q.b6 , this is a base-64 encoding - see BinHex and Base64 for more details than you ever want to know about which characters are where in the encoding. To keep the resulting number an integer the maximum length ofs is 10.-12 en/decodes against.Q.nA , a base-36 encoding. As the alphabet is smallers can be longer – maximum length 12. The main use of these functions is to encode long alphanumeric identifiers (CUSIP, ORDERID..) so they can be quickly searched – but without filling up the symbol table with vast numbers of single-use values. q).Q.x10 12345 "AAAAAAADA5" q).Q.j10 .Q.x10 12345 12345 q).Q.j10 each .Q.x10 each 12345+1 2 3 12346 12347 12348 q).Q.x12 12345 "0000000009IX" q).Q.j12 .Q.x12 12345 12345 Tip If you don’t need the default alphabets it can be very convenient to change them to have a blank as the first character, allowing the identity 0 <-> " " . If the values are not going to be searched (or will be searched with like ) then keeping them as nested character is probably going to be simpler. K (version date)¶ k (version)¶ .Q.K / version date .Q.k / version Return the interpreter version date (.Q.K ) and number (.Q.k ) for which q.k has been written: checked against .z.K at startup. q).Q.K 2020.10.02 q).Q.k 4f l (load)¶ .Q.l x Where x is a hsym or symbol atom naming a directory in the current directory, loads it recursively as in load , but into the default namespace. (Implements system command \l .) ld (load and group)¶ .Q.ld x Exposes logic used by \l to group script lines for evaluation. Since 4.1t 2022.11.01,4.0 2023.03.28. q).Q.ld read0`:funcs.q 1 2 5 6 "/ multi line func" "f:{\n x+y\n }" "/ single line func" "g:{x*y}" li (load partitions)¶ .Q.li[partitions] In the current hdb, adds any partition(s) which are both in the list supplied and on disk. Partitions can be a list or atomic variable. For example: q)`:/tmp/db/2001.01.01/t/ set tt:.Q.en[`:/tmp/db]([]sym:10?`A`B`C;time:10?.z.T;price:10?10f) q)\l /tmp/db q)`:2001.01.02/t/`:2001.01.03/t/ set\:tt q)date ,2001.01.01 q).Q.li[2001.01.02];date 2001.01.01 2001.01.02 q).Q.li[2001.01.02 2001.01.03];select count i by date from t date | x ----------| -- 2001.01.01| 10 2001.01.02| 10 2001.01.03| 10 Since v4.1 2024.09.20. 
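Returning to the binhex/base-36 encoders (j10/x10, j12/x12) described above, here is a small sketch; the table, column and identifiers are made up for illustration, not taken from the reference. Twelve-character alphanumeric order IDs are held as longs via .Q.j12, so they can be compared as integers and are only decoded back to strings for display.

```q
/ illustrative only: hold 12-char order ids as longs rather than symbols
t:([] oid:.Q.j12 each ("ABC123456789";"XYZ987654321";"ABC123456780"); px:10 20 30f)
select px from t where oid=.Q.j12 "XYZ987654321"   / integer comparison, no symbol-table growth
update oidstr:.Q.x12 each oid from t               / decode only when a readable form is needed
```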
lo (load without)¶ .Q.lo[`:database;cd;scripts] Where database is a hsym or symbol atom (as per parameter to .Q.l)cd is a boolean flag indicating whether to cd to the database dirscripts is a boolean flag indicating whether to execute any scripts in the database dir Load a database without changing directory and/or loading scripts in the database (since 4.1t 2023.03.01). q)\cd "/tmp" q)key`:db/2023.02.01 `s#,`trade q).Q.lo[`:db;0;0] q)trade date sym time price ------------------------------------ 2023.02.01 C 10:15:18.957 6.346716 2023.02.01 B 10:15:18.958 9.672398 2023.02.01 C 10:15:18.959 2.306385 2023.02.01 B 10:15:18.960 9.49975 2023.02.01 A 10:15:18.961 4.39081 q)\cd "/tmp" M (chunk size)¶ .Q.M Chunk size for dsftg (load-process-save). q)0W~.Q.M / defaults to long infinity 1b MAP (maps partitions)¶ .Q.MAP[] Keeps partitions mapped to avoid the overhead of repeated file system calls during a select . (Since V3.1.) For use with partitioned HDBS, used in tandem with \l dir q)\l . q).Q.MAP[] .Q.MAP currently has the following limitations: - .Q.MAP does not work with linked columns - .Q.MAP does not work with virtual partition columns - Use of .Q.MAP with compressed files is not recommended, as the uncompressed maps will be retained in memory You may need to increase the number of available file handles, and also the number of available file maps (for Linux see vm.max_map_count ) Since 4.1t 2024.01.11 parallelized over tables and partitions with peach when kdb+ running with secondary threads. n (nums)¶ nA (alphanums)¶ .Q.n .Q.nA Strings: numerics (.Q.n ) and upper-case alphabet and numerics (.Q.nA ). q).Q.n "0123456789" q).Q.nA "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" .Q.nA is used for base-36 encoding and decoding. opt (command parameters)¶ .Q.opt .z.x Presents command-line arguments as a dictionary, using the output of .z.x . Defaults can be added using .Q.def . $ q -param1 val1 -param2 val2 q)params:.Q.opt .z.x q)show params param1| "val1" param2| "val2" q)params`param1 "val1" Example of a command-line parameter with no value and a parameter with multiple values: $ q -param1 -param2 as asd -param3 q).Q.opt .z.x param1| () param2| ("as";"asd") param3| () .z.x (argv), .z.X (raw command line), .z.f (file), .z.q (quiet mode), .Q.def (command defaults), .Q.x (non-command parameters) P (segments)¶ .Q.P In segmented DBs, returns a list of the segments (i.e. the contents of par.txt ). q).Q.P `:../segments/1`:../segments/2`:../segments/3`:../segments/4 par (get expected partition location)¶ .Q.par[dir;part;table] Where dir is a directory filepathpart is a date returns the expected location of table . (Sensitive to par.txt .) q).Q.par[`:.;2010.02.02;`quote] `:/data/taq/2010.02.02/quote Can assist in checking `p attribute is present on all partitions of a table in an HDB q)all{`p=attr .Q.par[`:.;x;`quote]`sym}each date 1b Does not look into the segment directories. The function calculates only the path, based on the partition and the contents of par.txt in a round-robin fashion. It does not check the contents of the segments to see if the partition is there. See Segmented databases for details. PD (partition locations)¶ .Q.PD In partitioned DBs, a list of partition locations – conformant to .Q.PV – which represents the partition location for each partition. (In non-segmented DBs, this will be simply count[.Q.PV]#`:. .) .Q.PV!.Q.PD can be used to create a dictionary of partition-to-location information. 
q).Q.PV 2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31 q).Q.PD `:../segments/1`:../segments/2`:../segments/3`:../segments/4`:../segments/3`:../segments/4`:../segments/1 q).Q.PV!.Q.PD 2010.05.26| :../segments/1 2010.05.27| :../segments/2 2010.05.28| :../segments/3 2010.05.29| :../segments/4 2010.05.30| :../segments/3 2010.05.30| :../segments/4 2010.05.31| :../segments/1 pd (modified partition locations)¶ .Q.pd In partitioned DBs, .Q.PD as modified by .Q.view . pf (partition field)¶ .Q.pf In partitioned DBs, the partition field. Possible values are `date`month`year`int . pn (partition counts)¶ .Q.pn In partitioned DBs, returns a dictionary of cached partition counts – conformant to .Q.pt , each conformant to .Q.pv – as populated by .Q.cn . Cleared by .Q.view . .Q.pv!flip .Q.pn can be used to create a crosstab of table-to-partition-counts once .Q.pn is fully populated. q)n:100 q)t:([]time:.z.T+til n;sym:n?`2;num:n) q).Q.dpft[`:.;;`sym;`t]each 2010.01.01+til 5 `t`t`t`t`t q)\l . q).Q.pn t| q).Q.cn t 100 100 100 100 100 q).Q.pn t| 100 100 100 100 100 q).Q.pv!flip .Q.pn | t ----------| --- 2010.01.01| 100 2010.01.02| 100 2010.01.03| 100 2010.01.04| 100 2010.01.05| 100 q).Q.view 2#date q).Q.pn t| q).Q.cn t 100 100 q).Q.pn t| 100 100 q).Q.pv!flip .Q.pn | t ----------| --- 2010.01.01| 100 2010.01.02| 100 prf0 (code profiler)¶ .Q.prf0 pid Where pid is a process ID, returns a table representing a snapshot of the call stack at the time of the call in another kdb+ process pid , with columns name assigned name of the function file path to the file containing the definition line line number of the definition col column offset of the definition, 0-based text function definition or source string pos execution position (caret) within text This process must be started from the same binary as the one running .Q.prf0 , otherwise binary mismatch is signalled. Since 4.1t 2022.03.25, .Q.prf0 will not try to stop the process if passed a negative pid . This should be used when a kdb+ process is already stopped under control of something other than .Q.prf0 (for example, in a debugger or a native-code profiler). A negative pid should not be used in a running process. pt (partitioned tables)¶ .Q.pt Returns a list of partitioned tables. pv (modified partition values)¶ .Q.pv A list of the values of the partition domain: the values corresponding to the slice directories actually found in the root. In partitioned DBs, .Q.PV as modified by .Q.view . Q for Mortals §14.5.3 .Q.pv PV (partition values)¶ .Q.PV In partitioned DBs, returns a list of partition values – conformant to .Q.PD – which represents the partition value for each partition. (In a date-partitioned DB, unless the date has been modified by .Q.view , this is simply date.) q).Q.PD `:../segments/1`:../segments/2`:../segments/3`:../segments/4`:../segments/3`:../segments/4`:../segments/1 q).Q.PV 2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31 q)date 2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31 q).Q.view 2010.05.28 2010.05.29 2010.05.30 q)date 2010.05.28 2010.05.29 2010.05.30 2010.05.30 q).Q.PV 2010.05.26 2010.05.27 2010.05.28 2010.05.29 2010.05.30 2010.05.30 2010.05.31 qp (is partitioned)¶ .Q.qp x Where x - is a partitioned table, returns 1b - a splayed table, returns 0b - anything else, returns 0 q)\ B +`time`sym`price`size!`B C +`sym`name!`:C/ \ q).Q.qp B 1b q).Q.qp select from B 0 q).Q.qp C 0b qt (is table)¶ .Q.qt x Where x is a table, returns 1b , else 0b . 
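As a sketch of how these introspection utilities can combine in practice (the helper name and the trade/quote tables are assumptions, not part of the reference), a row-count helper can use .Q.qp to decide whether to sum the cached partition counts from .Q.cn or simply count the table:

```q
/ illustrative helper: total row count for partitioned, splayed or in-memory tables
countRows:{$[1b~.Q.qp t:value x; sum .Q.cn t; count t]}
countRows each `trade`quote   / assumes these tables exist in the current database
```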
res (keywords)¶ .Q.res Returns the control words and keywords as a symbol vector. key `.q returns the functions defined to extend k to the q language. Hence to get the full list of reserved words for the current version: q).Q.res,key`.q `abs`acos`asin`atan`avg`bin`binr`cor`cos`cov`delete`dev`div`do`enlist`exec`ex.. .Q.id (sanitize) s (plain text)¶ .Q.s x Returns x formatted to plain text, as used by the console. Obeys console width and height set by \c . q).Q.s ([h:1 2 3] m: 4 5 6) "h| m\n-| -\n1| 4\n2| 5\n3| 6\n" Occasionally useful for undoing Studio for kdb+ tabular formatting. s1 (string representation)¶ .Q.s1 x Returns a string representation of x . sbt (string backtrace)¶ .Q.sbt x Where x is a backtrace object returns it as a string formatted for display. Since V3.5 2017.03.15. sha1 (SHA-1 encode)¶ .Q.sha1 x Where x is a string, returns as a bytestream its SHA-1 hash. q).Q.sha1"Hello World!" 0x2ef7bde608ce5404e97d5f042f95f89f1c232871 Since V3.6 2018.05.18. t (type letters)¶ .Q.t List of chars indexed by datatype numbers. q).Q.t " bg xhijefcspmdznuvts" q).Q.t?"j" / longs have datatype 7 7 trp (extend trap at)¶ .Q.trp[f;x;g] Where f is a unary functionx is its argumentg is a binary function extends Trap At (@[f;x;g] ) to collect backtrace: g gets called with arguments: - the error string - the backtrace object You can format the backtrace object with .Q.sbt . q)f:{`hello+x} q) / print the formatted backtrace and error string to stderr q).Q.trp[f;2;{2"error: ",x,"\nbacktrace:\n",.Q.sbt y;-1}] error: type backtrace: [2] f:{`hello+x} ^ [1] (.Q.trp) [0] .Q.trp[f;2;{2"error: ",x,"\nbacktrace:\n",.Q.sbt y;-1}] ^ -1 q) .Q.trp can be used for remote debugging. q)h:hopen`::5001 / f is defined on the remote q)h"f `a" 'type / q's IPC protocol can only get the error string back [0] h"f `a" ^ q) / a made up protocol: (0;result) or (1;backtrace string) q)h".z.pg:{.Q.trp[(0;)@value@;x;{(1;.Q.sbt y)}]}" q)h"f 3" 0 / result ,9 9 9 q)h"f `a" 1 / failure " [4] f@:{x*y}\n ^\n [3.. q)1@(h"f `a")1; / output the backtrace string to stdout [4] f@:{x*y} ^ [3] f:{{x*y}[x;3#x]} ^ [2] f `a ^ [1] (.Q.trp) [0] .z.pg:{.Q.trp[(0;)@enlist value@;x;{(1;.Q.sbt y)}]} ^ Since V3.5 2017.03.15. trpd (extend trap)¶ .Q.trpd[f;x;g] Where f is a function of rank- x is an atom or list of count with items in the domains of f - g is a binary function extends Trap (.[f;x;g] ) to collect backtrace: g is called with arguments: - the error string - the backtrace object You can format the backtrace object with .Q.sbt . q).Q.trpd[{x+y};(1;2);{2"error: ",x,"\nbacktrace:\n",.Q.sbt y;-1}] 3 q).Q.trpd[{x+y};(1;`2);{2"error: ",x,"\nbacktrace:\n",.Q.sbt y;-1}] error: type backtrace: [2] {x+y} ^ [1] (.Q.trpd) [0] .Q.trpd[{x+y};(1;`2);{2"error: ",x,"\nbacktrace:\n",.Q.sbt y;-1}] ^ -1 Use .Q.trp as a simpler form of .Q.trpd, for unary values. Since 4.1 2024.03.12. ts (time and space)¶ Apply, with time and space .Q.ts[x;y] Where x and y are valid arguments to Apply returns a 2-item list: - time and space as \ts would - the result of .[x;y] q)\ts .Q.hg `:http://www.google.com 148 131760 q).Q.ts[.Q.hg;enlist`:http://www.google.com] 148 131760 "<!doctype html><html itemscope=\"\" itemtype=\"http://schema.org/WebPa q).Q.ts[+;2 3] 0 80 5 Since V3.6 2018.05.18. 
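Building on .Q.trp and .Q.sbt, the following is a minimal sketch of an error-logging wrapper; the function name and log-file path are assumptions for illustration. It appends the error text and formatted backtrace to a file, then re-signals the error so callers still see the failure.

```q
logtrp:{[f;x]
  .Q.trp[f;x;{[err;bt]
    h:hopen `:q_errors.log;                   / file handle appends text to the log
    h("error: ",err,"\n",.Q.sbt bt,"\n");
    hclose h;
    'err }]}                                  / re-raise the original error
logtrp[{`hello+x};2]                          / would log a 'type error and its backtrace
```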
ty (type)¶ .Q.ty x Where x is a list, returns the type of x as a character code: - lower case for a vector - upper case for a list of uniform type - else blank q)t:([]a:3 4 5;b:"abc";c:(3;"xy";`ab);d:3 2#3 4 5;e:("abc";"de";"fg")) q)t a b c d e ------------------ 3 a 3 3 4 "abc" 4 b "xy" 5 3 "de" 5 c `ab 4 5 "fg" q).Q.ty each t`a`b`c`d`e "jc JC" .Q.ty is a helper function for meta If the argument is a table column, returns upper case for mappable/uniform lists of vectors. u (date based)¶ .Q.u - In segmented DBs, returns 1b if each partition is uniquely found in one segment. (E.g., true if segmenting is date-based, false if name-based.) - In partitioned DBs, returns 1b . V (table to dict)¶ .Q.V x Where x is - a table, returns a dictionary of its column values. - a partitioned table, returns only the last partition (N.B. the partition field values themselves are not restricted to the last partition but include the whole range). v (value)¶ .Q.v x Where x is - a filepath, returns the splayed table stored at x - any other symbol, returns the global named x - anything else, returns x view (subview)¶ .Q.view x Where x is a list of partition values that serves as a filter for all queries against any partitioned table in the database, x is added as a constraint in the first sub-phrase of the where-clause of every query. .Q.view is handy when you are executing queries against partitioned or segmented tables. Recall that multiple tables can share the partitioning. Q.view can guard against runaway queries that ask for all historical data. .Q.view 2#date Since 4.1t 2022.03.25,4.0 2023.05.26 this would signal an invalid partition filter error if partition value(s) resulted in no matches with .Q.PV. .Q.view , also used when loading an hdb, now utilizes threads to load .d files (column names) since 4.1t 2023.04.17. Q for Mortals §14.5.8 Q.view vp (missing partitions)¶ .Q.vp In partitioned DBs, returns a dictionary of table schemas for tables with missing partitions, as populated by .Q.bv . (Since V3.0 2012.01.26.) q)n:100 q)t:([]time:.z.T+til n;sym:n?`2;num:n) q).Q.dpft[`:.;;`sym;`t]each 2010.01.01+til 5 `t`t`t`t`t q)tt:t q).Q.dpft[`:.;;`sym;`tt]last 2010.01.01+til 5 `tt q)\l . q)tt +`sym`time`num!`tt q)@[get;"select from tt";-2@]; / error ./2010.01.01/tt/sym: No such file or directory q).Q.bv[] q).Q.vp tt| +`date`sym`time`num!(`date$();`sym$();`time$();`long$()) q)@[get;"select from tt";-2@]; / no error w (memory stats)¶ .Q.w[] Returns the memory stats from \w into a more readable dictionary. Refer to \w for an explanation of each statistic. q).Q.w[] used| 168304 heap| 67108864 peak| 67108864 wmax| 0 mmap| 0 mphy| 8589934592 syms| 577 symw| 25436 .Q.gc (garbage collect) Command-line parameter -w (workspace memory limit) System command \w (memory stats and workspace memory limit) Xf (create file)¶ Deprecated Deprecated since 4.1t 2022.03.25. Using resulting files could return file format errors since 3.6. .Q.Xf[x;y] Where x is a mapped nested datatype as either an upper-case char atom, or as a short symbol (e.g.`char )y is a filepath creates an empty nested-vector file at y . q).Q.Xf["C";`:emptyNestedCharVector]; q)type get`:emptyNestedCharVector 87h x (non-command parameters)¶ .Q.x Set by .Q.opt : a list of non-command parameters from the command line, where command parameters are prefixed by - . 
$ q taq.k path/to/source path/to/destn
q)cla:.Q.opt .z.X /command-line arguments
q).Q.x
"/Users/me/q/m64/q"
"path/to/source"
"path/to/destn"
.z.x (argv), .z.X (raw command line), .z.f (file), .z.q (quiet mode), .Q.opt (command parameters), .Q.def (command defaults)

Common design principles for kdb+ gateways¶
In the vast majority of kdb+ systems, data is stored across several processes. These setups can range from a single real-time and historic database on the same server to multi-site architectures where data of various forms is stored in hundreds of different processes. In either scenario there is likely to be the same requirement to access data across processes. This is typically achieved using a ‘gateway’ process.
The primary objective of a gateway is to act as a single interface point and separate the end user from the configuration of underlying databases or ‘services’. With this, users do not need to know where data is stored or make multiple requests to retrieve it. An optimal solution serves to assist the user without imposing any unnecessary constraints or loss in performance.
While the implementation of any particular gateway is likely to differ, depending on specific system requirements, there are a number of shared technical challenges and solutions. This paper aims to outline the common design options and examine the advantages and disadvantages associated with each method. In doing so, it seeks to offer a best-practice guide on how to implement an efficient and scalable kdb+ gateway framework. Where appropriate, sample code extracts are included to illustrate techniques.

Gateway design¶
Figure 1 outlines the general principle of how a gateway acts as a single point of contact for a client by collecting data from several underlying services, combining data sets and if necessary performing an aggregation operation before returning the result to the client.
Figure 1: Gateway schematic
Whilst the above diagram covers the principle of all gateways, the specific design of a gateway can vary in a number of ways according to expected use cases. The implementation of a gateway is largely determined by the following factors.
- Number of clients or users
- Number of services and sites
- Requirement of data aggregation
- Level of redundancy and failover
In addition, the extent to which functionality is exposed to the end user can be controlled using one of the following options. The first approach acts as a standard kdb+ process to the user, offering ad-hoc qSQL query access. The specific implementation of this approach is outside the scope of this paper. The second and more common approach offers a number of stored procedures for specific data-retrieval scenarios. This more structured, API form of gateway is generally easier to implement and arguably offers a more robust solution for production kdb+ applications than supporting free-form qSQL-type requests.
Let’s consider a basic example where a user makes a request for trade data for a single stock across yesterday and today. The task of the gateway can be broken down into the following steps.
- Check user entitlements and data-access permissions - Provide access to stored procedures - Gain access to data in the required services - Provide best possible service and query performance As the gateway serves as the sole client interface it is the logical point for entitlement validation. Permissioning is commonly broken into two components; user level access using the .z.pw function, and execution access using either .z.pg or .z.ps . These functions can be customized to control access at several levels including by symbol, table, service or region. Any user requests that fail entitlement checks should be returned to the client with an appropriate message without proceeding any further. At this point it is worth noting that gateways are primarily used for data retrieval and not for applying updates to data. Once the user’s request has been verified, the gateway needs to retrieve the data from the real-time and historic services that store the data for the particular symbol requested. The most basic gateway would create and maintain connections to these services on startup. The gateway would then send the query to these services using a function similar to the sample outlined below, in which we make use of an access function to correctly order the constraint clause of the query. / access function in RDB and HDB / tlb: table to query; sd:start date; ed:end date; ids:list of ids or symbols selectFunc:{[tbl;sd;ed;ids] $[`date in cols tbl; select from tbl where date within (sd;ed),sym in ids; [res:$[.z.D within (sd;ed); select from tbl where sym in ids;0#value tbl]; `date xcols update date:.z.D from res]] } / stored procedure in gateway. / calls access function in RDB/HDB and joins results / sd:start date; ed:end date; ids:list of ids or symbols getTradeData:{[sd;ed;ids] hdb:hdbHandle(`selectFunc;`trade;sd;ed;ids); rdb:rdbHandle(`selectFunc;`trade;sd;ed;ids); hdb,rdb } / client calls stored procedure in gateway gatewayHandle "getTradeData[.z.D-1;.z.D;`ABC.X]" Whilst the above may satisfy a single user setup with a small number of underlying services, it quickly becomes inefficient as the number of processes grows and we look to handle the processing of concurrent requests. As this example uses synchronous messaging between the client and the gateway, the gateway is effectively blocked from receiving any additional user requests until the first completes. Also as the requests made by the gateway to the services are synchronous they can only be made in succession, rather than in parallel. These issues obviously reduce the performance in terms of time taken to complete user requests. By using asynchronous communication it is possible for the gateway to process a second request which may only require a RDB whilst the gateway is idle awaiting the response from a first query which may have only required a HDB. Extending this principle, if we then wanted to be able to process two simultaneous requests requiring the same service, we could start additional services and introduce load balancing between them. These two principles of load balancing and IPC messaging are considered further in the following sections. Load balancing¶ As we scale the number of user requests we will need to scale both the number of gateways, to handle the processing of requests and responses, and the number of underlying services to retrieve the required data. 
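Returning to the entitlement checks described at the start of this example, a minimal sketch of gateway-side permissioning is shown below. The user whitelist, API list and error message are illustrative assumptions, and it assumes clients call the stored procedures functionally, e.g. gatewayHandle(`getTradeData;sd;ed;ids).

```q
allowedUsers:`analyst1`analyst2                     / illustrative whitelist
api:`getTradeData`getQuoteData                      / stored procedures exposed to clients
.z.pw:{[user;pswd] user in allowedUsers}            / logon-level check
.z.pg:{$[(first x) in api; value x; '"request not permitted"]}   / execution-level check for sync calls
```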
The number of gateways required is likely to be determined by the scope of the stored procedures and the intensity of any joining or aggregation of data being performed. In practice the number of gateways required is usually small compared to the number of data services. In the initial example the configuration for all services was loaded into the gateway; however in systems with multiple processes the load balancing of services is typically achieved using a separate standalone process. There are a couple of different ways in which this process can function. As a pass-through¶ In this setup the gateway sends each individual service request to the load-balancer process, which distributes them to the least busy service in its pool of resources and returns the result to the gateway when it is received from the service. Figure 2 shows this flow between processes. Figure 2: Pass-through load balancer schematic The mserve solution written by Arthur Whitney and Simon Garland provides a sample implementation of how a load-balancing process can allocate the query to the least busy service and return the result to the client, which in this case is the gateway. Knowledge Base: A load-balancing kdb+ server In this script the load balancer process determines which service to issue the request to according to which has the minimum number of outstanding requests queued at the time the request is received. Nathan Perrem has also provided an extended version of this solution, which queues outstanding requests in the load-balancer process if all services are busy and processes the queue as services complete their request. This script ensures requests are processed in the order in which they are received and provides support for scenarios where services crash before completing a request. This solution makes it easier to track the status of all incoming queries as well as allowing clients to provide callback functions for results of queries. As a connection manager¶ In this configuration the gateway process makes a request to the load balancer for connection details of one or more services required to process the request. When the gateway receives the connection details it then sends the request directly to the service. In Figure 3, the numbers show the sequence of communications between each of the processes, before the result is returned to the client. Figure 3: Connection manager load balancer schematic This process can act such that it immediately returns the connection details for a service on a simple round-robin basis. Alternatively it can only return connection details to the gateway whenever a particular service is free to receive a query. The key difference with the latter method is that the load-balancing process can be used for thread management. In the basic round-robin approach the load balancer is distributing details based on a simple count of how many requests have been allocated to a particular service within a period of time. The communication between the gateway and the load balancer is only on the initial request for connection details for the service. There is no requirement for the gateway to notify the load balancer when it has received results from the service. A sample extract of the code for this method is shown as follows. 
/ table of services configuration in load balancer t:([] service:`rdb`rdb`hdb`hdb; addr:hsym@/:`$”localhost:”,/:string 5000+til 4; handle:4#0n; counter:4#0) / function in load balancer requestForService:{serv] det:select from t where service=serv,not null handle; res:(det(sum det`counter)mod count det)`addr; update counter:counter+1 from `t where addr=res; res } / gateway request loadBalancerHandle "requestForService[`rdb]" A disadvantage with this method is that an equal distribution of queries of different duration could result in one service being idle having processed its requests, whilst another is still queued. A more intelligent solution involves both communication with the load balancer for the initial connection request and also whenever the service completes a request. The benefit of this is an efficient use of services where there will not be requests queued whenever a suitable server is idle. The additional requirement with this approach is that if all services are currently busy, any subsequent requests need to be maintained in a queue and processed whenever a service becomes available again. The following outlines the functions and callbacks required for this approach. / service table and queue in load balancer t:([] service:`rdb`rdb`hdb`hdb; addr:hsym@/:`$”localhost:”,/:string 5000+til 4; handle:4#0n; inUse:4#0b) serviceQueue:()!() / functions in load balancer requestForService:{[serv] res:exec first addr from t where service=serv,not inUse; $[null res; addRequestToQueue[.z.w;serv]; [update inUse:1b from `t where addr=res; neg[.z.w](`receiveService;res)]]; } addRequestToQueue{[hdl;serv] serviceQueue[serv]::serviceQueue[serv],hdl; } returnOfService:{[ad] update inUse:0b from `t where addr=ad; } / gateway callback and requests receiveService:{[addr]-1”Received Service:”,string[addr]; } loadBalancerHandle(`requestForService;`rdb) loadBalancerHandle(`returnOfService;`:localhost:5000) Whilst both the pass-through and connection-manager methods can incorporate similar logic for service allocation, the pass-through approach has the disadvantage that it adds an additional and unnecessary IPC hop, with the data being returned via the load balancer, whereas with the latter method the data is sent directly from the service to the gateway. For a system with a smaller number of user requests the simpler round-robin approach may be sufficient, however in general the additional benefit of thread management from using the last method offers a more efficient use of services. Synchronous vs asynchronous¶ With each of the communications, outlined in Figure. 3, between the client and the gateway (1,8), gateway and load balancer (2,3), and gateway and service (4-7), we have the option of making synchronous or asynchronous requests. In addition to the standard asynchronous request type we can also make use of blocking-asynchronous requests where the client sends a request asynchronously and blocks until it receives a callback from the server. Which method is used is largely driven by the communication between the client and the gateway. If the client makes a synchronous request to the gateway, both the subsequent requests to the load balancer and the service are required to be synchronous or blocking-asynchronous requests. In this case each gateway is only able to process a single user request at a time. As noted earlier this is an inefficient design as the gateway will largely be idle awaiting responses from the load balancer or service. 
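The two load-balancer extracts above appear to have lost a few characters in extraction: the opening bracket of requestForService's parameter list, the colon after addRequestToQueue, and the straight quotes around "localhost:" (shown as curly quotes). A cleaned-up sketch of the thread-managed variant, reconstructed from the surrounding text rather than quoted verbatim from the paper, would read:

```q
/ service table and request queue in the load balancer
t:([] service:`rdb`rdb`hdb`hdb;
    addr:hsym@/:`$"localhost:",/:string 5000+til 4;
    handle:4#0n;
    inUse:4#0b)
serviceQueue:()!()

requestForService:{[serv]
  res:exec first addr from t where service=serv,not inUse;
  $[null res;
    addRequestToQueue[.z.w;serv];                      / all busy: queue the gateway's handle
    [update inUse:1b from `t where addr=res; neg[.z.w](`receiveService;res)]];
  }

addRequestToQueue:{[hdl;serv] serviceQueue[serv]::serviceQueue[serv],hdl; }

returnOfService:{[ad] update inUse:0b from `t where addr=ad; }
```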
A much more efficient design is where communication between the client and the gateway uses either asynchronous or blocking-asynchronous messaging. With this arrangement the gateway is able to make multiple asynchronous requests to the load balancer or services without having to wait on a response from either. To support multiple concurrent requests the gateway needs to track which processes have been sent a request related to the original client request. This can be easily achieved by tagging each user request with an identifier which is then passed between processes and used when handling callbacks. With the asynchronous method the gateway maintains the state of each request in terms of caching results returned from services and any outstanding service requests. When all results are returned and processed the gateway then invokes a callback in the client with the result set. Data transport and aggregation¶ As a general principle it is more efficient to perform aggregation at the service level instead of pulling large datasets into the gateway process. By doing so we can take full advantage of the map-reduce method built in to kdb+. However there are use cases which may require gateway-level aggregation, for example the correlation of two datasets. In these cases the location of processes is important and can have a significant effect on performance. By positioning the gateway as close as possible to the underlying sources of data, we can reduce any latency caused by unnecessary data transport over IPC. In practice this means it is preferable to have gateways located on the same server as the data services, which perform the aggregation locally and return the much smaller dataset to the remote client. Expanding this principle to a system where there may be multiple distinct zones, for a request requiring data from multiple services in two different zones, it may be quicker to send two separate requests to a gateway in each zone rather than one single request to a single gateway. This approach has the additional overhead of requiring the client to maintain connections to multiple gateways and to know how the data is separated – the very same problem the use of gateways seeks to avoid. Depending on the application and the nature of requests, it may be beneficial to use a tiered gateway setup. In this configuration a client would make a single connection to a primary-level gateway, which would break up the request into sub-requests and distribute to their relevant zones. An example could be the calculation of correlation between two pairs of stocks, were the data for the first pair is split across different HDBs in one zone and the data for the second pair is split between two HDBs in another zone. In this example, each lower level gateway would process a sub-request for the single pair that is stored in that zone. By performing the aggregation close to the data source, the amount of data transport required between zones is reduced and the result can be joined in the primary gateway and returned to the client. Figure 4: Tiered-gateway schematic Resilience¶ To offer the most reliable service to clients it is important to consider the potential for failure within any application. In the context of gateways, failure generally relates to one of the following issues. Service failure¶ Service failure can relate to any reason which prevents a service from being available or functional. In this case we need to have a similar service available which can process the request. 
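Returning to the asynchronous request-tracking described above (each client request tagged with an identifier, asynchronous fan-out to the services, and the client called back once all results have arrived), a rough sketch might look like this. All names are illustrative rather than taken from the paper, the request id is assumed to be assigned by the dispatching code, and cleanup and error handling are omitted.

```q
pending:()!()      / request id -> service handles still to reply
results:()!()      / request id -> partial results received so far
clients:()!()      / request id -> client handle to call back

newRequest:{[rid;svcHandles;query]
  clients[rid]:.z.w; pending[rid]:svcHandles; results[rid]:();
  {neg[x](`runQuery;y;z)}[;rid;query] each svcHandles; }     / asynchronous fan-out

/ each service replies asynchronously with (`svcCallback;rid;its own handle;result)
svcCallback:{[rid;svc;res]
  results[rid],:enlist res;
  pending[rid]:pending[rid] except svc;
  if[0=count pending rid;
    neg[clients rid](`clientCallback;raze results rid)]; }   / reply once all services have answered
```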
Failover can either be incorporated using a hot-hot set up where duplicate processes and data are used on an equal basis, or where redundant processes are used as a secondary alternative. For the majority of systems where failover is provided by hardware of the same specification, the hot-hot setup provides the more efficient use of resources. As the load balancer process acts as a single point of failure between the gateway and service level, each gateway should be configured to use an additional failover load balancer process in the event the primary side is not available. To handle failure of any individual data service each load balancer process should have knowledge of a number of similar processes. Incorporating failover at each level also makes it possible to create maintenance windows whilst providing a continuous client service. Disconnections¶ In addition to service failure which may result in a sustained outage of a particular process or server, there can also be shorter-term disconnect events caused by network problems. This can occur between any of the three connections we looked at earlier, client to gateway, gateway to load balancer and gateway to service. While client-to-gateway disconnects will require failure logic to be implemented on the client side, disconnects between the gateway and the other processes should be handled seamlessly to the client. By using the .z.pc handler, the gateway can recognize when processes with outstanding requests disconnect before returning a result. In these scenarios the gateway can reissue the request to an equivalent process using the failover mentioned above. Code error¶ Whilst every effort can be made to prevent code errors a robust gateway framework should also be designed to handle unexpected errors at each point. Code errors may result in an explicit error being reached, in which case protected evaluation of callbacks between processes should be used so that any error can be returned through the process flow to the client. It is generally safer to handle and return errors than reissue the same request to multiple equivalent services which are likely to return the same error. Some code errors may result in no explicit error being reached, but a failure to return a response to a process which expects it. This point raises the issue of client expectation and whether it may be desirable to terminate requests at the gateway level after a period of time. Service expectation¶ For systems where there may be legitimate requests that take an extended period of time at the service level, compared to an average request, it would obviously not be desirable to terminate the request, particularly when it would not be possible for the gateway to terminate the request at the service level. In these cases it may be preferable to use the query timeout -T parameter selectively at the service level to prevent any individual requests impacting the application. In other scenarios were the quantity of requests may be too large, resulting in long queuing in the load balancer process, it can be desirable to reject the request for a service and return an error to the client. This method can provide stability where it isn’t possible for the underlying services to process the requests at the rate at which they are being made. Data caching¶ In general gateways are used as a means through which underlying data servers are accessed and the gateway isn’t responsible for storing any data itself. 
However, there are two scenarios in which it may be efficient for it to do so. Depending on the nature of the application and the client requests, it may be efficient for the gateway to store aggregated responses from a number of underlying services. In use cases where cached data wont become ‘stale’ and where it is likely that additional users will make similar requests there can be benefit in storing the response rather than going to the service level, reading the same data and performing the aggregation, on each request. What data is cached in the gateway can either be determined by configuration and calculated on startup or driven dynamically by user requests for a particular type of data. It is likely that any such data would come from a HDB as opposed to a RDB where the data is constantly updating. The additional memory overhead in storing the response means this method is only appropriate for highly-aggregated data which is time-consuming to calculate from a number of different sources. For most cases where clients are frequently requesting the same aggregated data it is preferable to have this calculated in a separate service level process which is accessed by the gateway. The second and more common case for storing data in the gateway process is with reference data. Depending on the extent of the stored procedures available this relatively static data can either be applied to incoming requests or to the result set. Storage of reference data at the gateway level removes the need for the same data to be stored by multiple processes at the service level as well as being used to determine which underlying services are required for a particular user request. Conclusion¶ As outlined in the initial overview, the implementation of any particular gateway framework is likely to be different and it is important to consider all potential requirements of an application in its design. In this paper we have addressed the most common technical challenges associated with gateways and looked at the reasoning for selecting various design options. As applications can evolve over time it is also beneficial to maintain usage and performance metrics which can be used to verify framework suitability over time or identify when any changes in design may be required. In summary: - Gateway frameworks should provide a means to load-balance data processes and achieve an efficient use of resources. The use of a separate process is an effective way of centralizing configuration and making it straightforward to scale the number of data services. - Gateways using asynchronous messaging allow simultaneous execution of multiple requests with no unnecessary latency caused by blocking. - The location of gateway processes is important to minimize data transport and use of a tiered gateway arrangement can provide an optimal solution with multi-zoned applications. - Failover procedures for process failures and disconnections are required to create a manageable environment which can provide a continuous and reliable service to clients. All code included is using kdb+ 3.0 (2012.11.12). Author¶ Michael McClintock has worked as consultant on a range of kdb+ applications for hedge funds and leading investment banks. 
Based in New York, Michael has designed and implemented data-capture and analytics platforms across a number of different asset classes.

Linear programming¶
Linear Programming is a large topic, of which this article reviews just a few applications. More articles on it would be very welcome: please contact [email protected].
Iverson Notation and linear algebra
Q is a descendant of the notation devised at Harvard by the Turing Award winner, mathematician Ken Iverson, when he worked with Howard Aiken and Nobel Prize winner Wassily Leontief on the computation of economic input-output tables. At Harvard, Ken Iverson and fellow Turing Award winner Fred Brooks gave the world’s first course in what was then called ‘data processing’. Like other descendants of Iverson Notation (e.g. A+, APL, J), q inherits compact and powerful expression of linear algebra.
Q Math Library: zholos/qml

Problem¶
Given a series of nodes and distances, find the minimum path from each node to get to each other node.

Solution¶
Edsger W. Dijkstra published an optimized solution in 1959 that calculated cumulative minimums. A simple Linear Algebra approach entails producing a ‘path connection matrix’ (square matrix with nodes down rows and across columns) showing the distances, which is typically symmetric. Inner product is used in repeated iterations to enhance the initial matrix to include paths possible through 1 hop (through 1 intermediate node), 2 hops and so forth by repeated calls. The optimal solution (all paths) is found by iterating until no further changes are noted in the matrix (called transitive closure).

Example¶
Here is a simple case for just 6 nodes and the distances between connected nodes.

q)node6:`a`b`c`d`e`f
q)bgn:`a`a`a`b`b`b`b`d`d`e`e`f`f`f
q)end:`b`d`c`a`d`e`f`a`e`d`f`b`c`e
q)far:30 40 80 21 25 16 23 12 30 23 25 17 18 22
q)show dist6:flip `src`dst`dist!(bgn;end;far)
src dst dist
------------
a   b   30
a   d   40
a   c   80
b   a   21
b   d   25
b   e   16
b   f   23
d   a   12
d   e   30
e   d   23
e   f   25
f   b   17
f   c   18
f   e   22

First, transform the above table into a connectivity matrix of path lengths.
Symmetry
In this example a->b can differ from b->a, which is more general than the problem requires, but you could make the matrix symmetric for real distances.
For ‘no connection’ we use infinity, so the inner product of cumulative minimums works properly over the iterations.

q)cm[node6;dist6;`inf]
0  30 80 40 0w 0w
21 0  0w 25 16 23
0w 0w 0  0w 0w 0w
12 0w 0w 0  30 0w
0w 0w 0w 23 0  25
0w 17 18 0w 22 0

cm is a simple function to produce the connectivity matrix.
- cm creates a connectivity matrix from nodes and a distance table.
- Result is a square float matrix where a cell contains distance to travel between nodes.
- An unreachable node is marked with the infinity value for minimum path distance. (Or 0 for credit matrix – see below).
cm:{[n;d;nopath] nn:count n; / number of nodes res:(2#nn)#(0 0w)`zero`inf?nopath; / default whole matrix to nopath ip:flip n?/:d`src`dst; / index pairs res:./[res;ip;:;`float$d`dist]; / set reachable index pairs ./[res;til[nn],'til[nn];:;0f] / zero on diagonal to exclude a node with itself } Assignment with a scattered index The last two lines of cm both use ./ for assignment with a scattered index. The second argument is a list of index pairs – co-ordinates in res . The fourth argument is a corresponding list of values. The third argument is the assignment function. Over for how the iterator / specifies the iteration here. tview adds row and column labels. tview:{[mat] $[(`$nodes:"node",string[count mat])in key `.; nodes:value nodes; nodes:`$string til count mat]; ((1,1+count nodes)#`,nodes),((count[nodes],1)#nodes),'mat } To improve the display of the connection matrix: q)tview cm[node6;dist6;`inf] a b c d e f `a 0f 30f 80f 40f 0w 0w `b 21f 0f 0w 25f 16f 23f `c 0w 0w 0f 0w 0w 0w `d 12f 0w 0w 0f 30f 0w `e 0w 0w 0w 23f 0f 25f `f 0w 17f 18f 0w 22f 0f In the above result note that [a;e] is not directly accessible. So we use a bridge function to jump through one intermediate node and see new paths. q)tview bridge cm[node6;dist6;`inf] a b c d e f `a 0f 30f 80f 40f 46f 53f `b 21f 0f 41f 25f 16f 23f `c 0w 0w 0f 0w 0w 0w `d 12f 42f 92f 0f 30f 55f `e 35f 42f 43f 23f 0f 25f `f 38f 17f 18f 42f 22f 0f We now see a path [a;e] of 46, [a->b(30), then b->e(16)]. After 1 hop we also see path [d;c] of 92, [d->a(12), then a->c(80)]. bridge applies connectivity over each hop by using a Minimum.Sum inner product cumulatively: q)bridge {x & x('[min;+])\: x} So for 2 hops: q)tview bridge bridge cm[node6;dist6;`inf] a b c d e f `a 0f 30f 71f 40f 46f 53f `b 21f 0f 41f 25f 16f 23f `c 0w 0w 0f 0w 0w 0w `d 12f 42f 73f 0f 30f 55f `e 35f 42f 43f 23f 0f 25f `f 38f 17f 18f 42f 22f 0f Note with 2 hops we improve [d;c] to 73 [d->e(30), then e->f(25), then f->c(18)]: For ‘transitive closure’ iterate until no further improvement (i.e. optimal path lengths reached) q)tview (bridge/) cm[node6;dist6;`inf] a b c d e f `a 0f 30f 71f 40f 46f 53f `b 21f 0f 41f 25f 16f 23f `c 0w 0w 0f 0w 0w 0w `d 12f 42f 73f 0f 30f 55f `e 35f 42f 43f 23f 0f 25f `f 38f 17f 18f 42f 22f 0f A larger example was presented in k4 listbox publicly available here: q)\curl -s https://us-east.manta.joyent.com/edgemesh/public/net_dist -o dist q)\l dist `dist q)dist src dst dist ------------ 2 17 139 2 34 131 3 174 150 4 226 171 4 567 13 7 786 130 9 174 112 .. q)node:0N!distinct raze dist`src`dst 2 3 4 7 9 12 13 14 16 17 18 20 21 22 24 26 27 29 31 34 35 37 41 42 43 44 45 4.. Repeating the above process with this node and dist for the optimal solution, also showing calculation time and space (using \ts ): q)\ts opt:(bridge/) cm[node;dist;`inf] 92 1706512 Check node length from node 2 to node 174. q)node?2 174 / Find row, col of node in optimal matrix 0 72 q)opt[0;72] / Cell [0;74] is path length to go from node 2 to node 174 398f q)opt . node?2 174 / Or in one simple step using . index notation 398f This does not get the hops, although the hops could be calculated by ‘capturing’ the intermediate results in the optimal case. 
To do this use bridge\ instead of bridge/ , then count changes between iterations, or just index in to see the path length converge … q)count iters:(bridge\) cm[node;dist;`inf] / Calculate all iterations 5 q)/ It took 5 iterations to find the optimal paths Now we can see how the path length changes during the iterations: here we see it “first converges” to 398 after 1 hop for node [2;174]. q)iters .\: node?2 174 / Index into each iteration to see iterative path improvement 0w 398 398 398 398 Another random path choice for node[2;210] does not converge until after 3 hops, also showing iterative improvement: q)iters .\: node?2 210 / Path improvement for node [2;210] 0w 0w 638 555 555 Related applications of this approach¶ The principle used can be generalized to different inner-product solutions for related problems. The solution above is an instance of generalized inner-product of 2 functions f.g and was an example Ken Iverson often used to demonstrate how Linear Algebra can be applied to real-world problems. The solution may be considered ‘expensive’ on memory and CPU, as it calculates all possible paths, but that is becoming less of an issue. The bridge function above uses the inner product of Minimum.Sum (& and + in q), but variants can be used in similar, related problem domains. Here is a summary of three related use cases, starting with the above minimum-path solution. Minimum distances¶ For minimum distances in a path table (example above), using an inner product of Minimum.Sum , where ‘no path’ is represented by 0w (float infinity) to determine minimums properly. This calculates the minimum of the sums of distances between nodes at each pivot. The bridge function looks like this: bridge:{x & x('[min;+])\: x} Counterparty credit¶ For a counterparty credit-matrix solution, using an inner product of Maximum.Minimum , where no credit is represented by 0 to determine maximums properly. This calculates the maximum of the minimum credit between nodes at each pivot, the bridge function looks like this; bridge:{x | x('[max;&])\: x} This returns the optimal possible credit by allowing credit through intermediate counterparties. For example if A only has credit with B, but B has credit with C, then after 1 hop, A actually has credit with C through B, but capped by the credit path in the same way. A special note here is the simple case where the credit matrix is boolean. The ‘connectivity matrix’ is now a simple yes/no to determine connections e.g. for electrical circuits. Each iteration improves the connections by adding additional 1s into the matrix that are now reachable in successive hops and uses the same bridge algorithm. Matrix multiplication¶ For generalized matrix multiplication, using an inner product of Sum.Times . This calculates the sum of the product between nodes at each pivot, the bridge function looks like this; bridge:{x + x('[sum;*])\: x} Generalization¶ The inner product for the above 3 bridge use cases could be further generalized as projections of a cumulative inner product function. q)cip:{[f;g;z] f[z;] z('[f/;g])\: z} q)bridgeMS:cip[&;+;] / Minimum.Sum (minimum path) q)bridgeCM:cip[|;&;] / Maximum.Minimum (credit matrix) q)bridgeMM:cip[+;*;] / Sum.Times (matrix multiplication) Performance¶ The version of bridge used above shows the Linear Algebra most clearly. It can be further optimized for performance, as shown here for the first case (minimum-path problem). Although all operations are atomic, flipping the argument seems to improve cache efficiency. 
Performance¶

The version of bridge used above shows the Linear Algebra most clearly. It can be further optimized for performance, as shown here for the first case (minimum-path problem). Although all operations are atomic, flipping the argument seems to improve cache efficiency.

bridgef:{x & x('[min;+])/:\: flip x}

The peach keyword can be used to parallelize evaluation.

/ Parallel version (multithreaded run q -s 6)
bridgep: {x & {min each x +\: y}[flip x;] peach x}

The .Q.fc utility uses multi-threading where possible.

/ .Q.fc version
bridgefc:{x & .Q.fc[{{{min x+y}[x] each y}[;y] each x}[;flip x];x]}

A colleague, Ryan Sparks, is presently experimenting with further (significant) performance improvements by using CUDA on a graphics coprocessor for the inner-product function bridge. This work is evolving and looks very promising. I look forward to Ryan presenting a paper and/or presentation on his results when complete, as perhaps a sequel to this article.

Script with examples from this article

Test results¶

Ryan Sparks reports the following test results running V3.5 2017.05.02 using 6 secondary processes:

| function | \ts:1000 20×20 | \ts:100 100×100 | 1000×1000 | 2000×2000 | 4000×4000 |
|---|---|---|---|---|---|
| bridge0 | 178 63,168 | 689 5,330,880 | 6,488 4,112,433,152 | 35,068 32,833,633,920 | untested |
| bridge1 | 296 9,456 | 1,065 159,728 | 2,255 12,337,200 | 11,327 49,249,968 | untested |
| bridge2 | 207 9,008 | 1,249 157,616 | 6,496 12,317,152 | 40,073 49,209,824 | untested |
| bridge3 | 171 63,136 | 683 5,330,848 | 6,292 4,112,433,168 | 32,446 32,833,633,936 | untested |
| bridge4 | 165 6,560 | 182 106,912 | 425 8,225,232 | 5,967 32,834,000 | 48,271 131,203,536 |
| bridge5 | 612 6,656 | 1,823 106,624 | 1,695 8,221,360 | 5,112 32,826,032 | 32,915 131,187,376 |
| bridgejp | 556 6,704 | 1,507 106,672 | 1,330 8,221,360 | 3,904 32,826,032 | 32,402 131,187,376 |
| bridgep | 193 6,560 | 219 106,912 | 429 8,225,184 | 5,922 32,833,952 | 53,890 131,203,488 |
| bridgef | 201 9,392 | 778 159,664 | 2,030 1,233,713 | 10,625 49,249,904 | untested |
| bridgef2 | 546 6,704 | 1,807 106,672 | 1,701 8,221,360 | 5,552 32,826,032 | 31,428 131,187,376 |

bridge0:{x & (&/) each' x+/:\: flip x}
bridge1:{x & x(min@+)/:\: flip x}
bridge2:{x & x((&/)@+)\: x}
bridge3:k){x&&/''x+/:\:+x}
bridge4:k){x&(min'(+x)+\:)':x}
bridge5:k){x&.Q.fc[{(min y+)'x}[+x]';x]}
bridgejp:{x & .Q.fc[{{{min x+y}[x] each y}[;y] each x}[;flip x];x]}
bridgep:{x & {min each x +\: y}[flip x;] peach x}
bridgef:{x & x('[min;+])/:\: flip x}
bridgef2:{x & .Q.fc[{x('[min;+])/:\: y}[;flip x];x]}

Your mileage may vary
As always, optimizations need to be tested on the hardware and data in use.

Acknowledgements¶

My thanks to Nion Chang, Pierre Kovalev, Jonny Press, Ryan Sparks and Stephen Taylor for contributions to this article.

Rob Hodgkinson
[email protected]

mergemode:`part // the partbyattr writedown mode can merge data from temporary storage to the hdb in three ways: // 1. part - the entire partition is merged to the hdb // 2. col - each column in the temporary partitions are merged individually // 3.
hybrid - partitions merged by column or entire partittion based on byte limit mergenumrows:100000 // default number of rows for merge process mergenumtab:`quote`trade!10000 50000 // specify number of rows per table mergenumbytes:500000000 // default partition bytesize for merge limit in merge process (only used when .merge.mergebybytelimit=1b) tpconnsleepintv:10 // number of seconds between attempts to connect to the tp upd:insert // value of the upd function replay:1b // replay the tickerplant log file schema:1b // retrieve schema from tickerplant settimer:0D00:00:10 // timer to check if data needs written to disk partitiontype:`date // set type of partition (defaults to `date, can be `date, `month or `year) getpartition:{@[value;`.wdb.currentpartition;(`date^partitiontype)$.proc.cd[]]} // function to determine the partition value reloadorder:`hdb`rdb // order to reload hdbs and rdbs hdbdir:`:hdb // move wdb database to different location sortcsv:hsym first .proc.getconfigfile"sort.csv" // location of csv file permitreload:1b // enable reload of hdbs/rdbs compression:() // specify the compress level, empty list if no required gc:1b // garbage collect at appropriate points (after each table save and after sorting data) eodwaittime:0D00:00:10.000 // time to wait for async calls to complete at eod tpcheckcycles:0W // number of attempts to connect to tp before process is killed // Server connection details \d .servers STARTUP:1b // create connections CONNECTIONS:`hdb`tickerplant`rdb`gateway`sort // list of connections to make at start up \d .proc loadprocesscode:1b // Whether to load the process specific code defined at ${KDBCODE}/{process type} ================================================================================ FILE: TorQ_tests_bglaunchprocess_settings.q SIZE: 417 characters ================================================================================ /variables and fns to be used during the unit tests: /let the test port_no be 7124 input1:`procname`proctype`U`localtime`p`T`g`w`qcmd`custom`load!("test2";"test";"${KDBAPPCONFIG}/passwords/accesslist.txt";"0";"7124";"180";"1";"1000";"q";"custom_arg";"${TORQHOME}/tests/bglaunchprocess/settings.q"); input2:`procname`proctype`load!("test3";"test";"${TORQHOME}/tests/bglaunchprocess/settings.q"); .servers.startup[]; ================================================================================ FILE: TorQ_tests_chainedtp_database.q SIZE: 142 characters ================================================================================ trade:flip `time`sym`price`size`stop`cond`ex`side!"PSFIBCCS" $\: (); quote:flip `time`sym`bid`ask`bsize`asize`mode`ex`src!"PSFFJJCCS" $\: (); ================================================================================ FILE: TorQ_tests_chainedtp_settings.q SIZE: 603 characters ================================================================================ // IPC connection parameters .servers.CONNECTIONS:`tickerplant`chainedtp`rdb; .servers.USERPASS:`admin:admin; // Test updates testtrade:((5#`GOOG),5?`4;10?100.0;10?100i;10#0b;10?.Q.A;10?.Q.A;10#`buy); testquote:(10?`4;(5?50.0),50+5?50.0;10?100.0;10?100i;10?100i;10?.Q.A;10?.Q.A;10#`3); // Paths to process CSV and test TP log directory processcsv:getenv[`KDBTESTS],"/chainedtp/process.csv"; tptestlogs:getenv[`KDBTESTS],"/chainedtp/tplogs"; // Function projections (using functions from helperfunctions.q) startproc:startorstopproc["start";;processcsv]; stopproc:startorstopproc["stop";;processcsv]; 
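The last two definitions above are q projections: supplying only some arguments of startorstopproc fixes those arguments and yields a new function of the remaining one. A minimal generic sketch of the same idea (illustrative only, not TorQ code):

q)f:{[action;proc;csv] action," ",proc," using ",csv}
q)startproc:f["start";;"process.csv"]   / fix the 1st and 3rd arguments
q)startproc "rdb1"
"start rdb1 using process.csv"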
================================================================================ FILE: TorQ_tests_dataaccess_checkinputs_settings.q SIZE: 702 characters ================================================================================ testpath:hsym`$getenv[`KDBTESTS],"/dataaccess/checkinputs"; processcsv:` sv testpath,`config`process.csv; //- code to pass in a test name //- extract the input parameter from {testname}.csv //- extract the expected error from checkinputerrors.csv //- compare error with expected error checkreturnederror:{[test]errors[test;`error]~@[.dataaccess.checkinputs;gettestparams test;::]}; checkreturnederrorcustom:{[test;param]errors[test;`error]~@[.dataaccess.checkinputs;param;::]}; //- read dictionary of params from csv named according to the test {testname}.csv gettestparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv testpath,`testdata,`$string[test],".csv";"s*"]}; ================================================================================ FILE: TorQ_tests_dataaccess_common_settings.q SIZE: 699 characters ================================================================================ testpath:hsym`$getenv[`KDBTESTS],"/dataaccess/common"; processcsv:` sv testpath,`config`process.csv; //- code to pass in a test name //- extract the input parameter from {testname}.csv //- extract the expected error from checkinputerrors.csv //- compare error with expected error checkreturnederror:{[test]errors[test;`error]~@[.checkinputs.checkinputs;gettestparams test;::]}; checkreturnederrorcustom:{[test;param]errors[test;`error]~@[.checkinputs.checkinputs;param;::]}; //- read dictionary of params from csv named according to the test {testname}.csv gettestparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv testpath,`testdata,`$string[test],".csv";"s*"]}; ================================================================================ FILE: TorQ_tests_dataaccess_extractqueryparam_settings.q SIZE: 750 characters ================================================================================ inputpath:hsym`$getenv[`KDBTESTS],"/dataaccess/extractqueryparam/input"; outputpath:hsym`$getenv[`KDBTESTS],"/dataaccess/extractqueryparam/output"; processcsv:hsym`$getenv[`KDBTESTS],"/dataaccess/extractqueryparam/`config`process.csv"; //- code to pass in a test name //- extract data from the input and output directories //- compare function output with expected output getinputparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv inputpath,`$string[test],".csv";"s*"]}; getoutputparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv outputpath,`$string[test],".csv";"s*"]}; testfunction:{[test] getoutputparams[test]~.eqp.extractqueryparams[getinputparams[test];.eqp.queryparams]}; ================================================================================ FILE: TorQ_tests_dataaccess_gwquerytest_settings.q SIZE: 283 characters ================================================================================ // IPC connection parameters .servers.CONNECTIONS:`gateway; .servers.USERPASS:`admin:admin; testpath:hsym`$getenv[`KDBTESTS],"/dataaccess/gwquerytest"; sublistvalue:2; getdict:{exec parameter!get each parametervalue from (("s*";1#",")0: ` sv testpath,`inputs,`$string[x],".csv")} ================================================================================ FILE: TorQ_tests_dataaccess_mockdata.q SIZE: 1,689 characters 
================================================================================ params:([proctype:`hdb`rdb] func:`generatehdb`generaterdb; partitiontype:`date`date; hdbname:`hdb`; n:5 5; tablename:`xdaily`xdaily; nrecord:10 10 );</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="75"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">/ Deletes the specified object reference from the namespace. If the reference deleted is the last object in the / namespace then the namespace is removed as well recursively up the namespace tree. / NOTE: The namespace hierarchy removal will never remove the root namespace even if it is empty / @param nsRef (Symbol) The object reference to remove from the namespace .ns.deleteReference:{[nsRef] if[not .ns.isSet nsRef; :(::); ]; refSplit:`ns`ref!(-1_; last) @\: ` vs nsRef; refSplit[`ns]:`.^$[0 = count refSplit`ns; `; ` sv refSplit`ns]; ![refSplit`ns; (); 0b; enlist refSplit`ref]; if[.type.isEmptyNamespace get refSplit`ns; .z.s refSplit`ns; ]; }; / @returns (Symbol) A symbol reference to the function that called the function which called this function or 'anon-func' if an anonymous inner function .ns.getFunctionCaller:{ bt:.Q.btx .Q.Ll `; caller:first bt[2][1]; $[("q";`) ~ caller; caller:`$"q-prompt"; 0 = count caller; caller:`$"anon-func"; / else caller:`$first caller ]; :caller; }; / @param func (Symbol|Function) A reference to a function or an actual function / @returns (Function) Resolves the function reference such that a function is always returned / @throws FunctionDoesNotExistException If the reference does not exist / @throws NotAFunctionException If the input value is not a function or the reference does not reference a function .ns.i.getFunction:{[func] $[.type.isFunction func; :func; not .type.isSymbol func; '"NotAFunctionException"; not .ns.isSet func; '"FunctionDoesNotExistException (",string[func],")" ]; func:get func; if[not .type.isFunction func; '"NotAFunctionException"; ]; :func; }; ================================================================================ FILE: kdb-common_src_os.q SIZE: 9,151 characters ================================================================================ // Operating System Specific Functionality // Copyright (c) 2017 - 2018 Sport Trades Ltd // Documentation: https://github.com/BuaBook/kdb-common/wiki/os.q .require.lib each `util`type; / The separator characters for PATH-type environment variables in all configured OSs .os.cfg.envPathSeparator:(`symbol$())!`char$(); .os.cfg.envPathSeparator[`l`v`m]:":"; .os.cfg.envPathSeparator[`w]:";"; / The PATH-type environment variable for shared object / DLL loading for all configured OSs .os.cfg.sharedObjectEnvVar:(`symbol$())!`symbol$(); .os.cfg.sharedObjectEnvVar[`l`v]:`LD_LIBRARY_PATH; .os.cfg.sharedObjectEnvVar[`m]:`DYLD_LIBRARY_PATH; .os.cfg.sharedObjectEnvVar[`w]:`PATH; / The current operating system, independent of architecture / @see .os.i.getOsType .os.type:`; / The separator character for PATH-type environment variables in the current OS .os.envPathSeparator:" "; / The environment variable containing the PATH-type environment variable for shared object / DLL loading .os.sharedObjectEnvVar:`; .os.init:{ .os.type:.os.i.getOsType[]; .os.envPathSeparator:.os.cfg.envPathSeparator .os.type; 
.os.sharedObjectEnvVar:.os.cfg.sharedObjectEnvVar .os.type; }; / Runs the specified command with the specified parameters. NOTE: That not / every command has the equivalent parameters in each Operating System environment. / @param cmd (Symbol) The OS command to run / @param paramStr (String) The list of parameters to pass to the command / @throws UnsupportedOsCommandException If the command specified is not supported on this OS / @throws IllegalArgumentException If the parameter argument is not a string .os.run:{[cmd;paramStr] if[not cmd in .os.availableCommands[]; '"UnsupportedOsCommandException (",string[cmd],")"; ]; if[not[.util.isEmpty paramStr] & not .type.isString paramStr; '"IllegalArgumentException"; ]; :.util.system .os[.os.type][cmd] paramStr; }; / @returns (SymbolList) All the available commands in the current operating system .os.availableCommands:{ :key 1_ .os .os.type; }; / @returns (Boolean) True if the PID is valid and a process exists on the current server that matches it. Otherwise returns false .os.isProcessAlive:{[pid] osCheck:first .os.run[`pidCheck; string pid]; if[`w=.os.type; :osCheck like "*",string[pid],"*"; ]; :not "B"$osCheck; }; / @returns (String) Current terminal window size in system "c" format - "*lines* *columns*" .os.getTerminalSize:{ rawTermSize:trim .os.run[`terminalSize; ""]; termSize:""; $[.os.type in `l`m; termSize:" " vs first rawTermSize; `w = .os.type; termSize:trim last each ":" vs/: rawTermSize raze where each rawTermSize like/: ("Lines:*"; "Columns:*") ]; :" " sv termSize; }; / @returns (Boolean) True if the kdb process is running in an interactive session, false otherwise .os.isInteractiveSession:{ interactRes:.os.run[`isInteractive; ::]; if[.os.type in `l`m; :not "B"$first interactRes; ]; }; / @returns (Symbol) OS independent process architecture .os.getProcessArchitecture:{ bits:"I"$-2#string .z.o; $[32=bits; :`x86; 64=bits; :`x86_64; / else '"UnsupportedProcessArchitectureException" ]; }; / @param path (FilePath|FolderPath) The path to dereference / @returns (FilePath|FolderPath) The 'real' path of specified path, removing sym links .os.dereferencePath:{[path] :hsym `$first .os.run[`readlink; 1_ string path]; }; .os.i.getOsType:{ :`$first string .z.o; }; .os.i.convertPathForWindows:{[path] :ssr[path;"/";"\\"]; }; // Windows Implementation .os.w.mkdir:{ :"mkdir ",.os.i.convertPathForWindows x; }; .os.w.rmdir:{ :"rmdir ",.os.i.convertPathForWindows x; }; .os.w.pwd:{ :"echo %cd%"; }; .os.w.rm:{ :"del ",.os.i.convertPathForWindows x; }; .os.w.rmF:{ :"del /F /Q ",.os.i.convertPathForWindows x; }; .os.w.pidCheck:{ :"tasklist /FI \"PID eq ",x,"\" /FO CSV /NH"; }; .os.w.sigterm:{ :"taskkill /PID ",x; }; .os.w.sigkill:{ :"taskkill /PID ",x," /F"; }; .os.w.sleep:{ :"timeout /t ",x," /nobreak >nul"; }; / ln requires 2 arguments so pass string separated by "|" / First argument should be the target, 2nd argument should be the source .os.w.ln:{ args:"|" vs x; :"mklink ",.os.i.convertPathForWindows[args 0]," ",.os.i.convertPathForWindows args 1; }; / mv requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.w.mv:{ args:"|" vs x; :"move ",.os.i.convertPathForWindows[args 0]," ",.os.i.convertPathForWindows args 1; }; / cp requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.w.cp:{ args:"|" vs x; :"copy ",.os.i.convertPathForWindows[args 0]," ",.os.i.convertPathForWindows args 1; }; .os.w.rmFolder:{ :"rd /S 
/Q ",.os.i.convertPathForWindows x; }; .os.w.rmFolder:{ :"rd /S /Q ",.os.i.convertPathForWindows x; }; .os.w.tail:{ :"type ",.os.i.convertPathForWindows x; }; .os.w.safeRmFolder:{ :"rmdir ",.os.i.convertPathForWindows x; }; .os.w.procCount:{ :"echo %NUMBER_OF_PROCESSORS%"; }; .os.w.which:{ :"where ",x; }; .os.w.ver:{ :"ver"; }; / cp requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.w.cpFolder:{ args:"|" vs x; :"xcopy /e /y ",.os.i.convertPathForWindows[args 0]," ",.os.i.convertPathForWindows args 1; }; .os.w.terminalSize:{ :"mode con"; }; // Linux Implementation .os.l.mkdir:{ :"mkdir -p ",x; }; .os.l.rmdir:{ :"rmdir ",x; }; .os.l.pwd:{ :"pwd"; }; .os.l.rm:{ :"rm -v ",x; }; .os.l.rmF:{ :"rm -vf ",x; }; .os.l.pidCheck:{ :"kill -n 0 ",x," 2>/dev/null; echo $?"; }; .os.l.sigint:{ :"kill -s INT ",x; }; .os.l.sigterm:{ :"kill -s TERM ",x; }; .os.l.sigkill:{ :"kill -s KILL ",x; }; .os.l.sleep:{ :"sleep ",x; }; / ln requires 2 arguments so pass string separated by "|" / First argument should be the target, 2nd argument should be the source .os.l.ln:{ args:"|" vs x; :"ln -s ",args[1]," ",args 0; }; / mv requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.l.mv:{ args:"|" vs x; :"mv ",args[0]," ",args 1; }; / cp requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.l.cp:{ args:"|" vs x; :"cp ",args[0]," ",args 1; }; .os.l.rmFolder:{ :"rm -rvf ",x; }; .os.l.tail:{ :"tail -n 30 ",x; }; .os.l.safeRmFolder:{ :"rmdir ",x; }; .os.l.procCount:{ :"getconf _NPROCESSORS_ONLN"; }; .os.l.which:{ :"which ",x; }; .os.l.ver:{ :"cat /etc/system-release"; }; .os.l.cpuAssign:{ :"taskset -cp ",x; }; / cp requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.l.cpFolder:{ args:"|" vs x; :"cp -rv ",args[0]," ",args 1; }; .os.l.terminalSize:{ :"stty size"; }; / 'tty' exits 9 if there is a TTY attached, 1 otherwise .os.l.isInteractive:{ :"tty --quiet; echo $?"; }; .os.l.shell:{ :"bash -c \"",x,"\""; }; .os.l.readlink:{ :"readlink -f ",x; }; // Mac OSX Implementation .os.m.mkdir:{ :"mkdir -p ",x; }; .os.m.rmdir:{ :"rmdir ",x; }; .os.m.pwd:{ :"pwd"; }; .os.m.rm:{ :"rm -v ",x; }; .os.m.rmF:{ :"rm -vf ",x; }; .os.m.pidCheck:{ :"kill -n 0 ",x," 2>/dev/null; echo $?"; }; .os.m.sigint:{ :"kill -s INT ",x; }; .os.m.sigterm:{ :"kill -s TERM ",x; }; .os.m.sigkill:{ :"kill -s KILL ",x; }; .os.m.sleep:{ :"sleep ",x; }; / ln requires 2 arguments so pass string separated by "|" / First argument should be the target, 2nd argument should be the source .os.m.ln:{ args:"|" vs x; :"ln -s ",args[1]," ",args 0; }; / mv requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.m.mv:{ args:"|" vs x; :"mv ",args[0]," ",args 1; }; / cp requires 2 arguments so pass string separated by "|" / First argument should be the source, 2nd argument should be the target .os.m.cp:{ args:"|" vs x; :"cp ",args[0]," ",args 1; }; .os.m.rmFolder:{ :"rm -rvf ",x; };</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="76"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> 
<div> <span class="block ">// @private // @kind function // @category nlpTimeUtility // @desc Seperate YearMonth formats to year and month // i.e "ymd" -> "y","m","d" // @params ymd {string[]} The format for each date objecct // @returns {string} Formats of YearMonthDays objects seperated tm.i.formatYMD:{[ymd] @[ymd;i unq;:;"ymd" unq:where 1=count each i:where each "ymd" in/:\:ymd] } // @private // @kind function // @category nlpTimeUtility // @desc Fill in the blanks in a date format string // @param format {string} A date format, as some permutation of // "d", "m", and "y" // @returns {string} The date format with any blanks filled with their most // plausible value tm.i.resolveFormat:{[format] $[0=n:sum" "=format; ; 1=n; ssr[;" ";first"ymd"except format]; 2=n; tm.i.dateFormats; {"dmy"} ]format } // @private // @kind dictionary // @category nlpTimeUtility // @desc The format to use, given a single known position // @type dictionary tm.i.dateFormats:(!). flip( ("d ";"dmy"); // 10th 02 99 ("m ";"mdy"); // Feb 10 99 ("y ";"ymd"); // 1999 02 10 (" d ";"mdy"); // 02 10th 99 (" m ";"dmy"); // 10 Feb 99 (" y ";"dym"); // 10 1999 02 This is never conventionally used (" d";"ymd"); // 99 02 10th (" m";"ydm"); // 99 10 Feb This is never conventionally used (" y";"dmy")) // 10 02 1999 //mdy is the american option // @private // @kind function // @category nlpTimeUtility // @desc Turns a regex time string into a q timestamp // i.e "131030" -> 13:10:30.000 // "1pm" -> 13:00:00.000 // @param text {string} A time string // @returns {timestamp} The q time parsed from an // appropriate string tm.i.parseTime:{[text] numText:vs[" ";text][0]in"1234567890:."; time:"T"$text where numText; amPM:regex.i.check[;text]each regex.objects`am`pm; time+$[amPM[0]&12=`hh$time;-1;amPM[1]&12>`hh$time;1;0]*12:00 } // @private // @kind function // @category nlpTimeUtility // @desc Remove any null values // @array {number[][]} Array of values // returns {number[][]} Array with nulls removed tm.i.rmNull:{[array] array where not null array[;0] } ================================================================================ FILE: ml_nlp_code_email.q SIZE: 7,598 characters ================================================================================ // code/email.q - Nlp email utilities // Copyright (c) 2021 Kx Systems Inc // // Utilities for handling emails \d .nlp // @private // @kind function // @category nlpEmailUtility // @desc Rich Text Format (RTF) parsing function imported from python email.i.striprtf:.p.get[`striprtf;<] // @private // @kind function // @category nlpEmailUtility // @desc Extract information from various message text types // @params textTyp {string} The format of the message text // @param msg {string|dictionary} An email message, or email subtree // @returns {boolean} Whether or not msg fits the text type criteria email.i.findMime:{[textTyp;msg] msgDict:99=type each msg`payload; contentTyp:textTyp~/:msg`contentType; attachment:0b~'msg[`payload]@'`attachment; all(msgDict;contentTyp;attachment) } // @private // @kind function // @category nlpEmailUtility // @desc Use beautiful soup to extract text from a html file // @param msg {string} The message payload // @returns {string} The text from the html email.i.html2text:{[msg] cstring email.i.bs[pydstr msg;`html.parser][`:get_text;pydstr "\\n"]` } // @private // @kind function // @category nlpEmailUtility // @desc Given an email, extract the text of the email // @param msg {string|dictionary} An email message, or email subtree // @returns {string} The 
text of the email, or email subtree email.i.extractText:{[msg] // String is actual text, bytes attachment or non text mime type like inline // image, dict look at content element msgType:type msg; if[10=msgType;:msg]; if[4=msgType;:""]; if[99=msgType;:.z.s msg`content]; findMime:email.i.findMime[;msg]; text:$[count i:where findMime["text/plain"]; {x[y][`payload]`content}[msg]each i; count i:where findMime["text/html"]; {email.i.html2text x[y][`payload]`content}[msg]each i; count i:where findMime["application/rtf"]; // Use python script to extract text from rtf {email.i.striprtf x[y][`payload]`content}[msg]each i; .z.s each msg`payload ]; "\n\n"sv text } // @private // @kind function // @category nlpEmailUtility // @desc Get all the to/from pairs from an email // @param msg {dictionary} An email message, or subtree thereof // @returns {any[]} To/from pairings of an email email.i.getToFrom:{[msg] payload:msg`payload; payload:$[98=type payload;raze .z.s each payload;()]; edges:(msg[`sender;0;1];)each msg[`to;;1]; edges,payload } // @private // @kind function // @category nlpEmailUtility // @desc Extract the sender information from an email // @param emails {<} The email as an embedPy object // @returns {string[]} Sender name and email email.i.getSender:{[emails] fromInfo:raze emails[`:get_all;<]each pydstr each ("from";"resent-from"); cstring email.i.getAddr fromInfo where not(::)~'fromInfo } // @private // @kind function // @category nlpEmailUtility // @desc Extract the receiver information from an email // @param emails {<} The email as an embedPy object // @returns {string[]} Reciever name and email email.i.getTo:{[emails] toInfo:raze emails[`:get_all;<]each pydstr each ("to";"cc";"resent-to";"resent-cc"); cstring email.i.getAddr toInfo where not any(::;"")~/:\:toInfo } // @private // @kind function // @category nlpEmailUtility // @desc Extract the date information from an email // @param emails {<} The email as an embedPy object // @returns {timestamp} Date email was sent email.i.getDate:{[emails] dates:string 6#email.i.parseDate emails[@;`date]; "P"$"D"sv".:"sv'3 cut{$[1=count x;"0";""],x}each dates } // @private // @kind function // @category nlpEmailUtility // @desc Extract the subject information from an email // @param emails {<} The email as an embedPy object // @returns {string} Subject of the email email.i.getSubject:{[emails] subject:emails[@;`subject]; $[(::)~subject`; ""; cstring email.i.makeHdr[email.i.decodeHdr subject][`:__str__][]` ] } // @private // @kind function // @category nlpEmailUtility // @desc Extract the content type of an email // @param emails {<} The email as an embedPy object // @returns {string} Content type of an email email.i.getContentType:{[emails] cstring emails[`:get_content_type][]` } // @private // @kind function // @category nlpEmailUtility // @desc Extract the payload information from an email // @param emails {<} The email as an embedPy object // @returns {dictionary|table} Dictionary of `attachment`content or a table // of payloads // Content is byte[] for binary data, char[] for text email.i.getPayload:{[emails] if[emails[`:is_multipart][]`; :email.i.parseMbox1 each emails[`:get_payload][]` ]; // Raw bytes decoded from base64 encoding, wrapped embedPy raw:emails[`:get_payload;`decode pykw 1]; rtf:"application/rtf"~cstring email.i.getContentType emails; attachment:"attachment"~cstring emails[`:get_content_disposition][]`; payload:`attachment`content!(0b;raw`); if[all(rtf;attachment);:payload]; if[attachment; 
payload,`attachment`filename!(1b;cstring email[`:get_filename][]`); ]; content:cstring email.i.getContentType emails; if[not any content~/:("text/html";"text/plain";"message/rfc822");:payload]; charset:cstring emails[`:get_content_charset][]`; content:cstring i.str[raw;pydstr $[(::)~charset;"us-ascii";charset];`ignore]`; `attachment`content!(0b;content) } // @private // @kind function // @category nlpEmailUtility // @desc Extract meta information from an email // @params filepath {string} The path to the mbox // @returns {dictionary} Meta information from the email email.i.parseMbox:{[filepath] mbox:email.i.mbox pydstr filepath; email.i.parseMbox1 each flip[mbox[`:items;<][]]1 } // @private // @kind function // @category nlpEmailUtility // @desc Extract meta information from an email // @params mbox {<} Emails in mbox format // @returns {dictionary} Meta information from the email email.i.parseMbox1:{[mbox] columns:`sender`to`date`subject`contentType`payload; msgInfo:`getSender`getTo`getDate`getSubject`getContentType`getPayload; columns!email.i[msgInfo]@\:.p.wrap mbox } // Python imports email.i.bs:.p.import[`bs4]`:BeautifulSoup email.i.getAddr:.p.import[`email.utils;`:getaddresses;<] email.i.parseDate:.p.import[`email.utils;`:parsedate;<] email.i.decodeHdr:.p.import[`email.header;`:decode_header] email.i.makeHdr:.p.import[`email.header;`:make_header] email.i.msgFromString:.p.import[`email]`:message_from_string email.i.mbox:.p.import[`mailbox]`:mbox // @kind function // @category nlpEmail // @desc Convert an mbox file to a table of parsed metadata // @param filepath {string} The path to the mbox file // @returns {table} Parsed metadata and content of the mbox file email.loadEmails:{[filepath] parseMbox:email.i.parseMbox filepath; update text:.nlp.email.i.extractText each payload from parseMbox } // @kind function // @category nlpEmail // @desc Get the graph of who emailed who, including the number of // times they emailed // @param emails {table} The result of .nlp.loadEmails // @returns {table} Defines to-from pairings of emails email.getGraph:{[emails] getToFrom:flip csym raze email.i.getToFrom each emails; getToFromTab:flip`sender`to!getToFrom; 0!`volume xdesc select volume:count i by sender,to from getToFromTab } // @kind function // @category nlpEmailUtility // @desc Extract meta information from an email // @params content {string} Email content as string // @returns {dictionary} Meta information from the email email.parseMail:{[content] email.i.parseMbox1 email.i.msgFromString[pydstr content]`. 
}

================================================================================ FILE: ml_nlp_code_nlpCode.q SIZE: 16,644 characters ================================================================================

// code/nlpCode.q - NLP code
// Copyright (c) 2021 Kx Systems Inc
//
// Main NLP code base

\d .nlp

// Date-Time

// @kind function
// @category nlp
// @desc Find any times in a string
// @param text {string} A text, potentially containing many times
// @returns {any[]} A list of tuples for each time containing
//   (q-time; timeText; startIndex; 1+endIndex)
findTimes:{[text]
  timeText:regex.matchAll[regex.objects.time;text];
  parseTime:tm.i.parseTime each timeText[;0];
  time:parseTime,'timeText;
  time where time[;0]<24:01
  }

// @kind function
// @category nlp
// @desc Find all the dates in a document
// @param text {string} A text, potentially containing many dates
// @returns {any[]} A list of tuples for each time containing
//   (startDate; endDate; dateText; startIndex; 1+endIndex)
findDates:{[text]
  ym:regex.matchAll[regex.objects.yearMonth;text];
  ymd:regex.matchAll[regex.objects.yearMonthDay;text];
  convYMD:tm.i.convYearMonthDay each ymd[;0];
  dates:tm.i.rmNull convYMD,'ymd;
  if[count dates;ym@:where not any ym[;1] within/: dates[; 3 4]];
  convYM:tm.i.convYearMonth each ym[;0];
  dates,:tm.i.rmNull convYM,'ym;
  dates iasc dates[;3]
  }

// Parsing function

Running kdb+ as a service on Windows¶

Windows 7+ provides a task scheduler tool which can be used to run kdb+ processes as services. To schedule a kdb+ process to start on startup:

- Press the Windows key + R to open Run.
- Type taskschd.msc and press Enter.
- Under Actions select Create Task.
- Enter a name and description for your task. Select the user under which the task should be run, and if the user should be logged on for the task to run. If you would like the process to be hidden, select the hidden option.
- Add a trigger to start the task on system startup:
    - First click New under the triggers tab.
    - Select At startup under Begin the task.
- Select the actions the task should take:
    - Click New under the Actions tab.
    - Select q.exe with any command-line arguments and the folder which the process should start in.
- Set any required conditions.
- Configure any required settings.
- When the task is complete, press OK.

Output redirect¶

If you need to redirect output you must modify the action using the following configuration. The arguments should be

/c C:\q\w64\q.exe -p 5010 -q >C:\q\logs\q.5010.log

Note the directory for logs must exist.

Multiple processes¶

If you want to set up several instances, the steps required are slightly different. Rather than configure the task to run q.exe, instead create a .bat file to start multiple kdb+ processes in the background and run this from the task.

Example:

start "q5010" /B cmd.exe /c C:\q\w64\q.exe -p 5010 -q >C:\q\logs\q.5010.log
start "q5011" /B q testScript.q -q

In this way you can set up on Windows a complete kdb+ system that starts all the required processes in the correct order.
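Once the scheduled task has fired, a quick sanity check from any other q session is to open an IPC handle to the port configured above (5010 in this example) and query the process. This snippet is illustrative only and assumes the service is reachable on localhost:

q)h:hopen `::5010   / connect to the scheduled kdb+ process
q)h".z.i"           / returns the remote PID if the service is up
q)hclose h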
/ https://lintool.github.io/Cloud9/docs/exercises/pagerank.html
node:asc distinct raze cloud9.l
l:node?cloud9.l
show S:(1 2#1+max over l), .ml.prepend[1f] l
show node[i]!r i:idesc r:.ml.pageranks[d;S] over r:n#1f%n:S[0;0]
-1 "into a full matrix";
show A:.ml.full S
show node[i]!r i:idesc r:.ml.pageranka[d;A]
show node[i]!r i:idesc r:.ml.pageranki[d;A] over r:n#1f%n:count A
show node[i]!r i:idesc r:$[;.ml.google[d;A]] over r:n#1f%n:count A
node:asc distinct raze berkstan.l
l:node?berkstan.l
show S:(1 2#1+max over l), .ml.prepend[1f] l
-1"not enough memory to convert sparse -> full matrix";
-1"just perform a few sparse iterations";
show node[i]!r i:idesc r:10 .ml.pageranks[d;S]/ r:n#1f%n:S[0;0]
================================================================================ FILE: funq_pandp.q SIZE: 288 characters ================================================================================
pandp.f:"1342-0.txt"
pandp.b:"https://www.gutenberg.org/files/1342/"
-1"[down]loading pride and prejudice text";
.ut.download[pandp.b;;"";""] pandp.f;
pandp.txt:read0 `$pandp.f
pandp.chapters:1_"\nChapter " vs "\n" sv 35_-373_ pandp.txt
pandp.s:{first[x ss"\n\n"]_x} each pandp.chapters
================================================================================ FILE: funq_pendigits.q SIZE: 433 characters ================================================================================
pendigits.f:("pendigits.tra";"pendigits.tes")
pendigits.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
pendigits.b,:"pendigits/"
-1"download the pendigits training and test data set";
.ut.download[pendigits.b;;"";""] each pendigits.f;
pendigits.y:last pendigits.X:(17#"h";",") 0: `$pendigits.f 0
pendigits.X:-1_pendigits.X
pendigits.yt:last pendigits.Xt:(17#"h";",") 0: `$pendigits.f 1
pendigits.Xt:-1_pendigits.Xt
================================================================================ FILE: funq_persuasion.q SIZE: 339 characters ================================================================================
/ persuasion
persuasion.f:"105.txt"
persuasion.b:"https://www.gutenberg.org/files/105/"
-1"[down]loading persuasion text";
.ut.download[persuasion.b;;"";""] persuasion.f;
persuasion.txt:read0 `$persuasion.f
persuasion.chapters:1_"Chapter" vs "\n" sv 44_-373_persuasion.txt
persuasion.s:{(3+first x ss"\n\n\n")_x} each persuasion.chapters
================================================================================ FILE: funq_pima.q SIZE: 400 characters ================================================================================
pima.f:"pima-indians-diabetes.data"
pima.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
pima.b,:"pima-indians-diabetes/"
-1"[down]loading pima-indians-diabetes data set";
.ut.download[pima.b;;"";""] pima.f;
pima.XY:("EEEEEEEEB";",")0:`$pima.f
pima.X:-1_pima.XY
pima.y:first pima.Y:-1#pima.XY
pima.c:`preg`plas`pres`skin`test`mass`pedi`age`class
pima.t:`class
xcols flip pima.c!pima.XY ================================================================================ FILE: funq_plot.q SIZE: 1,405 characters ================================================================================ \c 20 100 \l funq.q \l dji.q / define a plotting function using 10 characters of gradation plt:.ut.plot[w:40;h:20;c:.ut.c10;sum] -1"plotting 1-dimensional dataset (sin x): x"; -1 value plt X:sin .01*til 1000; -1"plotting 2-dimensional dataset (uniform variates): (x;y)"; -1 value plt X:10000?/:2#1f; -1"plotting 2-dimensional dataset (normal variates): (x;y)"; -1 value plt (.ml.bm 10000?) each 2#1f; -1"plotting 3-dimensional dataset: (x;y;z)"; -1 value plt {(x;{x*x*x}x-.5;x:til[x]%x)} 1000; -1"plotting 3-dimensional grid as a heatmap: X (matrix)"; -1 value plt .ut.hmap {x*/:(x:til x)*(x;x)#1f} 1000; b:1b / use binary encoding for portable (bit|pix)map -1"plotting black/white Mandelbrot series"; c:.ut.tcross . (.ut.nseq .) each flip (-1+w:1000;-2 -1.25;.5 1.25) x:w cut .ml.mbrotp 20 .ml.mbrotf[c]/0f -1 value plt .ut.hmap x; -1"saving PBM image"; `mandel.pbm 0: .ut.pbm[b] x -1"plotting gray scale Mandelbrot series"; x:w cut last 20 .ml.mbrota[c]// (0f;0) -1 value plt .ut.hmap x; -1"saving PGM image"; `mandel.pgm 0: .ut.pgm[b;20] x -1"saving PPM image"; `mandel.ppm 0: .ut.ppm[b;20] flip[(rand 1+20;til 1+20;rand 1+20)] x -1"plotting sparkline of the dow jones index components"; exec -1 ((4$string first stock),": ",.ut.spark close) by stock from dji.t; / tests .ut.assert[1b] last[x]<last .ut.heckbert[4] . x:.47 .56 .ut.assert[1b] last[x]<last .ut.heckbert[10] . x:32064 64978f ================================================================================ FILE: funq_porter.q SIZE: 4,492 characters ================================================================================ / this is the porter stemmer algorithm ported to q. it follows the / algorithm presented in: / Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14, / no. 3, pp 130-137 / https://tartarus.org/martin/PorterStemmer/def.txt / this implementation includes the three points of departure from the / original paper introduced here: / https://www.tartarus.org/~martin/PorterStemmer / note that this implementation stems single words - not full text. / this obviates global variables and .porter.stem, therefore, can be / 'peach'ed. instead of run-time computations and function calls, / hard-coded offsets and $[;;] operators are used for performance. / implementation accuracy can be verified by running the trailing code / nick psaris / release 1: august 2018 \d .porter / are the letters in x vowels vowel:{ v:x in "aeiou"; / aeiou are vowels / y is a vowel if the preceding letter is a consonant v[i where not (1b,v) i:where x="y"]:1b; v} / are the letters in x consonants cons:not vowel:: / returns true if x contains a vowel hasvowel:any vowel:: / returns true if x ends in a double consonant doublec:{$[2>count x;0b;(=) . -2#x;last cons x;0b]} / return true if last three letters are consonant - vowel - / consonant and last letter is not in "wxy" cvc:{$[3>count x;0b;101b~-3#cons x;not last[x] in "wxy";0b]} / if a<m replace n characters with (r)eplacement suffix r:{[a;n;r;x]$[a<m n:n _ x;n,r;x]} / compute m where m in c?(vc){m}v? 
and c and v are consecutive lists / of consonants and vowels m:{sum[x] - first x:x where differ x:cons x} / remove plurals and -ed or -ing step1ab:{ x:$[not x like "*s";x;x like "*sses";-2_x; x like "*ies";-2_x;x like "*ss";x;-1_x]; if[x like "*eed";:$[0<m -3_x;-1_x;x]]; if[not x like o:"*ed";if[not x like o:"*ing";:x]]; if[not hasvowel n:(1+neg count o)_x;:x];x:n; if[x like "*at";:x,"e"]; if[x like "*bl";:x,"e"]; if[x like "*iz";:x,"e"]; if[doublec x;:$[last[x] in "lsz";x;-1_x]]; if[1=m x;if[cvc x;:x,"e"]]; x} / replace y with i when there exist other vowels step1c:{if[x like "*y";if[hasvowel -1_x;x[-1+count x]:"i"]];x} / map double suffices to single ones step2:{ c:x -2+count x; if[c="a";:$[x like "*ational";r[0;-7;"ate";x]; x like "*tional";r[0;-6;"tion";x];x]]; if[c="c";:$[x like "*enci";r[0;-4;"ence";x]; x like "*anci";r[0;-4;"ance";x];x]]; if[c="e";:$[x like "*izer";r[0;-4;"ize";x];x]]; if[c="l";:$[x like "*bli";r[0;-3;"ble";x];x like "*alli";r[0;-4;"al";x]; x like "*entli";r[0;-5;"ent";x];x like "*eli";r[0;-3;"e";x]; x like "*ousli";r[0;-5;"ous";x];x]]; if[c="o";:$[x like "*ization";r[0;-7;"ize";x]; x like "*ation";r[0;-5;"ate";x];x like "*ator";r[0;-4;"ate";x];x]]; if[c="s";:$[x like "*alism";r[0;-5;"al";x]; x like "*iveness";r[0;-7;"ive";x];x like "*fulness";r[0;-7;"ful";x]; x like "*ousness";r[0;-7;"ous";x];x]]; if[c="t";:$[x like "*aliti";r[0;-5;"al";x];x like "*iviti";r[0;-5;"ive";x]; x like "*biliti";r[0;-6;"ble";x];x]]; if[c="g";:$[x like "*logi";r[0;-4;"log";x];x]]; x} / handle -ic-, -full, -ness etc step3:{ c:x -1+count x; if[c="e";:$[x like "*icate";r[0;-5;"ic";x];x like "*ative";r[0;-5;"";x]; x like "*alize";r[0;-5;"al";x];x]]; if[c="i";:$[x like "*iciti";r[0;-5;"ic";x];x]]; if[c="l";:$[x like "*ical";r[0;-4;"ic";x];x like "*ful";r[0;-3;"";x];x]]; if[c="s";:$[x like "*ness";r[0;-4;"";x];x]]; x} / remove -ant, -ence etc, in context <c>vcvc<v> step4:{ c:x -2+count x; if[c="a";:$[x like "*al";r[1;-2;"";x];x]]; if[c="c";:$[x like "*ance";r[1;-4;"";x];x like "*ence";r[1;-4;"";x];x]]; if[c="e";:$[x like "*er";r[1;-2;"";x];x]]; if[c="i";:$[x like "*ic";r[1;-2;"";x];x]]; if[c="l";:$[x like "*able";r[1;-4;"";x];x like "*ible";r[1;-4;"";x];x]]; if[c="n";:$[x like "*ant";r[1;-3;"";x];x like "*ement";r[1;-5;"";x]; x like "*ment";r[1;-4;"";x];x like "*ent";r[1;-3;"";x];x]]; if[c="o";:$[x like "*ion";$[x[-4+count x] in "st";r[1;-3;"";x];x]; x like "*ou";r[1;-2;"";x];x]]; if[c="s";:$[x like "*ism";r[1;-3;"";x];x]]; if[c="t";:$[x like "*ate";r[1;-3;"";x];x like "*iti";r[1;-3;"";x];x]]; if[c="u";:$[x like "*ous";r[1;-3;"";x];x]]; if[c="v";:$[x like "*ive";r[1;-3;"";x];x]]; if[c="z";:$[x like "*ize";r[1;-3;"";x];x]]; x} / remove final e if m>1, change -ll to -l if m>1 step5:{ if["e"=last x;x:$[0=a:m x;x;1<a;-1_x;not cvc -1_x;-1_x;x]]; if["l"=last x;if[doublec x;if[1<m x;:-1_x]]]; x} stem:{ if[3>count x;:x]; x:step1ab x; x:step1c x; x:step2 x; x:step3 x; x:step4 x; x:step5 x; x} ================================================================================ FILE: funq_qmlmm.q SIZE: 195 characters ================================================================================</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="79"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Basic programs¶ From GeeksforGeeks Python Programming Examples 
Many of the published examples have been rewritten to use Python techniques more likely to illuminate the q solution. Where practical, the solutions are shown as expressions evaluated in the REPL, which better allows for experimenting. Follow links to the originals for more details on the problem and Python solutions. Factorial of a number¶ >>> def factorial(n): return 1 if (n==1 or n==0) else n * factorial(n - 1) ... >>> factorial(5) 120 q)factorial:{$[x<2;1;x*.z.s x-1]} q)factorial 5 120 Above .z.s refers to the running function; it can be assigned any name. Factorial 5 is defined non-recursively as the product of the integers 1-5. >>> math.prod(map(1 ._add_, range(5))) # in Python 3.8 >>> 120 q)prd 1+til 5 120 Simple interest¶ >>> p=10000 # principal >>> r=5 # rate >>> t=5 # time periods >>> (p*r*t)/100 # simple interest 2500.0 q)p:10000 / principal q)r:5 / rate q)t:5 / time periods q)(p*r*t)%100 / simple interest 2500f Q programs tend to prefer vectors. q)(prd 10000 5 5)%100 2500f Iteration is implicit in most q operators. Here we have three principals and corresponding time periods. The rate is the same for all three. q)p:1000 1500 1750 / principals q)r:3 / rate q)t:5 6 7 / time periods q)(p*r*t)%100 / simple interest 150 270 367.5 Compound interest¶ >>> p = 1200 # principal >>> r = 5.4 # rate >>> t = 2 # time periods >>> p*(pow((1+r/100),t)) # compound interest 1333.0992 q)p:1200 / principal q)r:5.4 / rate q)t:2 / time periods q)p*(1+r%100)xexp t / compound interest 1333.099 Again, iteration through lists is implicit. q)p:1200 1500 1800 / principals q)r:5.4 / rate q)t:2 2 3 / time periods q)p*(1+r%100)xexp t / compound interest 1333.099 1666.374 2107.63 Whether an Armstrong number¶ import numpy as np def is_armstrong(x): s = 0 t = x while t: s += (t % 10) ** len(str(x)) t //= 10 return s == x >>> [is_armstrong(x) for x in (153, 120, 1253, 1634)] [True, False, False, True] isArmstrong:{x=sum{x xexp count x}10 vs x} q)isArmstrong each 153 120 1253 1634 1001b The steps of isArmstrong explain themselves. q)10 vs 153 / decode base-10 integer 1 5 3 q)1 5 3 xexp 3 / raise to 3rd power 1 125 27f q)sum 1 5 3 xexp 3 153f Area of a circle¶ The area of a circle of radius \(r\) is \(\pi r^2\), where \(\pi\) is the arc-cosine of -1. >>> import numpy as np >>> np.arccos(-1)*5*5 # area of circle of radius 5 78.53981633974483 q)(acos -1)*5*5 / area of circle of radius 5 78.53982 Prime numbers in an interval¶ >>> from sympy import sieve >>> list(sieve.primerange(11, 25)) [11, 13, 17, 19, 23] range:{x+til y-x-1} sieve_primerange:{ c:range[x;y]; / candidates lmt:"j"$sqrt last c; / highest divisor to test c where all 0<c mod/:range[2;lmt] } q)sieve_primerange[11;25] 11 13 17 19 23 No q primitive for this, but range is a useful starting point. q)show c:range[11;25] / candidates 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 q)"j"$sqrt last c / need test modulo only to here 5 q)range[2;]"j"$sqrt last c 2 3 4 5 q)c mod/:2 3 4 5 / modulo each c against all of them 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 2 0 1 2 0 1 2 0 1 2 0 1 2 0 1 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 q)show f:0<c mod/:2 3 4 5 / flag remainders 101010101010101b 101101101101101b 101110111011101b 111101111011110b q)all f / AND the flag vectors 101000101000100b q)where all f / index the hits 0 2 6 8 12 q)c where all f / select from range 11 13 17 19 23 Whether a number is prime¶ >>> from sympy import isprime >>> [isprime(x) for x in (11, 15, 1)] [True, False, False] No q primitive for this either. 
range:{x+til y-x-1} isPrime:{(x>1)and all 0<x mod range[2;"j"$sqrt x]} q)isPrime each 11 15 1 100b Nth Fibonacci number¶ # next Fibonacci pair def nfp(x): return [x[1], sum(x)] # Nth Fibonacci pair def fibp(n): if n<2: return [0, 1] return nfp(fibp(n-1)) def fib(n): return fibp(n)[0] >>> fib(9) 21 nfp:{(x 1),sum x} fib:{first(x-1)nfp/0 1} q)fib 9 21 Above we see nfp applied with the Do iterator / . The Python solution recurses down from n to reach the initial state of [0, 1] , while the q solution iterates n-1 times on an initial state of 0 1 . Whether a Fibonacci number¶ import math def is_fibonacci(n): phi = 0.5 + 0.5 * math.sqrt(5.0) a = phi * n return n == 0 or abs(round(a) - a) < 1.0 / n >>> [is_fibonacci(x) for x in (8, 34, 41)] [True, True, False] \(x\) is a Fibonacci number if either of \(5x^{2}\pm 4\) is a perfect square. is_ps:{x={x*x}"j"$sqrt x} / is perfect square? is_fibonacci:{.[or]is_ps flip 4 -4+/:5*x*x} q)is_fibonacci 8 34 41 110b The iteration implicit in q’s operators means that is_fibonacci also iterates implicitly. Sum of squares of first N natural numbers¶ def squaresum(n): return (n * (n + 1) / 2) * (2 * n + 1) / 3 >>> [squaresum(x) for x in (4,5)] [30.0, 55.0] squaresum:{(x*(x+1)%2)*(1+x*2)%3} q)squaresum 4 5 30 55f The q solution mirrors the Python, but the primitives iterate implicitly. Cube sum of first N natural numbers¶ def sum_cubes(x): return (x * (x + 1) // 2) ** 2 >>> [sum_cubes(x) for x in (5, 7)] [225, 784] sum_cubes:{(x*(x+1)div 2)xexp 2} q)sum_cubes 5 7 225 784f Once again, the q operators iterate implicitly. Array programs¶ From GeeksforGeeks Python Programming Examples Follow links to the originals for more details on the problem and Python solutions. Sum of an array¶ >>> sum([1, 2, 3]) 6 >>> sum([15, 12, 13, 10]) 50 q)sum each (1 2 3; 15 12 13 10) 6 50 Largest item in an array¶ >>> max([10, 20, 4]) 20 >>> max([20, 10, 20, 4, 100]) 100 q)max each (10 20 4; 20 10 20 4 100) 20 100 Rotate an array¶ >>> import numpy as np >>> np.roll([1, 2, 3, 4, 5, 6, 7],-2) array([3, 4, 5, 6, 7, 1, 2]) Q has a primitive for rotating lists. q)2 rotate 1 2 3 4 5 6 7 3 4 5 6 7 1 2 Remainder of array multiplication divided by n¶ def findremainder(arr, n): lens, mul = len(arr), 1 for i in range(lens): mul = (mul * (arr[i] % n)) % n return mul % n >>> findremainder([ 100, 10, 5, 25, 35, 14 ], 11) 9 findRemainder:{(x*y) mod 11} over q)findRemainder 100 10 5 25 35 14 9 The binary lambda {(x*y)mod 11} returns the modulo-11 of the product of two numbers. over applies it to reduce the argument list. The naïve solution q)(prd 100 10 5 25 35 14) mod 11 9 overflows for a long list. Reconstruct array, replacing arr[i] with (arr[i-1]+1)%M ¶ def construct(m, a): ind, n = 0, len(a) # Finding the index which is not -1 for i in range(n): if (a[i]!=-1): ind = i break # Calculating the values of the indexes ind-1 to 0 for i in range(ind-1, -1, -1): if (a[i]==-1): a[i]=(a[i + 1]-1 + m)% m # Calculating the values of the indexes ind + 1 to n for i in range(ind + 1, n): if(a[i]==-1): a[i]=(a[i-1]+1)% m print(*a) >>> construct(7, [5, -1, -1, 1, 2, 3]) 5 6 0 1 2 3 >>> construct(10, [5, -1, 7, -1, 9, 0]) 5 6 7 8 9 0 construct:{[v;M] {$[y=-1;(x+1)mod z;y]}[;;M]\[v]} q)construct[5 -1 -1 1 2 3;7] 5 6 0 1 2 3 q)construct[5 -1 7 -1 9 0;10] 5 6 7 8 9 0 The q solution applies a binary lambda {$[y=-1;(x+1)mod z;y]}[;;M] to successive pairs of items of argument vector v . 
The lambda is defined with three arguments and projected on M – constant for each iteration – so becoming a binary that can be iterated through the vector. Successive application relies on the Scan iterator. Is array monotonic?¶ def isMonotonic(A): return (all(A[i] <= A[i + 1] for i in range(len(A) - 1)) or all(A[i] >= A[i + 1] for i in range(len(A) - 1))) >>> isMonotonic([6, 5, 4, 4]) True isMonotonic:{asc[x]in(x;reverse x)} q)isMonotonic 6 5 4 4 1b q)isMonotonic 6 5 3 4 0b Both these solutions overcompute. The Python program traverses the entire list twice. The q program sorts the entire list. Native sort in q is very fast, but if the list is long and likely to fail we might prefer to iterate and stop as soon as we find the list is not monotonic. Monotony can rise or fall, so we test the first pair for both cases. The first several items in the list may match, so we continue testing with both ≤ and ≥ until we eliminate one or both. So, a two-item initial state. (1;(<=;>=)) 1 is the next (first) index to test; the operators (<=;>=) are the tests to apply. (They would be (<;>) for strict monotony.) Our function will try the tests, returning those that pass, and the next index, as the next state. We shall apply it with the While iterator, so we need it be unary, i.e. to take one argument. We also want it to refer to the list, so we project a binary lambda on the list to bind the list to the lambda as a constant value for its y argument. q)v:5 5 5 5 6 6 7 8 9 11 / list to test q)it:(1;(<=;>=)) / initial state: index and tests q)try:{[x;y] i:x 0; f:x 1; (i+1; f where f .\:y i-1 0) }[;v] q){count x 1} try\it 1 (~>;~<) 2 (~>;~<) 3 (~>;~<) 4 (~>;~<) 5 ,~> 6 ,~> 7 ,~> 8 ,~> 9 ,~> 10 ,~> 11 () Above we see the ≤ test eliminated after item 4. Our test function for the iterator {count x 1} stops iteration when the list of functions is empty. The last result will be (n;()) , with n the next index that would have been tested. We can improve this, using the Converge iterator. isMt:{[v] / is monotonic? try:{[x;y] / apply tests x[1] between y x[0]-1 0 i:x 0; f:x 1; / index; tests go:i<count y; / end of list? f:$[go;f where f .\:y i-1 0;f]; / tests passed go&:0<count f; / keep testing? (i+go;f) }[;v]; / project onto v it:(1;(<=;>=)); / initial state count[v]=first try/[it] } / reached end of v? The first item of the final result of try/[it] is the last index for which at least one of the tests ≤ and ≥ held true. We compare it to count[v] to see if try got to the end of the list. The second item of the final result is the list of tests that held true. Instead of testing the final index, we could count that list to see if either ≤ or ≥ held true throughout v . The result of isMt would then be {0<count x 1}try/[it] q)isMt 6 5 4 4 1b q)isMt 6 5 3 4 0b The above approach can be generalized. The list of functions could be of any length, contain any binary functions. The initial index could be anywhere in the list, and try adapted to stop iteration before the end of the list.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="80"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">/ if reQ not loaded, define necessary components here if[not `req in key `; .url.parse0:{[q;x] if[x~hsym`$255#"a";'"hsym too long - consider using a string"]; //error if URL~`: .. 
too long x:.url.sturl x; //ensure string URL p:x til pn:3+first ss[x;"://"]; //protocol uf:("@"in x)&first[ss[x;"@"]]<first ss[pn _ x;"/"]; //user flag - true if username present un:pn; //default to no user:pass u:-1_$[uf;(pn _ x) til (un:1+first ss[x;"@"])-pn;""]; //user:pass d:x til dn:count[x]^first ss[x:un _ x;"[/?]"]; //domain a:$[(dn=count x)|"?"=x[dn];"/",;] dn _ x; //absolute path (add leading slash if necessary) o:`protocol`auth`host`path!(p;u;d;a); //create URL object :$[q;@[o;`path`query;:;query o`path];o]; //split path into path & query if flag set, return }; .url.sturl:{(":"=first x)_x:$[-11=type x;string;]x}; .url.hsurl:{`$":",.url.sturl x}; .req.query:`method`url`hsym`path`headers`body`bodytype!(); .req.proxy:{[u] p:(^/)`$getenv`$(floor\)("HTTP";"NO"),\:"_PROXY"; //check HTTP_PROXY & NO_PROXY env vars, upper & lower case - fill so p[0] is http_, p[1] is no_ t:max(first ":"vs u[`url]`host)like/:{(("."=first x)#"*"),x}each"," vs string p 1; //check if host is in NO_PROXY env var t:not null[first p]|t; //check if HTTP_PROXY is defined & host isn't in NO_PROXY :$[t;@[;`proxy;:;p 0];]u; //add proxy to URL object if required }; .req.enchd:{[d] k:2_@[k;where 10<>type each k:(" ";`),key d;string]; //convert non-string keys to strings v:2_@[v;where 10<>type each v:(" ";`),value d;string]; //convert non-string values to strings :("\r\n" sv ": " sv/:flip (k;v)),"\r\n\r\n"; //encode headers dict to HTTP headers }; .req.buildquery:{[q] r:string[q`method]," ",q[`url;`path]," HTTP/1.1\r\n", //method & endpoint TODO: fix q[`path] for proxy use case "Host: ",q[`url;`host],$[count q`headers;"\r\n";""], //add host string .req.enchd[q`headers], //add headers $[count q`body;q`body;""]; //add payload if present :r; //return complete query string }; .cookies.addcookies:{[q]q}; // without reQ loaded, don't do anything with cookies ]; VERBOSE:@[value;`.ws.VERBOSE;$[count .z.x;"-verbose" in .z.x;0b]]; //default to non-verbose output w:([h:`int$()] hostname:`$();callback:`$()) //table for recording open websockets .ws.onmessage.server:{value[w[.z.w]`callback]x} //pass messages to relevant handler open0:{[x;y;v] q:@[.req.query;`method`url;:;(`GET;.url.parse0[0]x)]; //create reQ query object q:.req.proxy q; //handle proxy if needed hs:.url.hsurl`$raze q ./:enlist[`url`protocol],$[`proxy in key q;1#`proxy;enlist`url`host]; //get hostname as handle q[`headers]:(enlist"Origin")!enlist q[`url;`host]; //use Origin header q:.cookies.addcookies[q]; //if reQ is loaded, cookies can be added s:first r:hs d:.req.buildquery[q]; //build query & send if[v;-1"-- REQUEST --\n",string[hs]," ",d]; //if verbose, log request if[v;-1"-- RESPONSE --\n",last r]; //if verbose, log response servers,:(s;hs); //record handle & callback in table w,:(s;hs;y); //record handle & callback in table :r; //return response } open:{neg first open0[x;y;.ws.VERBOSE]} //return neg handle for messaging .ws.close:{[h] h:abs h; if[all(h in key .ws.w;h in key .z.W);hclose h]; //close handle if h is found both in .ws.w and .z.W (all opened handles) .ws.w:.ws.w _ h; //remove h from .ws.w .z.wc h; //remove h from .ws.servers } .ws.closea:{.ws.close each (0!.ws.w)[`h]} //close all opened websockets \d . 
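Putting the pieces above together, a client session might look like the following sketch. The endpoint is hypothetical and the handler name is arbitrary; per the code above, the callback named in the second argument receives the raw message text (so JSON feeds would typically be parsed with .j.k), and .ws.open returns a negative handle for asynchronous sends.

q)upd:{show .j.k x}                               / message handler: parse and display JSON
q)h:.ws.open["wss://ws.example.com/feed";`upd]    / open connection, register handler
q)h .j.j `event`pair!("subscribe";"BTC/USD")      / send a JSON message over the websocket
q).ws.close h                                     / close and clean up when finished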
================================================================================ FILE: ws.q_ws-handler_ws-handler.q SIZE: 939 characters ================================================================================ / WebSockets handler module; create & receive WebSocket connections in a managable way \d .ws / Set up client/server tables & handlers without overwriting, so this script / can be loaded multiple times without issue clients:@[value;`.ws.clients;([h:`int$()] hostname:`$())]; //clients = incoming connections over ws servers:@[value;`.ws.servers;([h:`int$()] hostname:`$())]; //servers = outgoing connections over ws onmessage.client:@[value;`.ws.onmessage.client;{{x}}]; //default echo onmessage.server:@[value;`.ws.onmessage.server;{{x}}]; //default echo .z.ws:{.ws.onmessage[$[.z.w in key servers;`server;`client]]x} //pass messages to relevant handler func .z.wo:{clients,:(.z.w;.z.h)} //log incoming connections .z.wc:{{delete from y where h=x}[.z.w] each `.ws.clients`.ws.servers} //clean up closed connections \d . ================================================================================ FILE: ws.q_ws-server_wsu.q SIZE: 920 characters ================================================================================ /wsu.q /websocket pubsub functionality /based off kx u.q \d .wsu init:{w::t!(count t::tables`.)#()} del:{w[x]_:w[x;;0]?y};.z.wc:{del[;x]each t}; sel:{$[`~y;x;select from x where sym in y]} pub:{[t;x]{[t;x;w]if[count x:sel[x]w 1;(neg first w).j.j(t;x)]}[t;x]each w t} add:{[h;x;y]$[(count w x)>i:w[x;;0]?h;.[`.wsu.w;(x;i;1);union;y];w[x],:enlist(h;y)];(x;$[99=type v:value x;sel[v]y;0#v])} sub:{[h;x;y]if[x~`;:sub[h;;y]each t];if[not x in t;'x];del[x]h;add[h;x;y]} end:{(neg union/[w[;;0]])@\:(`.u.end;x)}</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="81"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">/ General job addition function. Adds a job to the cron system for execution / @param func (Symbol) Symbol reference to the function to execute / @param args () Any arguments that are required to execute the function. Pass generic null (::) for no arguments / @param runType (Symbol) The type of cron job to add. See .cron.runners / @param startTime (Timestamp) The first time the job will be run. NOTE: Timestamp will be rounded to the nearest millisecond / @param endTime (Timestamp) The time to finish a repeating job executing. Pass null (0Np) to repeat forever or for one time jobs. NOTE: Timestamp will be rounded to the nearest millisecond / @param interval (Timespan) The interval at which repeating jobs should recur. 
Pass null (0Nn) for one time jobs / @returns (Long) The ID of the new cron job / @throws InvalidCronJobIntervalException If the interval specified is smaller than the cron interval / @throws FunctionDoesNotExistFunction If the function for the cron job does not exist / @throws ReferenceIsNotAFunctionException If the symbol reference for the function is not actually a function / @throws InvalidCronRunTypeException If the run type specified is not present in .cron.runners / @throws InvalidCronJobTimeException If the start time specified is before the current time or the end time is before the start time .cron.add:{[func;args;runType;startTime;endTime;interval] if[not .ns.isSet func; .log.if.error "Function to add to cron does not exist [ Function: ",string[func]," ]"; '"FunctionDoesNotExistFunction"; ]; if[not .type.isFunction get func; .log.if.error "Symbol reference for cron job is not a function [ Reference: ",string[func]," ]"; '"ReferenceIsNotAFunctionException"; ]; if[not runType in key .cron.runners; .log.if.error "Invalid cron run type. Expecting one of: ",.convert.listToString key .cron.runners; '"InvalidCronRunTypeException"; ]; if[not all .type.isTimestamp each (startTime; endTime); .log.if.error "Invalid start time or end time. Must be a timestamp"; '"InvalidCronJobTimeException"; ]; startTime:.time.roundTimestampToMs startTime; endTime:.time.roundTimestampToMs endTime; now:.time.today[] + `second$.time.nowAsTime[]; if[startTime < now; if[`disallowed = .cron.cfg.historicalStartTimes; .log.if.error ("Cron job start time is in the past. Cannot add job [ Start Time: {} ] [ Now: {} ]"; startTime; now); '"InvalidCronJobTimeException"; ]; if[`allowed = .cron.cfg.historicalStartTimes; .log.if.debug ("Allowing start time in the past as configured [ Start Time: {} ] [ Now: {} ]"; startTime; now); ]; if[`setAsNow = .cron.cfg.historicalStartTimes; .log.if.debug ("Overwriting start time in the past to now as configured [ Start Time: {} ] [ Now: {} ]"; startTime; now); startTime:now; ]; ]; if[not[.util.isEmpty endTime] & endTime < startTime; .log.if.error ("Cron job end time specified is before the start time. Cannot add job [ Start Time: {} ] [ End Time: {} ]"; startTime; endTime) '"InvalidCronJobTimeException"; ]; if[(`ticking = .cron.cfg.mode) & not[.util.isEmpty interval] & .cron.cfg.timerInterval > .convert.timespanToMs interval; .log.if.error "Cron job repeat interval is shorter than the cron timer interval (ticking). Cannot add job"; '"InvalidCronJobIntervalException"; ]; jobId:.cron.jobId; .cron.jobId+:1; `.cron.jobs upsert (jobId;func;args;runType;startTime;endTime;interval;startTime); if[`tickless = .cron.cfg.mode; .cron.i.setNextTick[]; ]; :jobId; }; / Shortcut function to add a job that will only execute once / @param func (Symbol) Symbol reference to the function to execute / @param args () Any arguments that are required to execute the function. Pass generic null (::) for no arguments / @param startTime (Timestamp) The first time the job will be run / @see .cron.add .cron.addRunOnceJob:{[func;args;startTime] :.cron.add[func;args;`once;startTime;0Np;0Nn]; }; / Shortcut function to add a job that repeats forever / @param func (Symbol) Symbol reference to the function to execute / @param args () Any arguments that are required to execute the function. Pass generic null (::) for no arguments / @param startTime (Timestamp) The first time the job will be run / @param interval (Timespan) The interval at which repeating jobs should recur. 
Pass null (0Nn) for one time jobs / @see .cron.add .cron.addRepeatForeverJob:{[func;args;startTime;interval] :.cron.add[func;args;`repeat;startTime;0Wp;interval]; }; / Schedules a job that repeats forever but only if there isn't an active job with the same function and arguments / @param uFunc (Symbol) Symbol reference to the function to execute / @param uArgs () Any arguments that are required to execute the function. Pass generic null (::) for no arguments / @param startTime (Timestamp) The first time the job will be run / @param interval (Timespan) The interval at which repeating jobs should recur. Pass null (0Nn) for one time jobs / @returns (Long) The job ID either of the existing job, or the newly scheduled job / @see .cron.addRepeatForeverJob .cron.addUniqueRepeatForeverJob:{[uFunc;uArgs;startTime;interval] match:exec from .cron.jobs where func = uFunc, args ~\: uArgs, not 0Wp = nextRunTime; if[not null match`id; .log.if.info ("Cron job with matching function and arguments is active. Not adding job [ Function: {} ] [ Arguments: {} ]"; uFunc; uArgs); :match`id; ]; :.cron.addRepeatForeverJob[uFunc; uArgs; startTime; interval]; }; / Shortcut function to add a job that repeats until a specified time / @param func (Symbol) Symbol reference to the function to execute / @param args () Any arguments that are required to execute the function. Pass generic null (::) for no arguments / @param startTime (Timestamp) The first time the job will be run / @param endTime (Timestamp) The time to finish a repeating job executing. Pass null (0Np) to repeat forever or for one time jobs / @param interval (Timespan) The interval at which repeating jobs should recur. Pass null (0Nn) for one time jobs / @see .cron.add .cron.addRepeatUntilTimeJob:{[func;args;startTime;endTime;interval] :.cron.add[func;args;`repeat;startTime;endTime;interval]; }; / Schedules a job that repeats until the specified time but only if there isn't an active job with the same function and arguments / @param uFunc (Symbol) Symbol reference to the function to execute / @param uArgs () Any arguments that are required to execute the function. Pass generic null (::) for no arguments / @param startTime (Timestamp) The first time the job will be run / @param endTime (Timestamp) The time to finish a repeating job executing. Pass null (0Np) to repeat forever or for one time jobs / @param interval (Timespan) The interval at which repeating jobs should recur. Pass null (0Nn) for one time jobs / @see .cron.addRepeatUntilTimeJob .cron.addUniqueRepeatUntilTimeJob:{[uFunc;uArgs;startTime;endTime;interval] match:exec from .cron.jobs where func = uFunc, args ~\: uArgs, not 0Wp = nextRunTime; if[not null match`id; .log.if.info ("Cron job with matching function and arguments is active. Not adding job [ Function: {} ] [ Arguments: {} ]"; uFunc; uArgs); :match`id; ]; :.cron.addRepeatUntilTimeJob[uFunc; uArgs; startTime; endTime; interval]; }; / Cancels the specified job from running. Run once jobs will never run and repeating jobs will no longer run / @param jobId (Long) The ID of the job to cancel / @throws InvalidCronJobException If the ID of the job does not exist .cron.cancelJob:{[jobId] if[not jobId in key .cron.jobs; '"InvalidCronJobException"; ]; update nextRunTime:0Wp from `.cron.jobs where id = jobId; if[`tickless = .cron.cfg.mode; .cron.i.setNextTick[]; ]; }; / Removes all entries from .cron.status and all jobs that will not run again. 
By default this is run at / midnight every day / @see .cron.status .cron.cleanStatus:{ delete from `.cron.jobs where nextRunTime = 0Wp; delete from `.cron.status where not null id; }; / The main cron function that is bound to .z.ts as part of the initialisation .cron.ts:{ toRun:0!select id, runType from .cron.jobs where nextRunTime <= .time.now[]; .cron.runners[toRun`runType] @' toRun`id; if[`tickless = .cron.cfg.mode; .cron.i.setNextTick[]; ]; }; / Execution function for jobs that only run once / @returns (Boolean) If the job executed successfully or not / @see .cron.i.run .cron.i.runOnce:{[jobId] status:.cron.i.run jobId; .cron.cancelJob jobId; :status; }; / Execution function for jobs that repeat / @returns (Boolean) If the job exeucted successfully or not / @see .cron.i.run .cron.i.runRepeat:{[jobId] status:.cron.i.run jobId; jobDetails:.cron.jobs jobId; if[.type.isInfinite jobDetails`nextRunTime; .log.if.debug "Job has been self-cancelled. Will not reschedule [ Job: ",string[jobId]," ]"; :status; ]; newNextRunTime:(+). jobDetails`nextRunTime`interval; if[newNextRunTime > jobDetails`endTime; .log.if.info "Job has reached 'end time'. Will not schedule again [ Job: ",string[jobId]," ]"; newNextRunTime:0Wp; ]; update nextRunTime:newNextRunTime from `.cron.jobs where id = jobId; :status; }; / Executes the specified cron job / @param jobId (Long) The cron job to run now / @returns (Boolean) If the job executed successfully or not / @see .ns.protectedExecute .cron.i.run:{[jobId] jobDetails:.cron.jobs jobId; startTimer:.time.now[]; result:.ns.protectedExecute . jobDetails`func`args; endTimer:.time.now[]; status:not .ns.const.pExecFailure ~ first result; if[not status; $[.cron.cfg.printBacktraceOnFailure; .log.if.error ("Cron job failed to execute [ Job ID: {} ]. Error - {}\n{}"; jobId; last result; result`backtrace); / else .log.if.error ("Cron job failed to execute [ Job ID: {} ]. Error - {}"; jobId; last result) ]; result:(`errorMsg`backtrace inter key result)#result; ]; / Cron job failures will always be logged if[.cron.cfg.logStatus | not status; `.cron.status upsert jobId,jobDetails[`func`nextRunTime],(startTimer;endTimer - startTimer;status;result); ]; :status; }; / Updates the 'tickless' timer tick based on the next run time. If no more cron jobs are scheduled to run, the timer will be disabled / until a new job is added / @see .cron.jobs / @see .cron.oneMsAsTimespan / @see .cron.maxTimerAsTimespan .cron.i.setNextTick:{ nextRun:exec min nextRunTime from .cron.jobs; if[.type.isInfinite nextRun; .log.if.trace "No active cron jobs scheduled. 
Disabling system timer"; system "t 0"; :(::); ]; / Always make sure the next timer tick: / * Is not 0 (so accidentally disabled) / * Is not greater than max integer - 1 timer:.cron.maxTimerAsTimespan & .cron.oneMsAsTimespan | nextRun - .time.roundTimestampToMs .time.now[]; timerMs:.convert.timespanToMs timer; if[timerMs = system "t"; :(::); ]; system "t ",string timerMs; .log.if.trace "Tickless cron timer updated [ Next Run: ",string[timer]," (",string[timerMs]," ms) ]"; };

kdb+ in astronomy¶

The field of observational astronomy has always been data-driven, but like many other fields, technological advances are causing something of a paradigm shift, and – according to experts – a bit of a headache! Currently under construction are new infrastructures that have the potential to record volumes of data that have not been seen before in the field. The Large Synoptic Survey Telescope (LSST) and Square Kilometer Array (SKA) are set to record such huge amounts of data that experts are concerned about their ability to make sense of this data, purely due to its sheer size.

The LSST is expected to be fully operational and start recording data in 2021, producing 15TB every night. It will take an image of half the sky every three nights, and so the same objects will be photographed again and again. This gives the data from the LSST a time dimension – how an object moves over time is something that will be studied from this data. In the 2020s, the SKA will produce 160TB of data per second. This is in the region of exabytes per night, and zettabytes per year. If this data were processed in real time, any large differences in objects (e.g. brightness/position) could then be investigated immediately. Use cases of the data on a non-real-time basis include discovering the formation and structure of our solar system, investigating distant galaxies, and the evolution of the universe.

There is no single choice of programming language in astronomy, but C is used for many astronomical applications. C is one of the most popular and commonly-used programming languages in the world, with a wide range of uses varying from powering operating systems to building application software. kdb+ has the ability to extend its functionality through dynamically-loaded C/C++ modules, so we have the ability both to make use of existing utilities and to create our own.

We believe that due to the amount of data collected, its time-series nature, and the potential need for both real-time and historical analysis, kdb+ would be very well suited to the data collected in the astronomy industry, and would be an ideal fit for many future astronomy projects. This paper serves to provide an example of how the data collected in astronomy can be processed using the power and speed of kdb+. With future projects collecting data at increasing scales, this is an ideal time for the power of kdb+ to be applied to the field of astronomy. To demonstrate, we have loaded some raw astronomical data into kdb+.
In addition, we make use of kdb+’s versatility in extending with C to show how it takes a relatively small amount of q code to load some raw astronomical data in to kdb+ and run some simple analysis on the dataset. The European Space Agency’s (ESA) Gaia project is a mission to chart a three-dimensional map of the Milky Way galaxy. Gaia is a billion-pixel camera that will take images of the galaxy to eventually record over one billion stars across the next five years, with also the anticipation of making other discoveries along the way. It recently released its first set of data to the public in the form of FITS files. This can be downloaded from ESA’s website. Sloan Digital Sky Survey (SDSS)¶ The Sloan Digital Sky Survey (SDSS) – based in New Mexico – began surveying the sky in 2000. It covered roughly one third of the sky, and observed around 500 million objects. It has produced more than 100TB of data, of which all is available free to the public through its website. This has resulted in the SDSS often being described as the project that genuinely brought astronomy in to Big Data territory, and so its significance in this field cannot be underestimated. The data can be accessed through SQL queries, or through the raw FITS files that are also publicly available. We describe FITS files below. FITS files¶ Flexible Image Transport System (FITS) is a digital file format used for storing scientific data. It is the format most widely used by astronomers and astrophysicists for transporting, analyzing and archiving scientific data since the early 1980s. The data in the SDSS database is archived in FITS files. These files are primarily designed to store images in the form of multidimensional numerical arrays or data in the form of tables. One FITS file can contain multiple tables and images. FITS files consist of segments called Header Data Units (HDUs). Every FITS file has a primary HDU which is usually an image and optional extension HDUs which may contain images or tables. Each HDU contains a Header Unit and a Data Unit. The Header Unit contains metadata pertaining to the information stored in the Data Unit and the Data Unit is the image or table referenced by the metadata. Processing the data – linking FITS and kdb+ using C¶ KX provides a header file, k.h , for interacting with C from kdb+. It provides the link between kdb+ and C by converting the different data types and structures between the two languages. Using this header file, we created a shared object that could natively parse a FITS file and load the data into a kdb+ database. This C extension can read metadata in HDUs and extract columns from binary tables, converting the information into a format usable by kdb+. The C functions are loaded into kdb+ from the shared object by using the Dynamic Load operator 2: , which is described in more detail later. Interfaces: C client for kdb+ “C API for kdb+” KxSystems/kdb James Neill’s repository for kdb+ in astronomy: jpneill/fitsToKdb The shared object¶ There are five functions for extracting metadata about the FITS file and the tables. Each of these takes in one or more K objects from a kdb+ process and returns a K object to that process. 
// Function which prints a list of the number of HDUs and their types // x – q symbol that is the FITS file `$”example.fits” K listHDUs(K x) // Function which returns number of rows in a table from a FITS file // x - q symbol that is the FITS file `$"example.fits" // y – q int or long representing the HDU of the binary table K num_rows(K x, K y) // Function which returns number of columns in a table from a FITS file // x - q symbol that is the FITS file `$"example.fits" // y – q int or long representing the HDU of the binary table K num_cols(K x, K y) // Function which returns a list of columns in a binary table in a FITS file // x - q symbol that is the FITS file `$"example.fits" // y - q int or long representing the HDU we're looking for K cols(K x, K y) // Function which returns a column type from a binary table in a FITS file // x - q symbol for name of FITS file // y - q symbol for column name // z - q int or long representing the HDU we're looking at K getColType(K x, K y, K z) There are four functions for extracting columns of different types: // All of the following take in the same parameters: // x - q symbol for FITS file name // y - q symbol for column name // z - q long for number of rows to extract // h - q int or long representing the number of the HDU we're looking at K readLongCol(K x,K y,K z,K h) K readIntCol(K x,K y,K z,K h) K readDoubleCol(K x,K y,K z,K h) K readSymCol(K x,K y,K z,K h) Importing table data to kdb+¶ In kdb+, the Dynamic Load 2: operator is a binary function used to dynamically load C functions from a shared object. The left argument is the library from which the function is loaded (of type symbol), and the right argument is a list containing the function name (type symbol), and the number of arguments that function takes (type integer). Dynamically loaded functions have the datatype value 112h . Below is an example of how the listHDUs function is loaded: q).astro.listHDUs:`fitsToKdb 2:(`listHDUs;1) Once the function is loaded into the kdb+ process it can be called in the same way as any q function: q)file:`$"specObj-dr12.fits" q).astro.listHDUs[file] Number of HDUs: 2 HDU 1: IMAGE_HDU HDU 2: BINARY_TBL The other functions can then be loaded in in the same manner. .astro.getFitsRowCount:`fitsToKdb 2:(`num_rows;2) .astro.getFitsColCount:`fitsToKdb 2:(`num_cols;2) .astro.getFitsColNames:`fitsToKdb 2:(`cols;2) .astro.getFitsColType: `fitsToKdb 2:(`getColType;3) .astro.readLongCol: `fitsToKdb 2:(`readLongCol;4) .astro.readDoubleCol: `fitsToKdb 2:(`readDoubleCol;4) .astro.readIntCol: `fitsToKdb 2:(`readIntCol;4) .astro.readSymCol: `fitsToKdb 2:(`readSymCol;4) The source file for these example analytics is the specObj-dr12.fits file from the SDSS database. www.sdss.org/dr12/spectro/spectro_access This is a 2.9GB file containing the redshifts and classifications of all 4 million+ objects observed, including galaxy, quasar, and stellar spectra. We are able to use the functions defined above to create a kdb+ database from this FITS format file. 
q).astro.getFitsRowCount[file;2] 4355200i q)c:.astro.getFitsColNames[file;2] // columns in table q)t:.astro.getFitsColType[file;;2]each c // types of columns q)n:1000000 // number of rows to extract from the table q)icols:c where t=`I // get only the int columns // build a dictionary containing the int columns q)icols:lower[icols]!.astro.readIntCol[file;;n;2]each icols // repeat for each of the long, float and sym cols Interfaces: Using foreign functions with kdb+ Building tables in kdb+¶ Once the data is in dictionary form, combine and build the table: q)specObj:flip raze(icols;jcols;fcols;scols) q)specObj nspecobs spectrographid bluefiber nturnoff boss_specobj_id .. -------------------------------------------------------------- 2 1 -1 -1 0 -1 2 1 -1 -1 0 -1 2 1 -1 -1 0 -1 2 1 -1 -1 0 -1 2 1 -1 -1 0 -1 2 1 -1 -1 0 -1 2 1 -1 -1 0 -1 2 1 -1 -1 0 -1 3 1 -1 -1 0 -1 .. q)count specObj 1000000 At this point, we have successfully loaded the data from a FITS file in to an in-memory kdb+ table. From here we can run some sample queries to take a look at the data and what it contains. Obtaining a breakdown by class of the data: q)select count i by class from specObj class | x ------| ------ GALAXY| 678098 QSO | 112701 STAR | 209201 // Time(ms) and memory(bytes) taken to compute across 1m rows q)\ts select count i by class from specObj 18 16777872 Calculating recessional velocity¶ Calculating the recessional velocity (recessional velocity is the rate at which an object is moving away from Earth) of each object using the observed redshift (z column): q)rv:{[z](z-1)%1+z*:z+:1} q)rv 1.6 0.742268 q)select class,subclass,plug_ra,plug_dec,z,recVel:rv z from specObj class subclass plug_ra plug_dec z recVel ------------------------------------------------------------- GALAXY 146.7142 -1.041304 0.02127545 0.02104918 GALAXY 146.9195 -0.9904918 0.2139246 0.1914661 QSO BROADLINE 146.9023 -0.9849133 0.6521814 0.4637643 GALAXY 146.8598 -0.8089017 0.1265536 0.1186022 .. // Time(ms) and memory(bytes) taken to compute above query q)\ts select class,subclass,plug_ra,plug_dec,z,recVel:rv z from specObj 30 33555104 // breakdown of average recessional velocity by class q)select recVel:avg rv z by class from specObj class | recVel ------| ------------- GALAXY| 0.1374245 QSO | 0.6371058 STAR | -6.876509e-05 q)\ts select recVel:avg rv z by class from specObj 53 53478144 The kdb+ function fby aggregates values from one list based on groups in another list. It is commonly used to extend the functionality of the Where clause in a select statement. Placing fby in a Where clause allows an aggregate function (e.g. sum , max , avg ) to be used to select individual rows across groupings. The left argument is a list containing two items – the first being the aggregation function, the second being the data vector (list/column) – and the right argument is the ‘group by’ vector. Below we make use of the fby function to obtain a breakdown by class of the objects with above average recessional velocity. 
q)select count i by class from (select class, recVel:rv z from specObj) where recVel>(avg;recVel) fby class class | x ------| ------ GALAXY| 242519 QSO | 68750 STAR | 116152 q)\ts select count i by class from (select class, recVel:rv z from specObj) where recVel>(avg;recVel) fby class 84 36701520 We can infer from these results that quasi-stellar objects (QSOs) have the greatest recessional velocities of the three classes in this data set on average, whereas stars’ negative recessional velocity suggests they are moving towards us on average. Only roughly one third (242,519 of 678,098) of the galaxies in this data have an above-average recessional velocity by class, while over half of stars, and closer to two thirds of QSOs are above average in their respective classes. As previously mentioned, the positive impact of having the SDSS data available to the public has been massive. Since it was released, over 3,000 papers have been written on a range of different topics in the field, based on data from the SDSS. The SDSS is just one example of several projects, but it shows the benefit of making the data accessible to all interested parties, both professional and amateur. This impact throughout the wider astronomical community has pushed leaders in the field to continue this development – to further promote sky surveys by building bigger, more powerful telescopes. However, this doesn’t come without its problems, with data storage and computational processing power being pushed to the limits. kdb+ has the ability to scale to these extremes. Sean Keevey’s “A natural query interface for distributed systems” discusses how data distributed over several processes and machines can be seamlessly accessed from a single starting point by the end user. We previously mentioned how astronomy data can often come with a time domain, which would be well suited to kdb+ and how the data would be stored on disk. The data set in this example does not have a time domain, as it just provides information on given objects recorded once, but this certainly does not mean that it is not suited to kdb+. We could apply an attribute to this data for optimization, such as the sorted attribute to the class column in the example data set. The benefits of this would become more apparent as more files were being loaded in. Ciaran Gorman’s “Columnar database and query optimization” gives an in-depth explanation as to how they can be applied. Further reading¶ - The Atlantic: “How big data is changing astronomy (again)” - Cloud Computing and the Square Kilometre Array Authors¶ Andrew Magowan is a kdb+ consultant who has developed data and analytic systems for some of the world's largest financial institutions. Andrew is currently based in New York where he maintains a global tick capture application across a range of asset classes at a major investment bank. James Neill works as a kdb+ consultant for one of the world’s largest investment banks developing a range of applications. 
James has also been involved in the design of training courses in data science and machine learning as part of the First Derivatives Capital Markets Training Programme.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="83"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// @kind function // @category models // @desc Fit a vanilla torch model to data // @param data {dictionary} Containing training and testing data according to // keys `xtrn`ytrn`xtst`ytst // @param model {<} Model object being passed through the system (compiled) // @return {<} A vanilla fitted torch model models.torch.NN.fit:{[data;model] optimArg:enlist[`lr]!enlist 0.9; optimizer:models.i.Adam[model[`:parameters][];pykwargs optimArg]; criterion:models.i.neuralNet[`:BCEWithLogitsLoss][]; dataX:models.i.numpy[models.i.npArray[data`xtrain]][`:float][]; dataY:models.i.numpy[models.i.npArray[data`ytrain]][`:float][]; tensorXY:models.i.tensorData[dataX;dataY]; modelArgs:`batch_size`shuffle`num_workers!(count first data`xtrain;1b;0); if[.pykx.loaded;modelArgs:.pykx.topy each modelArgs]; dataLoader:models.i.dataLoader[tensorXY;pykwargs modelArgs]; nEpochs:10|`int$(count[data`xtrain]%1000); models.torch.torchFit[model;optimizer;criterion;dataLoader;nEpochs] } // @kind function // @category models // @desc Compile a keras model for binary problems // @param data {dictionary} Containing training and testing data according to // keys `xtrn`ytrn`xtst`ytst // @param seed {int} Seed used for initialising the same model // @return {<} The compiled torch models models.torch.NN.model:{[data;seed] models.torch.torchModel[count first data`xtrain;200] } // @kind function // @category models // @desc Predict test data values using a compiled model // for binary problem types // @param data {dictionary} Containing training and testing data according to // keys `xtrn`ytrn`xtst`ytst // @param model {<} Model object being passed through the system (fitted) // @return {boolean} The predicted values for a given model models.torch.NN.predict:{[data;model] dataX:models.i.numpy[models.i.npArray[data`xtest]][`:float][]; torchMax:.p.wrap last models.i.torch[`:max][model[dataX];1]`; if[.pykx.loaded;torchMax:torchMax[`:values]]; torchMax[`:detach][][`:numpy][][`:squeeze][]` } // Load required python modules models.i.torch:.p.import[`torch] models.i.npArray:.p.import[`numpy]`:array; models.i.Adam:.p.import[`torch.optim]`:Adam models.i.numpy:.p.import[`torch]`:from_numpy models.i.tensorData:.p.import[`torch.utils.data]`:TensorDataset models.i.dataLoader:.p.import[`torch.utils.data]`:DataLoader models.i.neuralNet:.p.import[`torch.nn] models.torch.torchFit:.p.get[`runmodel]; models.torch.torchModel:.p.get[`classifier]; ================================================================================ FILE: ml_automl_code_tests_utils.q SIZE: 2,239 characters ================================================================================ // code/tests/utils.q - Testing utilities // Copyright (c) 2021 Kx Systems Inc // // The following utilities are used to test that a function is returning the // expected error message or data. These functions will likely be provided in // some form within the test.q script provided as standard for the testing of // q and embedPy code. 
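// Hypothetical usage sketch of the helpers defined below (values illustrative
// only): passingTest applies a function and compares its result with the
// expected return; failingTest checks that the expected error is signalled.
//   passingTest[{x+y}; 1 2; 0b; 3]          / 1b - multivariant apply, result matches
//   failingTest[{'x}; "err"; 1b; "err"]     / 1b - unary apply, signals the expected error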
// @kind function // @category tests // @desc Ensure that a test that is expected to fail, does so with an // appropriate message // @param function {(<;proj)} The function or projection to be tested // @param data {any} Data to be applied to the function as an individual item // for unary functions or a list of variables for multivariant functions // @param applyType {boolean} Is function to be applied unary/multivariant // (1b/0b) // @param expectedError {string} Expected error message on failure of the // function // @return {boolean} Function errored with appropriate message (1b), function // failed inappropriately or passed (0b) failingTest:{[function;data;applyType;expectedError] // Is function to be applied unary or multivariant applyType:$[applyType;@;.]; failureFunction:{[err;ret](`TestFailing;ret;err~ret)}expectedError; functionReturn:applyType[function;data;failureFunction]; $[`TestFailing~first functionReturn;last functionReturn;0b] } // @kind function // @category tests // @desc Ensure that a test that is expected to pass, // does so with an appropriate return // @param function {(<;proj)} The function or projection to be tested // @param data {any} Data to be applied to the function as an individual item // for unary functions or a list of variables for multivariant functions // @param applyType {boolean} Is function to be applied unary/multivariant // (1b/0b) // @param expectedReturn {string} The data expected to be returned on execution // of the function with the supplied data // @return {boolean} Function returned the appropriate output (1b), function // failed or executed with incorrect output (0b) passingTest:{[function;data;applyType;expectedReturn] // Is function to be applied unary or multivariant applyType:$[applyType;@;.]; functionReturn:applyType[function;data]; expectedReturn~functionReturn } ================================================================================ FILE: ml_automl_code_utils.q SIZE: 24,710 characters ================================================================================ // code/utils.q - General utility functions // Copyright (c) 2021 Kx Systems Inc // // The purpose of this file is to house utilities that are useful across more // than one node or as part of the AutoML fit functionality and graph. \d .automl // @kind data // @category utility // @desc List of models to exclude // @type symbol[] utils.excludeList:`GaussianNB`LinearRegression // @kind function // @category utility // @desc Defaulted fitting and prediction functions for AutoML cross // validation and hyperparameter search. Both models fit on a training set // and return the predicted scores based on supplied scoring function. // @param func {<} Scoring function that takes parameters and data as input, // returns appropriate score // @param hyperParam {dictionary} Hyperparameters to be searched // @param data {float[]} Data split into training and testing sets of format // ((xtrn;ytrn);(xval;yval)) // @return {boolean[]|float[]} Predicted and true validation values utils.fitPredict:{[func;hyperParam;data] predicts:$[0h~type hyperParam; func[data;hyperParam 0;hyperParam 1]; @[.[func[][hyperParam]`:fit;data 0]`:predict;data[1]0]` ]; (predicts;data[1]1) } // @kind function // @category utility // @desc Load function from q. If function not found, try Python. 
// @param funcName {symbol} Name of function to retrieve // @return {<} Loaded function utils.qpyFuncSearch:{[funcName] func:@[get;funcName;()]; $[()~func;.p.get[funcName;<];func] } // @kind function // @category utility // @desc Load NLP library if requirements met // This function takes no arguments and returns nothing. Its purpose is to load // the NLP library if requirements are met. If not, a statement printed to // terminal. utils.loadNLP:{ notSatisfied:"Requirements for NLP models are not satisfied. gensim must be", " installed. NLP module will not be available."; $[(0~checkimport 3)&(::)~@[{system"l ",x};"nlp/nlp.q";{0b}]; .nlp.loadfile`:init.q; -1 notSatisfied; ]; } // @kind function // @category utility // @desc Used throughout the library to convert linux/mac file names to // windows equivalent // @param path {string} Linux style path // @return {string} Path modified to be suitable for windows systems utils.ssrWindows:{[path] $[.z.o like "w*";ssr[;"/";"\\"];]path } // Python plot functionality utils.plt:.p.import`matplotlib.pyplot; // @kind function // @category utility // @desc Split data into training and testing sets without shuffling // @param features {table} Unkeyed tabular feature data // @param target {number[]} Numerical target vector // @param size {float} Percentage of data in testing set // @return {dictionary} Data separated into training and testing sets utils.ttsNonShuff:{[features;target;size] `xtrain`ytrain`xtest`ytest! raze(features;target)@\:/:(0,floor n*1-size)_til n:count features } // @kind function // @category utility // @desc Return column value based on best model // @param modelTab {table} Models to apply to feature data // @param modelName {symbol} Name of current model // @param col {symbol} Column to search // @return {symbol} Column value utils.bestModelDef:{[modelTab;modelName;col] first?[modelTab;enlist(=;`model;enlist modelName);();col] } // @kind function // @category automl // @desc Retrieve feature and target data using information contained // in user-defined JSON file // @param method {dictionary} Retrieval methods for command line data. i.e. // `featureData`targetData!("csv";"ipc") // @return {dictionary} Feature and target data retrieved based on user // instructions utils.getCommandLineData:{[method] methodSpecification:cli.input`retrievalMethods; dict:key[method]!methodSpecification'[value method;key method]; if[count idx:where`ipc=method;dict[idx]:("J";"c";"c")$/:3#'dict idx]; dict:dict,'([]typ:value method); featureData:.ml.i.loadDataset dict`featureData; featurePath:dict[`featureData]utils.dataType method`featureData; targetPath:dict[`targetData]utils.dataType method`targetData; targetName:`$dict[`targetData]`targetColumn; // If data retrieval methods are the same for both feature and target data, // only load data once and retrieve the target from the table. Otherwise, // retrieve target data using .ml.i.loadDataset data:$[featurePath~targetPath; (flip targetName _ flip featureData;featureData targetName); (featureData;.ml.i.loadDataset[dict`targetData]$[`~targetName;::; targetName]) ]; `features`target!data } // @kind function // @category utility // @desc Create a prediction function to be used when applying a // previously fit model to new data. The function calls the predict method // of the defined model and passes in new feature data to make predictions. 
// @param config {dictionary} Information about a previous run of AutoML // including the feature extraction procedure used and the best model // produced // @param features {table} Tabular feature data to make predictions on // @returns {number[]} Predictions utils.generatePredict:{[config;features] original_print:utils.printing; utils.printing:0b; bestModel:config`bestModel; features:utils.featureCreation[config;features]; modelLibrary:config`modelLib; utils.printing:original_print; $[`sklearn~modelLibrary; bestModel[`:predict;<]features; modelLibrary in`keras`torch`theano; [features:enlist[`xtest]!enlist features; customName:"." sv string config`modelLib`modelFunc; get[".automl.models.",customName,".predict"][features;bestModel] ]; '"NotYetImplemented" ] } // @kind function // @category utility // @desc Apply feature extraction/creation and selection on provided // data based on a previous run // @param config {dictionary} Information about a previous run of AutoML // including the feature extraction procedure used and the best model // produced // @param features {table} Tabular feature data to make predictions on // @returns {table} Features produced using config feature extraction // procedures utils.featureCreation:{[config;features] sigFeats:config`sigFeats; extractType:config`featureExtractionType; if[`nlp~extractType;config[`savedWord2Vec]:1b]; if[`fresh~extractType; relevantFuncs:raze`$distinct{("_" vs string x)1}each sigFeats; appropriateFuncs:1!select from 0!.ml.fresh.params where f in relevantFuncs; config[`functions]:appropriateFuncs ]; features:dataPreprocessing.node.function[config;features;config`symEncode]; features:featureCreation.node.function[config;features]`features; if[not all newFeats:sigFeats in cols features; n:count newColumns:sigFeats where not newFeats; features:flip flip[features],newColumns!((n;count features)#0f),()]; flip value flip sigFeats#"f"$0^features }</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="84"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// @kind function // @category rs // @desc Cross validated parameter random search applied to data with // ascending split indices // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function that takes parameters and data as input // and returns a score // @param p {dictionary} Dictionary of hyperparameters to be searched with // format `typ`randomState`n`p where typ is the type of search // (random/sobol), randomState is the seed, n is the number of // hyperparameter sets and p is a dictionary of parameters - see // documentation for more info. // @param tstTyp {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Scores for hyperparameter sets on each of // the k folds for all values of h and additionally returns the best // hyperparameters and score on the holdout set for 0 < h <=1. 
rs.kfSplit:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.kfSplit] // @kind function // @category rs // @desc Cross validated parameter random search applied to data with // shuffled split indices // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function that takes parameters and data as input // and returns a score // @param p {dictionary} Dictionary of hyperparameters to be searched with // format `typ`randomState`n`p where typ is the type of search // (random/sobol), randomState is the seed, n is the number of // hyperparameter sets and p is a dictionary of parameters - see // documentation for more info. // @param tstTyp {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Scores for hyperparameter sets on each of // the k folds for all values of h and additionally returns the best // hyperparameters and score on the holdout set for 0 < h <=1. rs.kfShuff:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.kfShuff] // @kind function // @category rs // @desc Cross validated parameter random search applied to data with // an equi-distributions of targets per fold // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function that takes parameters and data as input // and returns a score // @param p {dictionary} Dictionary of hyperparameters to be searched with // format `typ`randomState`n`p where typ is the type of search // (random/sobol), randomState is the seed, n is the number of // hyperparameter sets and p is a dictionary of parameters - see // documentation for more info. // @param tstTyp {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Scores for hyperparameter sets on each of // the k folds for all values of h and additionally returns the best // hyperparameters and score on the holdout set for 0 < h <=1. rs.kfStrat:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.kfStrat] // @kind function // @category rs // @desc Cross validated parameter random search applied to roll // forward time-series sets // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function that takes parameters and data as input // and returns a score // @param p {dictionary} Dictionary of hyperparameters to be searched with // format `typ`randomState`n`p where typ is the type of search // (random/sobol), randomState is the seed, n is the number of // hyperparameter sets and p is a dictionary of parameters - see // documentation for more info. // @param tstTyp {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. 
If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Scores for hyperparameter sets on each of // the k folds for all values of h and additionally returns the best // hyperparameters and score on the holdout set for 0 < h <=1. rs.tsRolls:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.tsRolls] // @kind function // @category rs // @desc Cross validated parameter random search applied to chain // forward time-series sets // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function that takes parameters and data as input // and returns a score // @param p {dictionary} Dictionary of hyperparameters to be searched with // format `typ`randomState`n`p where typ is the type of search // (random/sobol), randomState is the seed, n is the number of // hyperparameter sets and p is a dictionary of parameters - see // documentation for more info. // @param tstTyp {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Scores for hyperparameter sets on each of // the k folds for all values of h and additionally returns the best // hyperparameters and score on the holdout set for 0 < h <=1. rs.tsChain:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.tsChain] // @kind function // @category rs // @desc Cross validated parameter random search applied to percentage // split dataset // @param pc {float} (0-1) representing percentage of validation data // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function that takes parameters and data as input // and returns a score // @param p {dictionary} Dictionary of hyperparameters to be searched with // format `typ`randomState`n`p where typ is the type of search // (random/sobol), randomState is the seed, n is the number of // hyperparameter sets and p is a dictionary of parameters - see // documentation for more info. // @param tstTyp {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Scores for hyperparameter sets on each of // the k folds for all values of h and additionally returns the best // hyperparameters and score on the holdout set for 0 < h <=1. 
rs.pcSplit:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.pcSplit] // @kind function // @category rs // @desc Cross validated parameter random search applied to randomly // shuffled data and validated on a percentage holdout set // @param pc {float} (0-1) representing percentage of validation data // @param k {int} Number of folds // @param n {int} Number of repetitions // @param features {any[][]} Matrix of features // @param target {any[]} Vector of targets // @param function {fn} Function that takes parameters and data as input // and returns a score // @param p {dictionary} Dictionary of hyperparameters to be searched with // format `typ`randomState`n`p where typ is the type of search // (random/sobol), randomState is the seed, n is the number of // hyperparameter sets and p is a dictionary of parameters - see // documentation for more info. // @param tstTyp {float} Size of the holdout set used in a fitted grid // search, where the best model is fit to the holdout set. If 0 the function // will return scores for each fold for the given hyperparameters. If // negative the data will be shuffled prior to designation of the holdout set // @return {table|list} Scores for hyperparameter sets on each of // the k folds for all values of h and additionally returns the best // hyperparameters and score on the holdout set for 0 < h <=1. rs.mcSplit:hp.i.search hp.i.xvScore[hp.i.rsGen;xv.mcSplit] // Multi-processing functionality // Load multi-processing modules loadfile`:util/mproc.q loadfile`:util/pickle.q // If multiple processes are available, multi-process cross validation library if[0>system"s"; multiProc.init[abs system"s"]enlist".ml.loadfile`:util/pickle.q" ]; xv.picklewrap:{picklewrap[(0>system"s")&.p.i.isw x]x} ================================================================================ FILE: ml_nlp_code_cluster.q SIZE: 12,956 characters ================================================================================ // code/cluster.q - Nlp clustering utilities // Copyright (c) 2021 Kx Systems Inc // // Clustering utilites for textual data \d .nlp // @private // @kind function // @category nlpClusteringUtility // @desc Extract the keywords from a list of documents or keyword // dictionary // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @returns {dictionary[]} Keyword dictionaries cluster.i.asKeywords:{[parsedTab] keyWords:$[-9=type parsedTab[0]`keywords;parsedTab;parsedTab`keywords]; i.fillEmptyDocs keyWords } // @private // @kind function // @category nlpClusteringUtility // @desc Split the document into clusters using kmeans // @param iters {long} The number of times to iterate the refining step // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param clusters {long} Cluster indices // @returns {long[][]} The documents' indices, grouped into clusters cluster.i.bisect:{[iters;parsedTab;clusters] idx:i.minIndex cluster.MSE each parsedTab clusters; cluster:clusters idx; (clusters _ idx),cluster@/:cluster.kmeans[parsedTab cluster;2;iters] } // @private // @kind function // @category nlpClusteringUtility // @desc Apply k-means clustering to a document // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @param clusters {long[]} Cluster indices // @returns {long[][]} The documents' indices, grouped into clusters cluster.i.kmeans:{[parsedTab;clusters] centroids:(i.takeTop[3]i.fastSum@)each parsedTab clusters; 
value group i.maxIndex each centroids compareDocs\:/:parsedTab } // @private // @kind function // @category nlpClusteringUtility // @desc Find nearest neighbor of document // @param centroids {dictionary[]} Centroids as keyword dictionaries // @param parsedTab {table} A parsed document containing keywords and their // associated significance scores // @returns {long[][]} Document indices cluster.i.findNearestNeighbor:{[centroids;doc] similarities:compareDocs[doc] each centroids; m:max similarities; $[m>0f;similarities?m;0n] } // @private // @kind function // @category nlpClusteringUtility // @desc Merge any clusters with significant overlap into a single // cluster // @param clusters {any[][]} Cluster indices // @returns {any[][]} Appropriate clusters merged together cluster.i.mergeOverlappingClusters:{[clusters] counts:count each clusters; similar:cluster.i.similarClusters[clusters;counts]each til count clusters; // Merge any cluster that has at least one similar cluster // A boolean vector of which clusters will be getting merged merge:1<count each similar; // Filter out clusters of 1, and remove duplicates similarClusters:distinct desc each similar where merge; // Do the actual merging of the similar clusters newClusters:(distinct raze@)each clusters similarClusters; // Clusters not involved in any merge // This can't just be (not merge), as that only drops the larger cluster, // not the smaller one, in each merge untouchedClusters:(til count clusters)except raze similarClusters; clusters[untouchedClusters],newClusters } // @private // @kind function // @category nlpClusteringUtility // @desc Group together clusters that share over 50% of their elements // @param clusters {any[][]} Cluster indices // @param counts {long} Count of each cluster // @param idx {long} Index of cluster // @return {any[][]} Clusters grouped together cluster.i.similarClusters:{[clusters;counts;idx] superset:counts=sum each clusters[idx]in/:clusters; similar:.5<=avg each clusters[idx]in/:clusters; notSmaller:(count clusters idx)>=count each clusters; where superset or(similar & notSmaller) } // @private // @kind function // @category nlpClusteringUtility // @desc Normalize the columns of a matrix so they sum to 1 // @param matrix {float[][]} Numeric matrix of values // @returns {float[][]} The normalized columns cluster.i.columnNormalize:{[matrix] 0f^matrix%\:sum matrix }</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="85"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// Create FIFO and unzip file into it, return FIFO name readintofifo:{[filename] fifo:"/tmp/logfifo",string .z.i; fifostr:"mkfifo ",fifo,";gunzip -cd ",filename," > ",fifo," &"; @[system;fifostr;{.lg.e[`replay;"Failed to read log into named pipe"]}]; fifo }; // upd functions down here realupd:{[f;t;x] // increment the tablecounts tablecounts[t]+::count first x; // run the supplied function in the error trap .[f;(t;x);{[t;x;e] errorcounts[t]+::count first x}[t;x]]; }[.replay.upd] // amend the upd function to filter based on the table list if[(not tablelist~enlist `all) and not segmentedmode; realupd:{[f;t;x] if[t in .replay.tablestoreplay; f[t;x]]}[realupd]] // amend to do chunked saves if[messagechunks < 0W; realupd:{[f;t;x] f[t;x]; checkcount[hdbdir;replaydate;1;tempdir]}[realupd]] 
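// The realupd definitions above layer behaviour by projecting each new lambda
// onto the previous definition, so the most recently added wrapper runs first
// and delegates inward. A standalone illustration of the same pattern
// (hypothetical names, not part of TorQ):
//   f:{[t;x] t upsert x}                              / base handler
//   f:{[g;t;x] if[t in `trade`quote; g[t;x]]}[f]      / wrap: filter tables, then call previous f
//   f:{[g;t;x] g[t;x]; counts[t]+::count first x}[f]  / wrap again: call previous, then count rows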
initialupd:{[t;x] // spin through the first X messages $[msgcount < (firstmessage - 1); msgcount+::1; // Once we reach the correct message, reset the upd function @[`.;`upd;:;.replay.realupd]] } // extract user defined row counts mergemaxrows:{[tabname] mergenumrows^mergenumtab[tabname]}; // post replay function for merge replay, invoked after all the tables have been written down for a given log file postreplaymerge:{[td;p;h] .os.md[.os.pth[string .Q.par[td;p;`]]]; // ensures directory exists before removed mergelimits:(tabsincountorder[.replay.tablestoreplay],())!$[.merge.mergebybytelimit;(count tabsincountorder[.replay.tablestoreplay])#mergenumbytes;({[x] mergenumrows^mergemaxrows[x]}tabsincountorder[.replay.tablestoreplay])],(); // merge the tables from each partition in the tempdir together merge[td;p;;mergelimits;h] each tabsincountorder[.replay.tablestoreplay]; .os.deldir .os.pth[string .Q.par[td;p;`]]; // delete the contents of tempdir after merge completion } // function to upsert to specified directory upserttopartition:{[h;dir;tablename;tabdata;pt;expttype;expt] dirpar:.Q.par[dir;pt;`$string first expt]; directory:` sv dirpar,tablename,`; // make directories for tables if they don't exist if[count tabpar:tabsincountorder[.replay.tablestoreplay] except key dirpar; .lg.o[`dir;"creating directories under ",1_string dirpar]; tabpar:tabpar except `heartbeat`logmsg; .[{[d;h;t](` sv d,t,`) set .Q.en[h;0#value t]};] each dirpar,'h,'tabpar]; .lg.o[`save;"saving ",(string tablename)," data to partition ",string directory]; .[ upsert; (directory;r:update `sym!sym from ?[tabdata;{(x;y;(),z)}[in;;]'[expttype;expt];0b;()]); {[e] .lg.e[`savetablesbypart;"Failed to save table to disk : ",e];'e}]; /-key in partsizes are directory to partition, need to drop training slash in directory key .merge.partsizes[first ` vs directory]+:(count r;-22!r); }; savetablesbypart:{[dir;pt;tablename;h] arows: count value tablename; .lg.o[`rowcheck;"the ",(string tablename)," table consists of ", (string arows), " rows"]; // get additional partition(s) defined by parted attribute in sort.csv extrapartitiontype:.merge.getextrapartitiontype[tablename]; // check each partition type actually is a column in the selected table .merge.checkpartitiontype[tablename;extrapartitiontype]; // enumerate data to be upserted enumdata:update (`. `sym)?sym from .Q.en[h;value tablename]; // get list of distinct combiniations for partition directories extrapartitions:(`. `sym)?.merge.getextrapartitions[tablename;extrapartitiontype]; .lg.o[`save;"enumerated ",(string tablename)," table"]; // upsert data to specific partition directory upserttopartition[h;dir;tablename;enumdata;pt;extrapartitiontype] each extrapartitions; // empty the table .lg.o[`delete;"deleting ",(string tablename)," data from in-memory table"]; @[`.;tablename;0#]; // run a garbage collection (if enabled) if[gc;.gc.run[]]; }; merge:{[dir;pt;tablename;mergelimits;h] // get int partitions intpars:asc key ` sv dir,`$string pt; // list of enumerated partitions 0 1 2 3... k:key each intdir:.Q.par[hsym dir;pt] each intpars; // list of table names if[0=count raze k inter\: tablename; :()]; // get list of partition directories containing specified table partdirs:` sv' (intdir,'parts) where not ()~/:parts:k inter\: tablename; // get each of the directories that hold the table // permanent storage destination, where data being merged too dest:.Q.par[h;pt;tablename]; // exit function if no subdirectories are found if[0=count partdirs; :()]; // if no table data set empty table. 
If data to merge, merge with correct merge function $[0 = count partdirs inter exec ptdir from .merge.partsizes; [.lg.w[`merge;"no records for ", string[tablename]]; (` sv dest,`) set @[.Q.en[h;value tablename];.merge.getextrapartitiontype[tablename];`p#]; ]; [$[mergemethod~`part; [dest:` sv .Q.par[h;pt;tablename],`; // provides path to where to move data to /-get chunks to partitions to merge in batch partchunks:.merge.getpartchunks[partdirs;mergelimits[tablename]]; .merge.mergebypart[tablename;dest]'[partchunks]; ]; mergemethod~`col; [.merge.mergebycol[(tablename;value tablename);dest]'[partdirs]; /- merging data by column does not create .d file - set it here after merge .lg.o[`merge;"setting .d file"]; (` sv dest,`.d) set cols value tablename; ]; .merge.mergehybrid[(tablename;value tablename);dest;partdirs;mergelimits[tablename]] ] ] ]; .lg.o[`merge;"deleting ", string[tablename], " from temp storage"]; .os.deldir each .os.pth each string partdirs; // set the attributes .lg.o[`merge;"setting attributes"]; @[dest;;`p#] each .merge.getextrapartitiontype[tablename]; .lg.o[`merge;"merge complete"]; // run a garbage collection (if enabled) if[gc;.gc.run[]]; }; // Return log file if it exists and not in segmented mode getlogfile:{ if[.replay.segmentedmode;.lg.e[`getlogfile;m:"Segmented mode requires tplogdir."];'m]; if[()~key hsym f:.replay.tplogfile;.lg.e[`getlogfile;m:"Specified tplogfile ",string[f]," does not exist"];'m]; enlist hsym f }; // Return contents of log directory if it exists getlogdir:{ if[()~key hsym d:.replay.tplogdir;.lg.e[`getlogdir;m:"Specified log directory ",string[d]," does not exist"];'m]; if[d like "*.gz";.lg.e[`getlogdir;m:"Zipped log directories not supported."];'m]; $[.replay.segmentedmode;.replay.getstplogs[d];.Q.dd[logdir;] each key logdir:hsym d] }; // Use STP meta table and tplogdir to build log names getstplogs:{[logdir] // If trying to replay zipped files on Windows, error out winzip:(.z.o like "w*") and z:`stpmeta.gz in key d:hsym logdir; if[winzip;.lg.e[`replaylog;m:"Zipped log files cannot be directly replayed on Windows"];'m]; // If meta table is zipped, assume all other logs are zipped as well and build log names accordingly if[z;system "gunzip ",1_string .Q.dd[d;`stpmeta.gz]]; metatable:@[get;.Q.dd[d;`stpmeta];{.lg.e[`getstpmeta;m:"Log directory must contain valid STP meta table"];'m}]; if[z;system "gzip ",1_string .Q.dd[d;`stpmeta]]; names:exec distinct logname from metatable where any each tbls in .replay.tablestoreplay; .Q.dd[d;] each $[z;.Q.dd[;`gz];::] each last each ` vs' names }; // Set up log replay list and clean HDB if necessary, kick off replay initandrun:{ if[all not null .replay[`tplogfile`tplogdir];.lg.e[`getlogs;m:"Can't pass in log file and directory."];'m]; .lg.o[`initandrun;"Initialising replay settings."]; .replay.tablestoreplay:$[`all~first .replay.tablelist;tables[];.replay.tablelist,()]; .replay.logstoreplay:$[not null .replay.tplogfile;.replay.getlogfile[];.replay.getlogdir[]]; if[not count r:.replay.logstoreplay;.lg.e[`initandrun;m:"No log files found"];'m]; // If in segmented mode, get replay date and clean HDB once if[.replay.segmentedmode; // Pull out the date from the STP log file name - *_YYYYMMDDhhmmss (+ .gz if zipped) .replay.replaydate:first l:"D"$$[first[r] like "*.gz";-9_-17#;-6_-14#] each string r; if[not 1=count distinct l;.lg.e[`replay;m:"Cannot replay logs from different dates in segmented mode!"];'m]; if[.replay.clean;.replay.cleanhdb .replay.replaydate] ]; // Replay all logs and exit 
.lg.o[`initandrun;"Replaying the following log(s): ",csv sv 1_'string .replay.logstoreplay]; .replay.pathlist:()!(); .replay.replaylog each .replay.logstoreplay; if[sortafterreplay;applysortandattr[.replay.pathlist]]; if[partandmerge;postreplaymerge[tempdir;.replay.replaydate;hdbdir]]; .lg.o[`replay;"replay complete"]; if[.replay.exitwhencomplete;exit 0]; }; \d . // Load the sort csv and kick off replay if auto-running .sort.getsortcsv[.replay.sortcsv] if[.replay.autoreplay;.replay.initandrun[]]; ================================================================================ FILE: TorQ_code_processes_tickerplant.q SIZE: 3,457 characters ================================================================================ / q tick.q sym . -p 5001 </dev/null >foo 2>&1 & /2014.03.12 remove license check /2013.09.05 warn on corrupt log /2013.08.14 allow <endofday> when -u is set /2012.11.09 use timestamp type rather than time. -19h/"t"/.z.Z -> -16h/"n"/.z.P /2011.02.10 i->i,j to avoid duplicate data if subscription whilst data in buffer /2009.07.30 ts day (and "d"$a instead of floor a) /2008.09.09 .k -> .q, 2.4 /2008.02.03 tick/r.k allow no log /2007.09.03 check one day flip /2006.10.18 check type? /2006.07.24 pub then log /2006.02.09 fix(2005.11.28) .z.ts end-of-day /2006.01.05 @[;`sym;`g#] in tick.k load /2005.12.21 tick/r.k reset `g#sym /2005.12.11 feed can send .u.endofday /2005.11.28 zero-end-of-day /2005.10.28 allow`time on incoming /2005.10.10 zero latency "kdb+tick 2.8 2014.03.12" /q tick.q SRC [DST] [-p 5010] [-o h] /load schema from params, default to "sym.q" .proc.loadf[(src:$[`schemafile in key .proc.params;raze .proc.params`schemafile;"sym"]),".q"]; .proc.loadf[getenv[`KDBCODE],"/common/u.q"]; .proc.loadf[getenv[`KDBCODE],"/common/timezone.q"]; .proc.loadf[getenv[`KDBCODE],"/common/eodtime.q"]; .proc.loadf[getenv[`KDBCODE],"/common/datadog.q"]; \d . upd:{[tab;x] .u.icounts[tab]+::count first x;}</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="86"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">// @kind function // @category runModels // @desc Seeded cross validation function, designed to ensure that // models will be consistent from run to run in order to accurately assess // the benefit of updates to parameters. 
// @param tts {dictionary} Feature and target data split into training/testing // sets // @param config {dictionary} Information relating to the current run of AutoML // @param modelTab {table} Models to be applied to feature data // @return {boolean[]|float[]} Predictions and associated actual values for // each cross validation fold runModels.xValSeed:{[tts;config;modelTab] xTrain:tts`xtrain; yTrain:tts`ytrain; numReps:1; scoreFunc:get[config`predictionFunction]modelTab`minit; seedModel:`seed~modelTab`seed; isSklearn:`sklearn~modelTab`lib; // Seed handled differently for sklearn and keras seed:$[not seedModel; ::; isSklearn; enlist[`random_state]!enlist config`seed; (config`seed;modelTab`fnc) ]; $[seedModel&isSklearn; // Grid search required to incorporate the random state definition [gsFunc:utils.qpyFuncSearch config`gridSearchFunction; numFolds:config`gridSearchArgument; val:enlist[`val]!enlist 0; first value gsFunc[numFolds;numReps;xTrain;yTrain;scoreFunc;seed;val] ]; // Otherwise a vanilla cross validation is performed [xvFunc:utils.qpyFuncSearch config`crossValidationFunction; numFolds:config`crossValidationArgument; xvFunc[numFolds;numReps;xTrain;yTrain;scoreFunc seed] ] ] } // @kind function // @category runModels // @desc Extract the scoring function to be applied for model selection // @param config {dictionary} Information relating to the current run of AutoML // @param modelTab {table} Models to be applied to feature data // @return {<} Scoring function appropriate to the problem being solved runModels.scoringFunc:{[config;modelTab] problemType:$[`reg in distinct modelTab`typ;"Regression";"Classification"]; scoreFunc:config`$"scoringFunction",problemType; printScore:utils.printDict[`scoreFunc],string scoreFunc; config[`logFunc]printScore; scoreFunc } // @kind function // @category runModels // @desc Order average predictions returned by models // @param modelTab {table} Models to be applied to feature data // @param scoreFunc {<} Scoring function applied to predictions // @param orderFunc {<} Ordering function applied to scores // @param predictions {boolean[]|float[]} Predictions made by model // @return {dictionary} Scores returned by each model in appropriate order runModels.orderModels:{[modelTab;scoreFunc;orderFunc;predicts] avgScore:avg each scoreFunc .''predicts; scoreDict:modelTab[`model]!avgScore; orderFunc scoreDict } // @kind function // @category runModels // @desc Fit best model on holdout set and score predictions // @param scores {dictionary} Scores returned by each model // @param tts {dictionary} Feature and target data split into training/testing // sets // @param modelTab {table} Models to be applied to feature data // @param scoreFunc {<} Scoring function applied to predictions // @param config {dictionary} Information related to the current run of AutoML // @return {dictionary} Fitted model and scores along with time taken runModels.bestModelFit:{[scores;tts;modelTab;scoreFunc;config] config[`logFunc]scores; holdoutTimeStart:.z.T; bestModel:first key scores; printModel:utils.printDict[`bestModel],string bestModel; config[`logFunc]printModel; modelLib:first exec lib from modelTab where model=bestModel; fitScore:$[modelLib in key models; runModels.i.customModel[bestModel;tts;modelTab;scoreFunc;config]; runModels.i.sklModel[bestModel;tts;modelTab;scoreFunc] ]; holdoutTime:.z.T-holdoutTimeStart; returnDict:`holdoutTime`bestModel!holdoutTime,bestModel; fitScore,returnDict } // @kind function // @category runModels // @desc Create dictionary of meta data used // 
@param holdoutRun {dictionary} Information from fitting/scoring on the // holdout set // @param scores {dictionary} Scores returned by each model // @param scoreFunc {<} Scoring function applied to predictions // @param xValTime {time} Time taken to apply xval functions to data // @param modelTab {table} Models to be applied to feature data // @param modelName {string} Name of best model // @return {dictionary} Metadata to be contained within the end reports runModels.createMeta:{[holdoutRun;scores;scoreFunc;xValTime;modelTab;modelName] modelLib:first exec lib from modelTab where model=modelName; modelFunc:first exec fnc from modelTab where model=modelName; holdScore:holdoutRun`score; holdTime:holdoutRun`holdoutTime; `holdoutScore`modelScores`metric`xValTime`holdoutTime`modelLib`modelFunc! (holdScore;scores;scoreFunc;xValTime;holdTime;modelLib;modelFunc) } ================================================================================ FILE: ml_automl_code_nodes_runModels_init.q SIZE: 260 characters ================================================================================ // code/nodes/runModels/init.q - Load runModels node // Copyright (c) 2021 Kx Systems Inc // // Load code for runModels node \d .automl loadfile`:code/nodes/runModels/utils.q loadfile`:code/nodes/runModels/funcs.q loadfile`:code/nodes/runModels/runModels.q ================================================================================ FILE: ml_automl_code_nodes_runModels_runModels.q SIZE: 1,783 characters ================================================================================ // code/nodes/runModels/runModels.q - Run models // Copyright (c) 2021 Kx Systems Inc // // Select the most promising model from the list of provided models for the // user defined problem. This is done in a cross validated manner, with the // best model selected based on how well it generalizes to new data prior to // the application of grid/pseduo-random/sobol-random search optimization. \d .automl // @kind function // @category node // @desc Runs models from modelTable returns best one // @param config {dictionary} Location and method by which to retrieve the data // @param tts {dictionary} Feature and target data split into training/testing // sets // @param modelTab {table} Potential models to be applied to feature data // @return {dictionary} Best model returned along with name of model runModels.node.function:{[config;tts;modelTab] runModels.setSeed config; holdoutSet:runModels.holdoutSplit[config;tts]; startTime:.z.T; predictions:runModels.xValSeed[holdoutSet;config]each modelTab; scoreFunc:runModels.scoringFunc[config;modelTab]; orderFunc:runModels.jsonParse scoreFunc; scores:runModels.orderModels[modelTab;scoreFunc;orderFunc;predictions]; totalTime:.z.T-startTime; holdoutRun:runModels.bestModelFit[scores;holdoutSet;modelTab;scoreFunc; config]; metaData:runModels.createMeta[holdoutRun;scores;scoreFunc;totalTime;modelTab; holdoutRun`bestModel]; returnKeys:`orderFunc`bestModel`bestScoringName`modelMetaData; returnVals:(orderFunc;holdoutRun`model;holdoutRun`bestModel;metaData); returnKeys!returnVals } // Input information runModels.node.inputs:`config`ttsObject`models!"!!+" // Output information runModels.i.k:`orderFunc`bestModel`bestScoringName`modelMetaData; runModels.node.outputs:runModels.i.k!"<<s!" 
================================================================================ FILE: ml_automl_code_nodes_runModels_utils.q SIZE: 2,712 characters ================================================================================ // code/nodes/runModels/utils.q - Utilities for the runModels node // Copyright (c) 2021 Kx Systems Inc // // Utility functions specific the the runModels node implementation \d .automl // @kind function // @category runModelsUtility // @desc Extraction of data from a file // @param filePath {string} File path from which to extract the data from // @return {dictionary} parsed from file runModels.i.readFile:{[filePath] key(!).("S=;")0:filePath } // @kind function // @category runModelsUtility // @desc Fit and score custom model to holdout set // @param bestModel {symbol} The best scorinng model from xval // @param tts {dictionary} Feature and target data split into training // and testing set // @param modelTab {table} Models to be applied to feature data // @param scoreFunc {<} Scoring metric applied to evaluate the model // @param cfg {dictionary} Configuration information assigned by the // user and related to the current run // @return {dictionary} The fitted model along with the predictions runModels.i.customModel:{[bestModel;tts;modelTab;scoreFunc;cfg] modelLib:first exec lib from modelTab where model=bestModel; modelType:first exec typ from modelTab where model=bestModel; if[(`keras~modelLib)&`multi~modelType; tts[`ytrain]:runModels.i.prepMultiTarget tts ]; modelDef:utils.bestModelDef[modelTab;bestModel]each`lib`fnc; customStr:".automl.models.",sv[".";string modelDef],"."; model:get[customStr,"model"][tts;cfg`seed]; modelFit:get[customStr,"fit"][tts;model]; modelPred:get[customStr,"predict"][tts;modelFit]; score:scoreFunc[modelPred;tts`ytest]; `model`score!(modelFit;score) } // @kind function // @category runModelsUtility // @desc One hot encodes target values and converts to Numpy array // @param tts {dictionary} Feature and target data split into training // and testing set // @return {dictionary} Preprocessed target values runModels.i.prepMultiTarget:{[tts] models.i.npArray flip value .ml.i.oneHot tts`ytrain } // @category runModelsUtility // @desc Fit and score sklearn model to holdout set // @param bestModel {symbol} The best scorinng model from xval // @param tts {dictionary} Feature and target data split into training // and testing set // @param modelTab {table} Models to be applied to feature data // @param scoreFunc {<} Scoring metric applied to evaluate the model // @return {dictionary} The fitted model along with the predictions runModels.i.sklModel:{[bestModel;tts;modelTab;scoreFunc] model:utils.bestModelDef[modelTab;bestModel;`minit][][]; model[`:fit]. 
tts`xtrain`ytrain; modelPred:model[`:predict][tts`xtest]`; score:scoreFunc[modelPred;tts`ytest]; `model`score!(model;score) } ================================================================================ FILE: ml_automl_code_nodes_saveGraph_funcs.q SIZE: 3,181 characters ================================================================================ // code/nodes/saveGraph/funcs.q - Functions called in saveGraph node // Copyright (c) 2021 Kx Systems Inc // // Definitions of the main callable functions used in the application of // .automl.saveGraph \d .automl // @kind function // @category saveGraph // @desc Save down target distribution plot // @param params {dictionary} All data generated during the process // @param savePath {string} Path where images are to be saved // return {::} Save target distribution plot to appropriate location saveGraph.targetPlot:{[params;savePath] problemTyp:string params[`config;`problemType]; plotFunc:".automl.saveGraph.i.",problemTyp,"TargetPlot"; get[plotFunc][params;savePath]; } // @kind function // @category saveGraph // @desc Save down result plot depending on problem type // @param params {dictionary} All data generated during the process // @param savePath {string} Path where images are to be saved // return {::} Save confusion matrix or residual plot to appropriate location saveGraph.resultPlot:{[params;savePath] problemTyp:params[`config;`problemType]; $[`class~problemTyp; saveGraph.confusionMatrix; saveGraph.residualPlot ][params;savePath] } // @kind function // @category saveGraph // @desc Save down confusion matrix // @param params {dictionary} All data generated during the process // @param savePath {string} Path where images are to be saved // return {::} Save confusion matrix to appropriate location saveGraph.confusionMatrix:{[params;savePath] confMatrix:params[`analyzeModel;`confMatrix]; modelName:params`modelName; classes:`$string key confMatrix; saveGraph.i.displayConfMatrix[value confMatrix;classes;modelName;savePath] } // @kind function // @category saveGraph // @desc Save down residual plot // @param params {dictionary} All data generated during the process // @param savePath {string} Path where images are to be saved // return {::} Save residual plot to appropriate location saveGraph.residualPlot:{[params;savePath] residuals:params[`analyzeModel;`residuals]; modelName:params`modelName; tts:params`ttsObject; saveGraph.i.plotResiduals[residuals;tts;modelName;savePath] } // @kind function // @category saveGraph // @desc Save down impact plot // @param params {dictionary} All data generated during the process // @param savePath {string} Path where images are to be saved // return {::} Save impact plot to appropriate location saveGraph.impactPlot:{[params;savePath] modelName:params`modelName; sigFeats:params`sigFeats; impact:params[`analyzeModel;`impact]; // Update impact dictionary to include column names instead of just indices updKeys:sigFeats key impact; updImpact:updKeys!value impact; saveGraph.i.plotImpact[updImpact;modelName;savePath]; }</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="87"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Using modified .z functions to trace, monitor and control execution¶ Every client interaction with a kdb+ server is handled by one of the p ? 
functions you’ll find in the system namespace .z . These functions have reasonable, simple defaults that work fine right out of the box. What we’re doing here is taking advantage of the fact that they’re just functions, allowing you to overwrite them with your own custom code to show or modify what’s happening. The utility scripts in github.com/simongarland/dotz are examples of how to do this, and these files are described in detail below. In all of the examples the code to wrap up existing definitions looks complicated. The reason is that these are general scripts and so a combination of them could be loaded into applications with pre-existing custom .z.p ? definitions. The wrapping code protects these definitions, but in a particular application you can probably simply replace or extend existing definitions rather than wrapping them. Take it for a spin¶ The simplest way to get a feeling for what’s going on is to try it out. Start up two kdb+ sessions, load traceusage.q into one of them – and then talk to it from the other server. Watch the output in the traceusage session. Vanilla server session: q)h:hopen 5001 q)h 3 q)h"2+2 3" 4 5 q)h"2 3 4+2 3" 'length q)hclose h traceusage server session: $ q traceusage.q -p 5001 … 2008.05.09 12:38:10.367 ms:0.002003799 m+:0K pw a:localhost u: w:4 (`simon;"***") 2008.05.09 12:38:10.370 ms:0.003025343 m+:0K po a:localhost u:simon w:4 4 2008.05.09 12:38:17.151 ms:0.02098095 m+:0K pg a:localhost u:simon w:4 2+2 3 *2008.05.09 12:38:25.438 (error:length) pg a:localhost u:simon w:4 2 3 4+2 3 2008.05.09 12:38:33.246 ms:0.002986053 m+:0K pc a:192.168.1.34 u:simon w:0 4 (On non-Windows OSs the error line will be in glorious 1980s style color.) The gory details¶ What the individual files do, and how to use them. The toolkit¶ The “tools” you have to work with are the p ? functions from .z : .z.po , .z.pc , .z.pw , .z.pg , .z.ps , .z.ph , .z.pp and .z.exit . Combined with the .z variables .z.a , .z.u and .z.w which are always set to the values of the client during execution of the .z.p ? function. Depending on how the function is called, additional information may be provided as arguments to the .z.p ? functions (user ID and password for .z.pw , browser environment for .z.ph and .z.pp ). By default, execution is done using value so strings or symbol argument lists can be tested in a console. Q for Mortals: §11.6 Interprocess Communication saveorig.q ¶ This script just saves original values of things like .z.pg so you can revert to original definitions without having to restart the task. This is made a little more complicated by the way some of the default definitions aren’t materialized in the user workspace. For example by default .z.pg is just {value x} but that’s run in the kdb+ executable. The default values are created explicitly if need be. .dotz.exit.ORIG:.z.exit:@[.:;`.z.exit;{;}]; .dotz.pg.ORIG:.z.pg:@[.:;`.z.pg;{.:}]; .dotz.ps.ORIG:.z.ps:@[.:;`.z.ps;{.:}]; … Other functions and variables shared between multiple scripts (such as debug output level .debug.LEVEL , or the .z.a IP address <-> hostname cache .dotz.IPA ) are defined here too. Although it would be simpler to embed this setup code in each script, allowing them to be standalone, one tires of the cut’n’paste forays required by every tiny change. Note After the various state variables have been defined the script saveorig.custom.q is loaded, if found, allowing you to customise the setup without needing to have a modified version of the saveorig script. 
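To make the wrapping idea concrete, here is a minimal sketch, independent of the dotz scripts (the name .tmp.pg is illustrative, not one the scripts use): keep the current .z.pg, overwrite it with a version that dumps the elapsed time, caller details and input for every synchronous call, and restore the saved definition when finished.
/ sketch only - .tmp.pg is an illustrative name, not part of the dotz scripts
.tmp.pg:@[.:;`.z.pg;{.:}]                        / current handler, default is just value
.z.pg:{st:.z.p; r:.tmp.pg x; 0N!(.z.p-st;.z.a;.z.u;.z.w;x); r}
.z.pg:.tmp.pg                                    / revert to the saved definition when done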
Again, for production use you should rip out the unneeded definitions. Tracing execution¶ Tracing execution of the various callbacks is the simplest application. It can be as simple as just sprinkling 0N! statements around the functions, or as complicated as logging to an external file. As these are samples they also track the use of .z.pi – but that can get tiresome if you’re debugging from a console. In that case just zap the custom .z.pi definition with: q)\x .z.pi The variable .usage.LEVEL can be set to control how much is output. By default (2) it displays all messages, a value of 1 will display only errors and a value of 0 will temporarily disable logging. In the examples below, the sample session is a simple hopen , get "2+2" , get "2 3+3 4 5" then hclose . dumpusage.q ¶ The simplest file of all, it just puts in 0N! to display the input and the results. It’s fine for debugging a simple conversation with a single client – but not informative enough for more complex setups. dotz$ q dumpusage.q -p 5001 … q)"***" `simon 1b 4 4 "2+2" 4 "2 3+3 4 5" 4 4 traceusage.q ¶ Dumps formatted output to the console, and on non-Windows consoles it will color errors and expensive calls. The definition of what’s expensive can be set by .usage.EXPENSIVE (in milliseconds). dotz$ q traceusage.q -p 5001 … q) 2008.05.13 11:42:56.290 ms:0.002003799 m+:0K pw a:localhost u: w:4 (`simon;"***") 2008.05.13 11:42:56.319 ms:0.003968307 m+:0K po a:localhost u:simon w:4 4 2008.05.13 11:43:00.866 ms:0.0159911 m+:0K pg a:localhost u:simon w:4 2+2 *2008.05.13 11:43:08.818 (error:length) pg a:localhost u:simon w:4 2 3+3 4 5 2008.05.13 11:43:13.986 ms:0.004007597 m+:0K pc a:192.168.1.34 u:simon w:0 4 lastusage.q ¶ When debugging it’s sometimes more helpful to be able to grab the last request that came in rather than just looking at a trace of what happened. This set of custom callbacks stores the last calls in namespace .last , allowing you to fetch the data and retry the request directly in your session. dotz$ q lastusage.q -p 5001 … q).last | :: pw | ``when`u`w`a`x`y`z`r!(::;2008.05.13T11:44:36.655;`;4;2130706433;{[x;y]1b};`simon;"";1b) zcmd| `pc po | ``when`u`w`a`x`y`r!(::;2008.05.13T11:44:36.655;`simon;4;2130706433;::;4;4) pg | ``when`u`w`a`x`y`r!(::;2008.05.13T11:44:40.951;`simon;4;2130706433;.:;"2+2";4) pc | ``when`u`w`a`x`y`r!(::;2008.05.13T11:44:52.111;`simon;0;-1062731486;::;4;4) q).last.pg | :: when| 2008.05.13T11:44:40.951 u | `simon w | 4 a | 2130706433 x | .: y | "2+2" r | 4 q)value .last.pg.y 4 monitorusage.q ¶ If the monitoring is to be left running for a long time scrolling back through the console is not a sensible way to look for problems. This script logs all requests to a local table USAGE , allowing you to analyse the data. As the data is stored in an in-memory table it’s of course lost when you exit unless you choose to do something with .z.exit . 
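One hedged possibility (not something monitorusage.q does itself) is to use .z.exit to write the in-memory table out as the process shuts down; the file name below is illustrative.
/ sketch only: persist USAGE on exit and reload it in the next session
.z.exit:{[x] `:usagebackup set USAGE}
/ ...on the next start: USAGE:get `:usagebackup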
dotz$ q monitorusage.q -p 5001 … q)USAGE date time ms mdelta zcmd ipa u w cmd ok error --------------------------------------------- q)USAGE date time ms mdelta zcmd ipa u w cmd ok error ------------------------------------------------------------------------------------------------- 2008.05.13 11:48:19.360 0.2459958 0 pi 192.168.1.34 simon 0 "USAGE" 1 2008.05.13 11:48:31.728 0.002003799 0 pw localhost 4 "(`simon;\"***\")" 1 2008.05.13 11:48:31.729 0 0 po localhost simon 4 ,"4" 1 2008.05.13 11:48:36.360 0.0159911 0 pg localhost simon 4 "2+2" 1 2008.05.13 11:48:41.920 0 0 pg localhost simon 4 "2 3+3 4 5" 0 length 2008.05.13 11:48:46.512 0.003025343 0 pc 192.168.1.34 simon 0 ,"4" 1 q) logusage.q and loadusage.q ¶ Finally, the all-singing all-dancing version. This script logs all requests directly to an external logfile – using the same log mechanism as kdb+tick. This allows logging to be left running for days without having to worry about tables growing – and will ensure that the logging data is safe even if the session terminates unexpectedly. Use loadusage.q to load the logged data into a session as a table (same schema as that from monitorusage.q except hostname added). dotz$ q logusage.q -p 5001 … q)'type q)\\ / nothing to see here.. dotz$ q loadusage.q … q)USAGE date time ms mdelta zcmd ipa host u w cmd ok error -------------------------------------------------------------------------------------------------------- 2008.05.13 13:01:42.694 0.002003799 0 pw 127.0.0.1 localhost 5 (`simon;"***") 1 2008.05.13 13:01:42.695 0.0009822543 0 po 127.0.0.1 localhost simon 5 5 1 2008.05.13 13:01:46.198 0.0159911 0 pg 127.0.0.1 localhost simon 5 2+2 1 2008.05.13 13:01:57.350 0 0 pg 127.0.0.1 localhost simon 5 2 3+3 4 5 0 length 2008.05.13 13:02:00.901 0.004007597 0 pc 192.168.1.34 simon 0 5 1 q)q)select from USAGE where not ok date time ms mdelta zcmd ipa host u w cmd ok error -------------------------------------------------------------------------------------- 2008.05.13 11:50:55.988 0 0 pg 127.0.0.1 localhost simon 5 2 3+3 4 5 0 length q) Slamming the doors¶ Another important use for modified .z.p ? callbacks is to control access to a q session. Q contains some very coarse access controls settable by command-line options – particularly -u or -U to enforce password-controlled access (with MD5 passwords), -b to enforce read-only access and -T to set a maximum CPU time per single client call. The password control in -u and -U is all done in the kdb+ executable, so completely outside user control. However as soon as the initial check (if any) has been done control is passed to z.pw , which can say if a connection is to be allowed. This can be via some session internal table or function, or can go outside to something like a central single-signon server. blockusage.q ¶ Use this script simply to block all client interaction: it just sets .z.pw to always return false, i.e. no connection is allowed for supplied user ID and password. controlaccess.q and loadinvalidaccess.q ¶ There are so many ways to control access that this script is way too complicated for immediate use as it stands – just pick an interesting subset. It shows how to control access via - a user table – splitting users into superusers (who can do anything), powerusers (who can run ad-hoc queries, but can’t do things like shutdown the session) and defaultusers (who can only use a specific list of pre-defined commands). 
- the client’s server – with name matching a set of wildcards - a list of valid commands - a command validator which parses input Invalid access attempts are logged, and the logfile can be loaded into a table and queried with loadinvalidaccess.q Tracking clients and servers¶ Q provides a a list of handles in use with the keys of .z.W . These clients provide more background information about what's “behind” the handles by extending .z.po and .z.pc . trackclients.q ¶ Tracking of clients can be automated using this script, .z.po and .z.pc maintain the list automatically. By default, the table of clients just uses information provided by .z.po like .z.u and .z.w , but if .clients.INTRUSIVE is set, the server will ask the clients for more details like their q versions, number of secondary processes etc. Output from a session where a client did three hopen 5001 s, and one hclose . dotz$ q trackclients.q -p 5001 … q)CLIENTS w| ipa u a poz pcz -| -------------------------------------------------------------------------- 4| localhost simon 2130706433 2008.05.13T13:37:14.176 5| localhost simon 2130706433 2008.05.13T13:37:15.007 | localhost simon 2130706433 2008.05.13T13:37:15.735 2008.05.13T13:37:26.359 q) trackservers.q ¶ Giving client applications the ability to track servers simplifies application design – no need to hardcode server+port settings, or to handle servers becoming unavailable. Unlike trackclients.q you do have to add server records manually, although .z.pc handles them going away. You can either add a new server using a function like .servers.addnh (nh is name, hpup) or add a server record for an existing open handle using .servers.addw . Servers can be public or private – a private server is not handed on to other users who request a list of current servers (the simplest way of setting up a new session, no central “handle server” to be maintained). By default servers that disappear are retried regularly. 
$ q trackservers.q q).servers.addw hopen`:welly3:2018 3 q).servers.addw hopen`:welly3:2017 4 q)SERVERS name hpup w hits private lastz --------------------------------------------------------------- servers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337 taq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761 lava2006 :welly3:2017 4 0 0 2008.05.13T13:45:05.400 q)SERVERS name hpup w hits private lastz --------------------------------------------------------------- servers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337 taq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761 lava2006 :welly3:2017 4 0 0 2008.05.13T13:45:05.400 q).servers.handlefor`lava2006 4 q).servers.handlefor`lava2007 'lava2007.not.available q)SERVERS name hpup w hits private lastz --------------------------------------------------------------- servers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337 taq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761 lava2006 :welly3:2017 4 1 0 2008.05.13T13:45:44.824 q) Adding servers with user supplied name with .servers.addnh : q).servers.addnh[`taq;`::5001] 3 q).servers.addnh[`taq;`::5001] 5 q).servers.addnh[`taq;`::5001] 6 q).servers.addnh[`taq;`::5001] 7 q)SERVERS name hpup w hits private lastz --------------------------------------------------------------- servers :192.168.1.34:0 0 0 1 2008.05.13T13:44:10.337 taq2007 :welly3:2018 3 0 0 2008.05.13T13:45:00.761 lava2006 :welly3:2017 4 1 0 2008.05.13T13:45:44.824 taq ::5001 3 0 0 2008.05.13T13:50:03.777 taq ::5001 5 0 0 2008.05.13T13:50:05.513 taq ::5001 6 0 0 2008.05.13T13:50:06.848 taq ::5001 7 0 0 2008.05.13T13:50:07.640 q).servers.handlefor`taq 3 q).servers.handlefor`taq 5 q).servers.handlefor`taq 6 q).servers.handlefor`taq 7 q).servers.handlefor`taq 3 Running on other servers¶ The default Q IPC allows you to easily submit synchronous or asynchronous requests. Combined with a list of all available servers from trackservers.q above you can deal with most simple requests. remotetasks.q ¶ This script provides an extra way of dealing with a lot of data requests. It allows you to submit synchronous or asynchronous requests, locally or remotely – and collects all the results in a local table TASKS . So, for example, if you had to run a few hundred queries to be able to build a report and you had 10 server sessions available to query you’d simply submit all 100 queries and either pick up results as they drift in, or wait until all are complete. You can additionally allocate requests to a request group to make it easy to check when a complete group has completed. Here’s an example session using two servers on 5001 and 5002. First create the server table entries. 
q).servers.addnh[`hh;`::5001] 5 q).servers.addnh[`hh;`::5002] 6 q).servers.addnh[`hh;`::5002] 7 q)SERVERS name hpup w hits private lastz -------------------------------------------------------------- servers :192.168.1.34:0 0 0 1 2008.05.13T18:37:41.087 hh ::5001 5 0 0 2008.05.13T18:38:59.455 hh ::5002 6 0 0 2008.05.13T18:39:03.886 hh ::5002 7 0 0 2008.05.13T18:39:05.390 Submit a few tasks: q).tasks.rxa[.servers.handlefor`hh;"max til 10"] 10001 q).tasks.rxa[.servers.handlefor`hh;"max til 10"] 10002 q).tasks.rxa[.servers.handlefor`hh;"max til 10"] 10003 q)TASKS nr | grp startz endz w ipa status expr result -----| ---------------------------------------------------------------------------------------------- 10001| 20001 2008.05.13T18:41:43.057 2008.05.13T18:41:43.058 5 localhost complete "max til 10" 9 10002| 20002 2008.05.13T18:41:46.138 2008.05.13T18:41:46.138 6 localhost complete "max til 10" 9 10003| 20003 2008.05.13T18:41:47.009 2008.05.13T18:41:47.010 7 localhost complete "max til 10" 9 q).tasks.results 10002 10002| 9 q).tasks.results .tasks.completed[] 10001| 9 10002| 9 10003| 9 and one invalid task: q).tasks.rxa[.servers.handlefor`hh;"17+`this"] 10004 q).tasks.failed[] ,10004 q).tasks.results 10004 q).tasks.status 10004 10004| fail q)TASKS nr | grp startz endz w ipa status expr result -----| ---------------------------------------------------------------------------------------------- 10001| 20001 2008.05.13T18:41:43.057 2008.05.13T18:41:43.058 5 localhost complete "max til 10" 9 10002| 20002 2008.05.13T18:41:46.138 2008.05.13T18:41:46.138 6 localhost complete "max til 10" 9 10003| 20003 2008.05.13T18:41:47.009 2008.05.13T18:41:47.010 7 localhost complete "max til 10" 9 10004| 20004 2008.05.13T18:44:05.229 2008.05.13T18:44:05.229 5 localhost fail "17+`this" "type" q) Utilities¶ hutil.q ¶ Production usage All these utility files should be treated as examples. For any particular case they probably have too many options and should be cut down to do just what you want. The access control script is the most obvious case - it probably has far too many options/checks going on.</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="88"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">generatehdb:{[x] .lg.o[`mockdata;"generating mock hdb"]; x:updatehdbdir x; setondisk[x].'exec .getrange[partitiontype]'[til n]from x; loadhdb x`hdbdir; }; updatehdbdir:{[x]update hdbdir:` sv(testpath;hdbname)from x}; loadhdb:{[hdbdir]system "l ",1_string hdbdir}; generaterdb:{ .lg.o[`mockdata;"generating mock hdb"]; [x]setinmemory[x]. 
exec .getrange[partitiontype][n]from x; }; .getrange.date:{[n]0D+2000.01.01+0 1+n}; .getrange.month:{[n]0D+.Q.addmonths[2000.01.01;0 1+n]}; .getrange.year:{[n]0D+.Q.addmonths[2000.01.01;12*0 1+n]}; generatedata:{[x;start;end] end:end-1; difference:(end-start)%x`nrecord; timestamp:start+til[x`nrecord]*difference; syms:`AUDUSD`EURUSD`USDCHF; sym:`p#syms where 3#x`nrecord; source:(`$"source",/:string til nsyms:count syms)where 3#x`nrecord; id:"x",/:string til count source; offset:til[nsyms]*difference%nsyms; time:raze timestamp+/:offset; sourcetime:raze timestamp+/:2*offset; price:raze 100+x[`nrecord]?/:10*1+til nsyms; size:raze 1000+x[`nrecord]?/:100*1+til nsyms; :([]sym;source;id;`timestamp$time;`timestamp$sourcetime;bidprice:0.9*price;bidsize:0.9*size;askprice:1.1*price;asksize:1.1*size); }; setondisk:{[x;start;end] data:generatedata[x;start;end]; x:update target:.Q.par[hdbdir;partitiontype$first start;tablename]from x; exec .Q.dd[target;`]set .Q.en[hdbdir;data]from x; :x; }; setinmemory:{[x;start;end] x[`tablename]set generatedata[x;start;end]; }; run:{[] x:params .proc.proctype; :x[`func]x; }; run[]; ================================================================================ FILE: TorQ_tests_dataaccess_queryorder_hdb_dbmaint.q SIZE: 6,148 characters ================================================================================ / kdb+ partitioned database maintenance \d .os WIN:.z.o in`w32`w64 pth:{p:$[10h=type x;x;string x];if[WIN;p[where"/"=p]:"\\"];(":"=first p)_ p} cpy:{system$[WIN;"copy /v /z ";"cp "],pth[x]," ",pth y} del:{system$[WIN;"del ";"rm "],pth x} ren:{system$[WIN;"move ";"mv "],pth[x]," ",pth y} here:{hsym`$system$[WIN;"cd";"pwd"]} \d . add1col:{[tabledir;colname;defaultvalue] if[not colname in ac:allcols tabledir; stdout"adding column ",(string colname)," (type ",(string type defaultvalue),") to `",string tabledir; num:count get(`)sv tabledir,first ac; .[(`)sv tabledir,colname;();:;num#defaultvalue]; @[tabledir;`.d;,;colname]]} allcols:{[tabledir]get tabledir,`.d} allpaths:{[dbdir;table] files:key dbdir; if[any files like"par.txt";:raze allpaths[;table]each hsym each`$read0(`)sv dbdir,`par.txt]; files@:where files like"[0-9]*";(`)sv'dbdir,'files,'table} copy1col:{[tabledir;oldcol;newcol] if[(oldcol in ac)and not newcol in ac:allcols tabledir; stdout"copying ",(string oldcol)," to ",(string newcol)," in `",string tabledir; .os.cpy[(`)sv tabledir,oldcol;(`)sv tabledir,newcol];@[tabledir;`.d;,;newcol]]} delete1col:{[tabledir;col] if[col in ac:allcols tabledir; stdout"deleting column ",(string col)," from `",string tabledir; .os.del[(`)sv tabledir,col];@[tabledir;`.d;:;ac except col]]} / enum:{[tabledir;val] if[not 11=abs type val;:val]; .[p;();,;u@:iasc u@:where not(u:distinct enlist val)in v:$[type key p:(`)sv tabledir,`sym;get p;0#`]];`sym!(v,u)?val} \ enum:{[tabledir;val]if[not 11=abs type val;:val];.Q.dd[tabledir;`sym]?val} find1col:{[tabledir;col] $[col in allcols tabledir; stdout"column ",string[col]," (type ",(string first"i"$read1((`)sv tabledir,col;8;1)),") in `",string tabledir; stdout"column ",string[col]," *NOT*FOUND* in `",string tabledir]} fix1table:{[tabledir;goodpartition;goodpartitioncols] if[count missing:goodpartitioncols except allcols tabledir; stdout"fixing table `",string tabledir;{add1col[x;z;0#get y,z]}[tabledir;goodpartition]each missing]} fn1col:{[tabledir;col;fn] if[col in allcols tabledir; oldattr:-2!oldvalue:get p:tabledir,col; newattr:-2!newvalue:fn oldvalue; if[$[not oldattr~newattr;1b;not oldvalue~newvalue]; stdout"resaving column 
",(string col)," (type ",(string type newvalue),") in `",string tabledir; oldvalue:0;.[(`)sv p;();:;newvalue]]]} reordercols0:{[tabledir;neworder] if[not((count ac)=count neworder)or all neworder in ac:allcols tabledir;'`order]; stdout"reordering columns in `",string tabledir; @[tabledir;`.d;:;neworder]} rename1col:{[tabledir;oldname;newname] if[(oldname in ac)and not newname in ac:allcols tabledir; stdout"renaming ",(string oldname)," to ",(string newname)," in `",string tabledir; .os.ren[` sv tabledir,oldname;` sv tabledir,newname];@[tabledir;`.d;:;.[ac;where ac=oldname;:;newname]]]} ren1table:{[old;new]stdout"renaming ",(string old)," to ",string new;.os.ren[old;new];} add1table:{[dbdir;tablename;table] stdout"adding ",string tablename; @[tablename;`;:;.Q.en[dbdir]0#table];} stdout:{-1 raze[" "sv string`date`second$.z.P]," ",x;} validcolname:{(not x in `i,.Q.res,key`.q)and x = .Q.id x} ////////////////////////////////////////////////////////////////////////////////////////////////////////// // * public thisdb:`:. / if functions are to be run within the database instance then use <thisdb> (`:.) as dbdir addcol:{[dbdir;table;colname;defaultvalue] / addcol[`:/data/taq;`trade;`noo;0h] if[not validcolname colname;'(`)sv colname,`invalid.colname]; add1col[;colname;enum[dbdir;defaultvalue]]each allpaths[dbdir;table];} castcol:{[dbdir;table;col;newtype] / castcol[thisdb;`trade;`size;`short] fncol[dbdir;table;col;newtype$]} clearattrcol:{[dbdir;table;col] / clearattr[thisdb;`trade;`sym] setattrcol[dbdir;table;col;(`)]} copycol:{[dbdir;table;oldcol;newcol] / copycol[`:/k4/data/taq;`trade;`size;`size2] if[not validcolname newcol;'(`)sv newcol,`invalid.newname]; copy1col[;oldcol;newcol]each allpaths[dbdir;table];} deletecol:{[dbdir;table;col] / deletecol[`:/k4/data/taq;`trade;`iz] delete1col[;col]each allpaths[dbdir;table];} findcol:{[dbdir;table;col] / findcol[`:/k4/data/taq;`trade;`iz] find1col[;col]each allpaths[dbdir;table];} / adds missing columns, but DOESN'T delete extra columns - do that manually fixtable:{[dbdir;table;goodpartition] / fixtable[`:/k4/data/taq;`trade;`:/data/taq/2005.02.19] fix1table[;goodpartition;allcols goodpartition]each allpaths[dbdir;table]except goodpartition;} fncol:{[dbdir;table;col;fn] / fncol[thisdb;`trade;`price;2*] fn1col[;col;fn]each allpaths[dbdir;table];} listcols:{[dbdir;table] / listcols[`:/k4/data/taq;`trade] allcols first allpaths[dbdir;table]} renamecol:{[dbdir;table;oldname;newname] / renamecol[`:/k4/data/taq;`trade;`woz;`iz] if[not validcolname newname;'` sv newname,`invalid.newname]; rename1col[;oldname;newname]each allpaths[dbdir;table];} reordercols:{[dbdir;table;neworder] / reordercols[`:/k4/data/taq;`trade;reverse cols trade] reordercols0[;neworder]each allpaths[dbdir;table];} setattrcol:{[dbdir;table;col;newattr] / setattr[thisdb;`trade;`sym;`g] / `s `p `u fncol[dbdir;table;col;newattr#]} addtable:{[dbdir;tablename;table] / addtable[`:.;`trade;([]price...)] add1table[dbdir;;table]each allpaths[dbdir;tablename];} rentable:{[dbdir;old;new] / rentable[`:.;`trade;`transactions] ren1table'[allpaths[dbdir;old];allpaths[dbdir;new]];} \ test with https://github.com/KxSystems/kdb/blob/master/tq.q (sample taq database) if making changes to current database you need to reload (\l .) 
to make modifications visible if the database you've been modifying is a tick database don't forget to adjust the schema (tick/???.q) to reflect your changes to the data addcol[`:.;`trade;`num;10] addcol[`:.;`trade;`F;`test] delete1col[`:./2000.10.02/trade;`F] fixtable[`:.;`trade;`:./2000.10.03/trade] reordercols[`:.;`quote;except[2 rotate cols quote;`date]] clearattrcol[`:.;`trade;`sym] setattrcol[`:.;`trade;`sym;`p] castcol[`:.;`trade;`time;`second] renamecol[`:.;`trade;`price;`PRICE] pxcols:{(y,())renamecol[`:.;x]'z,()] `PRICE`size renamecol[`:.;`trade]'`p`s ================================================================================ FILE: TorQ_tests_dataaccess_queryorder_settings.q SIZE: 900 characters ================================================================================ inputpath:hsym`$getenv[`KDBTESTS],"/dataaccess/queryorder/inputs"; outputpath:hsym`$getenv[`KDBTESTS],"/dataaccess/queryorder/outputs"; processcsv:hsym`$getenv[`KDBTESTS],"/dataaccess/queryorder/`config`process.csv"; //- code to pass in a test name //- extract the input dictionary from {testname}.csv //- extract the respone from .queryorder.orderquery //- compare output with expected one getinputparams:{[test]exec parameter!get each parametervalue from .checkinputs.readcsv[` sv inputpath,`$string[test],".csv";"s*"]}; getoutputparams:{[test]T:exec parameter!get each parametervalue from .checkinputs.readcsv[` sv outputpath,`$string[test],".csv";"i*"];:(T[til 4])}; testfunction:{[testquery] getoutputparams[testquery]~(raze .queryorder.orderquery[getinputparams[testquery]])[1+til 4]}; testfunction1:{[testquery;expectedoutput] (getdata getinputparams[testquery])~value expectedoutput}; ================================================================================ FILE: TorQ_tests_dataaccess_settings.q SIZE: 422 characters ================================================================================ .servers.USERPASS:`$"admin:admin"; //- some custom functionality for tests .dataaccess.testfuncrollover:{[]2000.01.05D}; //- function to determine rollover to split the time ranges destined for the rdb and hdb. .dataaccess.testfuncpartitionrange:{[timecolumn;primarytimecolumn;partitionfield;hdbtimerange]@[partitionfield$hdbtimerange;1;+;not timecolumn~primarytimecolumn]}; //- offset times for non-primary time columns ================================================================================ FILE: TorQ_tests_helperfunctions.q SIZE: 970 characters ================================================================================ startorstopproc:{[startorstop;procname;processcsv] .proc.sys getenv[`TORQHOME],"/torq.sh ",startorstop," ",procname," -csv ",processcsv }; deadproccheck:{[proctype;procname] / pairs are of the form "<PID> ssh" for 'PID TTY...' 
part of pgrep, and "<PID> q" if process is running pidnamepairs:.proc.sys "pgrep -lf \"stackid ",getenv[`KDBBASEPORT]," -proctype ",proctype," -procname ",procname,"\""; not "q" in last each pidnamepairs }; // Kill process dead with -9 kill9proc:{[proc] a:"q" in' b:@[system;"pgrep -lf ",proc," -u $USER";" "];system "kill -9 ",first " " vs first b where a}; // Returns boolean true if process is alive isalive:{[proc] any "q" in' @[system;"pgrep -lf ",proc," -u $USER";" "]};</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="89"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cov , scov ¶ Covariance cov ¶ x cov y cov[x;y] Where x and y are conforming numeric lists returns their covariance as a floating-point number. Applies to all numeric data types and signals an error with temporal types, char and sym. q)2 3 5 7 cov 3 3 5 9 4.5 q)2 3 5 7 cov 4 3 0 2 -1.8125 q)select price cov size by sym from trade cov is an aggregate function. The function cov is equivalent to {avg[x*y]-avg[x]*avg y} . Domain and range: b g x h i j e f c s p m d z n u v t ---------------------------------------- b | f . f f f f f f f . f f f f f f f f g | . . . . . . . . . . . . . . . . . . x | f . f f f f f f f . f f f f f f f f h | f . f f f f f f f . f f f f f f f f i | f . f f f f f f f . f f f f f f f f j | f . f f f f f f f . f f f f f f f f e | f . f f f f f f f . f f f f f f f f f | f . f f f f f f f . f f f f f f f f c | f . f f f f f f f . f f f f f f f f s | . . . . . . . . . . . . . . . . . . p | f . f f f f f f f . f f f f f f f f m | f . f f f f f f f . f f f f f f f f d | f . f f f f f f f . f f f f f f f f z | f . f f f f f f f . f f f f f f f f n | f . f f f f f f f . f f f f f f f f u | f . f f f f f f f . f f f f f f f f v | f . f f f f f f f . f f f f f f f f t | f . f f f f f f f . f f f f f f f f Range: f cov is a multithreaded primitive. scov ¶ Sample covariance x scov y scov[x;y] Where x and y are conforming numeric lists returns their sample covariance as a float atom. \[scov(x,y)=\frac{n}{n-1} cov(x,y)\] Applies to all numeric data types and signals an error with temporal types, char and sym. q)2 3 5 7 scov 3 3 5 9 6f q)2 3 5 7 scov 4 3 0 2 -2.416667 q)select price scov size by sym from trade scov is an aggregate function. The function scov is equivalent to {cov[x;y]*count[x]%-1+count x} . Domain and range: b g x h i j e f c s p m d z n u v t ---------------------------------------- b | f . f f f f f f . . f f f f f f f f g | . . . . . . . . . . . . . . . . . . x | f . f f f f f f . . f f f f f f f f h | f . f f f f f f . . f f f f f f f f i | f . f f f f f f . . f f f f f f f f j | f . f f f f f f . . f f f f f f f f e | f . f f f f f f . . f f f f f f f f f | f . f f f f f f f . f f f f f f f f c | . . . . . . . f . . f f f f f f f f s | . . . . . . . . . . . . . . . . . . p | f . f f f f f f f . f . . . f f f f m | f . f f f f f f f . . f . . f f f f d | f . f f f f f f f . . . f . f f f f z | f . f f f f f f f . . . . f f f f f n | f . f f f f f f f . f f f f f f f f u | f . f f f f f f f . f f f f f f f f v | f . f f f f f f f . f f f f f f f f t | f . f f f f f f f . f f f f f f f f Range: f scov is a multithreaded primitive. cross ¶ x cross y cross[x;y] Returns the cross-product (i.e. all possible combinations) of x and y . 
q)1 2 3 cross 10 20 1 10 1 20 2 10 2 20 3 10 3 20 q)(cross/)(2 3;10;"abc") 2 10 "a" 2 10 "b" 2 10 "c" 3 10 "a" 3 10 "b" 3 10 "c" cross can work on tables and dictionaries. q)s:`IBM`MSFT`AAPL q)v:1 2 q)([]s:s)cross([]v:v) s v ------ IBM 1 IBM 2 MSFT 1 MSFT 2 AAPL 1 AAPL 2 The function cross is equivalent to {raze x,/:\:y} . csv ¶ CSV delimiter csv A synonym for "," for use in preparing text for CSV files, or reading them. Prepare Text, .h.cd (csv from data), .h.td (tsv from data) File system csv ¶CSV delimiter csv A synonym for "," for use in preparing text for CSV files, or reading them. Prepare Text, .h.cd (csv from data), .h.td (tsv from data) File system _ Cut, cut ¶ _ (cut operator)¶ Cut a list or table into sub-arrays x _ y _[x;y] Where x is a non-decreasing list of integers in the domaintil count y y is a list or table returns y cut at the indexes given in x . The result is a list with the same count as x . Examples using cut on lists: q)2 4 9 _ til 10 /first result item starts at index 2 2 3 4 5 6 7 8 ,9 q) q)2 4 4 9 _ til 10 /cuts are empty for duplicate indexes 2 3 `long$() 4 5 6 7 8 ,9 q)2 5 7 _ til 12 2 3 4 5 6 7 8 9 10 11 Example using cut on table sp created using sp.q q)\l sp.q q)count sp 12 q){}show each 2 5 7_sp / `show` returns the generic null :: s p qty --------- s1 p3 400 s1 p4 200 s4 p5 100 s p qty --------- s1 p6 100 s2 p1 300 s p qty --------- s2 p2 400 s3 p2 200 s4 p2 200 s4 p4 300 s1 p5 400 _ (cut) is a multithreaded primitive. Avoid confusion with underscores in names: separate the Cut operator with spaces. cut (keyword)¶ Cut a list or table into a matrix of x columns x cut y cut[x;y] Where x is an integer atomy is a list returns y splits into a list of lists, all (except perhaps the last) of count x . q)4 cut til 10 0 1 2 3 4 5 6 7 8 9 Otherwise cut behaves as _ Cut. ? Roll, Deal, Permute¶ Random lists, with or without duplicates Roll and Deal¶ Select items randomly, generate random values x?y ?[x;y] / Roll neg[x]?y ?[neg[x];y] / Deal Select¶ Where x is an integer atomy is a list returns abs[x] randomly selected items of y . Where x is - positive items are selected independently (Roll) - negative and x>=neg count y , items are selected from different indexes ofy (Deal) q)5?`Arthur`Steve`Dennis `Arthur`Arthur`Steve`Dennis`Arthur q)2?("a";0101b;`abc;`the`quick;2012.06m) `abc 2012.06m q)-3?`the`quick`brown`fox `brown`quick`fox Duplicate items in y If y contains duplicate items, so may the result of Deal. q)-2?`bye`bye`blackbird `bye`bye Generate¶ Where x is an int atomy is an atom > 0 returns a list of abs[x] items of the same type as y , generated as follows right domain (y) range operator ---------------------------------------------------------------- integer >0 til y Roll, Deal 0Ng GUIDs Roll, Deal float, temporal ≥0 0 to y Roll 0i ints Roll 0 longs Roll, Deal 0b 01b Roll " " .Q.a Roll 0x0 bytes Roll numeric symbol `n symbols, each of n chars (n≤8) Roll, Deal from abcdefghijklmnop Where x is negative (Deal), y must have a positive long or null GUID q)10?5 / roll 10 (5-sided dice) 4 2 1 1 3 2 0 0 2 2 q)-5?20 / deal 5 13 11 8 12 19 q)-10?10 / first 10 ints in random order 9 3 5 7 2 0 6 1 4 8 q)(asc -10?10)~asc -10?10 1b q)-1?0Ng / deal 1 GUID ,fd2db048-decb-0008-0176-01714e5eeced q)count distinct -1000?0Ng / deal 1000 GUIDs 1000 q)5?4.5 / roll floats 3.13239 1.699364 2.898484 1.334554 3.085937 q)4?2012.09m / roll months 2006.02 2007.07 2007.07 2008.06m q)30?" 
" "tusrgoufcetphltnkegcflrunpornt" q)16?0x0 / roll 16 bytes 0x8c6b8b64681560840a3e178401251b68 q)20?0b / roll booleans 00000110010101000100b q)10?`3 / roll short symbols `bon`dec`nei`jem`pgm`kei`lpn`bjh`flj`npo q)rand `6 `nemoad Roll and Deal return list results For an atom result, instead of first 1?x , use rand . Deal of GUID atom¶ Deal of GUID uses a mix of process ID, current time and IP address to generate the GUID, and successive calls may not allow enough time for the current time reading to change. q)count distinct {-1?0ng}each til 10 / Deal one GUID ten times 5 The range of GUIDs is large enough that Roll and Deal often return the same result. q)count distinct 1000000000?0Ng / Roll a billion GUIDs 1000000000 For a set of distinct GUIDs, use Deal to generate them in one operation Permute¶ 0N?x Where x is - a non-negative int atom, returns the items of til x in random order - a list, returns the items of x in random order (Since V3.3.) q)0N?10 / permute til 10 8 2 4 1 6 0 5 3 7 9 q)0N?5 4 2 / permute items 4 5 2 q)0N?"abc" / permute items "bac" q)0N?("the";1 2 4;`ibm`goog) / permute items `ibm`goog 1 2 4 "the" Seed¶ Deal, Roll, Permute and rand use a constant seed on kdb+ startup: scripts using them can be repeated with the same results. You can see and set the value of the seed with system command \S .) To use GUIDs as identifiers, use Deal, not Roll $ q .. q)1?0Ng / roll 1 GUID ,8c6b8b64-6815-6084-0a3e-178401251b68 q)\\ $ q .. q)1?0Ng / roll 1 GUID ,8c6b8b64-6815-6084-0a3e-178401251b68 q)\\ $ q .. q)-1?0Ng / deal 1 GUID ,2afe0040-2a1b-bfce-ef3e-7160260cf992 q)\\ $ q .. q)-1?0Ng / deal 1 GUID ,753a8739-aa6b-3cb4-2e31-0fcdf20fd2f0 Roll uses the current seed (\S 0N ). Deal uses a seed based on process properties and the current time. This means -10?0Ng is different from {first -1?0Ng}each til 10 . Errors¶ | error | cause | |---|---| | length | neg x exceeds count y | | type | x is negative (Roll only) | delete ¶ Delete rows or columns from a table, entries from a dictionary, or objects from a namespace delete from x delete from x where pw delete ps from x delete is a qSQL query template and varies from regular q syntax For the Delete operator ! , see Functional SQL Table rows¶ delete from x delete from x where pw Where x is a tablepw is a condition deletes from x rows matching pw , or all rows if where pw not specified. q)show table: ([] a: `a`b`c; n: 1 2 3) a n --- a 1 b 2 c 3 q)show delete from table where a = `c a n --- a 1 b 2 Attributes may or may not be dropped: reapply or remove as needed Table columns¶ delete from x delete ps from x Where x is a tableps a list of column names deletes from x columns ps or all columns if ps not specified. q)show delete n from table a - a b c Dictionary entries¶ delete from x delete ps from x Where x is a dictionaryps a list of keys to it deletes from x entries for ps . q)show d:`a`b`c!til 3 a| 0 b| 1 c| 2 q)delete b from `d `d q)d a| 0 c| 2 Cond is not supported inside q-SQL expressions Enclose in a lambda or use Vector Conditional instead. Namespace objects¶ delete from x delete ps from x Where x is a namespaceps a symbol atom or vector of name/s defined in it deletes the named objects from the namespace. q)a:1 q)\v ,`a q)delete a from `. `. 
q)\v `symbol$()</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="90"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Functional qSQL¶ The functional forms of delete , exec , select and update are particularly useful for programmatically-generated queries, such as when column names are dynamically produced. Functional form is an alternative to using a qSQL template to construct a query. For example, the following are equivalent: q)select n from t q)?[t;();0b;(enlist `n)!enlist `n] Performance The q interpreter parses delete , exec , select , and update into their equivalent functional forms, so there is no performance difference. The functional forms are ![t;c;b;a] /update and delete ?[t;i;p] /simple exec ?[t;c;b;a] /select or exec ?[t;c;b;a;n] /select up to n records ?[t;c;b;a;n;(g;cn)] /select up to n records sorted by g on cn where: t is a table, or the name of a table as a symbol atom.c is the Where phrase, a list of constraints. Every constraint inc is a parse tree representing an expression to be evaluated; the result of each being a boolean vector. The parse tree consists of a function followed by a list of its arguments, each an expression containing column names and other variables. Represented by symbols, it distinguishes actual symbol constants by enlisting them. The function is applied to the arguments, producing a boolean vector that selects the rows. The selection is performed in the order of the items inc , from left to right: only rows selected by one constraint are evaluated by the next. b is the By phrase. The domain of dictionaryb is a list of symbols that are the key names for the grouping. Its range is a list of column expressions (parse trees) whose results are used to construct the groups. The grouping is ordered by the domain items, from major to minor.b is one of:- the general empty list () - boolean atom: 0b for no grouping;1b for distinct - a symbol atom or list naming table column/s - a dictionary of group-by specifications - the general empty list a is the Select phrase. The domain of dictionarya is a list of symbols containing the names of the produced columns. QSQL query templates assign default column names in the result, but here each result column must be named explicitly. Each item of its range is an evaluation list consisting of a function and its argument(s), each of which is a column name or another such result list. For each evaluation list, the function is applied to the specified value(s) for each row and the result is returned. The evaluation lists are resolved recursively when operations are nested. a is one of- the general empty list () - a symbol atom: the name of a table column - a parse tree - a dictionary of select specifications (aggregations) - the general empty list i is a list of indexesp is a parse treen is a non-negative integer or infinity, indicating the maximum number of records to be returnedg is a unary grade function Call by name¶ Columns in a , b and c appear as symbols. To distinguish symbol atoms and vectors from columns, enlist them. 
q)t:([] c1:`a`b`a`c`a`b`c; c2:10*1+til 7; c3:1.1*1+til 7) q)select from t where c2>35,c1 in `b`c c1 c2 c3 --------- c 40 4.4 b 60 6.6 c 70 7.7 q)?[t; ((>;`c2;35);(in;`c1;enlist[`b`c])); 0b; ()] c1 c2 c3 --------- c 40 4.4 b 60 6.6 c 70 7.7 Note above that - the columns c1 andc2 appear as symbol atoms - the symbol vector `b`c appears asenlist[`b`c] Use enlist to create singletons to ensure appropriate entities are lists. Different types of a and b return different types of result for Select and Exec. | b a | bool () sym/s dict -----------|---------------------------------------- () | table dict - keyed table sym | - vector dict dict parse tree | - vector dict dict dict | table vector/s table table ? Select¶ ?[t;c;b;a] Where t , c , b , and a are as above, returns a table. q)show t:([]n:`x`y`x`z`z`y;p:0 15 12 20 25 14) n p ---- x 0 y 15 x 12 z 20 z 25 y 14 q)select m:max p,s:sum p by name:n from t where p>0,n in `x`y name| m s ----| ----- x | 12 12 y | 15 29 Following is the equivalent functional form. Note the use of enlist to create singletons, ensuring that appropriate entities are lists. q)c: ((>;`p;0);(in;`n;enlist `x`y)) q)b: (enlist `name)!enlist `n q)a: `m`s!((max;`p);(sum;`p)) q)?[t;c;b;a] name| m s ----| ----- x | 12 12 y | 15 29 Degenerate cases - For no constraints, make c the empty list - For no grouping make b a boolean0b - For distinct rows make b a boolean1b - To produce all columns of t in the result, makea the empty list() select from t is equivalent to functional form ?[t;();0b;()] . Select distinct¶ For special case select distinct specify b as 1b . q)t:([] c1:`a`b`a`c`b`c; c2:1 1 1 2 2 2; c3:10 20 30 40 50 60) q)?[t;(); 1b; `c1`c2!`c1`c2] / select distinct c1,c2 from t c1 c2 ----- a 1 b 1 c 2 b 2 Rank 5¶ Limit result rows ?[t;c;b;a;n] Returns as for rank 4, but where n is - an integer or infinity, only the first n rows, or the last ifn is negative - a pair of non-negative integers, up to n[1] rows starting with rown[0] q)show t:([] c1:`a`b`c`a; c2:10 20 30 40) c1 c2 ----- a 10 b 20 c 30 a 40 q)?[t;();0b;();-2] / select[-2] from t c1 c2 ----- c 30 a 40 q)?[t;();0b;();1 2] / select[1 2] from t c1 c2 ----- b 20 c 30 Rank 6¶ Limit result rows and sort by a column ?[t;c;b;a;n;(g;cn)] Returns as for rank 5, but where g is a unary grading functioncn is a column name as a symbol atom sorted by g on column cn . q)?[t; (); 0b; `c1`c2!`c1`c2; 0W; (idesc;`c1)] c1 c2 ----- c 30 b 20 a 10 a 40 Q for Mortals §9.12.1 Functional select ? Exec¶ A simplified form of Select that returns a list or dictionary rather than a table. ?[t;c;b;a] The constraint specification c (Where) is as for Select. q)show t:([] c1:`a`b`c`c`a`a; c2:10 20 30 30 40 40; c3: 1.1 2.2 3.3 3.3 4.4 3.14159; c4:`cow`sheep`cat`dog`cow`dog) c1 c2 c3 c4 ------------------- a 10 1.1 cow b 20 2.2 sheep c 30 3.3 cat c 30 3.3 dog a 40 4.4 cow a 40 3.14159 dog No grouping¶ b is the general empty list. b a result -------------------------------------------------------------- () () the last row of t as a dictionary () sym the value of that column () dict a dictionary with keys and values as specified by a q)?[t; (); (); ()] / exec last c1,last c2,last c3 from t c1| `a c2| 40 c3| 3.14159 c4| `dog q)?[t; (); (); `c1] / exec c1 from t `a`b`c`c`a`a q)?[t; (); (); `one`two!`c1`c2] / exec one:c1,two:c2 from t one| a b c c a a two| 10 20 30 30 40 40 q)?[t; (); (); `one`two!(`c1;(sum;`c2))] / exec one:c1,two:sum c2 from t one| `a`b`c`c`a`a two| 170 Group by column¶ b is a column name. The result is a dictionary. 
Where a is a column name, in the result - the keys are distinct values of the column named in b - the values are lists of corresponding values from the column named in a q)?[t; (); `c1; `c2] / exec c2 by c1 from t a| 10 40 40 b| ,20 c| 30 30 Where a is a dictionary, in the result - the key is a table with a single anonymous column containing distinct values of the column named in b - the value is a table with columns as defined in a q)?[t; (); `c1; enlist[`c2]!enlist`c2] / exec c2:c2 by c1 from t | c2 -| -------- a| 10 40 40 b| ,20 c| 30 30 q)?[t; (); `c1; `two`three!`c2`c3] / exec two:c2,three:c3 by c1 from t | two three -| ------------------------ a| 10 40 40 1.1 4.4 3.14159 b| ,20 ,2.2 c| 30 30 3.3 3.3 q)?[t;();`c1;`m2`s3!((max;`c2);(sum;`c3))] / exec m2:max c2,s3:sum c3 by c1 from t | m2 s3 -| ----------- a| 40 8.64159 b| 20 2.2 c| 30 6.6 Group by columns¶ b is a list of column names. Where a is a column name, returns a dictionary in which - the key is the empty symbol - the value is the value of the column/s specified in a q)?[t; (); `c1`c2; `c3] | 1.1 2.2 3.3 3.3 4.4 3.14159 q)?[t; (); `c1`c2; `c3`c4!((max;`c3);(last;`c4))] | c3 c4 | ------- | 4.4 dog Group by a dictionary¶ b is a dictionary. Result is a dictionary in which the key is a table with columns as specified by b and b a result value ----------------------------------------------------- dict () last records of table that match each key dict sym corresponding values from the column in a dict dict values as defined in a q)?[t; (); `one`two!`c1`c2; ()] one two| c1 c2 c3 c4 -------| ------------------- a 10 | a 10 1.1 cow a 40 | a 40 3.14159 dog b 20 | b 20 2.2 sheep c 30 | c 30 3.3 dog q)/ exec last c1,last c2,last c3,last c4 by one:c1,two:c2 from t q)?[t; (); enlist[`one]!enlist(string;`c1); ()] one | c1 c2 c3 c4 ----| ------------------- ,"a"| a 40 3.14159 dog ,"b"| b 20 2.2 sheep ,"c"| c 30 3.3 dog q)/ exec last c1,last c2,last c3,last c4 by one:string c1 from t q)?[t; (); enlist[`one]!enlist `c1; `c2] / exec c2 by one:c1 from t one| ---| -------- a | 10 40 40 b | ,20 c | 30 30 q)?[t; (); `one`four!`c1`c4; `m2`s3!((max;`c2);(sum;`c3))] one four | m2 s3 ---------| ---------- a cow | 40 5.5 a dog | 40 3.14159 b sheep| 20 2.2 c cat | 30 3.3 c dog | 30 3.3 Q for Mortals §9.12.2 Functional exec ? Simple Exec¶ ?[t;i;p] Where t is not partitioned, another form of Exec. q)show t:([]a:1 2 3;b:4 5 6;c:7 9 0) a b c ----- 1 4 7 2 5 9 3 6 0 q)?[t;0 1 2;`a] 1 2 3 q)?[t;0 1 2;`b] 4 5 6 q)?[t;0 1 2;(last;`a)] 3 q)?[t;0 1;(last;`a)] 2 q)?[t;0 1 2;(*;(min;`a);(avg;`c))] 5.333333 ! Update¶ ![t;c;b;a] Arguments t , c , b , and a are as for Select. q)show t:([]n:`x`y`x`z`z`y;p:0 15 12 20 25 14) n p ---- x 0 y 15 x 12 z 20 z 25 y 14 q)select m:max p,s:sum p by name:n from t where p>0,n in `x`y name| m s ----| ----- x | 12 12 y | 15 29 q)update p:max p by n from t where p>0 n p ---- x 0 y 15 x 12 z 25 z 25 y 15 q)c: enlist (>;`p;0) q)b: (enlist `n)!enlist `n q)a: (enlist `p)!enlist (max;`p) q)![t;c;b;a] n p ---- x 0 y 15 x 12 z 25 z 25 y 15 The degenerate cases are the same as in Select. Q for Mortals §9.12.3 Functional update ! Delete¶ A simplified form of Update ![t;c;0b;a] One of c or a must be empty, the other not. c selects which rows will be removed. a is a symbol vector with the names of columns to be removed. 
q)t:([]c1:`a`b`c;c2:`x`y`z) q)/following is: delete c2 from t q)![t;();0b;enlist `c2] c1 -- a b c q)/following is: delete from t where c2 = `y q)![t;enlist (=;`c2; enlist `y);0b;`symbol$()] c1 c2 ----- a x c z Q for Mortals §9.12.4 Functional delete Conversion using parse¶ Applying parse to a qSQL statement written as a string will return the internal representation of the functional form. With some manipulation this can then be used to piece together the functional form in q. This generally becomes more difficult as the query becomes more complex and requires a deep understanding of what kdb+ is doing when it parses qSQL form. An example of using parse to convert qSQL to its corresponding functional form is as follows: q)t:([]c1:`a`b`c; c2:10 20 30) q)parse "select c2:2*c2 from t where c1=`c" ? `t ,,(=;`c1;,`c) 0b (,`c2)!,(*;2;`c2) q)?[`t; enlist (=;`c1;enlist `c); 0b; (enlist `c2)!enlist (*;2;`c2)] c2 -- 60 Issues converting to functional form¶ To convert a select query to a functional form one may attempt to apply the parse function to the query string: q)parse "select sym,price,size from trade where price>50" ? `trade ,,(>;`price;50) 0b `sym`price`size!`sym`price`size As we know, parse produces a parse tree and since some of the elements may themselves be parse trees we can’t immediately take the output of parse and plug it into the form ?[t;c;b;a] . After a little playing around with the result of parse you might eventually figure out that the correct functional form is as follows. q)funcQry:?[`trade;enlist(>;`price;50);0b;`sym`price`size! `sym`price`size] q)strQry:select sym,price,size from trade where price>50 q) q)funcQry~strQry 1b This, however, becomes more difficult as the query statements become more complex: q)parse "select count i from trade where 140>(count;i) fby sym" ? `trade ,,(>;140;(k){@[(#y)#x[0]0#x 1;g;:;x[0]'x[1]g:.=y]};(enlist;#:;`i);`sym)) 0b (,`x)!,(#:;`i) In this case, it is not obvious what the functional form of the above query should be, even after applying parse . There are three issues with this parse-and-“by eye” method to convert to the equivalent functional form. We will cover these in the next three subsections. Parse trees and eval¶ The first issue with passing a select query to parse is that each returned item is in unevaluated form. As discussed here, simply applying value to a parse tree does not work. However, if we evaluate each one of the arguments fully, then there would be no nested parse trees. We could then apply value to the result: q)eval each parse "select count i from trade where 140>(count;i) fby sym" ? +`sym`time`price`size!(`VOD`IBM`BP`VOD`IBM`IBM`HSBC`VOD`MS.. ,(>;140;(k){@[(#y)#x[0]0#x 1;g;:;x[0]'x[1]g:.=y]};(enlist;#:;`i);`sym)) 0b (,`x)!,(#:;`i) The equivalence below holds for a general qSQL query provided as a string: q)value[str]~value eval each parse str 1b In particular: q)str:"select count i from trade where 140>(count;i) fby sym" q)value[str]~value eval each parse str 1b In fact, since within the functional form we can refer to the table by name we can make this even clearer. Also, the first item in the result of parse applied to a select query will always be ? (or ! for a delete or update query) which cannot be evaluated any further. So we don’t need to apply eval to it. q)pTree:parse str:"select count i from trade where 140>(count;i) fby sym" q)@[pTree;2 3 4;eval] ? 
`trade ,(>;140;(k){@[(#y)#x[0]0#x 1;g;:;x[0]'x[1]g:.=y]};(enlist;#:;`i);`sym)) 0b (,`x)!,(#:;`i) q)value[str] ~ value @[pTree;2 3 4;eval] 1b Variable representation in parse trees¶ Recall that in a parse tree a variable is represented by a symbol containing its name. So to represent a symbol or a list of symbols, you must use enlist on that expression. In k, enlist is the unary form of the comma operator in k: q)parse"3#`a`b`c`d`e`f" # 3 ,`a`b`c`d`e`f q)(#;3;enlist `a`b`c`d`e`f)~parse"3#`a`b`c`d`e`f" 1b This causes a difficulty as q has no unary syntax for operators. Which means the following isn’t a valid q expression and so returns an error. q)(#;3;,`a`b`c`d`e`f) ', In the parse tree we receive we need to somehow distinguish between k’s unary , (which we want to replace with enlist ) and the binary Join operator, which we want to leave as it is. Explicit definitions in .q are shown in full¶ The fby in the select query above is represented by its full k definition. q)parse "fby" k){@[(#y)#x[0]0#x 1;g;:;x[0]'x[1]g:.=y]} While using the k form isn’t generally a problem from a functionality perspective, it does however make the resulting functional statement difficult to read. The solution¶ We will write a function to automate the process of converting a select query into its equivalent functional form. This function, buildQuery , will return the functional form as a string. q)buildQuery "select count i from trade where 140>(count;i) fby sym" "?[trade;enlist(>;140;(fby;(enlist;count;`i);`sym));0b; (enlist`x)! enlist (count;`i)]" When executed it will always return the same result as the select query from which it is derived: q)str:"select count i from trade where 140>(count;i) fby sym" q)value[str]~value buildQuery str 1b And since the same logic applies to exec , update and delete it will be able to convert to their corresponding functional forms also. To write this function we will solve the three issues outlined above: - parse-tree items may be parse trees - parse trees use k’s unary syntax for operators - q keywords from .q. are replaced by their k definitions The first issue, where some items returned by parse may themselves be parse trees is easily resolved by applying eval to the individual items. The second issue is with k’s unary syntax for , . We want to replace it with the q keyword enlist . To do this we define a function that traverses the parse tree and detects if any element is an enlisted list of symbols or an enlisted single symbol. If it finds one we replace it with a string representation of enlist instead of , . ereptest:{ //returns a boolean (1=count x) and ((0=type x) and 11=type first x) or 11=type x} ereplace:{"enlist",.Q.s1 first x} funcEn:{$[ereptest x;ereplace x;0=type x;.z.s each x;x]} Before we replace the item we first need to check it has the correct form. We need to test if it is one of: - An enlisted list of syms. It will have type 0h , count 1 and the type of its first item will be11h if and only if it is an enlisted list of syms. - An enlisted single sym. It will have type 11h and count 1 if and only if it is an enlisted single symbol. The ereptest function above performs this check, with ereplace performing the replacement. Console size .Q.s1 is dependent on the size of the console so make it larger if necessary. Since we are going to be checking a parse tree which may contain parse trees nested to arbitrary depth, we need a way to check all the elements down to the base level. We observe that a parse tree is a general list, and therefore of type 0h . 
This knowledge combined with the use of .z.s allows us to scan a parse tree recursively. The logic goes: if what you have passed into funcEn is a parse tree then reapply the function to each element. To illustrate we examine the following select query. q)show pTree:parse "select from trade where sym like \"F*\",not sym=`FD" ? `trade ,((like;`sym;"F*");(~:;(=;`sym;,`FD))) 0b () q)x:eval pTree 2 //apply eval to Where clause Consider the Where clause in isolation. q)x //a 2-list of Where clauses (like;`sym;"F*") (~:;(=;`sym;,`FD)) q)funcEn x (like;`sym;"F*") (~:;(=;`sym;"enlist`FD")) Similarly we create a function which will replace k functions with their q equivalents in string form, thus addressing the third issue above. q)kreplace:{[x] $[`=qval:.q?x;x;string qval]} q)funcK:{$[0=t:type x;.z.s each x;t<100h;x;kreplace x]} Running these functions against our Where clause, we see the k representations being converted to q. q)x (like;`sym;"F*") (~:;(=;`sym;,`FD)) q)funcK x //replaces ~: with “not” (like;`sym;"F*") ("not";(=;`sym;,`FD)) Next, we make a slight change to kreplace and ereplace and combine them. kreplace:{[x] $[`=qval:.q?x;x;"~~",string[qval],"~~"]} ereplace:{"~~enlist",(.Q.s1 first x),"~~"} q)funcEn funcK x (like;`sym;"F*") ("~~not~~";(=;`sym;"~~enlist`FD~~")) The double tilde here is going to act as a tag to allow us to differentiate from actual string elements in the parse tree. This allows us to drop the embedded quotation marks at a later stage inside the buildQuery function: q)ssr/[;("\"~~";"~~\"");("";"")] .Q.s1 funcEn funcK x "((like;`sym;\"F*\");(not;(=;`sym;enlist`FD)))" thus giving us the correct format for the Where clause in a functional select. By applying the same logic to the rest of the parse tree we can write the buildQuery function. q)buildQuery "select from trade where sym like \"F*\",not sym=`FD" "?[trade;((like;`sym;\"F*\");(not;(=;`sym;enlist`FD)));0b;()]" One thing to take note of is that since we use reverse lookup on the .q namespace and only want one result we occasionally get the wrong keyword back. q)buildQuery "update tstamp:ltime tstamp from z" "![z;();0b;(enlist`tstamp)!enlist (reciprocal;`tstamp)]" q).q`ltime %: q).q`reciprocal %: These instances are rare and a developer should be able to spot when they occur. Of course, the functional form will still work as expected but could confuse readers of the code. Fifth and sixth arguments¶ Functional select also has ranks 5 and 6; i.e. fifth and sixth arguments. Q for Mortals: §9.12.1 Functional queries We also cover these with the buildQuery function. q)buildQuery "select[10 20] from trade" "?[trade;();0b;();10 20]" q)//5th parameter included The 6th argument is a column and a direction to order the results by. Use < for ascending and > for descending. q)parse"select[10;<price] from trade" ? `trade () 0b () 10 ,(<:;`price) q).q?(<:;>:) `hopen`hclose q)qfind each ("<:";">:") //qfind defined above hopen hclose We see that the k function for the 6th argument of the functional form is <: (ascending) or >: (descending). At first glance this appears to be hopen or hclose . In fact in earlier versions of q, iasc and hopen were equivalent (as were idesc and hclose ). The definitions of iasc and idesc were later altered to signal a rank error if not applied to a list. q)iasc k){$[0h>@x;'`rank;<x]} q)idesc k){$[0h>@x;'`rank;>x]} q)iasc 7 'rank Since the columns of a table are lists, it is irrelevant whether the functional form uses the old or new version of iasc or idesc . 
The buildQuery function handles the 6th argument as a special case so will produce iasc or idesc as appropriate. q)buildQuery "select[10 20;>price] from trade" "?[trade;();0b;();10 20;(idesc;`price)]" The full buildQuery function code is as follows:
\c 30 200
tidy:{ssr/[;("\"~~";"~~\"");("";"")] $[","=first x;1_x;x]}
strBrk:{y,(";" sv x),z}
//replace k representation with equivalent q keyword
kreplace:{[x] $[`=qval:.q?x;x;"~~",string[qval],"~~"]}
funcK:{$[0=t:type x;.z.s each x;t<100h;x;kreplace x]}
//replace eg ,`FD`ABC`DEF with "enlist`FD`ABC`DEF"
ereplace:{"~~enlist",(.Q.s1 first x),"~~"}
ereptest:{(1=count x) and ((0=type x) and 11=type first x) or 11=type x}
funcEn:{$[ereptest x;ereplace x;0=type x;.z.s each x;x]}
basic:{tidy .Q.s1 funcK funcEn x}
addbraks:{"(",x,")"}
//Where clause needs to be a list of Where clauses,
//so if only one Where clause, need to enlist.
stringify:{$[(0=type x) and 1=count x;"enlist ";""],basic x}
//if a dictionary, apply to both keys and values
ab:{
  $[(0=count x) or -1=type x; .Q.s1 x;
    99=type x; (addbraks stringify key x),"!",stringify value x;
    stringify x]
  }
inner:{[x]
  idxs:2 3 4 5 6 inter ainds:til count x;
  x:@[x;idxs;'[ab;eval]];
  if[6 in idxs;x[6]:ssr/[;("hopen";"hclose");("iasc";"idesc")] x[6]];
  //for select statements within select statements
  x[1]:$[-11=type x 1;x 1;[idxs,:1;.z.s x 1]];
  x:@[x;ainds except idxs;string];
  x[0],strBrk[1_x;"[";"]"]
  }
buildQuery:{inner parse x}
qSQL Q for Mortals §9.12 Functional Forms Functional Query Functions

Performance tips¶ How do I execute functions in parallel?¶ In the expression f each xs , f is applied to each element of xs in sequence. In a multi-CPU setting, applications of f can be done in parallel by using peach instead of each . Typically this is worth it if f is computationally expensive.
Evaluating a hardware configuration¶ The scripts throughput.q and io.q are a useful starting point for users wanting to measure the performance of the systems where kdb+ will be deployed. The results of these (somewhat rough) tests can be used to stress-test different CPU, disk and network configurations running kdb+.
Throughput¶ This test measures the time to insert a million rows into a table, one at a time, and also as bulk inserts of 10, 100, 1000, and 10000 rows. To run the test, simply load throughput.q into a q session: $ q throughput.q On an AMD Opteron box with 4 GB of RAM, we get 0.672 million inserts per second (single insert) 6.944 million inserts per second (bulk insert 10) 20.408 million inserts per second (bulk insert 100) 24.39 million inserts per second (bulk insert 1000) 25 million inserts per second (bulk insert 10000) On an AMD Turion64 laptop with 0.5 GB of RAM 0.928 million inserts per second (single insert) 8.065 million inserts per second (bulk insert 10) 16.129 million inserts per second (bulk insert 100) 16.129 million inserts per second (bulk insert 1000) 16.129 million inserts per second (bulk insert 10000) On a 12-core Mac mini with 64 GB of RAM KDB+ 4.1t 2022.01.14 Copyright (C) 1993-2022 Kx Systems m64/ 12()core 65536MB ..
2.639 million inserts per second (single insert) 25 million inserts per second (bulk insert 10) 166.667 million inserts per second (bulk insert 100) 333.333 million inserts per second (bulk insert 1000) 333.333 million inserts per second (bulk insert 10000) throughput.q : STDOUT: -1 SYMS: -1000?`3 EXCHANGES: 10#.Q.A getRandomTrades: {[N] ([]sym: N?SYMS; time: N?.z.t; price: N?100e; size: N?1000i; stop:N?0b; cond:N?.Q.A; ex:N?EXCHANGES)} t1: getRandomTrades 1 t10: getRandomTrades 10 t100: getRandomTrades 100 t1000: getRandomTrades 1000 t10000: getRandomTrades 10000 tradeNew: 0#t1; tmp:value"\\t do[1000000;tradeNew,:t1]" / prepare space tradeNew:0#t1 ms:value"\\t do[1000000;tradeNew,:t1]" tmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms)," million inserts per second (single insert)" tradeNew:0#t1 ms:value"\\t do[100000;tradeNew,:t10]" tmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms)," million inserts per second (bulk insert 10)" tradeNew:0#t1 ms:value"\\t do[10000;tradeNew,:t100]" tmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms)," million inserts per second (bulk insert 100)" tradeNew:0#t1 ms:value"\\t do[1000;tradeNew,:t1000]" tmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms)," million inserts per second (bulk insert 1000)" tradeNew:0#t1 ms:value"\\t do[100;tradeNew,:t10000]" tmp:STDOUT(string 0.001*floor 0.5+(count tradeNew)%ms)," million inserts per second (bulk insert 10000)" exit 0 Disk input/output¶ This test measures the cost of disk access from kdb+. Things that are measured include: open and close of a file; read files (cold and in the cache); write files; appends; getting the size of a file; etc. The benchmark first creates the test files, and then does something else for a while to get them out of the cache. $ q io.q -prepare KDB+ 2.4t 2006.09.29 Copyright (C) 1993-2006 Kx Systems l64/ 4cpu 3943MB ... start local q server with: q -p 5555 tmpfiles created Next we need to start a second kdb+ process. $ q -p 5555 Now we can run the benchmark. $ q io.q -flush 32 -run On an AMD Opteron box with 4 GB of RAM, we get: memory flushed (32GB) * local file hclose hopen`:read.test 0.0094 ms read `:read.test - 270 MB/sec read `:read.test - 392 MB/sec (cached) write `:write.test - 157 MB/sec * local fileops .[`:file.test;();,;2 3] 0.017 ms .[`:file.test;();:;2 3] 0.093 ms append (2 3) to handle 0.00883 ms hcount`:file.test 0.0053 ms read1`:file.test 2.1732 ms value`:file.test 0.0251 ms * local comm hclose hopen`:127.0.0.1:5555 0.135 ms sync (key rand 100) 0.06277 ms async (string 23);collect 0.00773 ms sync (string 23) 0.05514 ms Finally, we can clean up the temporary files. $ q io.q -cleanup tmpfiles deleted Command-line arguments¶ q io.q [-run] [-prepare] [-cleanup] [-flush memsizeingb] [-rl remotelocation] [-rh remotehost] / hardware timings eg: q io.q -prepare -rl /mnt/foo q io.q -flush 32 -run -rl /mnt/foo -rh server19:5005 q io.q -cleanup -rl /mnt/foo If remote host/location aren’t supplied only local tests will be run. The local and remote q servers must be started manually. Performance of different versions of insert¶ There are several syntactic forms to insert rows into tables, with different costs. We demonstrate the differences. In the examples, we use a non-keyed table. q)trade date open high low close volume sym ------------------------------------------------ 2006.10.03 24.5 24.51 23.79 24.13 19087300 AMD 2006.10.03 27.37 27.48 27.21 27.37 39386200 MSFT q)row: first trade In our first test, we use insert . 
q)load `:trade q)row: first trade q)\t do[1000000; insert[`trade; row]] 1968 Next we test the version that uses the dot notation. q)load `:trade q)row: first trade q)\t do[1000000; .[`trade; (); ,; row]] 1890 Those two can take the table as a parameter. If the table is known, we can also use the Amend operator, which is faster: q)load `:trade q)row: first trade q)\t do[1000000; trade,: row] 1718 Differences between versions The result of this comparison might vary between different versions of kdb+. The tests shown above are for V2.4t. Finally, remember that bulk insert is faster than repeated inserts of single rows: q)load `:trade q)row: first trade q)rows: 1000000 # enlist row q)\t insert[`trade; rows] 109 q)load `:trade q)\t .[`trade; (); ,; rows] 78 q)load `:trade q)\t trade,: rows 78 Using the `g# attribute¶ This recipe demonstrates the use of the `g# attribute to improve performance of queries. The test is as follows: given 10 million trades and 10 million quotes, how long does it take to snapshot price, bid, ask, mid for the SP500 at some prior time? The tables can be set up like this: q)n:10000000 q)s:`$read0`:tick/sp500.txt q)S:s,-7500?`4 / 8000 symbols q)t:{09:30:00.0+floor 23400000%x%til x} / milliseconds from 9:30 to 16:00 q)trade:([]sym:n?S;time:t n;price:n?100.0;ox:n?2) / 10 million trades q)quote:([]sym:n?S;time:t n;bid:n?100.0;ask:n?100.0) / 10 million quotes q)r:first s / sample ric q)t:12:00:00.0 / sample time The test queries and their running times are as follows: q)\t select last sym,last price from trade where sym=r,ox=1,time<=t 84 q)\t select from trade where sym=r,ox=1,time=time time bin t 84 Now, let’s apply the attribute to the sym column: q)update `g#sym from `trade q)update `g#sym from `quote The queries now run faster. q)\t select last sym,last price from trade where sym=r,ox=1,time<=t 0 q)\t select from trade where sym=r,ox=1,time=time time bin t 0 In fact, we need to run them many times to get a measurable time: q)n:1000 q)\t do[n;select last sym,last price from trade where sym=r,ox=1,time<=t] 78 q)\t select from trade where sym=r,ox=1,time=time time bin t 83 STAC-M3 benchmark¶ STAC-M3 is an independent benchmark for testing solutions (such as kdb+) that manage large timeseries datasets (tick databases). This has been run using kdb+ on several platforms. The results are available to registered STAC users. These benchmarks are run on a year of daily NYSE TAQ-like data, approximately 5 TB in total. They use a series of up to 20 complex queries that were defined by financial institutions to reflect real business requirements. The benchmarks enable users and vendors to compare the performance of their database solutions against audited, third-party measurements. Pivot tables¶ Some notes on the theory and practice of pivoting tables. Simple pivot example¶ Given a source table q)t:([]k:1 2 3 2 3;p:`xx`yy`zz`xx`yy;v:10 20 30 40 50) we want to obtain q)pvt:([k:1 2 3]xx:10 40 0N;yy:0N 20 50;zz:0N 0N 30) As originally suggested by Jeff Borror, we begin by getting the distinct pivot values – these will become our column names in addition to the key column k . Note that p must be a column of symbols for this to work. q)P:asc exec distinct p from t; And then create the pivot table! q)pvt:exec P#(p!v) by k:k from t; which can be read as: for each key k , create a dictionary of the present columns p and their values v , take the full list of columns from that dict, and finally collapse the list of dicts to a table. 
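The recipe above can be packaged as a small helper. This is a minimal sketch rather than part of the original recipe: the name simplePivot is ours, and it assumes the source table has exactly the columns k, p and v used in the example, with p a symbol column.
simplePivot:{[t]
  / distinct pivot values become the new column names
  P:asc exec distinct p from t;
  / one dictionary per key value, collapsed to a keyed table
  exec P#(p!v) by k:k from t}
Applied to the table t above, simplePivot t builds the same keyed table as pvt.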
Another variation on creating the pivot table q)pvt:exec P!(p!v)P by k:k from t; Explanation¶ A key point to remember is that a table is a list of dictionaries and that is key to how we build the resulting pivot table. A list of conforming dictionaries (same symbol keys, value types) collapses to a table. q)pvt:((`k`xx`yy`zz!1 10 0N 0N);(`k`xx`yy`zz!2 40 20 0N);(`k`xx`yy`zz!3 0N 50 30)) It’s helpful to play around with these constructs at the q prompt. q)exec p!v from t `xx`yy`zz`xx`yy!10 20 30 40 50 Extract key/value pairs for p and v grouped by k q)exec p!v by k from t 1 2 3!(enlist `xx!enlist 10;`yy`xx!20 40;`zz`yy!30 50) Create a list of dictionaries q)exec p!v by k:k from t (flip (enlist `k)!enlist 1 2 3)!(enlist `xx!enlist 10;`yy`xx!20 40;`zz`yy!30 50) In the dictionaries create nulls for missing values to allow them to conform with common column names and collapse to a table q)exec P#(p!v) by k:k from t (+(,`k)!,1 2 3)!+`s#`xx`yy`zz!(10 40 0N;0N 20 50;0N 0N 30) A very general pivot function, and an example¶ Credit The following is derived from a thread on the k4 listbox between Aaron Davies, Attila Vrabecz and Andrey Zholos. Create sample data set of level-2 data at 4 quotes a minute, two sides, five levels, NSYE day q)qpd:5*2*4*"i"$16:00-09:30 q)date:raze(100*qpd)#'2009.01.05+til 5 q)sym:(raze/)5#enlist qpd#'100?`4 q)sym:(neg count sym)?sym q)time:"t"$(raze/) 500#enlist 10#'09:30:00+15*til (qpd div 5*2) q)time+:(count time)?1000 q)side:raze 500#enlist raze(qpd div 2)#enlist"BA" q)level:raze 500#enlist raze(qpd div 5)#enlist 0 1 2 3 4 q)level:(neg count level)?level q)price:(500*qpd)?100f q)size:(500*qpd)?100 q)quote:([]date;sym;time;side;level;price;size) / pivot t, keyed by k, on p, exposing v / f, a function of v and pivot values, names the columns / g, a function of k, pivot values, and the return of f, orders the columns / either can be defaulted with (::) / conceptually, this is / exec f\[v;P\]!raze((flip(p0;p1;.))!/:(v0;v1;..))\[;P\]by k0,k1,.. 
from t / where P~exec distinct flip(p0;p1;..)from t / followed by reordering the columns and rekeying piv:{[t;k;p;v;f;g] v:(),v; G:group flip k!(t:.Q.v t)k; F:group flip p!t p; count[k]!g[k;P;C]xcols 0!key[G]!flip(C:f[v]P:flip value flip key F)!raze {[i;j;k;x;y] a:count[x]#x 0N; a[y]:x y; b:count[x]#0b; b[y]:1b; c:a i; c[k]:first'[a[j]@'where'[b j]]; c}[I[;0];I J;J:where 1<>count'[I:value G]]/:\:[t v;value F]} q)f:{[v;P]`$raze each string raze P[;0],'/:v,/:\:P[;1]} q)g:{[k;P;c]k,(raze/)flip flip each 5 cut'10 cut raze reverse 10 cut asc c} / `Bpricei`Bsizei`Apricei`Asizei for levels i Use a small subset for testing q)q:select from quote where sym=first sym q)book:piv[`q;`date`sym`time;`side`level;`price`size;f;g] q)![`book;();`date`sym!`date`sym;{x!fills,'x}cols get book]; q)book One user reports: This is able to pivot a whole day of real quote data, about 25 million quotes over about 4000 syms and an average of 5 levels per sym, in a little over four minutes.

upsert ¶ Overwrite or append records to a table x upsert y upsert[x;y] Where x is a table, or the name of a table as a symbol atom, or the name of a splayed table as a directory handle; and y is zero or more records; the records are upserted into the table. The record/s y may be either - lists with types that match type each x cols x - a table with columns that are members of cols x and have corresponding types If x is the name of a table, it is updated in place. Otherwise the updated table is returned. If x is the name of a table as a symbol atom (or the name of a splayed table as a directory handle) that does not exist in the file system, it is written to file.
Simple table¶ If the table is simple, new records are appended. If the records are in a table, it must be simple. q)t:([]name:`tom`dick`harry;age:28 29 30;sex:`M) q)t upsert (`dick;49;`M) name age sex ------------- tom 28 M dick 29 M harry 30 M dick 49 M q)t upsert((`dick;49;`M);(`jane;23;`F)) name age sex ------------- tom 28 M dick 29 M harry 30 M dick 49 M jane 23 F q)`t upsert ([]age:49 23;name:`dick`jane) `t q)t name age sex ------------- tom 28 M dick 29 M harry 30 M dick 49 jane 23
Keyed table¶ If the table is keyed, any new records that match on key are updated. Otherwise, new records are inserted. If the right argument is a table it may be keyed or unkeyed. q)show a:([s:`q`w`e]r:1 2 3;u:5 6 7) / keyed table s| r u -| --- q| 1 5 w| 2 6 e| 3 7 q)a upsert (`e;30;70) / single record s| r u -| ----- q| 1 5 w| 2 6 e| 30 70 q)a upsert ((`e;30;70);(`r;40;80)) / multiple records s| r u -| ----- q| 1 5 w| 2 6 e| 30 70 r| 40 80 q)/update `q and `e, insert new `r; return new table q)a upsert ([s:`e`r`q]r:30 4 10;u:70 8 50) / keyed table s| r u -| ----- q| 10 50 w| 2 6 e| 30 70 r| 4 8 q)`a upsert ([s:`e`r`q]r:30 4 10;u:70 8 50) / same but update table in place `a
Serialized table¶ q)`:data/tser set ([] c1:`a`b; c2:1.1 2.2) `:data/tser q)`:data/tser upsert (`c; 3.3) `:data/tser q)get `:data/tser c1 c2 ------ a 1.1 b 2.2 c 3.3 Upserting to a serialized table reads the table into memory, updates it, and writes it back to file.
Splayed table¶ q)`:data/tsplay/ set ([] c1:`sym?`a`b; c2:1.1 2.2) `:data/tsplay/ q)`:data/tsplay upsert (`sym?`c; 3.3) `:data/tsplay q)select from `:data/tsplay c1 c2 ------ a 1.1 b 2.2 c 3.3 Upserting to a splayed table appends new values to the column files. Upserting to a serialized or splayed table removes any attributes set. Cond is not supported inside q-SQL expressions Enclose in a lambda or use Vector Conditional instead. value ¶ Recurse the interpreter value x value[x] Returns the value of x : dictionary value of the dictionary symbol atom value of the variable it names enumeration corresponding symbol vector string result of evaluating it in current context list result of calling or indexing the first element with the remaining elements (if the first element is a string or symbol, it is evaluated first) note that this is different from a parse tree that is handled by eval . projection list: function followed by argument/s composition list of composed values derived function argument of the iterator operator internal code view list of metadata lambda structure file symbol content of datafile Examples: q)value `q`w`e!(1 2;3 4;5 6) / dictionary 1 2 3 4 5 6 q)a:1 2 3 q)value `a / symbol 1 2 3 q)e:`a`b`c q)x:`e$`a`a`c`b q)x `e$`a`a`c`b q)value x / enumeration `a`a`c`b q)value "enlist a:til 5" / string 0 1 2 3 4 q)value "{x*x}" {x*x} q)value "iasc 2 7 3 1" 3 0 2 1 q)\d .a q.a)value"b:2" 2 q.a)b 2 q.a)\d . q)b 'b q).a.b 2 q)value(+;1;2) / list - apply a function or index a list 3 q)/ if the first item is a string or symbol, it is evaluated first q)value(`.q.neg;2) -2 q)value("{x+y}";1;2) 3 q)value +[2] / projection + 2 q)value differ / composition ~: ~': q)f:,/:\: / derived function q)value f ,/: q)value each (::;+;-;*;%) / operator 0 1 2 3 4 The string form can be useful as a kind of ‘prepared statement’ from the Java client API since the Java serializer doesn’t support lambdas and keywords. View¶ returns a list of metadata: - cached value - parse tree - dependencies - definition When the view is pending, the cached value is :: . q)a:1 q)b::a+1 q)get`. `b :: (+;`a;1) ,`a "a+1" q)b 2 q)get`. `b 2 (+;`a;1) ,`a "a+1" q) Lambda¶ The structure of the result of value on a lambda is subject to change between versions. As of V3.5 the structure is: (bytecode;parameters;locals;(namespace,globals);constants[0];…;constants[n];m;n;f;l;s) where | this | is | |---|---| m | bytecode to source position map; -1 if position unknown | n | fully qualified (with namespace) function name as a string, set on first global assignment, with @ appended for inner lambdas; () if not applicable | f | full path to the file where the function originated from; "" if not applicable | l | line number in said file; -1 if n/a | s | source code | q)f:{[a;b]d::neg c:a*b+5;c+e} q)value f 0xa0624161430309220b048100028269410004 `a`b ,`c ``d`e 5 21 19 20 17 18 0 16 11 0 9 0 9 0 25 23 24 2 "..f" "" -1 "{[a;b]d::neg c:a*b+5;c+e}" q)/Now define in .test context – globals refer to current context of test q)\d .test q.test)f:{[a;b]d::neg c:a*b+5;c+e} q.test)value f 0xa0624161430309220b048100028269410004 `a`b ,`c `test`d`e 5 21 19 20 17 18 0 16 11 0 9 0 9 0 25 23 24 2 ".test.f" "" -1 "{[a;b]d::neg c:a*b+5;c+e}" Local values in suspended functions¶ See changes since V3.5 that support debugging. get ¶ The function value is the same as get By convention get is used for file I/O but the two are interchangeable. 
q)get "2+3" / same as value 5 q)value each (get;value) / same internal code 19 19
var , svar ¶ Variance, sample variance
var ¶ Variance var x var[x] Where x is a numeric list, returns its variance as a float atom. Nulls are ignored. q)var 2 3 5 7 3.6875 q)var 2 3 5 0n 7 3.6875 q)select var price by sym from trade where date=2010.10.10,sym in`IBM`MSFT var is an aggregate function: where sqr:{x*x} , it is equivalent to {avg[sqr x]-sqr[avg x]} . Since 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists. q)M:get`:m77 set m:(2 3;4 0N;1 7) q)var m 1.555556 4 q)var M 1.555556 4 q)T:get`:tab/ set t:flip`a`b!flip m q)var t a| 1.555556 b| 4 q)var T a| 1.555556 b| 4 var is a multithreaded primitive.
svar ¶ Sample variance svar x svar[x] Where x is a numeric list, returns its sample variance as a float atom. \[svar(x)=\frac{n}{n-1}var(x)\] q)var 2 3 5 7 3.6875 q)svar 2 3 5 7 4.916667 q)select svar price by sym from trade where date=2010.10.10,sym in`IBM`MSFT svar is an aggregate function, equivalent to {var[x]*count[x]%-1+count x} . Since 4.1t 2022.04.15, can also traverse columns of tables and general/anymap/nested lists. q)M:get`:m77 set m:(2 3;4 0N;1 7) q)svar m 2.333333 8 q)svar M 2.333333 8 q)T:get`:tab/ set t:flip`a`b!flip m q)svar t a| 2.333333 b| 8 q)svar T a| 2.333333 b| 8 svar is a multithreaded primitive.
Domain and range¶
domain: b g x h i j e f c s p m d z n u v t
range: f . f f f f f f f . f f f f f f f f
? Vector Conditional¶ Replace selected items of one list with corresponding items of another ?[x;y;z] Where x is a boolean vector; y and z are lists of the same type; and x , y , and z conform; returns a new list by replacing elements of y with the elements of z when x is false. All three arguments are evaluated. q)?[11001b;1 2 3 4 5;10 20 30 40 50] 1 2 30 40 5 If x , y , or z are atomic, they are repeated. q)?[11001b;1;10 20 30 40 50] 1 1 30 40 1 q)?[11001b;1 2 3 4 5;99] 1 2 99 99 5 Since V2.7 2010.10.07 ?[x;y;z] works for atoms too. Vector Conditional can be used in qSQL queries, which do not support Cond. For multiple cases – more than just true/false – see Controlling evaluation. ? Query, Cond, if Controlling evaluation Q for Mortals §10.1.3 Vector Conditional Evaluation
view , views ¶
view ¶ Expression defining a view view x view[x] Where x is a view (by reference), returns the expression defining x . q)v::2+a*3 / define dependency v q)a:5 q)v 17 q)view `v / view the dependency expression "2+a*3"
views ¶ List views defined in the default namespace views[] Returns a sorted list of the views currently defined in the default namespace. q)w::b*10 q)v::2+a*3 q)views[] `s#`v`w Metadata Views Q for Mortals §4.11 Alias

u.q¶ u.q is available from KxSystems/kdb-tick
Overview¶ Contains functions that allow clients to subscribe to all or a subset of the available data, publish data to interested clients, and alert clients to events, for example end-of-day. It tracks client subscription interest and removes a client's subscription details when the client disconnects. This script is loaded by other processes, for example a tickerplant.
Usage¶ To allow the ability to publish data to any process, do the following: - load u.q - declare the tables to be published in the top level namespace. Each table must contain a column called sym , which acts as the single key field to which subscribers subscribe - initialize by calling .u.init[] - publish data by calling .u.pub[table name; table data] The list of tables that can be published and the processes currently subscribed are held in .u.w . Subscriber processes must open a connection to the publisher and call .u.sub[tablename;list_of_symbols_to_subscribe_to] . If a subscriber calls .u.sub again, the current subscription is overwritten either for all tables (if a wildcard is used) or the specified table. To add to a subscription, for example, add more syms to a current subscription, the subscriber can call .u.add ). Clients should define a upd function to receive updates, and .u.end function for end-of-day events. Variables¶ | Name | Description | |---|---| | .u.w | Dictionary of registered client interest in data being processed (for example, tables->(handle;syms) | | .u.t | Table names | Functions¶ Functions are open source and open to customisation. .u.init¶ Initialise variables used to track registered clients. .u.init[] Initialises variables by retrieving all tables defined in the root namespace. Used to track client interest in data being published. .u.del¶ Delete subscriber from dictionary of known subscribers (.u.w ) for given table .u.del[x;y] Where x is a table namey is the connection handle .u.sel¶ Select from table, given optional sym filter. Used to filter tables to clients who may not want everything from the table. .u.sel[x;y] Where x is a tabley is a list of syms (can be empty list) returns the table x , which can be filtered by y . .u.pub¶ Publish updates to subscribers. .u.pub[x;y] Where x is table name (sym type)y is new data for tablex (table type) Actions performed: - find interested client handles for table x and any filter they may have (using.u.w ) - for each client - filter y using.u.sel (if client specified a filter at subscription time) - publish asynchronously to client, calling their upd function with parameters table name and table data. - filter .u.add¶ Add client subscription interest in table with optional filter. .u.add[x;y] Where x is a table name (sym)y is list of syms used to filter table data, with empty sym representing for all table data Actions performed: - uses .z.w to get current client handle. - find any existing subscriptions to table x for client (using.u.w )- if existing, update filter with union on y - else a new entry is added to .u.w with client handle,x andy . - if existing, update filter with union on Returns 2 element list. The first element is the table name. The second element depends on whether x refers to a keyed table. - If x is a keyed table,.u.sel is used to select from the keyed table the required syms - otherwise returns an empty table x (schema definition of table), with the grouped attribute applied to the sym column. .u.sub¶ Used by clients to register subscription interest. .u.sub[x;y] Where x is a table name (sym)y is list of syms used to filter table data, with empty sym representing for all table data If x is empty symbol, client is subscribed to all known tables using y criteria. This is achieved by calling .u.sub for each table in .u.t . For the subscribing client, any previous registered in the given tables are removed prior to reinstating new criteria provided i.e. calls .u.del . 
Calls .u.add to record the client subscription. Returns
- a two item list if x is an individual table name. First item is the table name subscribed to as a symbol. Second item is an empty table (table schema).
- a list of two item lists as described above for each individual table, if x is an empty symbol (i.e. subscribe to all tables)
- an error if the table does not exist.
.u.end¶ Inform all registered clients that end-of-day has occurred. .u.end[x] Where x is a date, representing the day that is ending. Iterates over all client handles via .u.w and asynchronously calls their .u.end function passing x .
.z.pc¶ Implementation of .z.pc callback for connection close. Called when a client disconnects. The client handle provided is used to call .u.del for all tables. This ensures all subscriptions are removed for that client.
Example¶ tick.q is an example of a tickerplant that uses u.q for pub/sub. In addition, the example scripts below demonstrate pub/sub in a standalone publisher and subscriber. They can be downloaded from KxSystems/cookbook/pubsub. Each script should be run from the OS command prompt as shown in the following example. $ q publisher.q $ q subscriber.q The publisher generates some random data and publishes it periodically on a timer. The subscriber receives data from the publisher and displays it on the screen. You can modify the subscription request and the upd function of the subscriber as required. You can run multiple subscribers at once.

Implicit iteration¶ Before you specify iteration, see whether what you need is already implicit in the operators and keywords.
This tutorial as a video presentation
Lists and dictionaries are first-class entities in q, and most operators and keywords iterate through them. This article is about when to leave it to q. That is, when not to specify iteration. Recall:
- Map iteration - evaluates an expression once on each item in a list or dictionary.
- Accumulator iteration - evaluates an expression successively: the result of one evaluation becomes an argument of the next.
Implicit map iterations¶ The simplest and most common implicit map iteration is pairwise: between corresponding list items. q)10 100 1000 * (1 2 3;4 5 6;7 8) 10 20 30 400 500 600 7000 8000 Of course, this requires the lists to have the same number of items. q)10 100 1000 * (1 2 3;4 5 6) 'length [0] 10 100 1000 * (1 2 3;4 5 6) ^
Scalar extension¶ Unless! If one of the operands is an atom, scalar extension pairs it with every list item. q)5 < 1 2 3 4 5 6 7 8 00000111b q)"f" < ("abc";"def";"gh") 000b 000b 11b
Atomic iteration¶ Many operators have atomic iteration: they iterate recursively, pairwise and with scalar extension, until they find the atoms in a list. q)1 4 7 < (1 2 3;4 5 6;7 8) 011b 011b 01b q)(1;2 3 4; 7) < (1 2 3;4 5 6;7 8) 011b 111b 01b q)(1;2 3 4;(5 6 7;8)) < (1 2 3;4 5 6;7 8) 011b 111b (110b;0b) Similarly, some unary keywords implicitly apply to each item of a list argument – and recurse to atoms.
q)cos (1 2 3; 4 5 6) 0.5403023 -0.4161468 -0.9899925 -0.6536436 0.2836622 0.9601703 q)lower("THE";("Quick";"Brown");"FOX") "the" ("quick";"brown") "fox" Atomic operators are atomic in both their left and right domains. 4 < (1;2 3 4;(5 6 7;8)) 0b 000b (111b;1b) Some binary keywords are atomic in only one domain. For example, the right argument of within is an ascending pair of sortable type. But in its left domain, within is atomic. q)2 3 4 within 3 6 011b q)(2 3 4;(5; 6 7;8)) within 3 6 0 1 1 1b 10b 0b List iteration¶ List iteration is through list items only – not atomic. The like keyword has list iteration in its left domain. q)`quick like "qu?ck" 1b q)`quick`quack`quark like "qu?ck" / list iteration 110b q)(`quick;`quack`quark) like "qu?ck" / but not atomic 'type [0] (`quick;`quack`quark) like "qu?ck" ^ List iteration stops after the first level: it does not recurse. Simple visualizations¶ Even a simple visual display can be useful. Here are sines of the first twenty positive integers, tested to see which of them is greater than 0.5. q).5 < sin 1 + til 20 11000011000001100001b We can take that boolean vector and use it to index a short string, getting us a simple visual display. And, as you probably know, Index At @ can be elided and replaced with prefix notation. q)".#" @ .5 < sin 1 + til 20 "##....##.....##....#" q)".#" .5 < sin 1 + til 20 "##....##.....##....#" Index At is atomic in its right domain; that is, right-atomic. Here we’ll index a string with an integer vector and we’ll get a string result. q)" -|+" @ 0 3 1 1 1 3 0 " +---+ " If we index it with a 2-row matrix – two integer vectors – we’ll get a character matrix back. q)" -|+" @ (0 3 1 1 1 3 0;0 2 0 0 0 2 0) " +---+ " " | | " And if we take that 2-row matrix and index it – to make selections from it – the result is a numeric matrix. q)(0 3 1 1 1 3 0;0 2 0 0 0 2 0) @ 0 1 1 1 0 0 3 1 1 1 3 0 0 2 0 0 0 2 0 0 2 0 0 0 2 0 0 2 0 0 0 2 0 0 3 1 1 1 3 0 And because Index At is right-atomic we can use the numeric matrix to index the string. q)" -|+" @(0 3 1 1 1 3 0;0 2 0 0 0 2 0) @ 0 1 1 1 0 " +---+ " " | | " " | | " " | | " " +---+ " Index At is right-atomic, but in its left domain it has list iteration: list items need not be atoms. In this example, the list items are themselves strings. If we index that list of strings with an integer matrix, we get back a matrix of strings. q)show L:("the";"quick";"brown";"fox") "the" "quick" "brown" "fox" q)(1 3;2 0) 1 3 2 0 q)L@(1 3;2 0) "quick" "fox" "brown" "the" q)show q:4 5#.Q.a "abcde" "fghij" "klmno" "pqrst" q)q @ (1 2;3 1) / Index At: right-atomic "fghij" "klmno" "pqrst" "fghij" q)q . (1 2;3 1) / Index: list iteration on the right "ig" "nl" Some keywords evaluate a binary expression between adjacent items in a list. q)deltas 1 5 0 9 5 2 1 4 -5 9 -4 -3 q)ratios 2 3 4 5 2 1.5 1.333333 1.25 These are map iterations: the evaluations are independent and can be performed in parallel. Exercise 1¶ sensors.txt contains (24) hourly sensor readings over a 12-day period. Sensor readings are in the range 0-9. $ wget https://code.kx.com/download/learn/iteration/sensors.txt --2022-01-03 11:27:18-- https://code.kx.com/download/learn/iteration/sensors.txt Resolving code.kx.com (code.kx.com)... 74.50.49.235 Connecting to code.kx.com (code.kx.com)|74.50.49.235|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 300 [text/plain] Saving to: ‘sensors.txt’ sensors.txt 100%[===================>] 300 --.-KB/s in 0s 2022-01-03 11:27:19 (143 MB/s) - ‘sensors.txt’ saved [300/300] q)show s:read0`:sensors.txt "030557246251157265736086" "757251109999993270188377" "776439448625126896347568" "116491158137137589031187" "855938799541699262946623" "104948806186867057936025" "328964479858696484945053" "861596102999933729145653" "623589072102430497578780" "240663439999997746246672" "311551572414272384005263" "850884046457214232200714" a. For each of the 24 hours, on how many days of the period did the sensor reading for that hour fall to zero? Converting the sensor readings to numbers is not necessary: they can be compared directly to "0" . q)s="0" 101000000000000000000100b 000000010000000001000000b 000000000000000000000000b 000000000000000000100000b 000000000000000000000000b 010000010000000100000100b 000000000000000000000100b 000000010000000000000000b 000000100010001000000001b 001000000000000000000000b 000000000000000000110000b 001000100000000000011000b The Equals operator has implicit atomic iteration. Here it iterates across the items (rows) of the list s . Each item (row) is a character list (string) and Equals continues iterating through the items. The result of s="0" is a boolean matrix of the same shape as s . Summing it simply adds the rows together. q)sum s="0" 1 1 3 0 0 0 2 3 0 0 1 0 0 0 1 1 0 1 2 2 1 3 0 1i Your maintenance manager gets automated reports printed, but the last report got damaged. She needs your help. b. On which days did the sensor readings begin (8, 6, 1, 5, …) and (1, 1, 6, 4, …)? We can search the first four columns of s for these sequences. q)s[;til 4] "0305" "7572" "7764" "1164" "8559" "1049" "3289" "8615" "6235" "2406" "3115" "8508" The Find operator has list iteration in both left and right domains. q)s[;til 4]?("8615";"1164") 7 3 Visualizations help us find patterns in datasets. Even simple visualizations can be valuable. Normal operating levels are in the range (2,7). c. Display a simple plot showing when the sensors reported levels outside that range. The within keyword take as right argument a 2-item vector of sortable type. It has atomic iteration in its left domain. Keyword not is atomic. q)not s within "27" 101000000001100000000110b 000001111111110001111000b 000001001000100110000001b 110011101100100011101110b 100101011001011000100000b 110101110110100100100100b 001100001101010010100100b 101010110111100001100000b 000011100110001010001011b 001000001111110000000000b 011001000010000010110000b 101110100000010000011010b Because Index At is right-atomic we can use the boolean matrix to index a string. ".#"not s within "27" "#.#........##........##." ".....#########...####..." ".....#..#...#..##......#" "##..###.##..#...###.###." "#..#.#.##..#.##...#....." "##.#.###.##.#..#..#..#.." "..##....##.#.#..#.#..#.." "#.#.#.##.####....##....." "....###..##...#.#...#.##" "..#.....######.........." ".##..#....#.....#.##...." "#.###.#......#.....##.#." At level 9 productivity is highest. d. Plot when in the period this occurred. ".#"s="9" "........................" "........######.........." ".....#..........#......." "....#............#......" "...#...##....##...#....." "...#..............#....." "...#....#....#....#....." "....#....####....#......" ".....#..........#......." "........######.........." "........................" "........................" 
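The ".#" display used in parts c and d is just Index At with a boolean array, so if you plot often it can be wrapped in a one-line helper. A minimal sketch – the name plot is ours, not part of the tutorial:
q)plot:{".#"x}   / 0b selects "." and 1b selects "#"
q)plot .5<sin 1+til 20
"##....##.....##....#"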
Implicit accumulator iterations¶ Accumulator iterations evaluate some expression successively: the result of one evaluation becomes the argument of the next. We have already used the sum keyword, which implicitly evaluates Add between successive items of a list. q)((2+3)+4)+5 14 q)sum 2 3 4 5 14 q)a:`cats`dogs!2 3; b:`cows`sheep!3 4; c:`dogs`sheep!5 6 q)sum (a;b;c) cats | 2 dogs | 8 cows | 3 sheep| 10 sum is an aggregator: it returns the result of its last evaluation. sums also iterates successively, but returns the results of all the evaluations. q)(2;2+3;2+3+4;2+3+4+5) 2 5 9 14 q)sums 2 3 4 5 2 5 9 14 Notice that the result has the same length as the argument: sums is a uniform function. Notice also that the index of the result corresponds to the number of evaluations: (sums 2 3 4 5)[3] is the result of three additions and (sums 2 3 4 5)[0] is the result of no additions. Keywords such as mavg and msum combine map iterations (e.g. evaluate on each group of three successive items) with an aggregator which might employ accumulator iteration, e.g. sum . q)3 msum 1 5 0 9 5 2 2 4 0 5 3 0 1 6 6 14 14 16 9 8 6 9 8 8 Exercise 2¶ Factory productivity is thought to be most affected by the machinery’s fuddling level. An automated process adjusts the fuddling level every 20 minutes to keep it stable; the level resets to zero each midnight. We have in fudadj.csv a log of the adjustments. q)\wget -q https://code.kx.com/download/learn/iteration/fudadj.csv q)read0 `:fudadj.csv / fuddling adjustments "-1,-1,3,3,2,3,3,3,1,-1,3,0,2,1,2,1,0,-1,3,0,3,1,1,1,3,0,-1,3,-1,2,0,2,1,3,0,0,0,.. "0,1,-1,-1,3,-1,-1,3,1,1,2,1,-1,1,3,2,2,3,2,2,2,3,3,3,2,3,0,3,3,1,2,1,-1,-1,-1,0,.. "1,0,-1,2,3,-1,1,-1,-1,-1,2,3,2,0,0,3,3,2,2,-1,2,-1,2,0,1,2,2,0,0,-1,1,3,-1,1,-1,.. "3,2,2,1,3,-1,-1,-1,1,-1,1,1,0,-1,0,3,-1,0,2,0,2,0,1,2,3,2,1,3,-1,2,-1,1,2,1,-1,3.. "1,0,3,-1,2,3,3,1,1,2,-1,1,1,3,-1,2,2,2,2,2,0,3,-1,1,2,-1,3,0,0,1,2,3,3,0,-1,0,-1.. "2,-1,3,2,1,2,3,3,1,2,-1,-1,1,-1,0,-1,3,2,-1,-1,-1,1,1,2,2,3,0,2,1,0,1,2,3,3,2,-1.. "3,1,-1,2,1,3,-1,1,0,1,2,2,1,3,1,1,1,3,2,-1,-1,1,0,3,3,0,0,2,1,0,2,3,2,2,2,0,-1,-.. "-1,2,-1,-1,1,2,-1,0,2,3,0,2,0,1,2,-1,3,3,1,2,-1,-1,-1,3,3,0,1,1,1,3,2,1,-1,1,2,2.. "-1,3,-1,2,0,0,1,1,1,3,0,2,2,2,2,-1,-1,-1,-1,1,1,3,0,3,-1,1,2,3,0,-1,2,2,2,2,0,2,.. "3,2,-1,-1,0,-1,3,2,0,3,1,0,0,2,3,2,1,1,-1,2,3,-1,3,3,3,-1,1,3,2,1,1,1,2,3,2,1,1,.. "0,2,0,1,-1,3,0,2,-1,2,-1,2,0,0,-1,3,0,3,1,0,2,2,3,-1,2,0,1,1,2,0,2,2,0,0,0,-1,1,.. "2,-1,2,-1,3,0,1,1,0,-1,2,2,3,3,0,0,-1,1,3,-1,1,2,2,3,2,-1,0,2,0,3,0,1,1,0,3,3,-1.. What were the fuddling levels corresponding to the sensor readings in Exercise 1? The file has no column headers, so Load CSV returns not a table but a list of columns. q)show fa:(prd[24 3]#"J";csv)0: read0 `:fudadj.csv / fuddling adjustments -1 0 1 3 1 2 3 -1 -1 3 0 2 -1 1 0 2 0 -1 1 2 3 2 2 -1 3 -1 -1 2 3 3 -1 -1 -1 -1 0 2 3 -1 2 1 -1 2 2 -1 2 -1 1 -1 2 3 3 3 2 1 1 1 0 0 -1 3 .. That suits us. The 72 rows correspond to 20-minute intervals. We take cumulative sums across the intervals, and select every third sum to get the hourly levels. Transposing the result gives us 12×24 fuddling levels. 
q)flip sums[fa]@2+3*til 24
1 9 16 18 23 23 29 32 34 38 41 44 44 50 52 54 59 62 65 72 72 76 78 80
0 1 4 8 11 18 24 33 38 45 47 45 50 51 54 55 58 58 57 61 64 68 69 66
0 4 3 7 9 17 20 21 26 25 28 27 28 33 34 34 36 42 46 49 51 55 55 61
7 10 9 10 9 11 15 18 24 28 30 33 35 41 41 43 43 49 51 56 57 57 62 63
4 8 13 15 18 24 28 31 35 36 44 43 45 47 49 55 58 60 64 62 65 67 68 70
4 9 16 16 16 20 17 21 26 29 35 39 42 49 57 59 65 69 73 73 75 74 74 77
3 9 9 14 19 24 24 28 31 34 41 45 46 49 55 57 59 62 68 71 75 77 79 79
0 2 3 8 11 16 18 19 23 28 30 35 36 37 41 41 42 47 54 59 58 59 63 67
1 3 6 11 17 14 15 21 23 25 31 35 41 46 47 51 54 59 63 67 73 77 81 86
4 2 7 11 16 20 24 29 32 38 42 48 51 56 60 67 64 72 75 78 79 82 85 91
2 5 6 9 8 14 17 21 24 27 31 30 32 36 38 39 41 44 42 47 50 50 51 53
3 5 7 10 16 16 19 26 27 32 34 40 39 40 43 51 51 53 57 62 65 63 65 71
It is clear that the automatic adjustments are not keeping the fuddling levels stable.
Yet another way q is weird? If in other languages you are used to specifying iterations, you may at first experience this as an annoying distraction. Besides solving your problem, you also have to learn and keep in mind q’s implicit iterations. You already know how to write iterations. Why now learn this? The reward is that, as implicit iteration becomes familiar to you, you stop thinking about most of the iterations in your code, which leaves you more mental space for problem solving. (Only when we put on noise-cancelling headphones do we discover how much annoying background noise we had been filtering out.) As a bonus, many algorithms are startlingly simple to write in q. It’s way cool.
Conclusion¶ That’s it. The big takeaway is that there is a lot of iteration built into the q primitives. It will almost always give you your shortest, fastest code – and the most readable.

An introduction to neural networks with kdb+¶ Due to the desire to understand the brain and mimic the way it works by creating machines that learn, neural networks have been studied with great interest for many decades. A simple mathematical model for the neuron was first presented to the world by Warren McCulloch and Walter Pitts in 1943. With modern advances in technology and the computational power available, this field of research has had massive implications on how computers can be used to ‘think’ and learn to solve problems given an appropriate algorithm. A few interesting examples of this research in action include: - Interpreting art and painting images [Mordvintsev et al.] [Gatys et al.] A number of different algorithms have been developed around this field of research, and this paper is going to focus on the implementation of a feedforward neural network in kdb+. A feedforward network (also known as a multi-layer perceptron) is a type of supervised machine-learning algorithm which uses a series of nonlinear functions layered together with an output. It can be used for classification or regression purposes and has been shown to be a universal approximator – an algorithm that can model any smooth function given enough hidden units.
Reference See Kurt Hornik, “Approximation Capabilities of Multilayer Feedforward Networks”, Neural Networks, Vol. 4, pp. 251-257, 1991 This design of feedforward networks can be represented through operations on matrices and vectors. Array-programming languages such as q are well suited to computational implementations in this format due to the vectorized operations on lists. All tests were run using kdb+ version 3.2 (2015.05.07) Feedforward networks¶ As mentioned in the introduction, a feedforward neural network is a type of supervised machine-learning algorithm. This means that it is trained on datasets for which the output for given inputs is already known. A neural network consists of a set of neurons connected together in some order. Each neuron receives inputs from other neurons, performs some action on these inputs and outputs a signal that becomes input for another connected neuron. The perceptron¶ A single neuron by itself is often referred to as a perceptron (Figure 1). The idea is to construct a predictor based on a linear combination of the inputs and a set of weights which control the impact an input has on the system. This linear combination (the dot product of the inputs and the weights) is then used as the input to a chosen threshold function which produces the desired output. Figure 1: A perceptron Threshold function¶ A threshold function represents the activation of the perceptron based on its inputs. A common choice for this function is the sigmoid function. q)sigmoid:{1%1+exp neg x} q)output:sigmoid[inputs mmu weights] This function provides a smooth curve bounded asymptotically by 0 and 1 on the vertical axis (Figure 2). Figure 2: A plot of the sigmoid function Perceptrons as linear predictors¶ Perceptrons can only solve linearly-separable problems. A linearly-separable problem is one where two sets in a plane can be cleanly divided by a straight line. We can demonstrate this by looking at plots of truth table outputs. Let us say that a red dot represents true and a blue dot represents false. If we take the standard truth table inputs of 1 or 0 and add some random noise to them so that they are either slightly smaller than 1 or slightly larger than 0, then we get the plots shown in Figure 3. Figure 3: Truth Table Plots Notice how we can easily separate the true results from false in the AND and OR truth tables with a straight line. Perceptrons are good predictors for these problems. However, if the data is not so easily separable, as with the XOR plot, we need a way to be able to make nonlinear predictions. Solving the XOR problem will form our motivation for connecting many perceptrons into a network. A neural network¶ A network consists of neurons connected together in layers as shown in Figure 4. Each neuron in one layer is connected to each neuron in the next layer and each connection has an associated weight. The first layer of neurons is the input layer. The neurons in this layer represent input values from a data sample. Following the input layer are the hidden layers. The number of neurons in a hidden layer is determined through a combination of background understanding of the problem and trial and error. Finally there is the output layer. The number of neurons in this layer is determined by the type of problem to be solved. For example one might wish to classify an input data sample into one of two categories (e.g. true or false as in the XOR problem). This type of problem is referred to as a binary classification problem. 
For this type of problem we only need one output neuron because the sigmoid function will return values close to 1 or 0 after the network is trained. However, the function which acts on the linear combination of inputs and weights at the output layer is not always the same as the threshold function used throughout the rest of the network. This is because we do not always desire a value between 0 and 1. Output functions for addressing different types of problems will be discussed in detail in Output functions for regression and multi-class classification. Figure 4: A feedforward network Bias neurons¶ A bias neuron is a neuron external to the main network. One is added to the input layer and one to each of the hidden layers. The value it passes to neurons in the next layer is always 1 and it receives no inputs from previous layers (see Figure 4). The purpose of bias neurons is analogous to the intercept parameter of a simple linear model – commonly written as \(y = \beta_{0}x_{0} + \beta_{1}x_{1}\). The absence of \(\beta_{0}x_{0}\) in the simple linear model results in the predicted line always passing through (0, 0) and the model will perform poorly when attempting to predict unknown values. Hence we always set \(x_{0}\) to 1 and alter \(\beta_{0}\) as we find the line of best fit. In the neural network we represent the network's version of the \(\beta_{0}x_{0}\) term as a bias neuron and associated weights. For more information on bias terms in statistical modelling see Chapter 3 in [2] and Chapter 7 in [1]. Bias neurons are added to the input layer by adding a 1 to each of the input samples in the data set used to train the network.

// Inputs and expected target values for XOR problem
q)input:((0 0f);(0 1f);(1 0f);(1 1f))
// Add a bias neuron to each input
q)input:input,'1.0
q)target:0 1 1 0f

Weight initialization¶ The initialization process begins by assigning values to the weights present in the network. The weights are randomly assigned such that the values of the weights between the input nodes and a receiving node on the next layer are in the range (-1, 1) with mean 0. Since there will be multiple neurons in the next layer we can represent all the weights between two layers as a matrix, where the number of rows represents the number of inputs and the number of columns represents the number of neurons in the next layer. An example of how this weight matrix and the input matrix interact is shown in Figure 5.

wInit:{
 // If only one input neuron is detected exit
 // This is most likely due to a missing bias neuron
 if[1=x;:"Number of input neurons must be greater than 1."];
 flip flip[r]-avg r:{[x;y]x?1.0}[y]each til x }

Initialize weights between three inputs and four outputs. The first column represents the weights (connections) between the three inputs and the first neuron in the next layer. The second column is the weights leading to the second neuron in the next layer and so on.

q)wInit[3;4]
-0.3586151 0.09051553 -0.2815408  -0.05282783
0.02154042 0.4219367  0.2320934   -0.05853578
0.3370747  -0.5124522 0.04944742  0.1113636

Figure 5: Diagram showing 2 input neurons (green and blue neurons) connecting to 2 hidden neurons. The colors in the matrices correspond to the area of the network in which those values are found during execution of a forward pass. The feedforward network in kdb+¶ Once we have prepared the input data and the weights they can be applied to the network to provide output. We will use the network to predict the outputs of the XOR function.
// weights between input layer and hidden layer (2 inputs + 1 bias neuron) q)w:wInit[3;4] // weights between hidden layer and output layer (4 hidden neurons + 1 bias neuron) q)v:wInit[5;1] q)ffn:{[input;w;v] // Apply inputs and their weights to the hidden layer z:sigmoid[input mmu w]; // Use output from hidden layer to generate an output sigmoid[z mmu v] } q)ffn[input;w;v] 0.5028818 0.5136649 0.4891303 0.5 The network has produced an output, but these values are not close to the target values. This is understandable as the weights have been randomly initialized. In order to produce the desired output the network must learn a more appropriate set of weights. Training the network¶ Training a network to produce accurate results involves determining the weights which minimize the errors of the output nodes. The problem of minimizing the error by adjusting the weights is solved by a technique called back-propagation – a form of gradient descent. Gradient descent begins by calculating the derivative of the error function with respect to the weights. This derivative gives information about the direction needed to move along the surface described by the error function in order to arrive at a minimum. The weights are gradually adjusted using this information until an acceptable minimum in error has been reached. Back-propagation¶ Back-propagation trains the network by propagating information about the error at the output back through the network and adjusting all the weights of the connecting neurons. For an output node that applies the sigmoid function the error function is the cross-entropy error function defined as: This gives us the following update rule for adjusting the weights between the output node and the hidden layer: where: - \(z^{t}_h\) - the output after evaluating the hidden neuron \(h\) for input sample \(t\) - \(v_h\) - the weight between the output neuron and hidden neuron \(h\) - \(y_t\) - the target for sample \(t\) - \(\widehat{y}^t\) - the calculated output for sample \(t\) - \(\alpha\) - the rate at which we adjust the weights (usually < 0.1) Update rules The derivation of the update rules for back-propagation is beyond the scope of this paper. See Chapter 11 in [3] and Chapter 11 in [2]. 
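The displayed equation for the output-layer update rule referenced above did not survive extraction. Under the standard derivation for a sigmoid output neuron with cross-entropy error, and using the symbols defined above, it takes roughly the following form (a hedged reconstruction consistent with the code below, not the paper's verbatim formula):

\[
\Delta v_h = \alpha \sum_t \left( y^t - \widehat{y}^t \right) z^t_h
\]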
Once the change in the above weights has been calculated we propagate the error to the hidden layer and generate the update rule for the weights between the input layer and the hidden layer: where: - \(w_{hj}\) - the weight between hidden neuron \(h\) and input neuron \(j\) - \(x^t_j\) - the input from neuron \(j\) for some sample \(t\) Using these formulas we can update our feedforward network function to implement back-propagation and allow training: // inputs - the input data set with bias node // targets – known outputs corresponding to inputs // lr – learning rate ‘alpha’ // d – dictionary with 3 items: output // weights between input and hidden layers // weights between hidden and output layers ffn:{[inputs;targets;lr;d] z:1.0,/:sigmoid[inputs mmu d`w]; o:sigmoid[z mmu d`v]; // Error of output neurons deltaO:(targets-o); // Error of hidden neurons deltaZ:1_/:$[deltaO;flip d`v]*z*1-z; `o`v`w!(o;d[`v]+lr*flip[z] mmu deltaO; d[`w]+lr*flip[inputs] mmu deltaZ) } // Example – the XOR problem q)inputs 0 0 1 0 1 1 1 0 1 1 1 1 q)targets 0 1 1 0f q)w 0.3257579 0.348099 -0.4320058 0.3356597 -0.07237444 -0.3028193 0.3088185 -0.3069554 -0.2533834 -0.04527963 0.1231874 -0.02870423 q)v -0.0133154 0.04739764 0.2894549 -0.3235371 // Two training passes show little change q)finalResult:(ffn[inputs;targets;0.1]/)[2;`o`w`v!(0,();w;v)] q)finalResult`o 0.5025557 0.5133001 0.4888265 0.4996545 // 10000 training passes shows significant improvement q)finalResult:(ffn[inputs;targets;0.1]/)[10000;`o`w`v!(0,();w;v)] q)finalResult`o 0.009305227 0.9890354 0.9890087 0.01142469 q)\ts finalResult:(ffn[inputs;targets;0.1]/)[10000;`o`w`v!(0,();w;v)] 164 2992 Now that the network has been trained it can be applied to a random permutation of XOR inputs to see how it performs on a single pass. // generate a random permutation of list 0 to 99 q)rp:-100?til 100 // use rp to shuffle inputs and targets // we generate rp first so that the shuffled indices of // the inputs and targets match up q)inputs:(100#inputs)rp q)targets:(100#targets)rp // using weights of trained network solve test inputs q)rw:finalResult`w; rv:finalResult`v q)res: (ffn[inputs;targets;0.1]/)[1;`o`w`v!(0;rw;rv)] // Are all the predictions correct? q)all raze[`int$res`o]=targets 1b Output functions for regression and multi-class classification¶ There are three types of output we are looking for when using a network of this type. The first we have already discussed in A neural network above – binary classification. We have also worked through an example of this problem when applying the network to predict XOR outputs. The other two problems we will discuss are multiclass outputs and nonlinear regression outputs. Multiclass outputs¶ For multiclass outputs the goal is to determine the correct classification of an input into three or more possible classifications. Unlike the binary classification situation, in the output layer there will be one neuron for each possible classification. The target values are transformed using one-hot encoding. This gives us a unique list of 0s and 1s for each possible classification that is the same length as the number of possible classifications. For example, if there are classifications A, B and C the transformations are 0 0 1, 0 1 0 and 1 0 0 – giving the output layer target patterns to match for training and testing. 
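The Iris example later in the paper uses a helper called oneHot for this encoding; its definition is not shown in this excerpt. A minimal q sketch that produces the kind of keyed dictionary shown there (an assumption, not necessarily the paper's own definition) is:

q)oneHot:{x!"f"$x=/:x}   / map each distinct class symbol to its one-hot float vector
q)oneHot `A`B`C
A| 1 0 0
B| 0 1 0
C| 0 0 1

Indexing this dictionary with a column of class labels then yields the one-hot target rows used for training.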
The output function used is the softmax function: where - \(\widehat{y}^t_i\) - the output from neuron \(i\) for sample \(t\) - \(S^t_i\) - the linear combination of outputs from the hidden layer and the weights connecting the hidden layer to output neuron \(i\) for sample \(t\) - \(S^t_k\) - the linear combination of outputs from the hidden layer and the weights connecting the hidden layer to output neuron \(k\) for sample \(t\) By using the softmax function we ensure that the sum of the outputs from each of the neurons in the output layer is 1. That allows us to pick the neuron with the highest output value as the most likely to be the classification we expect for the given input; the ‘winning’ neuron will be assigned a value of 1 and the other neurons a value of 0 resulting in a match to one of the one-hot encoded classifications. The cross-entropy error function in this case is: where \(\widehat{y}^t_i\) is the target value for output neuron \(i\) with sample \(t\). The update rules are: where \(v_{ih}\) is the weight between output neuron \(i\) and hidden neuron \(h\). An example implementation of the softmax output is shown below in Classification for 3+ classes. Nonlinear regression outputs¶ If the goal is to predict a real value, not necessarily constrained within the boundaries imposed by a threshold function, the output function is just the linear combination of the outputs from the hidden layer. where - \(\textbf{v}\) - the vector of weights between the hidden layer and the output layer - \(\textbf{z}^t\) - the vector of outputs from the hidden layer In this case we change the error function from cross-entropy to the sum-of-squared errors: The update rules for a regression output are: q)lin:{x} q)linErr:{0.5*sum sum a*a:x-y} It’s useful now to put the different functions for error and output in dictionary format as this will allow us to use the same ffn function for all 3 types of classification: // x is linear combination of hidden outputs and weights outputFuncs:`sig`smax`lin! ({1%1+exp neg x};{exp[x]%sum flip exp x};{x}) // x is target value, y is calculated errFuncs:`sig`smax`lin! ({neg sum sum flip(x*log y)+(1-x)*log 1-y}; {neg sum sum flip x*log y}; {sum sum a*a:x-y}) ffn:{[inputs;targets;lr;of;d] // Calculate the outputs of the hidden layer // and add bias node z:1.0,/:sigmoid[inputs mmu d`w]; o:outputFuncs[of][z mmu d`v]; // Error of output neurons deltaO:(targets-o); // Error of hidden neurons // Hidden bias node is not connected to any // input layer nodes so we drop it deltaZ:1_/:$[deltaO;flip d`v]*z*1-z; `o`v`w`err! (o; d[`v]+lr*flip[z] mmu deltaO; d[`w]+lr*flip[inputs] mmu deltaZ; errFuncs[of][targets;o]) } Classification for 3+ classes¶ As an example, we will study a set of Iris flower data which was originally introduced into research by Ronald Fisher in 1936. It contains samples from three different species of the Iris flower and has become a standard test case for many classification techniques within machine learning. By taking measurements of certain metrics (eg. length and width of sepals) the plants can be classified and computationally distinguished from each other. The data and a description of the data can be found in the links at archive.ics.uci.edu We one-hot encode the different possible species of Iris, resulting in a neural network with 5 inputs (including the bias neuron), 7 hidden neurons (including the bias neuron) and 3 outputs. The data set is randomly shuffled to reduce the likelihood of a biased output. 
From this randomized selection of the data a random selection of 20 samples is taken as the test set and the other 130 samples are used in training. // one-hot encoding of classes q)IrisOneH:oneHot[distinct Iris.species] q)IrisOneH Iris-setosa | 1 0 0 Iris-versicolor| 0 1 0 Iris-virginica | 0 0 1 q)Iris1h slength swidth plength pwidth species onehot ------------------------------------------------ 5.1 3.5 1.4 0.2 Iris-setosa 1 0 0 4.9 3 1.4 0.2 Iris-setosa 1 0 0 4.7 3.2 1.3 0.2 Iris-setosa 1 0 0 .. // Random permutation of the dataset q)IrisRP:Iris1h{neg[x]?til x}count Iris1h // Pick a test set – samples from data not used in training q)IrisTest:IrisRP[-20?count IrisRP] q)IrisTrain:IrisRP except IrisTest // Init weights, input and output variables q)w:wInit[5;6] q)v:wInit[7;3] q)input:1.0,'flip flip[IrisTrain]`slength`swidth`plength`pwidth q)output:IrisTrain.onehot // Train network q)resIris:(ffn[input;output;0.01;`smax]/)[800;`o`w`v`err!(0;w;v;1f)] // After 800 iterations (or epochs) how well does it perform? q)all(IrisOneH?"f"$"j"$resIris`o)=IrisOneH?output 0b q)100*sum[(IrisOneH?"f"$"j"$resIris`o)=IrisOneH?output]%count output 96.89922 // Init variables for test data q)tinput:1.0,'flip flip[IrisTest]`slength`swidth`plength`pwidth q)toutput:IrisTest.onehot // Run test data through network without training q)resIrisT:(ffn[tinput;toutput;0.01;`smax]/)[1;`o`w`v`err!(0;resIris`w;resIris`v;1f)] q)all (IrisOneH?"f"$"j"$resIrisT`o)=IrisOneH?toutput 1b Plot of error vs training epoch is given in Figure 6. We see that the error settles into an oscillation pattern around training epoch 700. While these oscillations are slowly converging it is likely that overfitting begins to take place shortly after the 700th iteration of training. Figure 6: Error while training network on Iris dataset Stochastic gradient descent¶ Often, in practice, computing the error and gradient for the entire training set can be very slow and in some cases not possible if the data will not fit entirely in memory. Stochastic gradient descent solves this problem by performing the back-propagation training on small batches of the training set chosen at random. Randomly chosen batches simulate real-time streaming of data and help to reduce bias in the final trained network (see chapter 7 in [2] for more details on bias and variance in machine learning). Classification for 3+ classes using stochastic batch training¶ This time we will again use Fisher’s Iris dataset, but the network will be trained using randomly selected small batches from the training data. Conclusion¶ In this white paper we have explored a proof-of-concept implementation of a feedforward network in kdb+. By constructing the model for the network using linear algebra (inputs, outputs and weights represented by matrices) we have shown that an array-processing language is well suited to developing a complex system. We have shown that multiple types of output functions can be easily applied to the network depending on the desired result. This generalization demonstrates the adaptability of kdb+ to many problems where numerical data can be arranged into lists, arrays or tables. In the event that there is not enough main memory to carry out the calculations on all the training data in one pass we presented an alternative approach, stochastic gradient descent, which allows the network to be trained on small batches of the data. The network exhibited in this paper forms the groundwork for more complicated networks. 
Adding additional hidden layers and different options for the threshold function will allow more complex convolutional and deep networks to be developed. All tests were run using kdb+ version 3.2 (2015.05.07) Author¶ James Neill works as a kdb+ consultant for one of the world's largest investment banks, developing a range of applications. James has also been involved in the design of training courses in data science and machine learning as part of the First Derivatives training programme. References¶ - Murphy, K. P. (2012). Machine Learning: A Probabilistic Perspective. MIT Press. - Hastie, T., Tibshirani, R. and Friedman, J. The Elements of Statistical Learning. Springer, New York. (Online version) - Alpaydin, E. Introduction to Machine Learning, Second Edition. MIT Press.

// @kind function // @category main // @subcategory new // // @overview // Generates a new named experiment within the specified registry without // adding a model on-prem or within a supported cloud provider's storage solution // // @todo // It should be possible via configuration to add descriptive information // about an experiment. // // @param folderPath {dict|string|null} Registry location, can be: // 1. A dictionary containing the vendor and location as a string, e.g. // ```enlist[`local]!enlist"myReg"``` or // ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc; // 2. A string indicating the local path; // 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON. // @param experimentName {string} The name of the experiment to be located // under the namedExperiments folder which can be populated by new models // associated with the experiment // @param config {dict|null} Any additional configuration needed for // initialising the experiment // // @return {dict} Updated config dictionary containing relevant // registry paths registry.new.experiment:{[folderPath;experimentName;config] config:registry.util.check.config[folderPath;config]; if[not`local~storage:config`storage;storage:`cloud]; experimentName:registry.util.check.experiment experimentName; registry[storage;`new;`experiment][experimentName;config] } ================================================================================ FILE: ml_ml_registry_q_main_query.q SIZE: 1,717 characters ================================================================================ // query.q - Main callable functions for querying the modelStore // Copyright (c) 2021 Kx Systems Inc // // @overview // Querying the modelStore table. Currently, the below features can // be referenced by users to query the modelStore table: // 1. registrationTime // 2. experimentName // 3. modelName // 4. modelType // 5. version // 6. uniqueID // // @category Model-Registry // @subcategory Functionality // // @end \d .ml // @kind function // @category main // @subcategory query // // @overview // Query the modelStore // // @param folderPath {dict|string|null} Registry location, can be: // 1. A dictionary containing the vendor and location as a string, e.g. // ```enlist[`local]!enlist"myReg"``` or // ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc; // 2. A string indicating the local path; // 3.
A generic null to use the current .ml.registry.location pulled from CLI/JSON. // @param config {dict} Any additional configuration needed for // retrieving the modelStore. Can also be empty dictionary `()!()`. // // @return {table} Most recent version of the modelStore registry.query.modelStore:{[folderPath;config] if[config~(::);config:()!()]; // Retrieve entire modelStore modelStore:registry.get.modelStore[folderPath;config]; // If no user-defined config return entire modelStore k:`modelName`experimentName`modelType`version`registrationTime`uniqueID; if[not any k in key config;:modelStore]; // Generate where clause and query modelStore keys2check:(`modelName`experimentName`modelType;enlist`version;`registrationTime`uniqueID); whereClause:registry.util.query.checkKey[config]/[();keys2check;(like;{all each x=\:y};=)]; ?[modelStore;whereClause;0b;()] } ================================================================================ FILE: ml_ml_registry_q_main_set.q SIZE: 11,054 characters ================================================================================ // set.q - Main callable functions for adding information to the model registry // Copyright (c) 2021 Kx Systems Inc // // @overview // Setting items within the registry including // 1. Models: // - q (functions/projections/appropriate dictionaries) // - Python (python functions + sklearn/keras specific functionality) // 2. Configuration // 3. Model information table // // @category Model-Registry // @subcategory Functionality // // @end \d .ml // @kind function // @category main // @subcategory set // // @overview // Add a q object, Python function, Keras model or sklearn model // to the registry so that it can be retrieved and applied to new data. // In the current iteration there is an assumption of complete // independence for the q functions/files i.e. q function/workflows // explicitly don't use Python to make it easier to store and generate // reintroduce models // // @todo // Improve the configuration information that is being persisted // presently this contains all information within the config folder // however this is not particularly informative and may be confusing // // @param folderPath {dict|string|null} Registry location, can be: // 1. A dictionary containing the vendor and location as a string, e.g. // ```enlist[`local]!enlist"myReg"``` or // ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc; // 2. A string indicating the local path; // 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON. // @param experimentName {string|null} Name of experiment model belongs to // @param model {any} `(<|dict|fn|proj)` Model to be saved to the registry. // @param modelName {string} The name to be associated with the model // @param modelType {string} The type of model that is being saved, namely // "q"|"sklearn"|"keras"|"python" // @param config {dict} Any additional configuration needed for // setting the model // // @return {null} registry.set.model:{[folderPath;experimentName;model;modelName;modelType;config] config:registry.util.check.config[folderPath;config]; if[not`local~storage:config`storage;storage:`cloud]; experimentName:$[(any experimentName ~/: (::;""))|10h<>abs type experimentName; "undefined"; experimentName ]; c:registry[storage;`set;`model][experimentName;model;modelName;modelType;config]; first c`uniqueID } // @kind function // @category main // @subcategory set // // @overview // Add a q object to the registry. 
This should be a q object in the // current process which is either a function/projection/dictionary // containing a predict key // // @param registryPath {string} Full/relative path to the model registry // @param model {any} `(dict|fn|proj)` Model to be saved to the registry. // @param config {dict} Information relating to the model that is // to be saved, this includes version, experiment and model names // // @return {null} registry.set.object:{[typ;registryPath;model;config] toSet:$[type[model]in 10 11 -11h;"File";"Model"]; registry.util.set[`$typ,toSet][registryPath;model;config] } // @kind function // @category main // @subcategory set // // @overview // Set the configuration associated with a specified model version such // that all relevant information needed to redeploy the model is present // with a packaged model // // @param config {dict} Information relating to the model // being saved, this includes version, experiment and model names // // @return {null} registry.set.modelConfig:{[model;modelType;config] safeWrite:{[config;path] if[not count key hsym `$config[`versionPath],"/config/",path,".json"; registry.util.set.json[config;`config;path;enlist config] ]}; $[99h=type model; $[not (("q"~modelType)&((`predict in key model)|(`modelInfo in key model))); {[safeWrite;config;sym;model] safeWrite[config;string[sym],"/modelInfo"] }[safeWrite;config]'[key model;value model]; safeWrite[config;"modelInfo"]]; safeWrite[config;"modelInfo"] ] } // @kind function // @category main // @subcategory set // // @overview // Set the configuration associated with monitoring a specified model version // such that all relevant information needed to monitor the model is present // with a packaged model // // @param model {any} `(<|dict|fn|proj)` Model to be monitored. 
// @param modelType {string} The type of model that is being saved, namely // "q"|"sklearn"|"keras" // @param data {table} Historical data to understand model behaviour // @param config {dict} Information relating to the model // being saved, this includes version, experiment and model names // // @return {null} registry.set.monitorConfig:{[model;modelType;data;config] func : {[sym;model;modelType;data;config] if[not 98h~type data;:(::)]; $[sym~(::); newConfig:.j.k raze read0 hsym `$config[`versionPath],"/config/modelInfo.json"; newConfig:.j.k raze read0 hsym `$config[`versionPath],"/config/",string[sym],"/modelInfo.json" ]; newConfig[`monitoring;`schema;`values]:registry.util.create.schema data; newConfig[`monitoring;`schema;`monitor]:1b; newConfig[`monitoring;`nulls;`values]:registry.util.create.null data; newConfig[`monitoring;`nulls;`monitor]:1b; newConfig[`monitoring;`infinity;`values]:registry.util.create.inf data; newConfig[`monitoring;`infinity;`monitor]:1b; newConfig[`monitoring;`latency;`values]:registry.util.create.latency[model;modelType;data]; newConfig[`monitoring;`latency;`monitor]:1b; newConfig[`monitoring;`csi;`values]:registry.util.create.csi data; newConfig[`monitoring;`csi;`monitor]:1b; newConfig[`monitoring;`psi;`values]:registry.util.create.psi[model;modelType;data]; newConfig[`monitoring;`psi;`monitor]:1b; params:`maxDepth`indent!(10;" "); $[sym~(::); (hsym `$config[`versionPath],"/config/modelInfo.json") 0: enlist .j.j newConfig; (hsym `$config[`versionPath],"/config/",string[sym],"/modelInfo.json") 0: enlist .j.j newConfig] }[;;modelType;;config]; $[all 99h=(type[model];type[data]); [k:key[model] inter key[data];func'[k;model k;data k]]; not 99h=type[model]; func[::;model;data]; '"data to fit monitoring statistics is not partitioned on model key" ] } // @kind function // @category main // @subcategory set // // @overview // Set the configuration associated with supervised monitoring // // @param config {dict} Information relating to the model // being saved, this includes version, experiment and model names // // @return {null} registry.set.superviseConfig:{[model;config] func:{[sym;model;config] $[sym~(::); newConfig:.j.k raze read0 hsym `$config[`versionPath],"/config/modelInfo.json"; newConfig:.j.k raze read0 hsym `$config[`versionPath],"/config/",string[sym],"/modelInfo.json" ]; newConfig[`monitoring;`supervised;`values]:config `supervise; newConfig[`monitoring;`supervised;`monitor]:1b; params:`maxDepth`indent!(10;" "); $[sym~(::); (hsym `$config[`versionPath],"/config/modelInfo.json") 0: enlist .j.j newConfig; (hsym `$config[`versionPath],"/config/",string[sym],"/modelInfo.json") 0: enlist .j.j newConfig ]; }[;;config]; $[99h~type[model]; func'[key[model];value[model]]; func[::;model]] } // @kind function // @category main // @subcategory set // // @overview // Upsert relevant data from current run to modelStore // // @param config {dict} Information relating to the model // being saved, this includes version, experiment and model names // // @return {null} registry.set.modelStore:{[config] enlistCols:`experimentName`modelName`modelType`version`description; regularCols:`registrationTime`uniqueID!config`registrationTime`uniqueID; experimentName:config`experimentName; experimentName:$[0h=type experimentName;;enlist]experimentName; modelName:enlist config`modelName; modelType:config`modelType; modelType:enlist$[-10h=type modelType;enlist;]modelType; description:config`description; if[0=count description;description:""]; description:enlist$[-10h=type 
description;enlist;]description; version:enlist config`version; info:regularCols,enlistCols! (experimentName;modelName;modelType;version;description); // check if model already exists whereClause:enlist (&;(&;(~\:;`version;config[`version]);(~\:;`modelName;config[`modelName])); (~\:;`experimentName;config[`experimentName])); columns:enlist `uniqueID; if[not count ?[config[`modelStorePath];whereClause;0b;columns!columns]`uniqueID; config[`modelStorePath]upsert flip info ]; }

// @private // @kind function // @category optimizationUtility // @desc Optimize a function until gradient tolerance is reached or // maximum number of allowed iterations is met. The following outlines a // python equivalent // https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/optimize.py#L1131 // @param func {fn} Function to be minimized // @param optimDict {dictionary} Variables to be updated at each iteration of // optimization // @param args {any} Arguments to the optimization function that do not // change per iteration // @param params {dictionary} Parameters controlling non default optimization // behaviour // @return {dictionary} Variables, gradients, matrices and indices at the end // of each iteration i.BFGSFunction:{[func;optimDict;args;params] // Calculate search direction pk:neg mmu[optimDict`hess;optimDict`gk]; // Line search func to be inserted to get alpha wolfe:i.wolfeSearch[;;;pk;func;;args;params]. optimDict`fk`fkPrev`gk`xk; // Old fk goes to previous val optimDict[`fkPrev]:optimDict`fk; // Update values based on wolfe line search alpha:wolfe 0; optimDict[`fk]:wolfe 1; gNew:wolfe 2; // Redefine the x value at k-1 to the current x value optimDict[`xkPrev]:optimDict`xk; // Calculate the step distance for moving from x(k-1) -> x(k) sk:alpha*pk; // Update values of x at the new position k optimDict[`xk]:optimDict[`xkPrev]+sk; // If null gNew, then get gradient of new x value if[any null gNew;gNew:i.grad[func;optimDict`xk;args;params`geps]]; // Subtract new gradients yk:gNew-optimDict`gk; optimDict[`gk]:gNew; // Get new norm of gradient optimDict[`gnorm]:i.vecNorm[optimDict`gk;params`norm]; // Calculate new hessian matrix for next iteration rhok:1%mmu[yk;sk]; if[0w=rhok; rhok:1000f; -1"Division by zero in calculation of rhok, assuming rhok large"; ]; A1:optimDict[`I]-sk*\:yk*rhok; A2:optimDict[`I]-yk*\:sk*rhok; hessMul:mmu[A1;mmu[optimDict`hess;A2]]; optimDict[`hess]:hessMul+rhok*(sk*/:sk); // if x(k) returns infinite value update gnorm and fk if[0w in abs optimDict`xk;optimDict[`gnorm`fk]:(0n;0w)]; optimDict[`idx]+:1; if[params`display;show optimDict;-1"";]; optimDict } // @private // @kind function // @category optimizationUtility // @desc Complete a line search across an unconstrained minimization // problem making use of wolfe conditions to constrain the search.
The naming // convention for dictionary keys in this implementation is based on the // python implementation of the same functionality here // https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/linesearch.py#L193 // @param fk {float} Function return evaluated at position k // @param fkPrev {float} Function return evaluated at position k-1 // @param gk {float} Gradient at position k // @param pk {float} Search direction // @param func {fn} Function being optimized // @param xk {number[]} Parameter values at position k // @param args {dictionary|number[]} Function arguments that do not change per // iteration // @param params {dictionary} Parameters controlling non default optimization // behaviour // @return {number[]} New alpha, fk and derivative values i.wolfeSearch:{[fk;fkPrev;gk;pk;func;xk;args;params] phiFunc :i.phi[func;pk;;xk;args]; derPhiFunc:i.derPhi[func;params`geps;pk;;xk;args]; // Initial Wolfe conditions wolfeKeys:`idx`alpha0`phi0`phia0; wolfeVals:(0;0;fk;fk); wolfeDict:wolfeKeys!wolfeVals; // Calculate the derivative at that phi0 derPhi0:gk mmu pk; wolfeDict[`derPhia0`derPhi0]:2#derPhi0; // Calculate step size this should be 0 < x < 1 // with min(x;maxstepsize) or 1f otherwise alpha:1.01*2*(fk-fkPrev)%derPhi0; alphaVal:$[alpha within 0 1f;min(alpha;params`stepSize);1f]; wolfeDict[`alpha1]:alphaVal; // function value at alpha1 wolfeDict[`phia1]:phiFunc wolfeDict`alpha1; // Repeat until wolfe criteria is reached or max iterations have been done // to get new alpha, phi and derPhi values wolfeDict:i.stopWolfe[;params] i.scalarWolfe[derPhiFunc;phiFunc;pk;params]/wolfeDict; // if the line search did not converge, use last alpha , phi and derPhi $[not any null raze wolfeDict`alphaStar`phiStar`derPhiStar; wolfeDict`alphaStar`phiStar`derPhiStar; wolfeDict`alpha1`phia1`derPhia0Fin ] } // @private // @kind function // @category optimizationUtility // @desc Apply a scalar search to find an alpha value that satisfies // strong Wolfe conditions, a python implementation of this is outlined here // https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/linesearch.py#L338 // This functions defines the bounds between which the step function can // be found. When the optimal bound is found, the area is zoomed recursively // until the optimal value is found // @param derPhiFunc {fn} Function to calculate the value of the objective // function derivative at alpha // @param phiFunc {fn} Function to calculate the value of the objective // function at alpha // @param pk {float} Search direction // @param params {dictionary} Parameters controlling non default optimization // behaviour // @param wolfeDict {dictionary} All data relevant to the calculation of the // optimal alpha values // @returns {dictionary} New alpha, fk and derivative values i.scalarWolfe:{[derPhiFunc;phiFunc;pk;params;wolfeDict] // Set up zoom function constant params zoomSetup:i.zoomFunc[derPhiFunc;phiFunc;;;params]. 
wolfeDict`phi0`derPhi0; // If criteria 1 is met, zoom and break loop if[i.wolfeCriteria1[wolfeDict;params]; wolfeDict[`idx]:0w; wolfeVals:wolfeDict`alpha0`alpha1`phia0`phia1`derPhia0; updZoom:zoomSetup wolfeVals; wolfeDict[i.zoomReturn]:updZoom; :wolfeDict ]; // Calculate the derivative of the function at the new position derPhiCalc:derPhiFunc wolfeDict`alpha1; // Update the new derivative function wolfeDict[`derPhia1]:derPhiCalc`derval; $[i.wolfeCriteria2[wolfeDict;params]; [wolfeDict[`alphaStar]:wolfeDict`alpha1; wolfeDict[`phiStar]:wolfeDict`phia1; wolfeDict[`derPhiStar]:derPhiCalc`grad; wolfeDict[`idx]:0w; wolfeDict ]; 0<=wolfeDict`derPhia1; [wolfeDict[`idx]:0w; updZoom:zoomSetup wolfeDict`alpha1`alpha0`phia1`phia0`derPhia1; wolfeDict[i.zoomReturn]:updZoom ]; // Update dictionary and repeat process until criteria is met [wolfeDict[`alpha0]:wolfeDict`alpha1; wolfeDict[`alpha1]:2*wolfeDict`alpha1; wolfeDict[`phia0]:wolfeDict`phia1; wolfeDict[`phia1]:phiFunc wolfeDict`alpha1; wolfeDict[`derPhia0]:wolfeDict`derPhia1; wolfeDict[`derPhia0Fin]:derPhiCalc`grad; wolfeDict[`idx]+:1 ] ]; wolfeDict } // @private // @kind function // @category optimizeUtility // @desc Function to apply 'zoom' iteratively during linesearch to find // optimal alpha value satisfying strong Wolfe conditions // @param derPhiFunc {fn} Function to calculate the value of the objective // function derivative at alpha // @param phiFunc {fn} Function to calculate the value of the objective // function at alpha // @param phi0 {float} Value of function evaluation at x(k-1) // @param derPhi0 {float} Value of objective function derivative at x(k-1) // @param params {dictionary} Parameters controlling non default optimization // behaviour // @param cond {number[]} Bounding conditions for alpha, phi and derPhi used in // zoom algorithm // @returns {number[]} New alpha, fk and derivative values i.zoomFunc:{[derPhiFunc;phiFunc;phi0;derPhi0;params;cond] zoomDict:i.zoomKeys!cond,phi0; zoomDict[`idx`aRec]:2#0f; zoomDict:i.stopZoom[;params] i.zoom[derPhiFunc;phiFunc;phi0;derPhi0;params]/zoomDict; // If zoom did not converge, set to null $[count star:zoomDict[i.zoomReturn];star;3#0N] } // @private // @kind function // @category optimizeUtility // @desc Function to apply an individual step in 'zoom' during // linesearch to find optimal alpha value satisfying strong Wolfe conditions. 
// An outline of the python implementation of this section of the algorithm // can be found here // https://github.com/scipy/scipy/blob/v1.5.0/scipy/optimize/linesearch.py#L556 // @param derPhiFunc {fn} Function to calculate the value of the objective // function derivative at alpha // @param phiFunc {fn} Function to calculate the value of the objective // function at alpha // @param phi0 {float} Value of function evaluation at x(k-1) // @param derPhi0 {float} Value of objective function derivative at x(k-1) // @param params {dictionary} Parameters controlling non default optimization // behaviour // @param zoomDict {dictionary} Parameters to be updated as 'zoom' procedure is // applied to find the optimal value of alpha // @returns {dictionary} Parameters calculated for an individual step in line // search procedure to find optimal alpha value satisfying strong Wolfe // conditions i.zoom:{[derPhiFunc;phiFunc;phi0;derPhi0;params;zoomDict] alphaDiff:zoomDict[`aHi]-zoomDict`aLo; // define high and low values highLowVal:$[alphaDiff>0;zoomDict`aHi`aLo;zoomDict`aLo`aHi]; highLow:`high`low!highLowVal; if["i"$zoomDict`idx; cubicCheck:alphaDiff*0.2; findMin:i.cubicMin . zoomDict`aLo`phiLo`derPhiLo`aHi`phiHi`aRec`phiRec ]; if[i.quadCriteria[findMin;highLow;cubicCheck;zoomDict]; quadCheck:0.1*alphaDiff; findMin:i.quadMin . zoomDict`aLo`phiLo`derPhiLo`aHi`phiHi; lowerCheck:findMin<highLow[`high]+quadCheck; upperCheck:findMin>highLow[`low]-quadCheck; if[upperCheck|lowerCheck; findMin:zoomDict[`aLo]+0.5*alphaDiff ] ]; // Update new values depending on findMin phiMin:phiFunc[findMin]; // First condition, update and continue loop if[i.zoomCriteria1[phi0;derPhi0;phiMin;findMin;zoomDict;params]; zoomDict[`idx]+:1; zoomDict[i.zoomKeys1]:zoomDict[`phiHi`aHi],findMin,phiMin; :zoomDict ]; // Calculate the derivative at the cubic minimum derPhiMin:derPhiFunc findMin; // Second scenario, create new features and end the loop $[i.zoomCriteria2[derPhi0;derPhiMin;params]; [zoomDict[`idx]:0w; zoomDict:zoomDict,i.zoomReturn!findMin,phiMin,enlist derPhiMin`grad ]; i.zoomCriteria3[derPhiMin;alphaDiff]; [zoomDict[`idx]+:1; zoomDict[i.zoomKeys1,i.zoomKeys2]:zoomDict[`phiHi`aHi`aLo`phiLo], findMin,phiMin,derPhiMin`derval ]; [zoomDict[`idx]+:1; zoomDict[i.zoomKeys3,i.zoomKeys2]:zoomDict[`phiLo`aLo], findMin,phiMin,derPhiMin`derval ] ]; zoomDict } // Vector norm calculation

// Find offset of central directory signature in a zip vector. // Assumes last match is valid; more sophisticated algos are possible, // but they can be implemented as needed. // @param x bytes // @return long .finos.unzip.priv.ovcds:{ last("c"$x)ss"c"$0x504b0506} // Find offset of central directory signature in a zip file. // Implemented via sliding four-byte read starting at end of file. // Assumes last match is valid; more sophisticated algos are possible, // but they can be implemented as needed. // @param x hsym // @return long .finos.unzip.priv.ofcds:{ c:hcount x; r:{(not 0x504b0506~y 0)&x>=y 1}[c]{(read1(x;y-z 1;4);1+z 1)}[x;c]/(0x00000000;0); $[0x504b0506~r 0;1+c-r 1;0N]} // Find offset of zip64 end of central directory locator signature in a zip vector.
// Assumes last match is valid; more sophisticated algos are possible, // but they can be implemented as needed. // @param x bytes // @return long .finos.unzip.priv.ovecls64:{ last("c"$y)ss"c"$0x504b0607} // Find offset of zip64 end of central directory locator signature in a zip file. // Implemented via sliding four-byte read starting at end of file. // Assumes last match is valid; more sophisticated algos are possible, // but they can be implemented as needed. // @param x hsym // @return long .finos.unzip.priv.ofecls64:{ c:hcount x; r:{(not 0x504b0607~y 0)&x>y 1}[c]{(read1(x;y-z 1;4);1+z 1)}[x;c]/(0x00000000;0); $[0x504b0607~r 0;1+c-r 1;0N]} // Extract one file from an archive using unzip(1). // @param x hsym // @param y sym // @return character vector .finos.unzip.priv.unzip_system:{ f:hsym`$first system"mktemp"; system"(unzip -p \"",(1_string x),"\" \"",(string y),"\" >",(1_string f),")"; r:"c"$read1 f; hdel f; r} // Perform various zip-related operations. // Possible values for x, and expected z arg in each case: // `list: List files in an archive. // z: ignored // `unzip: Extract (specific file(s) from) an archive. // z: sym, sym vector, or (::) to unzip all files // See https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT, // https://users.cs.jmu.edu/buchhofp/forensics/formats/pkzip.html, // https://fossies.org/linux/zip/proginfo/extrafld.txt, etc. // @param x sym // @param y hsym, character vector, or byte vector // @param z see above // @return dictionary of filenames and character vectors .finos.unzip.priv.unzip:{ if[not x in`list`unzip; '`domain; ]; / accept chars if[10h=type y; y:"x"$y; ]; / accept hsym and bytes if[$[-11h=t:type y;not":"=first string y;4h<>t]; '`type; ]; if[`unzip=x; if[not(11h=abs type z)|z~(::); '`domain; ]; ]; .finos.log.info"processing ",$[-11h=t;1_string y;"archive"]; / get byte count c:.finos.unzip.priv.bcount y; / look for central directory signature cds:$[4h=t;.finos.unzip.priv.ovcds;.finos.unzip.priv.ofcds]y; if[null cds; '"no central directory signature"; ]; / parse end-of-central-directory record ecd:.finos.unzip.priv.pecd .finos.unzip.priv.bytes[y;cds;c-cds]; / punt on multi-disk archives if[0<>ecd`dnu;'`nyi]; if[0<>ecd`dcd;'`nyi]; / bytes of central directory record cd:exec .finos.unzip.priv.bytes[y;cof;csz]from $[ -1=ecd`cof; / zip64 [ / look for zip64 end of central directory locator signature ecls64:$[4h=t;.finos.unzip.priv.ovecls64;.finos.unzip.priv.ofecls64]y; if[null ecls64; '"no end of central directory locator"; ]; / parse zip64 end-of-central-directory locator record ecl64:.finos.unzip.priv.pecl64 .finos.unzip.priv.bytes[y;ecls64;c-ecls64]; / parse zip64 end-of-central-directory record ecd64:.finos.unzip.priv.pecd64 .finos.unzip.priv.bytes[y;ecl64`cof;12+.finos.unzip.priv.parseNum .finos.unzip.priv.bytes[y;4+ecl64`cof;8]]]; ecd]; / check for empty zip if[not count cd; :$[ `list=x; ([name:0#`]size:0#0Ni;timestamp:0#0Np); `unzip=x; $[ -11h=type z; [ .finos.log.error(string z),": file not found in archive"; 'z; ]; 11h=type z; [ {.finos.log.error(string x),": file not found in archive"}each z; 'first z; ]; z~(::); ((0#`)!())]; '`domain]; ]; / start of central directory scd:$[-1=ecd`cof;ecd64;ecd]`cof; / parse central directory .finos.log.debug"parsing central directory"; cd:.finos.unzip.priv.parse[(.finos.unzip.priv.pcd;.finos.unzip.priv.wcd);cd;count cd]; .finos.log.debug"done parsing central directory"; / apply extra field cd:.finos.unzip.priv.axfd[(enlist`context)!enlist`cd]each cd; r:$[ `list=x; [ 1!select 
name:fnm,size:usz,timestamp:mdt+mtm from cd]; `unzip=x; [ / calculate next offsets cd:update nof:scd^next lof from cd; / apply file filter, if any if[not z~(::); cd:select from cd where fnm in z; if[count e:exec(raze z)except fnm from cd; {.finos.log.error(string x),": file not found in archive"}each e; 'first e; ]; ]; / parse file data fd:$[ .finos.unzip.filescan; [ / read file if neccesary if[-11h=type y; y:read1 y; ]; / trim any leading garbage y:(exec min lof from cd)_y; / extract all files .finos.unzip.priv.parse[(.finos.unzip.priv.pfd;.finos.unzip.priv.wfd;z);y;scd-exec min lof from cd]]; [ / extract each file mentioned in the central directory f:{[w;x;y;z] h:.finos.unzip.priv.split[w;0].finos.unzip.priv.bytes[x;y`lof;sum w]; first .finos.unzip.priv.pfd[(.finos.unzip.priv.bytes[x;y`lof;z-y`lof];::);sum w;h]}; / assume the end of the last file is the beginning of the central directory / might be wrong if archive decryption header and/or archive extra data record are present? cd f[.finos.unzip.priv.wfd;y]'exec nof from cd]]; r:exec fnm!fdu from fd; r:$[ 11h=type z; z#r; -11h=type z; r z; r]; if[.finos.unzip.verify&-11h=type y; .finos.log.info"verifying"; v:r~$[ -11h=type z; .finos.unzip.priv.unzip_system[y]z; {y!x y}[y .finos.unzip.priv.unzip_system/:]key r]; if[not v; break; '`parse; ]; .finos.log.info"verified"; ]; r]; '`domain]; r} // Public API // Set to true to verify extraction against unzip(1). // N.B. will not work if .finos.unzip.unzip is called from a thread. // N.B. will not work in file scan mode. .finos.unzip.verify:0b // Set to true to extract files via file scan, rather than by using the // central directory. // N.B. currently, will likely fail for data-descriptor-based archives .finos.unzip.filescan:0b // List files in an archive. // @param x hsym, character vector, or byte vector // @return table of filenames and file metadata .finos.unzip.list:{.finos.unzip.priv.unzip[`list;x;::]} // Unzip an archive. // @param x hsym, character vector, or byte vector // @return dictionary of filenames and character vectors .finos.unzip.unzip:{.finos.unzip.priv.unzip[`unzip;x;::]} // Unzip specific files from an archive. // @param x hsym, character vector, or byte vector // @param y sym vector // @return dictionary of filenames and character vectors .finos.unzip.unzip2:{.finos.unzip.priv.unzip[`unzip;x;y]} ================================================================================ FILE: kdb_q_util_util.q SIZE: 3,389 characters ================================================================================ // General-purpose utility functions. /// // read0, but compatible with non-seekable files (fifos, /proc, etc.). // @param x file symbol // @return A list of strings containing the contents of the file. // @see read0 .finos.util.read0f:{r:{y,read0 x}[h:hopen`$":fifo://",1_string x]over();hclose h;r} /// // read1, but compatible with non-seekable files (fifos, /proc, etc.). // @param x file symbol // @return A byte vector containing the contents of the file. // @see read1 .finos.util.read1f:{r:{y,read1 x}[h:hopen`$":fifo://",1_string x]over();hclose h;r} .finos.util.compose:('[;])/ // create a list. e.g. list(`a;1) -> (`a;1) // allows a trailing delimiter, e.g. // list( // `a; // 1; // ) .finos.util.list:{$[104h=type x;1_-1_get x;x]} // create a dictionary. e.g. dict (1;2;3;4) -> 1 3!2 4 .finos.util.dict:{(!) . flip 2 cut .finos.util.list x} // create a table. e.g. 
table[`x`y;(1;2;3;4)] -> ([]x:1 3;y:2 4) .finos.util.table:{flip x!flip(count x)cut .finos.util.list y}

\d .subcut enabled:1b // switch on subscribercutoff \d .servers CONNECTIONS,:`segmentedtickerplant CONNECTIONSFROMDISCOVERY:1b ================================================================================ FILE: TorQ_config_settings_segmentedtickerplant.q SIZE: 1,446 characters ================================================================================ // Segmented TP config \d .stplg multilog:`tabperiod; // [tabperiod|none|periodic|tabular|custom] multilogperiod:0D01; // Length of period for STP periodic logging modes errmode:1b; // Enable error mode for STP batchmode:`defaultbatch; // [memorybatch|defaultbatch|immediate] replayperiod:`day // [period|day|prior] customcsv:hsym first .proc.getconfigfile["stpcustom.csv"]; // Location for custom logging mode csv kdbtplog:`$getenv`KDBTPLOG; \d .proc loadcommoncode:0b // do not load common code loadprocesscode:1b // load process code logroll:0b // do not roll logs // Configuration used by the usage functions - logging of client interaction \d .usage enabled:0b // switch off the usage logging // Client tracking configuration // This is the only thing we want to do // and only for connections being opened and closed \d .clients enabled:1b // whether client tracking is enabled opencloseonly:1b // only log open and closing of connections // Server connection details \d .servers enabled:0b // disable server tracking \d .timer enabled:0b // disable the timer \d .hb enabled:0b // disable heartbeating \d .zpsignore enabled:0b // disable zpsignore - zps should be empty ================================================================================ FILE: TorQ_config_settings_sort.q SIZE: 3,968 characters ================================================================================ // Bespoke SORT config \d .wdb ignorelist:`heartbeat`logmsg // list of tables to ignore hdbtypes:`hdb // list of hdb types to look for and call in hdb reload rdbtypes:`rdb // list of rdb types to look for and call in rdb reload tickerplanttypes:`tickerplant // list of tickerplant types to try and make a connection to wdbtypes:`wdb // list of wdb types to look for and call in wdb init tables subtabs:` // list of tables to subscribe for (` for all) subsyms:` // list of syms to subscribe for (` for all) savedir:hsym`$getenv[`TORQHOME],"/wdbhdb" // location to save wdb data numrows:100000 // default number of rows numtab:`quote`trade!10000 50000 // specify number of rows per table mode:`sort // the wdb process can operate in three modes // 1. saveandsort: the process will subscribe for data, // periodically write data to disk and at EOD it will flush // remaining data to disk before sorting it and informing // GWs, RDBs and HDBs etc... // 2. save: the process will subscribe for data, // periodically write data to disk and at EOD it will flush // remaining data to disk. It will then inform its respective // sort mode process to sort the data // 3. sort: the process will wait to get a trigger from its respective // save mode process.
When this is triggered it will sort the // data on disk, apply attributes and the trigger a reload on the // rdb and hdb processes mergenumrows:100000 // default number of rows for merge process mergenumtab:`quote`trade!10000 50000 // specify number of rows per table tpconnsleepintv:10 // number of seconds between attempts to connect to the tp upd:insert // value of the upd function replay:1b // replay the tickerplant log file schema:1b // retrieve schema from tickerplant settimer:0D00:00:10 // timer to check if data needs written to disk partitiontype:`date // set type of partition (defaults to `date, can be `date, `month or `year) getpartition:{@[value; `.wdb.currentpartition; (`date^partitiontype)$.proc.cd[]]} //function to determine the partition value reloadorder:`hdb`rdb // order to reload hdbs and rdbs hdbdir:`:hdb // move wdb database to different location sortcsv:hsym first .proc.getconfigfile["sort.csv"] // location of csv file permitreload:1b // enable reload of hdbs/rdbs compression:() // specify the compress level, empty list if no required gc:1b // garbage collect at appropriate points (after each table save and after sorting data) eodwaittime:0D00:00:10.000 // time to wait for async calls to complete at eod // Server connection details \d .servers CONNECTIONS:`wdb`hdb`tickerplant`rdb`gateway // list of connections to make at start up STARTUP:1b // create connections ================================================================================ FILE: TorQ_config_settings_sortworker.q SIZE: 2,368 characters ================================================================================ // Sort Worker config \d .wdb savedir:hsym`$getenv[`TORQHOME],"/wdbhdb" // location to save wdb data mode:`sort // the wdb process can operate in three modes // 1. saveandsort: the process will subscribe for data, // periodically write data to disk and at EOD it will flush // remaining data to disk before sorting it and informing // GWs, RDBs and HDBs etc... // 2. save: the process will subscribe for data, // periodically write data to disk and at EOD it will flush // remaining data to disk. It will then inform it's respective // sort mode process to sort the data // 3. sort: the process will wait to get a trigger from it's respective // save mode process. 
When this is triggered it will sort the // data on disk, apply attributes and then trigger a reload on the // rdb and hdb processes mergenumrows:100000 // default number of rows for merge process mergenumtab:`quote`trade!10000 50000 // specify number of rows per table hdbdir:`:hdb // move wdb database to different location sortcsv:hsym first .proc.getconfigfile["sort.csv"] // location of csv file gc:1b // garbage collect at appropriate points (after each table save and after sorting data) tickerplanttypes:rdbtypes:hdbtypes:gatewaytypes:sorttypes:sortworkertypes:() // sortworkers don't need these connections
class="mb-4 flex justify-end"> <div class="flex w-full flex-col rounded-lg border-slate-200 bg-white p-2 shadow-md ring-1 ring-slate-200 dark:border-slate-700 dark:bg-slate-800 dark:ring-slate-700"> <div class="mt-0 flex items-start gap-1"><div class="flex items-center rounded-md bg-slate-100 p-2 dark:bg-slate-700"><svg class="size-4 text-gray-700 dark:text-gray-300" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 11 11"><path fill="currentColor" d="M4.881 4.182c0 .101-.031.2-.087.283a.5.5 0 0 1-.242.18l-.65.217a1.3 1.3 0 0 0-.484.299 1.3 1.3 0 0 0-.298.484l-.222.639a.46.46 0 0 1-.18.242.5.5 0 0 1-.288.092.5.5 0 0 1-.294-.097.5.5 0 0 1-.175-.242l-.211-.644a1.26 1.26 0 0 0-.299-.48 1.14 1.14 0 0 0-.479-.298L.328 4.64a.48.48 0 0 1-.247-.18.515.515 0 0 1 .247-.758l.644-.21a1.28 1.28 0 0 0 .788-.789l.211-.634a.5.5 0 0 1 .165-.242.5.5 0 0 1 .283-.103.5.5 0 0 1 .294.083c.086.058.152.14.19.237l.217.659a1.28 1.28 0 0 0 .788.788l.644.222a.476.476 0 0 1 .237.18.5.5 0 0 1 .092.288"></path><path fill="currentColor" d="M10.031 7.458a.5.5 0 0 1-.098.314.5.5 0 0 1-.267.196l-.881.293c-.272.09-.519.242-.721.443a1.8 1.8 0 0 0-.443.721l-.31.876a.5.5 0 0 1-.185.263.56.56 0 0 1-.319.098.515.515 0 0 1-.515-.366l-.294-.88a1.8 1.8 0 0 0-.443-.722c-.204-.2-.45-.353-.72-.448l-.881-.288a.57.57 0 0 1-.263-.191.56.56 0 0 1-.014-.64.5.5 0 0 1 .271-.194l.886-.294A1.82 1.82 0 0 0 6.01 5.465l.293-.87a.515.515 0 0 1 .49-.377c.11 0 .219.03.314.088a.56.56 0 0 1 .206.263l.298.896a1.82 1.82 0 0 0 1.175 1.174l.875.31a.5.5 0 0 1 .263.195c.07.09.108.2.108.314"></path><path fill="currentColor" d="M7.775 1.684a.5.5 0 0 0 .088-.262.45.45 0 0 0-.088-.263.5.5 0 0 0-.21-.155L7.24.896a.5.5 0 0 1-.165-.103.5.5 0 0 1-.103-.17l-.108-.33a.5.5 0 0 0-.165-.21A.5.5 0 0 0 6.426 0a.5.5 0 0 0-.252.098.5.5 0 0 0-.145.206l-.108.32a.5.5 0 0 1-.103.17.5.5 0 0 1-.17.102L5.334 1a.45.45 0 0 0-.216.155.5.5 0 0 0-.088.262c0 .094.029.186.083.263a.5.5 0 0 0 .216.16l.32.103q.095.03.164.103a.37.37 0 0 1 .103.165l.108.319c.031.09.088.17.165.227a.56.56 0 0 0 .252.077.42.42 0 0 0 .268-.093.5.5 0 0 0 .15-.2l.113-.325a.43.43 0 0 1 .268-.268l.32-.108a.42.42 0 0 0 .215-.155"></path></svg></div> <div class="flex min-w-0 flex-1"><textarea placeholder="Ask AI to help write your query..." 
class="max-h-64 min-h-8 w-full resize-none overflow-y-auto border-none bg-transparent py-1 text-sm leading-6 text-slate-700 placeholder-slate-400 [scrollbar-width:thin] focus:ring-0 dark:text-slate-200 dark:placeholder-slate-400" rows="1"></textarea> </div> </div> </div></div> <div class="relative flex flex-col rounded-md bg-gray-100 pt-2 dark:bg-gray-800/50"> <div class="flex h-64 items-center justify-center "><svg class="animate-spin text-xs" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path class="opacity-75" fill-rule="evenodd" clip-rule="evenodd" d="M6 0C2.6862 0 0 2.6862 0 6H1.8C1.8 4.88609 2.2425 3.8178 3.03015 3.03015C3.8178 2.2425 4.88609 1.8 6 1.8V0ZM12 6C12 9.3138 9.3138 12 6 12V10.2C7.11391 10.2 8.1822 9.7575 8.96985 8.96985C9.7575 8.1822 10.2 7.11391 10.2 6H12Z" fill="currentColor"></path><path class="opacity-25" fill-rule="evenodd" clip-rule="evenodd" d="M3.03015 8.96985C3.8178 9.7575 4.88609 10.2 6 10.2V12C2.6862 12 0 9.3138 0 6H1.8C1.8 7.11391 2.2425 8.1822 3.03015 8.96985ZM7.60727 2.11971C7.0977 1.90864 6.55155 1.8 6 1.8V0C9.3138 0 12 2.6862 12 6H10.2C10.2 5.44845 10.0914 4.9023 9.88029 4.39273C9.66922 3.88316 9.35985 3.42016 8.96985 3.03015C8.57984 2.64015 8.11684 2.33078 7.60727 2.11971Z" fill="currentColor"></path></svg></div></div> <div class="mt-2 flex flex-col gap-2"><div class="flex items-center justify-between max-sm:text-sm"><div class="flex w-full items-center justify-between gap-4"> <span class="flex flex-shrink-0 items-center gap-1"><span class="font-semibold">Subsets and Splits</span> <span class="inline-block "><span class="contents"><svg class="text-xs text-gray-500 dark:text-gray-400" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M17 22v-8h-4v2h2v6h-3v2h8v-2h-3z" fill="currentColor"></path><path d="M16 8a1.5 1.5 0 1 0 1.5 1.5A1.5 1.5 0 0 0 16 8z" fill="currentColor"></path><path d="M16 30a14 14 0 1 1 14-14a14 14 0 0 1-14 14zm0-26a12 12 0 1 0 12 12A12 12 0 0 0 16 4z" fill="currentColor"></path></svg></span> </span> </span> <div class="ml-4 flex flex-1 items-center justify-end gap-1"> </div></div></div> <div class="flex flex-nowrap gap-1 overflow-x-auto"></div></div> <button type="button" class="btn mt-2 h-10 w-full text-sm font-semibold md:text-base" ><span class="flex items-center gap-1.5"> <span>Run Query</span> <span class="shadow-xs ml-2 hidden items-center rounded-sm border bg-white px-0.5 text-xs font-medium text-gray-700 sm:inline-flex">Ctrl+↵</span></span></button></div> <div class="flex flex-col px-2 pb-4"></div></div> <div class="mt-auto pb-4"><div class="flex justify-center"><div class="w-full sm:px-4"><div class="mb-3"><ul class="flex gap-1 text-sm "><li><button class="flex items-center whitespace-nowrap rounded-lg px-2 text-gray-500 hover:bg-gray-100 hover:text-gray-700 dark:hover:bg-gray-900 dark:hover:text-gray-300">Saved Queries </button> </li><li><button class="flex items-center whitespace-nowrap rounded-lg px-2 bg-black text-white dark:bg-gray-800">Top Community Queries </button> </li></ul></div> <div class="h-48 overflow-y-auto"><div class="flex flex-col gap-2"><div class="flex h-48 flex-col items-center justify-center rounded border border-gray-200 bg-gray-50 p-4 text-center dark:border-gray-700/60 
dark:bg-gray-900"><p class="mb-1 font-semibold text-gray-600 dark:text-gray-400">No community queries yet</p> <p class="max-w-xs text-xs text-gray-500 dark:text-gray-400">The top public SQL queries from the community will appear here once available.</p></div></div></div></div></div></div></div></div></div></div> </div></div></div></main> </div> <script data-cfasync="false" src="/cdn-cgi/scripts/5c5dd728/cloudflare-static/email-decode.min.js"></script><script> import("\/front\/build\/kube-41c4082\/index.js"); window.moonSha = "kube-41c4082\/"; window.__hf_deferred = {}; </script> <!-- Stripe --> <script> if (["hf.co", "huggingface.co"].includes(window.location.hostname)) { const script = document.createElement("script"); script.src = "https://js.stripe.com/v3/"; script.async = true; document.head.appendChild(script); } </script> </body> </html>