patterns:.log.cfg.format 1 + where "%" = .log.cfg.format;
if[not all patterns in key .log.cfg.patterns;
'"InvalidLogPatternException";
];
.log.pattern:patterns#.log.cfg.patterns;
};
================================================================================
FILE: kdb-common_src_mail.q
SIZE: 5,605 characters
================================================================================
// E-mail Sending via mailx
// Copyright (c) 2016 Sport Trades Ltd
// Documentation: https://github.com/BuaBook/kdb-common/wiki/mail.q
.require.lib each `util`os`convert`ns;
/ The required arguments in order to send an e-mail
.mail.cfg.requiredArgs:`subject`to;
/ The default executable to use to send e-mail
.mail.cfg.defaultMailCmd:`mailx;
.mail.availableCmds:`symbol$();
.mail.init:{
nsContents:.ns.get`.mail.i.send;
mailCmdFuncs:nsContents where .type.isFunction each get each nsContents;
.mail.availableCmds:last each ` vs/:mailCmdFuncs;
.log.if.info "Configured mail commands detected [ Commands: ",.convert.listToString[.mail.availableCmds]," ]";
};
/ Sends an e-mail on the calling process. This function also supports sending HTML e-mail. The underlying command
/ that is used can be changed as appropriate.
/ NOTE: The process will hang until the mail has been sent by the underlying process.
/ @param dict (Dict) Required keys - subject (String), to (Symbol|SymbolList). Optional keys - cc (Symbol|SymbolList), bcc (Symbol|SymbolList), body (String), attachments (FilePathList), deleteAttachments (Boolean), from (Symbol), useMailCmd (Symbol)
/ @throws MissingArgumentException If any required arguments are missing
/ @throws InvalidEmailAttachmentPathException If any of the attachments have a space in the path (not supported)
/ @throws InvalidMailCommandException If the mail command specified is not supported
/ @throws EmailSendFailedException If mailx returns any error
/ @see .mail.i.send
.mail.send:{[dict]
argCheck:where not .mail.cfg.requiredArgs in key dict;
if[0 < count argCheck;
'"MissingArgumentException (",.convert.listToString[.mail.cfg.requiredArgs argCheck],")";
];
if[.util.isEmpty dict`deleteAttachments;
dict[`deleteAttachments]:0b;
];
if[.util.isEmpty dict`useMailCmd;
dict[`useMailCmd]:.mail.cfg.defaultMailCmd;
];
if[not dict[`useMailCmd] in .mail.availableCmds;
'"InvalidMailCommandException";
];
mailStr:.mail.i.send[dict`useMailCmd] dict;
.log.if.info "Sending e-mail [ To: ",.convert.listToString[(),dict`to]," ] [ Subject: ",dict[`subject]," ] [ Mail Cmd: ",string[dict`useMailCmd]," ]";
res:@[.util.system;mailStr;{ (`MAIL_CMD_FAILED;x) }];
if[`MAIL_CMD_FAILED~first res;
.log.if.error "Failed to send e-mail [ To: ",.convert.listToString[(),dict`to]," ] [ Subject: ",dict[`subject]," ] [ Mail Cmd: ",string[dict`useMailCmd]," ]. Error - ",last res;
'"EmailSendFailedException";
];
if[(not .util.isEmpty dict`attachments) & dict`deleteAttachments;
.log.if.info "Deleting attachments after successful send as requested [ Attachments: ",.convert.listToString[dict`attachments]," ]";
.os.run[`rm;] each 1_/:string (),dict`attachments;
];
:1b;
};
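/ Usage sketch (hypothetical address and values; assumes .mail.init has detected a supported mail command):
/   .mail.send `subject`to`body!("Daily report";`$"ops@example.com";"All jobs completed")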
/ Sending mail with 'mailx'. This function assumes the mailutils version of mailx which generally only seems to be
/ available on Ubuntu. On other distributions, you may find that you cannot send HTML e-mail or attach files.
/ @throws InvalidEmailAttachmentPathException If any of the attachments have a space in the path (not supported)
.mail.i.send.mailx:{[dict]
mailStr:"mailx -s \"",dict[`subject],"\"";
bodyStr:"";
if[not .util.isEmpty dict`body;
bodyStr:ssr[dict`body;"'";""];
if[0 < count ss[bodyStr;"<html>"];
mailStr,:" -a 'Content-Type: text/html' ";
];
];
if[not .util.isEmpty dict`from;
mailStr,:" -a 'From: ",string[dict`from],"'";
];
mailStr:"echo '",bodyStr,"' | ",mailStr;
if[not .util.isEmpty dict`cc;
mailStr,:" -c ",.mail.i.getEmailAddresses dict`cc;
];
if[not .util.isEmpty dict`bcc;
mailStr,:" -b ",.mail.i.getEmailAddresses dict`bcc;
];
if[not .util.isEmpty dict`attachments;
attach:(),dict`attachments;
if[any " " in/:string attach;
.log.if.error "Attachment file path contains a space, which is not supported";
'"InvalidEmailAttachmentPathException";
];
mailStr,:" -A "," -A " sv 1_/: string attach;
];
mailStr,:" ",.mail.i.getEmailAddresses dict`to;
:mailStr;
};
/ Sending mail with 'mutt'. This command seems to work best on CentOS where mailx availability differs.
/ @throws InvalidEmailAttachmentPathException If any of the attachments have a space in the path (not supported)
.mail.i.send.mutt:{[dict]
mailStr:"mutt -s \"",dict[`subject],"\"";
bodyStr:"";
if[not .util.isEmpty dict`body;
bodyStr:ssr[dict`body;"'";""];
if[0 < count ss[bodyStr;"<html>"];
mailStr,:" -e 'set content_type=text/html'";
];
];
if[not .util.isEmpty dict`from;
mailStr,:" -e 'my_hdr From: ",string[dict`from],"'";
];
mailStr:"echo '",bodyStr,"' | ",mailStr;
if[not .util.isEmpty dict`cc;
mailStr,:" -c ",.mail.i.getEmailAddresses dict`cc;
];
if[not .util.isEmpty dict`bcc;
mailStr,:" -b ",.mail.i.getEmailAddresses dict`bcc;
];
mailStr,:" ",.mail.i.getEmailAddresses dict`to;
if[not .util.isEmpty dict`attachments;
attach:(),dict`attachments;
if[any " " in/:string attach;
.log.if.error "Attachment file path contains a space, which is not supported";
'"InvalidEmailAttachmentPathException";
];
mailStr,:" -a "," -a " sv 1_/: string attach;
];
:mailStr;
};
.mail.i.getEmailAddresses:{
:"\"",("," sv string distinct (),x),"\" ";
};
================================================================================
FILE: kdb-common_src_ns.q
SIZE: 7,143 characters
================================================================================
// Namespace Management Functions
// Copyright (c) 2016 - 2020 Sport Trades Ltd, 2021 Jaskirat Rajasansir
// Documentation: https://github.com/BuaBook/kdb-common/wiki/ns.q
.require.lib each `type`convert;
/ Configures the protected execution mode within '.ns.protectedExecute'. This is enabled, if supported, during library
/ initialisation. Options:
/ 1b: Uses -105! (.Q.trp) to provide an error stack alongside the exception (only available with kdb+ >= 3.5)
/ 0b: Uses '@' to provide legacy protected execution, returning just the exception (available with all kdb+ versions)
.ns.cfg.protectExecWithStack:0b;
/ Value to check if the execution fails in .ns.protectedExecute
/ @see .ns.protectedExecute
.ns.const.pExecFailure:`PROT_EXEC_FAILED;
.ns.init:{
.ns.cfg.protectExecWithStack:3.5 <= .z.K;
};
/ Gets the contents of the specified namespace and returns them fully qualified
/ @param ns (Symbol) The namespace to get the contents of
/ @returns (SymbolList) The contents of the namespace fully qualified
.ns.get:{[ns]
if[not .type.isSymbol ns;
'"IllegalArgumentException";
];
if[`.~ns;
:key ns;
];
:` sv/:ns,/:key[ns] except `;
};
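/ Example: .ns.get[`.ns] returns the fully-qualified contents of this library, e.g. `.ns.cfg`.ns.const`.ns.init`.ns.get...
/ (exact contents depend on what has been loaded)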
/ Recurses down from the specified root namespace until no more namespaces are found.
/ All returned elements are fully qualified
/ @param ns (Symbol) The root namespace to flatten from
/ @returns (SymbolList) All elements of namespace and child namespaces
.ns.flatten:{[ns]
nsElements:.ns.get ns;
subNs:nsElements where .type.isNamespace each get each nsElements;
:raze (nsElements except subNs),.z.s each subNs;
};
/ Attempts to resolve the specified function <i>body</i> back into the declared function name.
/ @param func (Function) The body of the function
/ @returns (Symbol) The name of the function, or null symbol if it could not be calculated
.ns.resolveFunctionName:{[func]
if[not .type.isFunction func;
'"IllegalArgumentException";
];
funcs:(!). (get;::)@/:\:.ns.flatten `;
funcName:funcs func;
if[`~funcName;
$[101h~type func;
funcName:`native;
funcName:`anonymous
];
];
:funcName;
};
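/ Example: .ns.resolveFunctionName[.ns.get] -> `.ns.get ; a lambda that is not assigned to any global resolves to `anonymous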
/ @param x (Symbol) The reference to check
/ @returns (Boolean) True if the specified reference exists, false otherwise
.ns.isSet:{
res:@[get;x;{ (`REF_NO_EXIST;x) }];
:not `REF_NO_EXIST~first res;
};
/ @param x (Symbol|Function) The function to check the arguments for
/ @returns (SymbolList) The arguments required for the specified function
/ @see .ns.i.getFunction
.ns.getFunctionArguments:{
x:.ns.i.getFunction x;
$[101h = type x;
:enlist `x;
type[x] in 102 103h;
:`x`y
];
if[104h = type x;
origArgs:.ns.getFunctionArguments first get x;
filledArgs:count[origArgs] sublist 1_ get x;
filledArgs:filledArgs,(count[origArgs] - count filledArgs)#(::);
:origArgs where (::) ~/: filledArgs;
];
:@[;1] get x;
};
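/ Examples (function values are also accepted, per the Symbol|Function parameter type):
/   .ns.getFunctionArguments[{[a;b;c] a+b+c}] -> `a`b`c
/   .ns.getFunctionArguments[{[a;b;c] a+b+c}[1]] -> `b`c (a projection returns only its unfilled arguments)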
/ Executes the specified function with the specified arguments. First checks the number of arguments
/ expected by the function to execute and then uses protected execution (try/catch) to run it
/ @param func (Symbol) The function to execute
/ @param args () The arguments to pass to the function. Pass generic null (::) if function requires no arguments
/ @returns () The results of the function or a dictionary `isError`errorMsg!(`PROT_EXEC_FAILED; theError) if it fails. If running with '.ns.cfg.protectExecWithStack' enabled, `backtrace will also be added as the 2nd element
/ @see .ns.cfg.protectExecWithStack
.ns.protectedExecute:{[func;args]
func:.ns.i.getFunction func;
funcArgCount:count .ns.getFunctionArguments func;
if[1 = funcArgCount;
args:enlist args;
];
/ Can't use .Q.trp directly (for multi-argument functions)
$[.ns.cfg.protectExecWithStack;
:-105!(func; args; { `isError`backtrace`errorMsg!(.ns.const.pExecFailure; .Q.sbt y; x) });
/ else
:.[func; args; { `isError`errorMsg!(.ns.const.pExecFailure; x) }]
];
};
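/ Example (assuming .ns.i.getFunction resolves a symbol reference to the underlying function, as its name suggests):
/   .ns.protectedExecute[`.Q.dd; (`a;`b)] -> `a.b
/   .ns.protectedExecute[{x+y}; (1;`a)] -> `isError`errorMsg!(`PROT_EXEC_FAILED;"type") (plus `backtrace when stack mode is enabled)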
/ Allows a function to be executed with a dictionary of arguments mapping back to the original arguments required
/ by that function. Functions exposed on a gateway process generally require the use of dictionaries but this allows
/ the underlying function to use standard arguments. This function will also validate that all the expected arguments
/ of the function are present.
/ @param func (Symbol|Function) The function to execute
/ @param args (Dict) The arguments of the function with the key as that argument name
/ @throws MissingFunctionArgumentException If any arguments are missing in the dictionary
/ @returns () Result of the function
/ @see .ns.getFunctionArguments
.ns.executeFuncWithDict:{[func;args]
funcArgs:.ns.getFunctionArguments func;
/ If function takes a single "x" argument and no arguments passed, assume no argument function
if[(enlist[`x]~funcArgs) & 0 = count args;
args:enlist[`x]!enlist (::);
];
argCheck:where not funcArgs in key args;
if[0 < count argCheck;
'"MissingFunctionArgumentException (",.convert.listToString[funcArgs argCheck],")";
];
/ No need to dereference, as dot in this mode accepts function reference
:func . args funcArgs;
};
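/ Example: .ns.executeFuncWithDict[{[x;y] x+y}; `x`y!1 2] -> 3
/          .ns.executeFuncWithDict[{[x;y] x+y}; enlist[`x]!enlist 1] signals MissingFunctionArgumentException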
|
// @kind function
// @category main
// @subcategory get
//
// @overview
// Load the parameter information for a specific model
//
// @param folderPath {dict|string|null} Registry location, can be:
// 1. A dictionary containing the vendor and location as a string, e.g.
// ```enlist[`local]!enlist"myReg"``` or
// ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc;
// 2. A string indicating the local path;
// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.
// @param experimentName {string|null} The name of an experiment from which
// to retrieve a model; if no modelName is provided, the newest model
// within this experiment will be used. If neither modelName nor
// experimentName is defined, the newest model within the
// "unnamedExperiments" section is chosen
// @param modelName {string|null} The name of the model to be retrieved
// in the case this is null, the newest model associated with the
// experiment is retrieved
// @param version {long[]|null} The specific version of a named model to retrieve
// in the case that this is null the newest model is retrieved (major;minor)
// @param param {symbol|string} The name of the parameter to retrieve
//
// @return {string|dict|table|float} The value of the parameter associated
// with a named parameter saved for the model.
registry.get.parameters:registry.util.get.object[`params]
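// Usage sketch (hypothetical registry path, model and parameter names; argument order follows the doc above):
// .ml.registry.get.parameters["/tmp/myReg";::;"myModel";::;`paramName]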
// @kind function
// @category main
// @subcategory get
//
// @overview
// Retrieve a q/python/sklearn/keras model from the registry for prediction
//
// @todo
// Add type checking for modelName/experimentName/version
//
// @param folderPath {dict|string|null} Registry location, can be:
// 1. A dictionary containing the vendor and location as a string, e.g.
// ```enlist[`local]!enlist"myReg"``` or
// ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc;
// 2. A string indicating the local path;
// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.
// @param experimentName {string|null} The name of an experiment from which
// to retrieve a model; if no modelName is provided, the newest model
// within this experiment will be used. If neither modelName nor
// experimentName is defined, the newest model within the
// "unnamedExperiments" section is chosen
// @param modelName {string|null} The name of the model to be retrieved
// in the case this is null, the newest model associated with the
// experiment is retrieved
// @param version {long[]|null} The specific version of a named model to retrieve
// in the case that this is null the newest model is retrieved (major;minor)
//
// @return {any} `(<|dict|fn|proj)` Model retrieved from the registry.
registry.get.predict:{[folderPath;experimentName;modelName;version]
getModel:registry.get.model[folderPath;experimentName;modelName;version];
if[registry.config.commandLine[`deployType];:getModel`model];
modelType:`$getModel[`modelInfo;`model;`type];
if[`graph~modelType;
logging.error"Retrieval of prediction function not supported for 'graph'"
];
axis:getModel[`modelInfo;`model;`axis];
if[""~axis;axis:0b];
model:getModel`model;
mlops.wrap[modelType;model;axis]
}
// @kind function
// @category main
// @subcategory get
//
// @overview
// Retrieve a q/python/sklearn/keras model from the registry for update
//
// @todo
// Add type checking for modelName/experimentName/version
//
// @param folderPath {dict|string|null} Registry location, can be:
// 1. A dictionary containing the vendor and location as a string, e.g.
// ```enlist[`local]!enlist"myReg"``` or
// ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc;
// 2. A string indicating the local path;
// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.
// @param experimentName {string|null} The name of an experiment from which
// to retrieve a model; if no modelName is provided, the newest model
// within this experiment will be used. If neither modelName nor
// experimentName is defined, the newest model within the
// "unnamedExperiments" section is chosen
// @param modelName {string|null} The name of the model to be retrieved
// in the case this is null, the newest model associated with the
// experiment is retrieved
// @param version {long[]|null} The specific version of a named model to retrieve
// in the case that this is null the newest model is retrieved (major;minor)
// @param supervised {boolean} Whether the model update is supervised
//
// @return {any} `(<|dict|fn|proj)` Model retrieved from the registry.
registry.get.update:{[folderPath;experimentName;modelName;version;supervised]
getModel:registry.get.model[folderPath;experimentName;modelName;version];
if[registry.config.commandLine[`deployType];:getModel`model];
modelType:`$getModel[`modelInfo;`model;`type];
if[`graph~modelType;
logging.error"Retrieval of prediction function not supported for 'graph'"
];
axis:getModel[`modelInfo;`model;`axis];
model:getModel`model;
mlops.wrapUpdate[modelType;model;axis;supervised]
}
// @kind function
// @category main
// @subcategory get
//
// @overview
// Wrap models such that they all expose an update function regardless of
// where they originate
//
// @param mdlType {symbol} Form of model being used ```(`q/`sklearn/`keras)```. This
// defines how the model gets interpreted in the case it is Python code
// in particular.
// @param model {any} `(<|dict|fn|proj|foreign)` Model retrieved from registry.
//
// @return {any} `(<|fn|proj|foreign)` Update function.
mlops.formatUpdate:{[mdlType;model]
$[99h=type model;
$[`update in key model;
model[`update];
logging.error"model does not come with update function"];
mdlType~`sklearn;
$[`partial_fit in .ml.csym model[`:__dir__][]`;
model[`:partial_fit];
logging.error"No update function available for sklearn model"];
logging.error"Update functionality not available for requested model"
]
}
// @kind function
// @category main
// @subcategory get
//
// @overview
// Wrap models retrieved such that they all have the same format regardless of
// where they originate; the data passed to the model will also be transformed
// to the appropriate format
//
// @param mdlType {symbol} Form of model being used ```(`q/`sklearn/`keras)```. This
// defines how the model gets interpreted in the case it is Python code
// in particular.
// @param model {any} `(<|dict|fn|proj|foreign)` Model retrieved from the registry.
// @param axis {boolean} Data in a 'long' or 'wide' format (`0b/1b`)
// @param supervised {boolean} Whether the model update is supervised
//
// @return {any} `(<|fn|proj|foreign)` The update function wrapped with a transformation
// function.
mlops.wrapUpdate:{[mdlType;model;axis;supervised]
model:mlops.formatUpdate[mdlType;model];
transform:mlops.transform[;axis;mdlType];
$[supervised;
model . {(x y;z)}[transform]::;
model transform::]
}
// @kind function
// @category main
// @subcategory get
//
// @overview
// Load the model registry at the user specified location into process.
//
// @param folderPath {dict|string|null} Registry location, can be:
// 1. A dictionary containing the vendor and location as a string, e.g.
// ```enlist[`local]!enlist"myReg"``` or
// ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc;
// 2. A string indicating the local path;
// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.
// @param config {dict} Any additional configuration needed for
// retrieving the modelStore
//
// @return {table} Most recent version of the modelStore
registry.get.modelStore:{[folderPath;config]
config:registry.util.check.config[folderPath;config];
if[not`local~storage:config`storage;storage:`cloud];
$[storage~`local;
[modelStorePath:registry.util.check.registry[config]`modelStorePath;
load modelStorePath;
?[modelStorePath;();0b;()]
];
[modelStore:get hsym`$config[`folderPath],"/KX_ML_REGISTRY/modelStore";
key hsym` sv `$#[3;("/") vs ":",config`folderPath],"_";
modelStore
]
]
}
================================================================================
FILE: ml_ml_registry_q_main_init.q
SIZE: 443 characters
================================================================================
// init.q - Initialise the main q functionality for the model registry
// Copyright (c) 2021 Kx Systems Inc
\d .ml
if[not @[get;".ml.registry.q.main.init";0b];
loadfile`:registry/q/main/new.q;
loadfile`:registry/q/main/log.q;
loadfile`:registry/q/main/set.q;
loadfile`:registry/q/main/delete.q;
loadfile`:registry/q/main/get.q;
loadfile`:registry/q/main/update.q;
loadfile`:registry/q/main/query.q
]
registry.q.main.init:1b
================================================================================
FILE: ml_ml_registry_q_main_log.q
SIZE: 2,561 characters
================================================================================
// log.q - Main callable functions for logging information to the
// model registry
// Copyright (c) 2021 Kx Systems Inc
//
// @overview
// Log information to the registry
//
// @category Model-Registry
// @subcategory Functionality
//
// @end
\d .ml
// @kind function
// @category main
// @subcategory log
//
// @overview
// Log metric values for a model
//
// @todo
// Add type checking for modelName/experimentName/version
// Improve function efficiency when dealing with cloud vendors; presently this is limited
// by retrieval of the registry and republishing.
//
// @param folderPath {dict|string|null} Registry location, can be:
// 1. A dictionary containing the vendor and location as a string, e.g.
// ```enlist[`local]!enlist"myReg"``` or
// ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc;
// 2. A string indicating the local path;
// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.
// @param experimentName {string|null} The name of an experiment from which
// to retrieve a model; if no modelName is provided, the newest model
// within this experiment will be used. If neither modelName nor
// experimentName is defined, the newest model within the
// "unnamedExperiments" section is chosen
// @param modelName {string|null} The name of the model to be retrieved
// in the case this is null, the newest model associated with the
// experiment is retrieved
// @param version {long[]|null} The specific version of a named model to retrieve
// in the case that this is null the newest model is retrieved (major;minor)
// @param metricName {symbol|string} The name of the metric to be persisted
// in the case when this is a string, it is converted to a symbol
// @param metricValue {float} The value of the metric to be persisted
//
// @return {null}
registry.log.metric:{[folderPath;experimentName;modelName;version;metricName;metricValue]
metricName: $[10h=abs[type metricName]; `$; ]metricName;
config:registry.util.check.config[folderPath;()!()];
if[not`local~storage:config`storage;storage:`cloud];
config:$[storage~`local;
registry.local.util.check.registry config;
[checkFunction:registry.cloud.util.check.model;
checkFunction[experimentName;modelName;version;config`folderPath;config]
]
];
logParams:(storage;experimentName;modelName;version;config;metricName;metricValue);
.[registry.util.set.metric;
logParams;
{[x;y;z]
$[`local~x;;registry.util.delete.folder]y;
logging.error z
}[storage;config`folderPath]
]
}
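// Usage sketch (hypothetical registry path and model name): log an accuracy metric against the latest version of a model
// .ml.registry.log.metric["/tmp/myReg";::;"myModel";::;`accuracy;0.95]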
================================================================================
FILE: ml_ml_registry_q_main_new.q
SIZE: 2,696 characters
================================================================================
// new.q - Functionality for generation of new elements of the ML registry
// Copyright (c) 2021 Kx Systems Inc
//
// @overview
// This functionality is intended to provide the ability to generate new
// registries and experiments within these registries.
//
// @category Model-Registry
// @subcategory Functionality
//
// @end
\d .ml
// @kind function
// @category main
// @subcategory new
//
// @overview
// Generates a new model registry at a user specified location on-prem
// or within a supported cloud providers storage solution
//
// @param folderPath {dict|string|null} Registry location, can be:
// 1. A dictionary containing the vendor and location as a string, e.g.
// ```enlist[`local]!enlist"myReg"``` or
// ```enlist[`aws]!enlist"s3://ml-reg-test"``` etc;
// 2. A string indicating the local path;
// 3. A generic null to use the current .ml.registry.location pulled from CLI/JSON.
// @param config {dict|null} Any additional configuration needed for
// initialising the registry
//
// @return {dict} Updated config dictionary containing relevant
// registry paths
registry.new.registry:{[folderPath;config]
config:registry.util.check.config[folderPath;config];
if[not`local~storage:config`storage;storage:`cloud];
registry[storage;`new;`registry]config
}
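// Usage sketch (hypothetical local path; the s3 path is the example from the doc above):
// .ml.registry.new.registry["/tmp/myReg";::]
// .ml.registry.new.registry[enlist[`aws]!enlist"s3://ml-reg-test";::]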
|
/ - if the process does not have a registered server connection, then signal an error
if[not count select from .servers.SERVERS where proctype in typetoquery,{$[null x;(count y)#1b;x=y]}[procn;procname];
finishquery[0b;qid];
.lg.e[`sendinner;e:"process with proctype=",(string typetoquery)," and procname=",(string procn)," does not exist and is not in .servers.SERVERS"];'e];
/- get the handle to run against, depending on whether a name is specified
hd:first $[not null procn;
.servers.getservers[`procname;procn;()!();1b;1b]`w;
.servers.gethandlebytype[typetoquery;`any]];
/ - check if server is available
/ if[ not count select from .servers.SERVERS where w in hd,(proctype in (proct;`gateway)[gw]) or procname in procn;
if[null hd; .lg.e[`sendinner;e:"Attempted to run report ",string[Name]," but process not available: ",string typetoquery^procn];'e];
/ - submit the query to the server
$[not null gateway;
[joinfunction:value first exec joinfunction from `..reports where name=Name;
write[qid;"Running report: ",string[Name]," against proctypes: ",(" " sv string[proct])," on a gateway: ",string[typetoquery^procn]," on handle: ",string hd;0b];
 @[neg[hd];(gwwrapper;Name;query;qid;proct;joinfunction; `gwpostback);{.lg.e[`sendinner;e:"Asynchronous query to gateway failed: ",x];'e}]];
[write[qid;"Running report: ",string[Name]," against ",$[null procn;"proctype : ",string typetoquery;"procname : ",string procn]," on handle: ",.Q.s hd;0b];
wrappedquery:(wrapper;Name;query;qid);
.[.async.postback;(hd;wrappedquery;`postback);{'x}]]
];
/ - update the query submittime for the query id
update submittime:.proc.cp[] from `..querystatus where queryid in qid;
};
/- postback used for async queries [result: dictionary result of query on process]
postback:{[result] @[postbackinner;result;{[qid;err] write[qid;err;1b]}[result`queryid]]};
postbackinner:{[result]
/ - pull the query id from the data returned from the server
queryid:result`queryid;
res:result`result;
write[queryid;"Received result";0b];
/- error handling
if[10h = type res;
if["error:" ~ 6#res;
finishquery[1b;queryid];
.lg.e[`postbackinner;e:"Query execution failed on remote process ",string[result`proctype],": ",7_result`result];'e]];
dictkeys:@[cols;result;{.lg.e[`postbackinner;e:"Result is not a dictionary"];'e}];
columns:value `wrappercols;
/ - signal an error if the columns are not in the expected format
if[all not dictkeys in columns;
.lg.e[`postbackinner;e:"Incorrect column format, must be: ","; " sv string columns];'e];
/ - log that the query has completed
finishquery[1b;queryid];
/ - find any result handlers and apply them to the result
if[count resulthandler: first exec resulthandler from reports where name in result`name;
write[queryid;"Running resulthandler";0b];
.[{[x;y] (value x) @ y;};(resulthandler;result);{.lg.e[`postbackinner;e:"Resulthandler failed: ",x];'e}]];
/ - set the report status as complete (C)
updatestage[queryid;`C;.proc.cp[]];
write[queryid;"Finished report";0b];
};
/- GATEWAY FUNCTIONALITY
/- wrapper function used for sending asynchronous queries to the gateway
gwwrapper:{[name;query;qid;procs;join;postback]
.gw.asyncexecjpt[query; procs; join; (postback;`queryid`time`name`procname`proctype!(qid;.proc.cp[];name;.proc.procname;.proc.proctype)); 0Wn]}
/- when the result from the gateway is received it is formatted before being
/- passed onto the postback function, as is normal with the non-gateway queries
gwpostback:{[queryinfo; query; result] postback queryinfo,(enlist `result)!enlist result}
/- LOGGING
/- write a querylog message
write:{[qid;msg;err]
 /- special case for queries which have timed out; queries are marked as timed out even if they failed to run
if[err~1b;updatestage[qid;`E;.proc.cp[]];.lg.e[`reporter;msg]];
stage:$[qid=0;`S;first exec stage from querystatus where queryid in qid];
if[writetostdout;.lg.o[`reporter;format[qid;string[stage],"|",msg]]];
`..querylogs upsert ([] time:.proc.cp[];queryid:qid;stage:stage;message:enlist raze msg);
/- custom handler
writecustom[qid;msg;err]}
/- add additional functionality to the write function
writecustom:@[value;`writecustom;{{[qid;msg;err]}}]
/- flushing function to clear querylogs, only allow 1 day of logs
flushquerylogs:{[flushtime]
cutofftime:.proc.cp[]-flushtime;
flushing: string fcnt:count select from `..querylogs where time <= cutofftime;
remaining: string count[value `..querylogs] - fcnt;
write[`long$0;"Flushing ",flushing," records. ",remaining," remaining.";0b];
delete from `..querylogs where time <= cutofftime;}
/- flushing any stale timers from the .timer.timer table
flushtimer:{
currenttime:.proc.cp[];
flushing:exec id from `..timerids where periodend<currenttime;
remaining: string count select from `..timerids where periodend>=currenttime;
if[count flushing; write[`long$0;"Flushing ",string[count flushing]," timers. ",remaining," still active.";0b]];
.timer.remove each flushing;
delete from `..timerids where id in flushing;}
/- format log message
format:{[qid;msg] raze string[.proc.cp[]],"|",string[qid],"|",msg}
/- RESULT HANDLERS
/- returns the current date time as a string: YYYY_MM_DD_HH_MM_SS_mmm
dtsuffix:{enlist ssr[;;"_"]/["_" sv string .proc.cd[],.proc.ct[];".:"]};
emailstats:([procname:(); alertname:()] lastsent:`timestamp$());
emailalert:{[period; recipients; data]
lasttime:0p^exec first lastsent from emailstats where procname=(data`procname),alertname=(data`name);
result:data`result;
if[not count result; write[data`queryid;"emailalert: nothing to email";0b]; :()];
if[period > .proc.cp[] - lasttime; write[data`queryid;"emailalert: data available to email but previous email was too soon";0b]; :()];
upsert[`emailstats](data`procname; data`name; .proc.cp[]);
subject:"Process [",(string data`procname),"] has triggered an alert [",(string data`name),"]";
write[data`queryid;"emailalert: sending warning email";0b];
res:.email.senddefault[`to`subject`body!(`$recipients;subject;enlist result`messages)];
$[0<res;
write[data`queryid; "emailalert: sent email alert for alert: ",string data`name;0b];
write[data`queryid; "emailalert: failed to send email alert: ",string data`name;1b]];
}
emailreport:{[temppath;recipients;filename;filetype;data]
filepath:writetofile[temppath;filename;filetype;data;""];
subject:"Report '",(string data`name),"' has been generated by TorQ [",(string .proc.cd[]),"]";
body:"A report has been generated by TorQ. Please see the attached file for the results.";
write[data`queryid;"emailreport: sending email with attached report";0b];
if[1>res:.email.senddefault[`to`subject`body`attachment!(`$recipients;subject;enlist body;filepath)];
write[data`queryid;"emailreport: failed to send email";1b]];
write[data`queryid;"emailreport: cleaning up temporary report file: ",string filepath;0b];
.os.del[string filepath];}
/- formats nested list columns of a table into space-delimited strings
stringnestedlists:{[res]
/- remove character type and empty spaces
nestedtypes:upper .Q.t except " c";
/- if there are any nested lists, otherwise returns original res
$[count select from meta[res] where t in nestedtypes;
{[t;c] ![t;();0b;(enlist c)!enlist ((';{" " sv string x});c)]}/[res;exec c from meta[res] where t in nestedtypes];
res]};
/- writetofiletype: write to disk as specified file type [path: string;filename: string;filetype: string e.g. txt,csv;data: dictionary]
writetofile:{[path;filename;filetype;data;suffix]
if[not (ty:`$filetype) in key .h.tx;write[data`queryid; "writetofile: filetype parameter not found in .h.tx";1b]];
res:stringnestedlists[data`result];
filepath:`$path,("_" sv (filename;string[data`procname]),$[count suffix;suffix;()]),".",filetype;
.[{hsym[x] 0:.h.tx[y;z]};(filepath;ty;res);{[data;e] write[data`queryid;"writetofile: ",e;1b]}[data]]; filepath};
writetofiletype:{[path;filename;filetype;data] writetofile[path;filename;filetype;data;dtsuffix[]]}
/- save as splayed table [path: string;file: string;data: dictionary]
writetosplayed:{[path;file;data]
tab:stringnestedlists[data`result];
.[{[h;t;d] h:hsym `$h; (` sv .Q.par[h;`;`$t],`) upsert .Q.en[h;0!d]};
(path;file;tab);
{[data;e] write[data`queryid;"writetosplayed: ",e;1b]}[data]]
};
/- publishes results data using the reporterprocessresults table schema
publishresult:{[result]
tablename:`reporterprocessresults;
data:([] queryid:enlist result`queryid;time:.proc.cp[];sym:result`name;result:enlist result);
.[.ps.publish;(tablename;data);{'"Failed to publish: ", x}]}
/- INITIALISE REPORTER
/- run csvloader using filepath inputcsv
@[csvloader;inputcsv;{write[`long$0;x;1b];exit 0}];
/- Add to timer and run datecheck
.timer.repeat[`timestamp$.proc.cd[]+00:00;0Wp;1D00:00:00;(`datecheck;`);"Reporter - datecheck runs each day at midnight and schedules timers if they are needed on the current day"];
.timer.repeat[.proc.cp[];0Wp;0D00:00:05;(`checktimeout;`);"Reporter - cancel queries which have timed out"];
.timer.repeat[.proc.cp[];0Wp;0D00:02:00;(`flushquerylogs;flushqueryloginterval);"Reporter - flush querylogs table of data that is older than the parameter"];
write[`long$0;"Reporter Process Initialised";0b];
datecheck[];
/- Initialise server connections
.servers.startup[];
================================================================================
FILE: TorQ_code_processes_segmentedtickerplant.q
SIZE: 3,559 characters
================================================================================
// Segmented TP process
// Contains all TP functionality with additional flexibility
// Configurable logging and subscriptions
// Default settings create single TP log per table and rolls logs hourly
// Subscription to a table can be made in two modes - all or filtered
// All - publish all data for table
// Filtered - apply filters to published data, filters defined on client side
createlogs:@[value;`createlogs;1b]; // allow tickerplant to create a log file
// subscribers use this to determine what type of process they are talking to
tptype:`segmented
.proc.loadf[getenv[`KDBCODE],"/common/os.q"];
.proc.loadf[getenv[`KDBCODE],"/common/timezone.q"];
.proc.loadf[getenv[`KDBCODE],"/common/eodtime.q"];
// In singular or tabular mode, intraday rolling not required
if[.stplg.multilog in `singular`tabular;.stplg.multilogperiod:1D];
// In custom mode, load logging type for each table
if[.stplg.multilog~`custom;
@[{.stplg.custommode:1_(!) . ("SS";",")0: x};.stplg.customcsv;{.lg.e[`stp;"failed to load custom mode csv"]}]
];
// functions used by subscribers
tablelist:{.stpps.t}
// subscribers who want to replay need this info
subdetails:{[tabs;instruments]
`schemalist`logfilelist`rowcounts`date`logdir!(.ps.subscribe\:[tabs;instruments];.stplg.replaylog[tabs];tabs#.stplg `rowcount;(.eodtime `d);.stplg.kdbtplog)
}
// Generate table and schema information and set up default table UPD functions
generateschemas:{
.stpps.init[tables[] except `currlog`heartbeat`logmsg`svrstoload];
.stpps.attrstrip[.stpps.t];
// Table UPD functions attach the current timestamp by default, if STP is chained these do nothing
$[.sctp.chainedtp;
.stplg.updtab:(.stpps.t!(count .stpps.t)#{[x;y] x}),.stplg.updtab;
.stplg.updtab:(.stpps.t!(count .stpps.t)#{(enlist(count first x)#y),x}),.stplg.updtab
]
}
|
================================================================================
FILE: TorQ_code_common_html.q
SIZE: 4,541 characters
================================================================================
\d .html
// set html home
if[count getenv`KDBHTML; .h.HOME:getenv`KDBHTML]
// PUB / SUB functionality
// pub/sub code for websockets - modified version of u.q from kx
// no point writing from scratch when something is tried/tested/working for years!
t:`symbol$()
w:()!()
modifier:()!()
dataformat:{[t;d] `name`data!(t;jsformat\:[d;typemap])}
updformat:{[t;d] `name`data!(t;(key d)!(d`tablename;jsformat[d`tabledata;typemap]))}
// init must be called with the list of tables
// modified so init can be called multiple times
// the default modifier is to serialize the data
// could add other modifiers
init:{
new:(x,:()) except t;
t::t,new;
w,::new!(count new)#();
modifier,::new!(count new)#{-8!.j.j updformat["upd";`tablename`tabledata!(x 1;x 2)]}}
del:{w[x]_:w[x;;0]?y};
//Version checking code. .z.pc is only used in versions prior to 3.3
close:{{.html.del[;y] each .html.t; x@y}@[value;x;{{[x]}}]}
if[.z.K >= 3.3;.dotz.set[`.z.wc;close[.dotz.getcommand[`.z.wc]]];.dotz.set[`.z.pc;close[.dotz.getcommand[`.z.pc]]]]
// Create a new version of sel - for the time being, all pages get all data
/ sel:{$[`~y;x;select from x where sym in y]}
sel:{[x;y] x}
// Apply the modifier before sending the data
pub:{[t;x]{[t;x;w]if[count x:sel[x]w 1;(neg first w) modifier[t]@(`upd;t;x)]}[t;x]each w t}
add:{$[(count w x)>i:w[x;;0]?.z.w;.[`.u.w;(x;i;1);union;y];w[x],:enlist(.z.w;y)];(x;$[99=type v:value x;sel[v]y;@[0#v;`sym;`g#]])}
sub:{if[x~`;:sub[;y]each t];if[not x in t;'x];del[x].z.w;add[x;y]}
// add wssub method - have to subscribe to everything, don't return anything
wssub:{sub[x;`];}
end:{(neg union/[w[;;0]])@\:(`.u.end;x)}
// JAVASCRIPT CONVERTERS
// ISO 8601 date time format, used for JSON.
jstsiso8601:{("-" sv "." vs string `date$x),"T",string[`second$x],"Z"}'
// convert to javascript timestamp format
jstsfromts:{"j"$946684800000j+86400000*"z"$x}
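// e.g. jstsfromts 2000.01.02D00:00 -> 946771200000 (946684800000 ms up to 2000.01.01, plus one day of ms)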
// times,seconds - all will end up as 1970 values
jstsfromt:{"j"$"t"$x}
// month
jstsfromm:{jstsfromts `date$x}
// mapping of types to formatting function
typemap:12 13 14 15 16 17 18 19h!(jstsiso8601;jstsfromm;jstsfromts;jstsiso8601;jstsfromt;jstsfromt;jstsfromt;jstsfromt);
// given a table, format each of the columns that need to be formatted
jsformat:{ k:cols x; flip k !(y value t:type each x)@'value x:flip 0!x}
// EVALUATION FUNCTION
// evaluate: used to evaluate front end input data
// Arg: dictionary decoded front end JSON
// format should be `func`arg1`arg2 ... `arg8!(function;arg1;arg2;...;arg3)
// all args except arg1 are optional
execdict:{$[not `func in key x;'"no func in dictionary";1=count key x;(value x`func) @ 1;1<count key x;(value x`func) . value x _ `func;()]}
// Arg: string JSON encoded string from front end
evaluate:{@[execdict;x;{'"failed to execute ",(-3!x)," : ",y}[x]]}
// PAGE REPLACEMENT FUNCTIONALITY (read a page, replace some variables)
// find replace on a dictionary of elements
// input should be [string;`find1`find2!("replace1";"replace2")]
replace:{(ssr/)[x;string key y;value y]}
// read a webpage
readpage:{
$[count r:@[read1;`$":",p:.h.HOME,"/",x;""];
"c"$r;
p,": not found"]}
// need to be able to get the local address
// taken from dotz.q
IPA:(`int$())!`symbol$()
ipa:{$[`~r:IPA x;IPA[x]:$[`~r:.Q.host x;`$"."sv string"i"$0x0 vs x;r];r]}
getport:{string system"p"}
// read a page, replace the host and port details
// GLEN: MYKDBSERVER Must be an absolute URL. Since the javascript is run on client side it interprets localhost or 127.0.0.2 as a URL referring to a client's own computer
// Host must be formatted as a string for javascript; single or double quotes must be added to each side of it, e.g. "server.aquaq.co.uk".
// Otherwise javascript will try to find a variable called server and its property aquaq, its property co etc.
readpagereplaceHP:{replace[readpage[x];`MYKDBSERVER`MYKDBPORT!("\"",(string ipa .z.a),"\"";getport[])]}
// add handlers for .non type
.h.tx[`non]:{enlist x}
.h.ty[`non]:"text/html"
// idea here is to read a webpage and do a find/replace on it
// so q can serve its own webpages, connecting back to itself
// example would be in the html5 source (lets assume the html5 source is called monitor.html)
// if you put MYKDBSERVER:MYKDBPORT for the websocket connection in monitor.html
// then create a function such as
// monitorui:.html.readpagereplaceHP["index.html"]
// then from the browser do
// http://monitorhost:monitorport/.non?monitorui[]
// then it should host its own UI
// WEBSOCKET DEFINITION
.dotz.set[`.z.ws;{neg[.z.w] -8!.j.j[.html.evaluate[.j.k -9!x]];}]
================================================================================
FILE: TorQ_code_common_json.k
SIZE: 725 characters
================================================================================
// Created by Arthur Whitney, kx Systems
// Should be kept up to date if/when updates are available
$[.z.K>3.1; ."\\d .jOLD";."\\d .j"]
/[]{} Cbg*xhijefcspmdznuvt
q:"\"";s:{q,x,q};t:{s@[x;&"."=8#x;:;"-"]}
J:(($`0`1)!$`false`true;s;{$[#x;x;"null"]};s;t;s)1 2 5 11 12 16h bin
/enclose
e:{(*x),(","/:y),*|x};a:"\t\n\r\"\\";f:{$[x in a;"\\","tnr\"\\"a?x;x]}
j:{$[10=abs t:@x;s$[|/x in a;,/f'x;x];99=t;e["{}"](j'!x),'":",'j'. x;-1<t;e["[]"].Q.fc[j']x;J[-t]@$x]}
/disclose
v:{=\~("\\"=-1_q,x)<q=x};d:{$[1<n::(s:+\1 -1 1 -1"{}[]"?x)?0;1_'(0,&(v[x]&","=x)&1=n#s)_x:n#x;()]}
c:{$["{"=*x;(`$c'n#'x)!c'(1+n:x?'":")_'x:d x;"["=*x;.Q.fc[c']d x;q=*x;$[1<+/v x;'`err;"",. x];"a">*x;"F"$x;"n"=*x;0n;"t"=*x]}
k:{c x@&~v[x]&x in" \t\n\r"}
================================================================================
FILE: TorQ_code_common_kafka.q
SIZE: 1,745 characters
================================================================================
\d .kafka
// configuration
enabled:@[value;`enabled;.z.o in `l64] // whether kafka is enabled
kupd:@[value;`kupd;{[k;x] -1 `char$x;}] // default definition for kupd
lib:`$getenv[`KDBLIB],"/",string[.z.o],"/kafkaq";
if[.kafka.enabled;
libfile:hsym ` sv lib,$[.z.o like "w*"; `dll; `so];
libexists:not ()~key libfile;
if[not .kafka.libexists; .lg.e[`kafka;"no such file ",1_string libfile]];
if[.kafka.libexists;
/ initconsumer[server;optiondict]
/ initialise consumer object with the specified config options. Required in order to call 'subscribe'
 / e.g. initconsumer[`localhost:9092;`fetch.wait.max.ms`fetch.error.backoff.ms!`5`5]
initconsumer: lib 2: (`initconsumer;2);
 / initproducer[server;optiondict]
/ e.g. initproducer[`localhost:9092;`queue.buffering.max.ms`batch.num.messages!`5`1]
initproducer: lib 2: (`initproducer;2);
/ cleanupconsumer[]
/ disconnect and free up consumer object, stop and subscription thread
cleanupconsumer: lib 2: (`cleanupconsumer;1);
/ cleanupproducer[]
/ disconnect and free up producer object
cleanupproducer: lib 2: (`cleanupproducer;1);
/ subscribe[topic;partition]
/ start subscription thread for topic on partition - data entry point is 'kupd' function
/ e.g. subscribe[`test;0]
subscribe: lib 2: (`subscribe;2);
/ publish[topic;partition;key;message]
/ publish 'message' byte vector to topic, partition. symbol key can be null
/ e.g. publish[`test;0;`;`byte$"hello world"]
publish: lib 2: (`publish;4);
/ default entry point - if subscription is active this will be called with any messages
/ k (symbol) - key
/ x (bytes) - message content
if[not `kupd in key `.; @[`.;`kupd;:;.kafka.kupd]];
.lg.o[`kafka;"kupd is set to ",-3!kupd];
];
];
\d .
================================================================================
FILE: TorQ_code_common_memusage.q
SIZE: 2,148 characters
================================================================================
// Functionality to return approx. memory size of kdb+ objects
\d .mem
// half size for 2.x
version:.5*1+3.0<=.z.K;
// set the pointer size based on architecture
ptrsize:$["32"~1_string .z.o;4;8];
attrsize:{version*
// `u#2 4 5 unique 32*u
$[`u=a:attr x;32*count distinct x;
// `p#2 2 1 parted (8*u;32*u;8*u+1)
`p=a;8+48*count distinct x;
0]
};
// (16 bytes + attribute overheads + raw size) to the nearest power of 2
calcsize:{[c;s;a] `long$2 xexp ceiling 2 xlog 16+a+s*c};
vectorsize:{calcsize[count x;typesize x;attrsize x]};
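// e.g. a 1000-element long vector with no attribute: 16+8*1000=8016 bytes, rounded up to the next power of 2
// vectorsize 1000#0j -> 8192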
// raw size of atoms according to type; types 20h->76h have a 4-byte pointer size
typesize:{4^0N 1 16 0N 1 2 4 8 4 8 1 8 8 4 4 8 8 4 4 4 abs type x};
threshold:100000;
// pick samples randomly according to threshold and apply function
sampling:{[f;x]
$[threshold<c:count x;f@threshold?x;f x]
};
// scale sampling result back to total population
scaleSampling:{[f;x]
sampling[f;x]*max(1;count[x]%threshold)
};
objsize:{
// count 0
if[not count x;:0];
// flatten table/dict into list of objects
x:$[.Q.qt x;(key x;value x:flip 0!x);
99h=type x;(key x;value x);
x];
// special case to handle `g# attr
// raw list + hash
if[`g=attr x;x:(`#x;group x)];
// atom is fixed at 16 bytes, GUID is 32 bytes
$[0h>t:type x;$[-2h=t;32;16];
// list & enum list
t within 1 76h;vectorsize x;
// exit early for anything above 76h
76h<t;0;
// complex = complex type in list, pointers + size of each objects
0h in t:sampling[type each;x];calcsize[count x;ptrsize;0]+"j"$scaleSampling[{[f;x]sum f each x}[.z.s];x];
// complex = if only 1 type and simple list, pointers + sum count each*first type
 // assume count>1000 has no attributes (i.e. a table is unlikely to have 1000 columns, and a list of strings is unlikely to have an attr on only some objects)
(d[0] within 1 76h)&1=count d:distinct t;calcsize[count x;ptrsize;0]+"j"$scaleSampling[{sum calcsize[count each x;typesize x 0;$[1000<count x;0;attrsize each x]]};x];
// other complex, pointers + size of each objects
calcsize[count x;ptrsize;0]+"j"$scaleSampling[{[f;x]sum f each x}[.z.s];x]]
};
\d .
================================================================================
FILE: TorQ_code_common_merge.q
SIZE: 7,140 characters
================================================================================
\d .merge
mergebybytelimit:@[value;`.merge.mergebybytelimit;0b]; /- merge limit configuration - default is 0b row count limit 1b is byte size limit
partlimit:@[value;`.merge.partlimit;1000] /- limit the number of partitions in a chunk
partsizes:([ptdir:`symbol$()] rowcount:`long$(); bytes:`long$()); /- partsizes table used to keep track of table row count and bytesize estimate when data is written to disk
|
// function to add date column on request on rdb processes
rdbdate:{[dict;parameter]
if[.proc.proctype=`rdb;
f:{[y;x]$[x~`date;`$((string .checkinputs.getdefaulttime y),".date");x]};
:@[dict;parameter;:;f[dict;] each dict parameter]];
:dict;
};
// function to add date column to free form parameters on rdb processes
freeformrdbdate:{[dict;parameter]
if[.proc.proctype=`rdb;
:@[dict;parameter;:;ssr[(dict parameter);"date";(string (.checkinputs.getdefaulttime dict)),".date"]]];
:dict;
};
//- returns columns: `lastMid`maxBidprice`maxAskprice`wavgAsksizeAskprice`wavgBidsizeBidprice
checkaggregations:{[dict]
input:dict`aggregations;
.dataaccess.checkcolumns[dict`tablename;raze last each input;`aggregations];
columns:distinct(raze/)get input;
dict:checkcolumns[dict`tablename;raze last each input;`aggregations];
inputfuncs:key input;
 if[(`distinct in key input)&(not ((count flip(key[input]where count each get input;raze input)))=1); '`$.schema.errors[`distinctagg;`errormessage]];
if[any not inputfuncs in .schema.validfuncs;'`$.checkinputs.formatstring[.schema.errors[`undefinedaggs;`errormessage];distinct inputfuncs except .schema.validfuncs]];
dvalidfuncs:(key input) inter .schema.dvalidfuncs;
if[0<>count except[count each raze input dvalidfuncs;2];'`$.checkinputs.formatstring[.schema.errors[`agglength;`errormessage];dvalidfuncs]];
:dict;
};
// check that the timebar size chosen isn't too small for DA to handle
checktimebar:{[dict]
if[not in[`aggregations;key dict];
'`$.schema.errors[`aggtimebar;`errormessage]];
.dataaccess.checkcolumns[dict`tablename;last dict`timebar;`timebar];
input:dict`aggregations;
input:flip(key[input]where count each get input;raze input);
if[any not in[first each input;.schema.returnone];
'`$.schema.errors[`singleaggtimebar;`errormessage]];
size:dict[`timebar][1];
if[not in[size;key .schema.timebarmap];
'`$.checkinputs.formatstring[.schema.errors[`timebarsize;`errormessage];`size`app!(size;key .schema.timebarmap)]];
if[1>floor (dict`timebar)[0]*.schema.timebarmap(dict`timebar)[1];
'`$.schema.errors[`smalltimebar;`errormessage]];
if[(not first (exec t from meta dict`tablename where c=(dict`timebar)[2]) in "pmnuvtzd") & (not (dict`timebar)[2]=`date);'`$.checkinputs.formatstring["Parameter:`timebar - column:{column} in table:{table} is of type:{type}, validtypes:-12 -13 -14 -15 -16 -17 -18 -19h";`column`table`type!((dict`timebar)[2];dict`tablename;(type( exec from dict`tablename)(dict`timebar)[2]))]];
:dict;
};
// check errors in the freeform parameters
checkfreeformwhere:{[dict]
cond:"," vs dict`freeformwhere;
cond:(parse each cond);
if[any (not(first each cond[;1]) in (in;within;like))*((type each first each cond[;1])=102h);'`$.schema.errors[`freeformnot;`errormessage]];
if[any 2=count each cond;cond:?[2=count each cond;cond[;1];cond[]]];
.dataaccess.checkcolumns[dict`tablename;cond[;1];`freeformwhere];
if[not all [last each (cond[;0] in .schema.allowedops)];'`$(dict`freeformwhere),.schema.errors[`freeformoperators;`errormessage]];
};
checkfreeformby:{[dict]
if[not ((dict`freeformby) ss "!")~`long$();'`$.schema.errors[`freeformbydict;`errormessage],.schema.examples[`freeformby;`example]];
cond:"," vs dict`freeformby;
cond:(parse each cond);
.dataaccess.checkcolumns[dict`tablename;cond;`freeformby];
};
checkfreeformcolumns:{[dict]
example:"sym,time,mid:0.5*bidprice+askprice";
validfuncs:`avg`cor`count`cov`dev`distinct`first`last`max`med`min`prd`sum`var`wavg`wsum`0`1`2`3`4`5`6`7`8`9`;
cond:(dict`freeformcolumn),",",(dict`freeformcolumn);
cond:"," vs cond;
if[(cond ?\:":")~(count each cond);
cond:cond,'" ";
cond:distinct " " vs raze cond;
.dataaccess.checkcolumns[dict`tablename;(`$cond) except validfuncs;`freeformby]];
if[not (cond ?\:":")~(count each cond);
loc:(cond ?\:":")=(count each cond);
.dataaccess.checkcolumns[dict`tablename;`$(cond where loc);`freeformby];
rcond:(1+(cond where not loc) ?\:":")_'(cond where not loc),'" ";
isletter:rcond in .Q.an;
scond:distinct " " vs trim ?[raze isletter;raze rcond;" "];
.dataaccess.checkcolumns[dict`tablename;(`$scond) except validfuncs;`freeformcolumns];]
};
================================================================================
FILE: TorQ_code_dataaccess_customfuncs.q
SIZE: 2,176 characters
================================================================================
//- Script to load in custom functionality:
//- Useful for splitting queries
\d .dacustomfuncs
//- (i) rollover
//- Function to determine which partitions the getdata function should query
//- e.g If the box is based in Paris (GMT+01:00) and rollover is at midnight London time then tzone:-01:00
//- e.g If the box is UTC based and rollover is at 10pm UTC then rover: 22:00
rollover:{[tabname;hdbtime;prc]
// Extract the data from tableproperties.csv
A:?[.checkinputs.tablepropertiesconfig;((=;`tablename;(enlist tabname));(=;`proctype;(enlist prc)));();`rolltimeoffset`rolltimezone`datatimezone`partitionfield!`rolltimeoffset`rolltimezone`datatimezone`partitionfield];
// Output
A:first each A;
// Get the hdbtime adjustment
adjroll::exec adjustment from .tz.t asof `timezoneID`localDateTime!(A[`rolltimezone];hdbtime);
// convert rolltimeoffset from box timezone -> utc
rolltimeUTC:`time$A[`rolltimeoffset]+adjroll;
// convert from data timezone -> utc
adjdata:exec adjustment from .tz.t asof `timezoneID`gmtDateTime!(A[`datatimezone];hdbtime+adjroll);
querytimeUTC:`time$hdbtime+$[0Nn~adjdata;00:00;adjdata];
$[querytimeUTC >= rolltimeUTC;:A[`partitionfield]$hdbtime;:offsetbyone[hdbtime;A[`partitionfield]]];
};
//- (ii) getpartitionrange
//- offset times for non-primary time columns
// example @[`date$(starttime;endtime);1;+;not `time~`time]
partitionrange:{[tabname;hdbtimerange;prc;timecol]
// Get the partition fields from default rollover
hdbtimerange:rollover[tabname;;prc] each hdbtimerange+00:00;
partfield:@[value;`.Q.pf;`];
C:?[.checkinputs.tablepropertiesconfig;((=;`tablename;(enlist tabname));(=;`proctype;(enlist prc)));();(1#`ptc)!1#`primarytimecolumn];
// Output the partitions allowing for non-primary timecolumn
@[hdbtimerange;1;+;any timecol=raze C[`ptc]];
if[partfield=`int;hdbtimerange:`long$`timestamp$hdbtimerange];
:hdbtimerange};
// Gets the last rollover
lastrollover:{:rollover[x;.proc.cp[];`hdb]};
offsetbyone:{[time;pfield]
if[pfield~`date;:`date$time-1D];
if[pfield~`month;:.Q.addmonths[time;-1]];
:(`year$time)-1;
};
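// Example: offsetbyone[2020.01.15D10:00;`date] -> 2020.01.14 (steps back one partition value)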
================================================================================
FILE: TorQ_code_dataaccess_dataaccessutils.q
SIZE: 4,254 characters
================================================================================
\d .checkinputs
//- utils for reading in config
readtableproperties:{[tablepropertiepath]
.lg.o[`readtableproperties;"loading table properties"];
table:`tablename`proctype xkey readcsv[tablepropertiepath;"ssssstsss"]; //read in table from file
alltable:?[table;enlist(in;`proctype;enlist`all`);0b;()]; //find any instance of the use "all" or blank for proctype
table:table,![alltable;();0b;(enlist`proctype)!enlist(enlist `hdb)],![alltable;();0b;(enlist`proctype)!enlist(enlist `rdb)]; //join rdb and hdb entries for any "all" or blank entries
table:![table;enlist(in;`proctype;enlist`all`);0b;`symbol$()]; //remove "all" or blank entries from table
table:?[table;$[.proc.proctype=`gateway;();enlist(=;`proctype;`.proc.proctype)];0b;()];
table:update .eodtime.datatimezone ^ datatimezone, .eodtime.rolltimeoffset ^ rolltimeoffset,.eodtime.rolltimezone^rolltimezone from table;
table:update `date ^ partitionfield from table where proctype<>`rdb;
.lg.o[`readtableproperties;"Table properties successfully loaded"];
:table;
};
readcheckinputs:{[checkinputspath] spliltcolumns[readcsv[checkinputspath;"sbs*"];`invalidpairs;`]};
readcsv:{[path;types]
if[not pathexists path:hsym path;'path];
:(types;1#",")0:path;
};
pathexists:{[path] path~key path};
spliltcolumns:{[x;columns;types]@[x;columns;spliltandcast;types]};
spliltandcast:{[x;typ]typ$"|"vs/:x};
//- functions:
//- (i) .dataaccess.getmetainfo - mapping from tablename to metainfo;
getmetainfo:{
partfield:$[()~key`.Q.pf;`;.Q.pf];
metainfo:1!/:`columns`types`attributes xcol/:`c`t`a#/:0!/:meta each tables`.;
:1!flip(`tablename`partfield`metas`proctype)!(tables`.;partfield;metainfo;.proc.proctype);
};
//- formatstring - inserts text into strings
//- formatstring["I have {} apples and {} oranges";10] - "I have 10 apples and 10 oranges"
//- formatstring["I have {n1} apples and {n2} oranges";`n1`n2!10 20] - "I have 10 apples and 20 oranges"
//- params can be type (+/-)1-19, otherwise ignored
formatstring:{[str;params]
if[not 99h~type params;params:enlist[`]!enlist[params]];
if[not 11h~type key params;:params];
params:where[abs[type each params]within 1 19]#params;
params:-1_/:.Q.s each params;
ssr/[str;"{",'string[key params],'"}";get params]
};
//- join table properties for a given table onto input params
jointableproperties:{[inputparams]
tableproperties:.checkinputs.tablepropertiesconfig (inputparams`tablename;.proc.proctype);
metainfo:.dataaccess.metainfo inputparams`tablename;
inputparams[`metainfo]:metainfo;
inputparams[`tableproperties]:tableproperties,enlist[`partfield]#metainfo;
:.[inputparams;(`tableproperties;`getrollover`getpartitionrange);.Q.dd[`.dataaccess]];
};
//- extract from subdict of inputparams
extractfromsubdict:{[inputparams;subdict;property]
if[not property in key inputparams subdict;'`$"gettableproperty:invalid property"];
:inputparams[subdict;property];
};
gettableproperty:extractfromsubdict[;`tableproperties;]; //- extract from `tableproperties key in inputparams
|
// Field names and widths for end-of-central-directory.
.finos.unzip.priv.wecd:.finos.util.dict(
`sig;4; / end of central dir signature 4 bytes (0x06054b50)
`dnu;2; / number of this disk 2 bytes
`dcd;2; / number of the disk with the start of the central directory 2 bytes
`den;2; / total number of entries in the central directory on this disk 2 bytes
`ten;2; / total number of entries in the central directory 2 bytes
`csz;4; / size of the central directory 4 bytes
`cof;4; / offset of start of central directory with respect to the starting disk number 4 bytes
`cln;2; / .ZIP file comment length 2 bytes
`cmt;0; / .ZIP file comment (variable size)
)
// Field names and widths for ZIP64 end-of-central-directory locator.
.finos.unzip.priv.wecl64:.finos.util.dict(
`sig;4; / zip64 end of central dir locator signature 4 bytes (0x07064b50)
`dcd;4; / number of the disk with the start of the zip64 end of central directory 4 bytes
`cof;8; / relative offset of the zip64 end of central directory record 8 bytes
`tnd;4; / total number of disks 4 bytes
)
// Field names and widths for ZIP64 end-of-central-directory.
.finos.unzip.priv.wecd64:.finos.util.dict(
`sig;4; / zip64 end of central dir signature 4 bytes (0x06064b50)
`s64;8; / size of zip64 end of central directory record 8 bytes
`ver;2; / version made by 2 bytes
`vrr;2; / version needed to extract 2 bytes
`dnu;4; / number of this disk 4 bytes
`dcd;4; / number of the disk with the start of the central directory 4 bytes
`den;8; / total number of entries in the central directory on this disk 8 bytes
`ten;8; / total number of entries in the central directory 8 bytes
`csz;8; / size of the central directory 8 bytes
`cof;8; / offset of start of central directory with respect to the starting disk number 8 bytes
`xds;0; / zip64 extensible data sector (variable size)
)
// Field names and widths for file data.
.finos.unzip.priv.wfd:.finos.util.dict(
`sig;4; / local file header signature 4 bytes (0x04034b50)
`ver;1; / version needed to extract 2 bytes
`os ;1; / ??
`flg;2; / general purpose bit flag 2 bytes
`cmp;2; / compression method 2 bytes
`mtm;2; / last mod file time 2 bytes
`mdt;2; / last mod file date 2 bytes
`crc;4; / crc-32 4 bytes
`csz;4; / compressed size 4 bytes
`usz;4; / uncompressed size 4 bytes
`nln;2; / file name length 2 bytes
`xln;2; / extra field length 2 bytes
)
// Field names and widths for central directory.
.finos.unzip.priv.wcd:.finos.util.dict(
`sig;4; / central file header signature 4 bytes (0x02014b50)
`ver;2; / version made by 2 bytes
`vrr;2; / version needed to extract 2 bytes
`flg;2; / general purpose bit flag 2 bytes
`cmp;2; / compression method 2 bytes
`mtm;2; / last mod file time 2 bytes
`mdt;2; / last mod file date 2 bytes
`crc;4; / crc-32 4 bytes
`csz;4; / compressed size 4 bytes
`usz;4; / uncompressed size 4 bytes
`nln;2; / file name length 2 bytes
`xln;2; / extra field length 2 bytes
`cln;2; / file comment length 2 bytes
`dnu;2; / disk number start 2 bytes
`iat;2; / internal file attributes 2 bytes
`xat;4; / external file attributes 4 bytes
`lof;4; / relative offset of local header 4 bytes
)
// Field names and widths for extra field.
.finos.unzip.priv.wxfd:.finos.util.dict(
`id ;2; / header id 2 bytes
`sz ;2; / data size 2 bytes
)
// Private API
// Parse end-of-central-directory record.
// @param x bytes
// @return end-of-central-directory record
.finos.unzip.priv.pecd:{
r:.finos.unzip.priv.split[.finos.unzip.priv.wecd;0]x;
  r:(key[r]except`sig`cmt)#r;
r:update cmt:"c"$(neg cln)#x from r;
r}
// Parse a central directory record.
// @param x (bytes;extra)
// @param y index
// @param z header
// @return (record;next index)
// @see .finos.unzip.priv.parse
.finos.unzip.priv.pcd:{
e:x 1;
x:x 0;
|
// @kind function
// @category stats
// @desc Train an ordinary least squares model on data
// @param endog {number[][]|number[]} The endogenous variable
// @param exog {number[][]|number[]} Variables that predict the
// endog variable
// @param trend {boolean} Whether a trend is added to the model
// @returns {dictionary} Contains the following information:
// modelInfo - Coefficients and statistical values calculated during the
// fitting process
// predict - A projection allowing for prediction on new input data
stats.OLS.fit:{[endog;exog;trend]
stats.i.checkLen[endog;exog;"exog"];
endog:"f"$endog;
exog:"f"$$[trend;1f,'exog;exog];
if[1=count exog[0];exog:flip enlist exog];
coef:first enlist[endog]lsq flip exog;
modelInfo:stats.i.OLSstats[coef;endog;exog;trend];
returnInfo:enlist[`modelInfo]!enlist modelInfo;
predict:stats.OLS.predict returnInfo;
returnInfo,enlist[`predict]!enlist predict
}
// @desc Predict values using coefficients calculated via OLS
// @param config {dictionary} Information returned from `OLS.fit`
// including:
// modelInfo - Coefficients and statistical values calculated during the
// fitting process
// predict - A projection allowing for prediction on new input data
// @param exog {table|number[][]|number[]} The exogenous variables
// @returns {number[]} The predicted values
stats.OLS.predict:{[config;exog]
modelInfo:config`modelInfo;
trend:`yIntercept in key modelInfo`variables;
exog:"f"$$[trend;1f,'exog;exog];
coef:modelInfo`coef;
if[1=count exog[0];exog:flip enlist exog];
sum coef*flip exog
}
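// Example usage - a minimal sketch with made-up data showing the fit/predict workflow;
// any equal-length numeric vectors can be substituted:
//   y:1 3 5 7 9f;                  / endogenous variable
//   x:0 1 2 3 4f;                  / single exogenous variable
//   mdl:stats.OLS.fit[y;x;1b];     / fit with a trend (y-intercept) term
//   mdl[`modelInfo;`coef]          / fitted coefficients - intercept 1, slope 2 for this data
//   mdl[`modelInfo;`variables]     / per-coefficient stdErr, tStat, pValue and 95% CI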
// @kind function
// @category stats
// @desc Train a weighted least squares model on data
// @param endog {number[][]|number[]} The endogenous variable
// @param exog {number[][]|number[]} Variables that predict the
// endog variable
// @param weights {float[]} The weights to be applied to the endog variable
// @param trend {boolean} Whether a trend is added to the model
// @returns {dictionary} Contains the following information:
// modelInfo - Coefficients and statistical values calculated during the
// fitting process
// predict - A projection allowing for prediction on new input data
stats.WLS.fit:{[endog;exog;weights;trend]
stats.i.checkLen[endog;exog;"exog"];
if[weights~(::);weights:()];
if[count weights;stats.i.checkLen[endog;weights;"weights"]];
endog:"f"$endog;
// Calculate the weights if not given
// Must be inversely proportional to the error variance
if[not count weights;
trained:stats.OLS.fit[endog;exog;0b];
residuals:endog-trained[`predict]exog;
trained:stats.OLS.fit[abs residuals;exog;0b];
weights:1%{x*x}trained[`predict]exog
];
exog:"f"$$[trend;1f,'exog;exog];
if[1=count exog[0];exog:flip enlist exog];
updDependent:flip[exog]mmu weights*'endog;
updPredictor:flip[exog]mmu weights*'exog;
coef:raze inv[updPredictor]mmu updDependent;
modelInfo:stats.i.OLSstats[coef;endog;exog;trend];
modelInfo,:enlist[`weights]!enlist weights;
returnInfo:enlist[`modelInfo]!enlist modelInfo;
predict:stats.WLS.predict returnInfo;
returnInfo,enlist[`predict]!enlist predict
}
// @desc Predict values using coefficients calculated via WLS
// @param config {dictionary} Information returned from `WLS.fit`
// including:
// modelInfo - Coefficients and statistical values calculated during the
// fitting process
// predict - A projection allowing for prediction on new input data
// @param exog {table|number[][]|number[]} The exogenous variables
// @returns {number[]} The predicted values
stats.WLS.predict:stats.OLS.predict
// @kind data
// @category stats
// @desc Load in functions defined within `describe.json`
// @type dictionary
stats.describeFuncs:.j.k raze read0`$path,"/stats/describe.json"
// @kind function
// @category stats
// @desc Generates descriptive statistics of a table
// @param tab {table} A simple table
// @returns {dictionary} A tabular description of aggregate information
// of each column
stats.describe:{[tab]
funcTab:stats.describeFuncs;
if[not all `func`type in cols value funcTab;
'"Keyed table must contain a func and type attribute"];
typeKeys:`num`temporal`other;
typeFunc:distinct raze value[funcTab][`type];
typCheck:raze not enlist[typeFunc] in string each typeKeys;
if[any typCheck;
'"Invalid type given:",raze typeFunc where typCheck
];
descKeys:key funcTab;
funcs:get each value[funcTab]`func;
// Get indices of where each type of function is in the function list
typeDict:typeKeys!where@'(string each typeKeys) in/:\:value[funcTab]`type;
numTypes:"hijef";
temporalTypes:"pmdznuvt";
numCols:exec c from meta[tab]where t in numTypes;
temporalCols:exec c from meta[tab]where t in temporalTypes;
otherCols:cols[tab]except numCols,temporalCols;
colDict:typeKeys!(numCols;temporalCols;otherCols);
applyInd:where 0<count each colDict;
inds:asc distinct raze typeDict applyInd;
n:count funcs;
m:count applyInd;
// Create empty list so num/other have same amount of funcs
// so that they can be joined later
funcDict:applyInd!(m,n)#{(::)};
funcUpd:stats.i.updFuncDict[funcs;typeDict]/[funcDict;applyInd];
tabUpd:colDict[applyInd]#\:tab;
descVals:(,'/){flip x@\:/:flip y}'[funcUpd;tabUpd];
// Reorder columns to original order
descVals:cols[tab]xcols descVals;
descKeys[inds]!descVals[inds]
}
// @kind function
// @category utilities
// @desc Percentile calculation for an array
// @param array {number[]} A numerical array
// @param perc {float} Percentile of interest
// @returns {float} The value below which `perc` percent of the observations
// within the array are found
stats.percentile:{[array;perc]
array:array where not null array;
percent:perc*-1+count array;
i:0 1+\:floor percent;
iDiff:0^deltas asc[array]i;
iDiff[0]+(percent-i 0)*last iDiff
}
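// Example usage - linear interpolation between the two nearest order statistics:
//   stats.percentile[0 1 2 3 4f;0.5]   / 2f   (median)
//   stats.percentile[0 1 2 3 4f;0.9]   / 3.6  (90th percentile)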
================================================================================
FILE: ml_ml_stats_utils.q
SIZE: 6,578 characters
================================================================================
// stats/utils.q - Utility functions
// Copyright (c) 2021 Kx Systems Inc
//
// Utility functions for implementations within the stats library
\d .ml
// @private
// @kind function
// @category statsUtility
// @desc Check that the length of the endog and another parameter
// are equal
// @param endog {float[]} The endogenous variable
// @param param {number[][]|number[]} A parameter to compare the length of
// @param paramName {string} The name of the parameter
// @returns {::;err} Null on success; signals an error if the lengths are not equal
stats.i.checkLen:{[endog;param;paramName]
if[not count[endog]=count param;
'"The length of the endog variable and ",paramName," must be equal"
]
}
// @private
// @kind function
// @category statsUtility
// @desc Calculate descriptive stats for an OLS regression
// @param coef {float[]} The coefficients for each predictor variable
// @param endog {float[]} The endogenous variable
// @param exog {float[][]} Values that predict the endog variable
// @param trend {boolean} Whether a trend is added to the model
// @returns {dictionary[]} The descriptive statistics
stats.i.OLSstats:{[coef;endog;exog;trend]
n:count endog;
p:count[coef]-trend;
statsDict:stats.i.OLScalcs[coef;endog;exog;n;p];
variables:stats.i.coefStats[coef;endog;exog;trend;n;p];
`coef`variables`statsDict!(coef;variables;statsDict)
}
// @private
// @kind function
// @category statsUtility
// @desc Calculate descriptive stats for an OLS regression
// @param coef {float[]} The coefficients for each predictor variable
// @param endog {float[]} The endogenous variable
// @param exog {float[][]} Values that predict the endog variable
// @param n {long} The number of endog variables
// @param p {long} Number of coefs not including trend value
// @returns {dictionary[]} The descriptive statistics
stats.i.OLScalcs:{[coef;endog;exog;n;p]
coefDict:enlist[`coef]!enlist coef;
modelInfo:enlist[`modelInfo]!enlist coefDict;
predicted:stats.OLS.predict[modelInfo;exog];
mseCalc:mse[predicted;endog];
r2:r2Score[predicted;endog];
r2Adj:r2AdjScore[predicted;endog;p];
// Calculate degrees of freedom
dfTotal:n-1;
dfModel:p;
dfResidual:dfTotal-dfModel;
// Sum of squares
SSTotal:sse[endog;avg endog];
SSModel:sse[predicted;first avg predicted];
SSResidual:sse[predicted;endog];
// Regression mean squares are the sum squares%degrees of freedom
MSTotal:SSTotal%dfTotal;
MSModel:SSModel%dfModel;
MSResidual:SSResidual%dfResidual;
fStat:MSModel%MSResidual;
logLike:stats.i.logLikelihood[SSResidual;n];
rseCalc:rse[predicted;endog;dfResidual];
pValue:2*1-pyStats[`:t][`:cdf;<][fStat;p;dfResidual];
dictKeys:`dfTotal`dfModel`dfResidual`sumSquares`meanSquares,
`fStat`r2`r2Adj`mse`rse`pValue`logLike;
dictVals:(dfTotal;dfModel;dfResidual;SSResidual;MSResidual;fStat;r2;
r2Adj;mseCalc;rseCalc;pValue;logLike);
dictKeys!dictVals
}
// @private
// @kind function
// @category statsUtility
// @desc Calculate the loglikelihood of the residuals
// @param SSResiduals {float} Sum of squares of the residual
// @param n {long} The number of endog variables
// @returns {float[]} The loglikelihood value
stats.i.logLikelihood:{[SSResidual;n]
n2:n%2;
((neg[n2]*log[2*3.14])-(n2*log[SSResidual%n]))-n2
}
// @private
// @kind function
// @category statsUtility
// @desc Calculate descriptive stats for the calculated coefficients
// @param coef {float[]} The coefficients for each predictor variable
// @param endog {float[]} The endogenous variable
// @param exog {float[][]} Values that predict the endog variable
// @param trend {boolean} Whether a trend is added to the model
// @param n {long} The number of endog variables
// @param p {long} Number of coefs not including trend value
// @returns {dictionary[]} The descriptive statistics for the
// calculated coefficients
stats.i.coefStats:{[coef;endog;exog;trend;n;p]
varNames:`$"x",'string til count coef;
if[trend;varNames:`yIntercept,-1_varNames];
stdErr:stats.i.coefStdErr[coef;exog;endog];
tStat:coef%stdErr;
pValue:2*1-pyStats[`:t][`:cdf;<][;n-p-1]each abs tStat;
// Calculate the confidence interval
C195:stats.i.CI95[n;p]each stdErr;
([name: varNames]coef;stdErr;tStat;pValue;C195)
}
// @private
// @kind function
// @category statsUtility
// @desc Calculate the standard error of the coefficient
// @param coef {float[]} The calculated coefficient
// @param exog {float[][]} Values that predict the endog variable
// @param endog {float[]} The endogenous variable
// @returns {float[]} The standard error of the coefficient
stats.i.coefStdErr:{[coef;exog;endog]
shape:count[exog]-count first exog;
error:{x*x}endog-exog mmu coef;
dSigmaSq:sum error%shape;
matrixInv:inv flip[exog]mmu exog;
mVarCovar:dSigmaSq*matrixInv;
// Get the diagonal values from a matrix
diag:mVarCovar ./: 2#/:til count mVarCovar;
sqrt diag
}
// @private
// @kind function
// @category statsUtility
// @desc Calculate the 95% confidence interval of the standard error
// of the coefficient
// @param n {long} Number of endog values
// @param p {long} Number of coefficients
// @param stdErr {float} The standard error of the coefficient
// @returns {float} The confidence interval
stats.i.CI95:{[n;p;stdErr]
alpha:(1-.95)%2;
// Degrees of freedom
df:(n-p)-1;
// Calculate the percent point function
ppf:pyStats[`:t][`:ppf][alpha; df]`;
neg ppf*stdErr
}
// @private
// @kind dictionary
// @category statsUtility
// @desc Infinity values for different types
// @type dictionary
infTypes:`int`long`real`float`timestamp`month`date`datetime`timespan`minute`second`time
stats.i.infinity:infTypes!infTypes$\:0w
|
// Handle-opening helper - updated to force a new handle to be opened on each call
gethandle:{[name]
.z.pc exec first w from .servers.SERVERS where procname=name;
exec first w from .servers.getservers[`procname;name;()!();1b;1b]
};
================================================================================
FILE: TorQ_tests_k4unit.q
SIZE: 8,581 characters
================================================================================
/ k4 unit testing, loads tests from csv's, runs+logs to database
/ csv columns: action,ms,bytes,lang,code (csv with colheaders)
/ if your code contains commas enclose the whole code in "quotes"
/ usage: q k4unit.q -p 5001
\d .KU
VERBOSE:@[value;`.KU.VERBOSE;1]; // 0 - no logging to console, 1 - log filenames, >1 - log tests
DEBUG:$[`stop in key .Q.opt .z.x;1;0]; // 0 - trap errors, 1 - suspend if errors (except action=`fail)
DELIM:@[value;`.KU.DELIM;","]; // csv delimiter
SAVEFILE:@[value;`.KU.SAVEFILE;`:KUTR.csv]; // test results savefile
\d .
/ KUT <-> KUnit Tests
KUT:([]action:`symbol$();ms:`int$();bytes:`long$();lang:`symbol$();code:`symbol$();repeat:`int$();minver:`float$();file:`symbol$();comment:())
/ KUltd `:dirname and/or KUltf `:filename.csv
/ KUrt[] / run tests
/ KUTR <-> KUnit Test Results
/ KUrtf`:filename.csv / refresh expected <ms> and <bytes> based on observed results in KUTR
KUTR:flip `action`ms`bytes`lang`code`repeat`file`msx`bytesx`ok`okms`okbytes`valid`timestamp`csvline!"SIJSSISIJBBBBZI" $\: ();
/ look at KUTR in browser or q session
/ select from KUTR where not ok // KUerr
/ select from KUTR where not okms // KUslow
/ select count i by ok,okms,action from KUTR
/ select count i by ok,okms,action,file from KUTR
/ KUstr[] / save test results
/ KUltr[] / reload previously saved test results
/ action:
/ `beforeany - onetime, run before any tests
/ `beforeeach - run code before tests in every file
/ `before - run code before tests in this file ONLY
/ `run - run code, check execution time against ms
/ `true - run code, check if returns true(1b)
/ `fail - run code, it should fail (2+`two)
/ `after - run code after tests in this file ONLY
/ `aftereach - run code after tests in each file
/ `afterall - onetime, run code after all tests, use for cleanup/finalise
/ lang: k or q (or s if you really feel you must..), default q
/ code: code to be executed
/ repeat: number of repetitions (do[repeat;code]..), default 1
/ ms: max milliseconds it should take to run, 0 => ignore
/ bytes: bytes it should take to run, 0 => ignore
/ minver: minimum version of kdb+ (.z.K)
/ file: filename
/ action,ms,bytes,lang,code,file: from KUT
/ msx: milliseconds taken to eXecute code
/ bytesx: bytes used to eXecute code
/ ok: true if the test completes correctly (note: it's correct for a fail task to fail)
/ okms: true if msx is not greater than ms, ie if performance is ok
/ okbytes: true if bytesx is not greater than bytes, ie if memory usage is ok
/ valid: true if the code is valid (ie doesn't crash - note: `fail code is valid if it fails)
/ timestamp: when test was run
/ comment: description of the test if it's obscure..
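/ example session (illustrative path only):
/   KUltd`:tests        / load all conforming *.csv test files under tests/
/   KUrt[]              / run the loaded tests, appending results to KUTR
/   KUerr               / inspect any failing tests
/   KUstr[]             / save results to .KU.SAVEFILE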
KUstr:{.KU.SAVEFILE 0:.KU.DELIM 0:update code:string code from KUTR} / save test results
KUltr:{`KUTR upsert("SIJSSIJSIBBBBZI";enlist .KU.DELIM)0:.KU.SAVEFILE} / reload previously saved test results
KUltf:{ / (load test file) - load tests in csv file <x> into KUT
before:count KUT;
this:update file:x,action:lower action,lang:`q^lower lang,code:`$code,ms:0^ms,bytes:0j^bytes,repeat:1|repeat,minver:0^minver from `action`ms`bytes`lang`code`repeat`minver`comment xcol("SIJS*IF*";enlist .KU.DELIM)0:x:hsym x;
KUT,:select from this where minver<=.z.K;
/KUT,:update file:x,action:lower action,lang:`q^lower lang,ms:0^ms,bytes:0j,repeat:1|repeat from `action`ms`lang`code`repeat`comment xcol("SISSI*";enlist .KU.DELIM)0:x:hsym x;
neg before-count KUT}
KUltd:{ / (load test dir) - load all *.csv files in directory <x> which conform to k4unit into KUT
before:count KUT;
files:f where (lower[f:(` sv) each (x,'key x)] like "*.csv");
KUltf each files where all each `action`lang`code in/: `$csv vs' first each read0 each files;
neg before-count KUT}
KUrt:{ / (run tests) - run contents of KUT, save results to KUTR
update csvline:(raze value exec 2+til count i by file from KUT) from `KUT;
before:count KUTR;uf:exec asc distinct file from KUT;i:0;
if[.KU.VERBOSE;.lg.o[`k4unit;"start"]];
KUerrparse[`beforeany;] exec KUexec'[lang;code;repeat],file,csvline from KUT where action=`beforeany;
do[count uf;
ufi:uf[i];KUTI:select from KUT where file=ufi;
if[.KU.VERBOSE;.lg.o[`k4unit;(string ufi)," ",(string exec count i from KUTI where action in `run`true`fail)," test(s)"]];
KUerrparse[`beforeeach;] exec KUexec'[lang;code;repeat],file,csvline from KUT where action=`beforeeach;
KUerrparse[`before;] exec KUexec'[lang;code;repeat],file,csvline from KUTI where action=`before;
/ preserve run,true,fail order
exec KUact'[action;lang;code;repeat;ms;bytes;file;csvline] from KUTI where action in`true`fail`run;
KUerrparse[`after;] exec KUexec'[lang;code;repeat],file,csvline from KUTI where action=`after;
KUerrparse[`aftereach;] exec KUexec'[lang;code;repeat],file,csvline from KUT where action=`aftereach;
i+:1];
KUerrparse[`afterall;] exec KUexec'[lang;code;repeat],file,csvline from KUT where action=`afterall;
if[.KU.VERBOSE;.lg.o[`k4unit;"end"]];
neg before-count KUTR}
KUpexec:{[prefix;lang;code;repeat;allowfail]
s:(string lang),")",prefix,$[1=repeat;string code;"do[",(string repeat),";",(string code),"]"];
if[1<.KU.VERBOSE;.lg.o[`k4unit;s]];
$[.KU.DEBUG & allowfail;value s;@[value;s;{(`err;`$x;y)}[;code]]]
}
// If in error - it now returns the error as well as the offending code
KUexec:KUpexec["";;;;1b]
// Generate error logs from beforeeach, before, after and aftereach tests
KUerrparse:{[action;out]
vals:1_' out where `err ~/: first each out:raze each flip value out;
.lg.e[`KUexecerr;] each KUerrparseinner[action;;;;] .' vals;
if[action in `run`true`fail;:1b]
}
// Handle test runs including error handling
KUrunerr:{[action;out]
$[`err~first out;
// For run and true tests, log the error and signal the test failed, for fail tests signal no error
$[action~`fail;0b;
action in `run`true;[.lg.e[`KUexecerr;] KUerrparseinner[action;] . 1_out;1b]];
// For true and fail tests add error string for 'stop mode' if it is activated
$[action~`run;0b;
action~`fail;$[.proc.stop;'string[action]," test failure in file ",string[out 1]," on line ",string out 2;1b];
action~`true;$[first out;0b;$[.proc.stop;'string[action]," test failure in file ",string[out 1]," on line ",string out 2;1b]]
]
]
}
// Generate more detailed error messages
KUerrparseinner:{[action;err;code;file;line]
string[action]," error in file ",string[file]," on line ",string[line]," - ",string[err],". Code: '",string[code],"'"
}
KUact:{[action;lang;code;repeat;ms;bytes;file;line]
msx:0;bytesx:0j;ok:okms:okbytes:valid:0b;
if[action=`run;
r:KUpexec["\\ts ";lang;code;repeat;1b];failed:KUrunerr[action;r,file,line];
msx:`int$$[failed;0;r 0];bytesx:`long$$[failed;0;r 1];
ok:not failed;okms:$[ms;not msx>ms;1b];okbytes:$[bytes;not bytesx>bytes;1b];valid:not failed
];
if[action=`true;
r:KUpexec["";lang;code;repeat;1b];failed:KUrunerr[action;r,file,line];
ok:$[failed;0b;r~1b];okms:okbytes:1b;valid:not failed];
if[action=`fail;
r:KUpexec["";lang;code;repeat;0b];failed:not KUrunerr[action;r,file,line];
ok:failed;okms:okbytes:valid:1b];
`KUTR insert(action;ms;bytes;lang;code;repeat;file;msx;bytesx;ok;okms;okbytes;valid;.z.Z;line);
}
KUrtf:{ / (refresh test file) updates test file x with realistic <ms>/<bytes>/<repeat> based on seen values of msx/bytesx from KUTR
if[not x in exec file from KUTR;'"no test results found"];
/x 0:.KU.DELIM 0:select action,ms,lang,string code,repeat,comment from((`code xkey KUT)upsert select code,ms:floor 1.25*msx from KUTR)where file=x}
kut:`code xkey select from KUT where file=x;kutr:select from KUTR where file=x,action=`run;
kutr:update repeat:1,ms:floor 1.5*msx%repeat from kutr where 75<ms%repeat;
kutr:update repeat:500000&floor repeat*50%1|msx,ms:75 from kutr where 75>=ms%repeat;
kutr:update bytes:`long$floor 1.5*bytesx from kutr;
x 0:.KU.DELIM 0:select action,ms,bytes,lang,string code,repeat,minver,comment from kut upsert select code,ms,bytes,repeat from kutr}
KUsaveresults:{set[hsym[`$getenv[`KDBTESTS],"/previousruns/",string[x]];y]}
KUf::distinct exec file from KUTR / for instance: KUrtf each KUf
KUslow::delete okms from select from KUTR where not okms
KUslowf::distinct exec file from KUslow
KUbig::delete okbytes from select from KUTR where not okbytes
KUbigf::distinct exec file from KUbig
KUerr::delete ok from select from KUTR where not ok
KUerrf::distinct exec file from KUerr
KUinvalid::delete ok,valid from select from KUTR where not valid
KUinvalidf::distinct exec file from KUinvalid
\d .
@[value;"\\l k4unit.custom.q";::];
================================================================================
FILE: TorQ_tests_merge_tplogreplay_database.q
SIZE: 878 characters
================================================================================
quote:([]time:`timestamp$(); sym:`g#`symbol$(); bid:`float$(); ask:`float$(); bsize:`long$(); asize:`long$(); mode:`char$(); ex:`char$(); src:`symbol$())
trade:([]time:`timestamp$(); sym:`g#`symbol$(); price:`float$(); size:`int$(); stop:`boolean$(); cond:`char$(); ex:`char$();side:`symbol$())
quote_iex:([]time:`timestamp$(); sym:`g#`symbol$(); bid:`float$(); ask:`float$(); bsize:`long$(); asize:`long$(); mode:`char$(); ex:`char$(); srctime:`timestamp$())
trade_iex:([]time:`timestamp$(); sym:`g#`symbol$(); price:`float$(); size:`int$(); stop:`boolean$(); cond:`char$(); ex:`char$(); srctime:`timestamp$())
packets:([] time:`timestamp$(); sym:`symbol$(); src:`symbol$(); dest:`symbol$(); srcport:`long$(); destport:`long$(); seq:`long$(); ack:`long$(); win:`long$(); tsval:`long$(); tsecr:`long$(); flags:(); protocol:`symbol$(); length:`long$(); len:`long$(); data:())
================================================================================
FILE: TorQ_tests_merge_tplogreplay_settings.q
SIZE: 1,707 characters
================================================================================
|
createissue:{[project;summary;description;issuetype;assignee;labels]
d:enlist[`]!enlist(::); //create null key to prevent casting type of dict
d[`project]:enlist[`key]!enlist project; //add project key
d[`summary]:summary; //title of issue
d[`issuetype]:enlist[`name]!enlist issuetype; //type by name (`Task `Bug etc.)
d[`assignee]:enlist[`name]!enlist assignee; //username to assign to
d[`description]:description; //body of issue
d[`labels]:labels; //labels
d:enlist[`]_d; //remove null key
d:enlist[`fields]!enlist d; //create JIRA data structure
r:.req.post[url,"issue";enlist["Content-Type"]!enlist .req.ty`json;.j.j d]; //send POST request
:r`self; //return API URL for created issue
}
getprojects:{[]
:`name`key`id#.req.get[url,"project";()!()]; //get projects available in JIRA
}
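//example usage (hypothetical project, assignee and labels; assumes url & credentials are configured):
// getprojects[]                                                            /list projects visible to the configured user
// createissue[`DEMO;"Nightly job failed";"See attached log";`Bug;`jdoe;enlist`ops]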
\d .
================================================================================
FILE: reQ_examples_util.q
SIZE: 147 characters
================================================================================
\d .utl
cfg:{(!/)"S*"$flip "=" vs'read0 ` sv`:examples,` sv x,`cfg}
writecfg:{(` sv`:examples,` sv x,`cfg)0:"="sv'flip({string key x};value)@\:y}
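/ example (hypothetical config name) - write then read back examples/myapp.cfg;
/ values are stored as strings, one key=value pair per line:
/   writecfg[`myapp;`host`port!("localhost";"8080")]
/   cfg`myapp               / `host`port!("localhost";"8080")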
================================================================================
FILE: reQ_lint.q
SIZE: 1,075 characters
================================================================================
.utl.require"req"; //import module
system"l ",getenv[`DEVELOPER_HOME],"/ws/qlint.q_"; //load linter
rt:delete from .qlint.rules.defaultRules where label in `MISSING_DEPENDENCY`RESERVED_NAME`VAR_Q_ERROR`DEPRECATED_DATETIME;
/ MISSING_DEPENDENCY - doesn't properly handle dependencies in other namespaces
/ RESERVED_NAME - .req.get, .req.delete, .url.parse ...
/ VAR_Q_ERROR - "host" used as a variable in places
/ DEPRECATED_DATETIME - used in cookie code
fn:{if[104=type v:$[-11=type x;value;] x;:.z.s first value v];`$first -3#value v} //get filename
ln:{if[104=type v:$[-11=type x;value;] x;:.z.s first value v];first -2#value v} //get line number of function
t:update
fname:fn'[qualifiedName],
startLine+ln'[qualifiedName],
endLine+ln'[qualifiedName]
from .qlint.lintNS[`.req`.b64`.url`.cookie`.status`.auth;rt];
-1 .qlint.i.writers.stdout t;
if[not count .z.x;exit count t]; //keep alive if any args on cmd line
================================================================================
FILE: reQ_req_auth.q
SIZE: 2,443 characters
================================================================================
\d .auth
// @kind function
// @category private
// @fileoverview *EXPERIMENTAL* prompt for authorization if requested
// @param h {dict} HTTP response headers
// @param u {string|symbol|#hsym} URL
// @return {string} updated URL with supplied credentials
getauth:{[h;u] /h-headers,u-URL
/* prompt for user & pass when site requests basic auth */
h:upper[key h]!value h; //upper case header names
if[not h[`$"www-authenticate"] like "Basic *";'"unsupported auth challenge"]; //check it needs basic auth
-1"Site requested basic auth\nWARNING: user & pass will show in plain text\n"; //warn user before they type pass
1"User: ";s:read0 0; //get username
1"Pass: ";p:read0 0; //get password
:.url.format @[.url.parse0[0b] u;`auth;:;s,":",p]; //update URL with supplied username & pass
}
// @kind data
// @category public
// @fileoverview storage for basic auth credential cache
cache:([host:`$()]auth:();expires:`timestamp$())
// @kind function
// @category private
// @fileoverview cache auth string for a given host
// @param host {string} hostname
// @param auth {string} auth string in format "user:pass"
// @return null
setcache:{[host;auth]cache[`$host]:`auth`expires!(auth;.z.p+0D00:15:00)}
// @kind function
// @category private
// @fileoverview get cached auth string for a given host
// @param hst {string} hostname
// @return {string} cached auth string
getcache:{[hst]
r:exec first auth from cache where host=`$hst,expires>.z.p;
if[count r;:r];
if[netrcenabled;:readnetrc hst];
:();
}
// @kind data
// @category public
// @fileoverview boolean flag to determine whether to use ~/.netrc by default
netrcenabled:not()~key .os.hfile`.netrc
// @kind data
// @category public
// @fileoverview location of .netrc file, by default ~/.netrc
netrclocation:.os.hfile`.netrc
// @kind function
// @category private
// @fileoverview retrieve login from .netrc file
// @param host {string} hostname to get login for
// @return {string} auth string in format "user:pass"
readnetrc:{[host]
i:.os.read netrclocation;
t:(uj/){enlist(!/)"S*"$flip x} each (where i like "machine *") cut " " vs/:i;
if[0=count t:select from t where machine like host;:()];
:":"sv first[t]`login`password;
}
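// example ~/.netrc entry readable by readnetrc (hypothetical host & credentials) - each
// token pair must sit on its own line, with a new entry starting at each "machine" line:
//   machine example.com
//   login jdoe
//   password s3cret
// with the above, getcache["example.com"] falls back to "jdoe:s3cret" when nothing is cached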
\d .
================================================================================
FILE: reQ_req_b64.q
SIZE: 581 characters
================================================================================
\d .b64
// @kind function
// @category public
// @fileoverview base64 encode a string. Where available, defaults to .Q.btoa built-in
// @param x {string} string to be encoded
// @return {string} encoded string
enc:{(neg[c] _ .Q.b6 0b sv' 00b,/:6 cut raze (0b vs'`byte$x),(8*c)#0b),(c:neg[count x]mod 3)#"="}
enc:@[value;`.Q.btoa;{[x;y]x}enc]
// @kind function
// @category public
// @fileoverview base64 decode a string
// @param x {string} base64 string to be decoded
// @return {string} decoded string
dec:{(`char$0b sv'8 cut raze 2_'0b vs'`byte$.Q.b6?x) except "\000"}
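// example round trip:
//   enc"hello world"       / "aGVsbG8gd29ybGQ="
//   dec enc"hello world"   / "hello world"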
\d .
================================================================================
FILE: reQ_req_cookie.q
SIZE: 4,825 characters
================================================================================
\d .cookie
// @kind data
// @category public
// @fileoverview storage for cookies
jar:([host:();path:();name:()] val:();expires:`datetime$();maxage:`long$();secure:`boolean$();httponly:`boolean$();samesite:`$())
// @kind function
// @category public
// @fileoverview Add or update a cookie in the jar
// @param h {string} hostname on which to apply cookie
// @param c {string} cookie string
// @return {null}
addcookie:{[h;c]
d:(!). "S=;"0:c; //parse cookie into dict
n:string first key d;v:first value d; //extract cookie name & value
d:lower[key d]!value d; //make all keys lower case
r:`host`path`name`val!(".",h;d[`path],"*";n;v); //build up record
if[`domain in key d;r[`host]:"*.",d`domain]; //if domain in cookie, use it for host
r[`expires]:"Z"$" "sv@[;1 2]" "vs d`expires; //parse expiration date & time
r[`maxage]:"J"$d`$"max-age"; //TODO calculate expires from maxage
r[`secure]:`secure in key d; //check if Secure attribute is set
r[`httponly]:`httponly in key d; //check if HttpOnly attribute is set
r[`samesite]:`$d`samesite; //check if SameSite attribute is set
`.cookie.jar upsert enlist r; //add cookie to the jar
}
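// example (hypothetical host & cookie string - in practice this is driven by a
// Set-Cookie response header rather than called directly):
//   addcookie["example.com";"sessionid=abc123;path=/;secure"]
//   jar                    / inspect the stored cookie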
|
// @private
// @kind data
// @category statsUtility
// @desc Meta type letters to symbolic names
stats.i.metaTypes:" bgxhijefcCspmdznuvt"!
`general`boolean`guid`byte`short`int`long`real`float`char`string,
`symbol`timestamp`month`date`datetime`timespan`minute`second`time
// @private
// @kind function
// @category statsUtility
// @desc Update the function dictionary applied to data
// @param funcs {fn[]} Functions loaded from `.ml.stats.describeFuncs`
// @param typeDict {dictionary} Indices of functions to be applied for
// each type
// @param funcDict {dictionary} Contains all functions to be applied
// for each type
// @param typ {symbol} The type of function to be extracted (
// `num`temporal`other)
// @returns {dictionary} The updated funcDict for each `typ`
stats.i.updFuncDict:{[funcs;typeDict;funcDict;typ]
funcDict[typ;typeDict typ]:funcs typeDict typ;
funcDict
}
================================================================================
FILE: ml_ml_timeseries_fit.q
SIZE: 8,162 characters
================================================================================
// timeseries/fit.q - Fit timeseries models
// Copyright (c) 2021 Kx Systems Inc
//
// Fitting functionality for time series models.
// Models include AR, ARCH, ARMA, ARIMA, and SARIMA.
\d .ml
// @kind function
// @category modelFit
// @desc Fit an AutoRegressive model (AR)
// @param endog {float[]} Endogenous variable (time-series) from which to build
// a model. This is the target variable from which a value is to be predicted
// @param exog {table|float[]|(::)} Exogenous variables are additional
// variables which may be accounted for to improve the model, if (::)/()
// this will be ignored
// @param p {int} The number/order of time lags of the model
// @param trend {boolean} Is a trend line to be accounted for when fitting
// the model
// @return {dictionary} Contains the following information:
// modelInfo - Model coefficients and data needed for future predictions
// predict - A projection allowing for prediction of future values
ts.AR.fit:{[endog;exog;p;trend]
// Cast endog to floating value
endog:"f"$endog;
exog:ts.i.fitDataCheck[endog;exog];
// Estimate coefficients
coeffs:$[sum trend,count exog;
ts.i.estimateCoefficients[endog;exog;endog;`p`q`trend!p,0,trend];
ts.i.durbinLevinson[endog;p]
];
// Get lagged values needed for future predictions
lagVals:neg[p]#endog;
// Return dictionary with required info for predictions
dictKeys:`coefficients`trendCoeff`exogCoeff`pCoeff`lagVals;
dictVals:(coeffs;trend#coeffs;coeffs trend +til count exog 0;
neg[p]#coeffs;lagVals);
modelDict:dictKeys!dictVals;
returnInfo:enlist[`modelInfo]!enlist modelDict;
predictFunc:ts.AR.predict returnInfo;
returnInfo,enlist[`predict]!enlist predictFunc
}
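// Example usage (illustrative series only; the returned `predict projection is defined in
// timeseries/predict.q and forecasts future values from exogenous data and a step count):
//   x:100?1f;                        / random series
//   mdl:ts.AR.fit[x;(::);2;1b];      / AR(2) with a trend term and no exogenous variables
//   mdl[`modelInfo;`coefficients]    / fitted model coefficients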
// @kind function
// @category modelFit
// @desc Fit an AutoRegressive Moving Average model (ARMA)
// @param endog {float[]} Endogenous variable (time-series) from which to build
// a model. This is the target variable from which a value is to be predicted
// @param exog {table|float[]|(::)} Exogenous variables are additional
// variables which may be accounted for to improve the model, if (::)/()
// this will be ignored
// @param p {int} The number/order of time lags of the model
// @param q {int} The number of residual errors to be accounted for
// @param trend {boolean} Is a trend line to be accounted for when fitting
// the model
// @return {dictionary} Contains the following information:
// modelInfo - Model coefficients and data needed for future predictions
// predict - A projection allowing for prediction of future values
ts.ARMA.fit:{[endog;exog;p;q;trend]
// Cast endog to floating value
endog:"f"$endog;
exog:ts.i.fitDataCheck[endog;exog];
paramDict:`p`q`trend!p,q,trend;
modelDict:$[q~0;
// If q = 0 then model is an AR model
[dictKeys:`qCoeff`residualVals`residualCoeffs`paramDict;
dictVals:(();();();paramDict);
ts.AR.fit[endog;exog;p;trend][`modelInfo],dictKeys!dictVals
];
ts.i.ARMA.model[endog;exog;paramDict]
];
returnInfo:enlist[`modelInfo]!enlist modelDict;
predictFunc:ts.ARMA.predict returnInfo;
returnInfo,enlist[`predict]!enlist predictFunc
}
// @kind function
// @category modelFit
// @desc Fit an AutoRegressive Integrated Moving Average model (ARIMA)
// @param endog {float[]} Endogenous variable (time-series) from which to build
// a model. This is the target variable from which a value is to be predicted
// @param exog {table|float[]|(::)} Exogenous variables are additional
// variables which may be accounted for to improve the model, if (::)/()
// this will be ignored
// @param p {int} The number/order of time lags of the model
// @param d {int} The order of time series differencing used in integration
// @param q {int} The number of residual errors to be accounted for
// @param trend {boolean} Is a trend line to be accounted for in fitting of
// model
// @return {dictionary} Contains the following information:
// modelInfo - Model coefficients and data needed for future predictions
// predict - A projection allowing for prediction of future values
ts.ARIMA.fit:{[endog;exog;p;d;q;trend]
exog:ts.i.fitDataCheck[endog;exog];
// Apply integration (non seasonal)
I:ts.i.differ[endog;d;()!()]`final;
// Fit an ARMA model on the differenced time series
modelDict:ts.ARMA.fit[I;d _exog;p;q;trend]`modelInfo;
// Retrieve the original data to be used when fitting on new data
originalData:neg[d]#endog;
// Produce the relevant differenced data for use in future predictions
originalDiff:enlist[`originalData]!enlist d{deltas x}/originalData;
// return relevant data
modelDict,:originalDiff;
returnInfo:enlist[`modelInfo]!enlist modelDict;
predictFunc:ts.ARIMA.predict returnInfo;
returnInfo,enlist[`predict]!enlist predictFunc
}
// @kind function
// @category modelFit
// @desc Fit a Seasonal AutoRegressive Integrated Moving Average model
// (SARIMA)
// @param endog {float[]} Endogenous variable (time-series) from which to build
// a model. This is the target variable from which a value is to be predicted
// @param exog {table|float[]|(::)} Exogenous variables are additional
// variables which may be accounted for to improve the model, if (::)/()
// this will be ignored
// @param p {int} The number/order of time lags of the model
// @param d {int} The order of time series differencing used in integration
// @param q {int} The number of residual errors to be accounted for
// @param trend {boolean} Is a trend line to be accounted for in fitting of
// model
// @param season {dictionary} Is a dictionary containing required seasonal
// components
// @return {dictionary} Contains the following information:
// modelInfo - Model coefficients and data needed for future predictions
// predict - A projection allowing for prediction of future values
ts.SARIMA.fit:{[endog;exog;p;d;q;trend;season]
// Cast endog to floating value
endog:"f"$endog;
ts.i.dictCheck[season;`P`Q`D`m;"seas"];
// Apply error checking (exogenous data not converted to matrix?)
exog:ts.i.fitDataCheck[endog;exog];
// Apply appropriate seasonal+non seasonal differencing
I:ts.i.differ[endog;d;season];
// Create dictionary with p,q and seasonal components
seasonInfo:((1+til each season`P`Q)*season`m),season[`m],trend;
dict:`p`q`P`Q`m`trend!p,q,seasonInfo;
// Add additional seasonal components
dict[`additionalP`additionalQ]:(raze'){1+til[x]+/:y}'[(p;q);dict`P`Q];
  // Generate the data needed to regenerate the original series after differencing
diffKeys:`originalData`seasonData;
diffVals:(d{deltas x}/neg[d]#endog;neg[prd season`D`m]#I`init);
diffDict:diffKeys!diffVals;
// Apply SARMA model and postpend differenced original data
modelDict:ts.i.SARMA.model[I`final;exog;dict],diffDict;
returnInfo:enlist[`modelInfo]!enlist modelDict;
predictFunc:ts.SARIMA.predict returnInfo;
returnInfo,enlist[`predict]!enlist predictFunc
}
// @kind function
// @category modelFit
// @desc Fit an AutoRegressive Conditional Heteroscedasticity model
// (ARCH)
// @param residuals {number[]} Residual errors from fitted time series model
// @param p {int} The number/order of time lags of the model
// @return {dictionary} Contains the following information:
// modelInfo - Model coefficients and data needed for future predictions
// predict - A projection allowing for prediction of future values
ts.ARCH.fit:{[residuals;p]
// Cast to floating value
residuals:"f"$residuals;
  // Square the residuals
squareResiduals:residuals*residuals;
paramDict:`p`q`trend!p,0,1b;
// Using the residuals errors calculate coefficients
coeff:ts.i.estimateCoefficients[squareResiduals;();squareResiduals;paramDict];
// Get lagged values needed for future predictions
lastResiduals:neg[p]#squareResiduals;
// Return dictionary with required info for predictions
dictKeys:`coefficients`trendCoeff`pCoeff`residualVals;
dictVals:(coeff;coeff 0;1_coeff;lastResiduals);
modelDict:dictKeys!dictVals;
returnInfo:enlist[`modelInfo]!enlist modelDict;
predictFunc:ts.ARCH.predict returnInfo;
returnInfo,enlist[`predict]!enlist predictFunc
}
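// Example usage (illustrative - in practice the residuals would come from a previously
// fitted mean model such as ts.AR.fit rather than a random vector):
//   res:100?0.5;                     / stand-in residual series
//   mdl:ts.ARCH.fit[res;1];          / ARCH(1) fit on the squared residuals
//   mdl[`modelInfo;`coefficients]    / trend coefficient followed by the lag coefficient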
================================================================================
FILE: ml_ml_timeseries_init.q
SIZE: 470 characters
================================================================================
// timeseries/init.q - Load timeseries library
// Copyright (c) 2021 Kx Systems Inc
//
// Timeseries forecasting is the use of a model to predict
// the future values of a dataset based on historical observations.
.ml.loadfile`:optimize/init.q
.ml.loadfile`:timeseries/utils.q
.ml.loadfile`:fresh/extract.q
.ml.loadfile`:timeseries/fit.q
.ml.loadfile`:timeseries/predict.q
.ml.loadfile`:timeseries/misc.q
.ml.loadfile`:util/utils.q
.ml.i.deprecWarning`timeSeries
================================================================================
FILE: ml_ml_timeseries_misc.q
SIZE: 4,956 characters
================================================================================
// timeseries/misc.q - Timeseries functions
// Copyright (c) 2021 Kx Systems Inc
//
// Miscellaneous functionality relating to time series analysis
// and model generation procedures
\d .ml
// @kind function
// @category misc
// @desc Summary of the stationarity of each vector of a multivariate
// time series or a single vector
// @param data {dictionary|table|number[]} a time series of interest,
// the entries should
// in each case be numeric data types.
// @return {dictionary} informative outputs from the python adfuller test
// indicating the stationarity of each vector entry of the relevant dataset
ts.stationarity:{[data]
dataType:type data;
// Names to be provided to form the key for the return table
keyNames:$[99h=dataType;key data;
98h=dataType;cols data;
enlist`data
];
// Column names associated with the returns from the augmented Dickey Fuller
// test
criticalVals:`$raze each"CriticalValue_",/:string(1;5;10),\:"%";
dataCols:`ADFstat`pvalue`stationary,criticalVals;
scores:ts.i.stationaryScores[data;dataType];
keyNames!flip dataCols!scores
}
// @kind function
// @category misc
// @desc Retrieve the best parameters for an ARIMA model based on the
// Akaike Information Criterion (AIC)
// @param train {dictionary} training data dictionary
// containing `endog/`exog data
// @param test {dictionary} testing data dictionary
// containing `endog/`exog data
// @param len {int} number of steps forward to predict
// @param params {dictionary} parameter sets to fit ARIMA model with
// @return {dictionary} parameter set which produced the lowest AIC score
ts.ARIMA.aicParam:{[train;test;len;params]
ts.i.dictCheck[;`endog`exog;]'[(train;test);("train";"test")];
ts.i.dictCheck[params;`p`d`q`trend;"params"];
// Get AIC scores for each set of params
scores:ts.i.aicFitScore[train;test;len]each flip params;
// Return best value
bestScore:min scores;
scoreEntry:enlist[`score]!enlist bestScore;
params[;scores?bestScore],scoreEntry
}
// Time-series feature engineering functionality
|
// @kind function
// @category check
// @desc Load keras customized models
// @return {::} Load models or standard out if not available
check.loadkeras:{
$[check.keras[];
[loadfile`:code/customization/models/libSupport/keras.p;
loadfile`:code/customization/models/libSupport/keras.q
];
[-1"Requirements for Keras models not satisfied. Keras along with ",
"Tensorflow or Theano must be installed. Keras models will be excluded ",
"from model evaluation."
];
]
}
// @kind function
// @category check
// @desc Load PyTorch customized models
// @return {::} Load models or standard out if not available
check.loadtorch:{
$[0~checkimport 1;
[loadfile`:code/customization/models/libSupport/torch.p;
loadfile`:code/customization/models/libSupport/torch.q
];
[-1"Requirements for PyTorch models not satisfied. Torch must be ",
"installed. PyTorch models will be excluded from model evaluation."
];
]
}
// @kind function
// @category check
// @desc Load latex module
// @return {::} Load latex or standard out if not available
check.loadlatex:{
$[0~checkimport 2;
[loadfile`:code/nodes/saveReport/latex/latex.p;
loadfile`:code/nodes/saveReport/latex/latex.q
];
-1"Requirements for Latex report generation not satisfied. ",
"Reports will be generated using reportlab.";
]
}
// @kind function
// @category check
// @desc Load Theano customized models
// @return {::} Load models or standard out if not available
check.loadtheano:{
$[0~checkimport 5;
[loadfile`:code/customization/models/libSupport/theano.p;
loadfile`:code/customization/models/libSupport/theano.q
];
[-1"Requirements for Theano models not satisfied. Theano must be ",
"installed. Theano models will be excluded from model evaluation."
];
]
}
================================================================================
FILE: ml_automl_code_customization_init.q
SIZE: 423 characters
================================================================================
// code/customization/init.q - Load customized models
// Copyright (c) 2021 Kx Systems Inc
//
// Attempt to load keras/pytorch/theano
\d .automl
loadfile`:code/customization/check.q
// Initialize model key within AutoML namespace needed for when keras or torch
// are not installed
models.init:()
// Attempt to load keras/pytorch functionality
check.loadkeras[]
check.loadtorch[]
check.loadtheano[]
check.loadlatex[]
================================================================================
FILE: ml_automl_code_customization_models_libSupport_keras.q
SIZE: 6,397 characters
================================================================================
// code/customization/models/libSupport/keras.q - Customized keras models
// Copyright (c) 2021 Kx Systems Inc
//
// The purpose of this file is to include all the necessary utilities to
// create a minimal interface for the support of keras models. It also
// acts as a location to which users defined keras models are added
\d .automl
// @kind function
// @category models
// @desc Fit model on training data and score using test data
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param seed {int} Seed used for initialising the same model
// @param mname {symbol} Name of the model being applied
// @return {int|float|boolean} The predicted values for a given model as
// applied to input data
models.keras.fitScore:{[data;seed;mname]
if[mname~`multi;
data[;1]:models.i.npArray@'flip@'value@'.ml.i.oneHot each data[;1]
];
dataDict:`xtrain`ytrain`xtest`ytest!raze data;
mdl:get[".automl.models.keras.",string[mname],".model"][dataDict;seed];
mdl:get[".automl.models.keras.",string[mname],".fit"][dataDict;mdl];
get[".automl.models.keras.",string[mname],".predict"][dataDict;mdl]
}
// @kind function
// @category models
// @desc Fit a vanilla keras model to data
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param model {<} Model object being passed through the system
// (compiled/fitted)
// @return {<} A vanilla fitted keras model
models.keras.binary.fit:models.keras.reg.fit:models.keras.multi.fit:{[data;model]
model[`:fit][models.i.npArray data`xtrain;data`ytrain;`batch_size pykw 32;
`verbose pykw 0];
model
}
// @kind function
// @category models
// @desc Compile a keras model for binary problems
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param seed {int} Seed used for initialising the same model
// @return {<} The compiled keras models
models.keras.binary.model:{[data;seed]
models.i.numpySeed[seed];
if[models.i.tensorflowBackend;models.i.tensorflowSeed[seed]];
mdl:models.i.kerasSeq[];
mdl[`:add]models.i.kerasDense[32;`activation pykw`relu;
`input_dim pykw count first data`xtrain];
mdl[`:add]models.i.kerasDense[1;`activation pykw`sigmoid];
mdl[`:compile][`loss pykw`binary_crossentropy;`optimizer pykw`rmsprop];
mdl
}
// @kind function
// @category models
// @desc Predict test data values using a compiled model
// for binary problem types
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param model {<} Model object being passed through the system (
// compiled/fitted)
// @return {boolean} The predicted values for a given model
models.keras.binary.predict:{[data;model]
.5<raze model[`:predict][models.i.npArray data`xtest]`
}
// @kind function
// @category models
// @desc Compile a keras model for regression problems
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param seed {int} Seed used for initialising the same model
// @return {<} The compiled keras models
models.keras.reg.model:{[data;seed]
models.i.numpySeed[seed];
if[models.i.tensorflowBackend;models.i.tensorflowSeed[seed]];
mdl:models.i.kerasSeq[];
mdl[`:add]models.i.kerasDense[32;`activation pykw`relu;
`input_dim pykw count first data`xtrain];
mdl[`:add]models.i.kerasDense[1 ;`activation pykw`relu];
mdl[`:compile][`loss pykw`mse;`optimizer pykw`rmsprop];
mdl
}
// @kind function
// @category models
// @desc Predict test data values using a compiled model
// for regression problem types
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param model {<} Model object being passed through the system
// (compiled/fitted)
// @return {int|float} The predicted values for a given model
models.keras.reg.predict:{[data;model]
raze model[`:predict][models.i.npArray data`xtest]`
}
// @kind function
// @category models
// @desc Compile a keras model for multiclass problems
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param seed {int} Seed used for initialising the same model
// @return {<} The compiled keras models
models.keras.multi.model:{[data;seed]
models.i.numpySeed[seed];
if[models.i.tensorflowBackend;models.i.tensorflowSeed[seed]];
mdl:models.i.kerasSeq[];
mdl[`:add]models.i.kerasDense[32;`activation pykw`relu;
`input_dim pykw count first data`xtrain];
mdl[`:add]models.i.kerasDense[count distinct data[`ytrain]`;
`activation pykw`softmax];
mdl[`:compile][`loss pykw`categorical_crossentropy;
`optimizer pykw`rmsprop];
mdl
}
// @kind function
// @category models
// @desc Predict test data values using a compiled model
// for multiclass problem types
// @param data {dictionary} Containing training and testing data according to
// keys `xtrn`ytrn`xtst`ytst
// @param model {<} Model object being passed through the system (
// compiled/fitted)
// @return {int|float|boolean} The predicted values for a given model
models.keras.multi.predict:{[data;model]
predict:model[`:predict][models.i.npArray data`xtest]`;
models.i.npArgMax[predict;1]`
}
// load required python modules
models.i.npArgMax:.p.import[`numpy ]`:argmax;
models.i.npArray:.p.import[`numpy ]`:array;
models.i.kerasSeq:.p.import[`keras.models ]`:Sequential;
models.i.kerasDense:.p.import[`keras.layers ]`:Dense;
models.i.numpySeed:.p.import[`numpy.random ]`:seed;
models.i.backend:.p.import[`keras.backend]`:backend;
// Check if tensorflow is being used as the backend for keras
models.i.tensorflowBackend:"tensorflow"~models.i.backend[]`
// import appropriate random seed depending on tensorflow version
if[models.i.tensorflowBackend;
models.i.tf:.p.import[`tensorflow];
models.i.tfType:$[2>"I"$first models.i.tf[`:__version__]`;
`:set_random_seed;
`:random.set_seed
];
models.i.tensorflowSeed:models.i.tf models.i.tfType
];
p)def tfWarnings(warn):
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = warn
// allow multiprocess
.ml.loadfile`:util/mproc.q
if[0>system"s";
.ml.multiProc.init[abs system"s"]("system[\"l automl/automl.q\"]";
".automl.loadfile`:init.q")
];
================================================================================
FILE: ml_automl_code_customization_models_libSupport_theano.q
SIZE: 394 characters
================================================================================
// code/customization/models/libSupport/theano.q - Customized Theano models
// Copyright (c) 2021 Kx Systems Inc
//
// The purpose of this file is to include all the necessary utilities to
// create a minimal interface for the support of Theano models. It also acts
// as a location to which users defined Theano models could be added
\d .automl
// import theano
theano:.p.import[`theano];
================================================================================
FILE: ml_automl_code_customization_models_libSupport_torch.q
SIZE: 404 characters
================================================================================
// code/customization/models/libSupport/torch.q - Customized PyTorch models
// Copyright (c) 2021 Kx Systems Inc
//
// The purpose of this file is to include all the necessary utilities to
// create a minimal interface for the support of PyTorch models. It also
// acts as a location to which users defined PyTorch models could be added
\d .automl
// import pytorch as torch
torch:.p.import[`torch];
================================================================================
FILE: ml_automl_code_graph.q
SIZE: 6,692 characters
================================================================================
// code/graph.q - Generate graph for automl
// Copyright (c) 2021 Kx Systems Inc
//
// Generate the complete graph for AutoML following the configuration defined
// in `graph/Automl_graph.png`. Code is structured through the addition of all
// relevant nodes followed by the connection of input nodes for these nodes to
// the relevant source node.
\d .automl
// Generate an empty graph
graph:.ml.createGraph[]
|
/ map (u)nicode characters to their (a)scii equivalents
ua:(!/) 2 1 0#""
ua["\342\200\223"]:"--" / endash
ua["\342\200\224"]:"---" / emdash
ua["\342\200[\231\230]"]:"'" / single quotes
ua["\342\200[\234\235]"]:"\"" / double quotes
ua["\342\200\246"]:"..." / ellipses
ua["\302\222"]:"'" / single quotes
ua["\302\241"]:"!" / !
ua["\302\243"]:"$" / pound symbol
ua["\302\260"]:"o" / o
ua["\302\262"]:"^2" / ^2
ua["\302\263"]:"^3" / ^3
ua["\302\267"]:"-" / -
ua["\302\274"]:"1/4" / 1/4
ua["\302\275"]:"1/2" / 1/2
ua["\302\276"]:"3/4" / 3/4
ua["\302\277"]:"?" / ?
ua["\303[\200\201\202\203\204\205]"]:"A" / A
ua["\303\206"]:"AE" / AE
ua["\303\207"]:"C" / C
ua["\303[\210\211\212\213]"]:"E" / E
ua["\303[\214\215\216\217]"]:"I" / I
ua["\303\220"]:"D" / D
ua["\303\221"]:"N" / N
ua["\303[\222\223\224\225\226\230]"]:"O" / O
ua["\303[\231\232\233\234]"]:"U" / U
ua["\303\235"]:"Y" / y
ua["\303\237"]:"s" / s
ua["\303[\240\241\242\243\244\245]"]:"a" / a
ua["\303\246"]:"ae" / ae
ua["\303\247"]:"c" / c
ua["\303[\250\251\252\253]"]:"e" / e
ua["\303[\254\255\256\257]"]:"i" / i
ua["\303\260"]:"d" / d
ua["\303\261"]:"n" / n
ua["\303[\262\263\264\265\266\270]"]:"o" / o
ua["\303[\271\272\273\274]"]:"u" / u
ua["\303\275"]:"y" / y
ua:1_ua
/ map (h)tml entities to their (a)scii equivalents
ha:(!/) 2 1 0#""
ha["<"]:"<" / <
ha[">"]:">" / >
ha["&"]:"&" / &
ha["'"]:"'" / '
ha["""]:"\"" / "
ha[" "]:" " /
ha:1_ha
/ map (p)unctuation characters to their (w)hitespace replacements
pw:(!/) 2 1 0#""
pw["[][\n\\/()<>@#$%^&*=_+.,;:!?-]"]:" " / replace with whitespace
pw["['\"0-9]"]:"" / delete
pw:1_pw
/ search (s)tring for all instances of key[d] and replace with value[d]
sr:{[d;s] ssr/[s;key d;value d]}
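/ example usage:
/   sr[ua]"\302\275 cup"         / "1/2 cup" - unicode vulgar fraction to ascii
/   sr[pw]"hello, world!"        / "hello  world " - punctuation replaced with whitespace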
================================================================================
FILE: funq_wdbc.q
SIZE: 528 characters
================================================================================
wdbc.f:"wdbc.data"
wdbc.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
wdbc.b,:"breast-cancer-wisconsin/"
-1"[down]loading wisconsin-diagnostic-breast-cancer data set";
.ut.download[wdbc.b;;"";""] wdbc.f;
wdbc.XY:(" C",30#"E";",") 0: `$wdbc.f
wdbc.X:1_wdbc.XY
wdbc.y:first wdbc.Y:1#wdbc.XY
wdbc.c:`radius`texture`perimeter`area`smoothness`compactness`concavity
wdbc.c,:`concave_points`symmetry`fractal_dimension
wdbc.c:raze `$"_" sv'string raze wdbc.c,\:/: `mean`se`worst
wdbc.t:flip (`diagnosis,wdbc.c)!wdbc.XY
================================================================================
FILE: funq_wine.q
SIZE: 465 characters
================================================================================
wine.f:"wine.data"
wine.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
wine.b,:"wine/"
-1"[down]loading wine data set";
.ut.download[wine.b;;"";""] wine.f;
wine.XY:("H",13#"E";",")0:`$wine.f
wine.X:1_wine.XY
wine.y:first wine.Y:1#wine.XY
wine.c:`class`alcohol`malic_acid`ash`alcalinity_of_ash`magnesium
wine.c,:`total_phenols`flavanoids`nonflavanoid_phenols`proanthocyanins
wine.c,:`color_intensity`hue`OD280_OD315`proline
wine.t:flip wine.c!wine.XY
================================================================================
FILE: funq_winequality.q
SIZE: 442 characters
================================================================================
winequality.f:`red`white!("winequality-red.csv";"winequality-white.csv")
winequality.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
winequality.b,:"wine-quality/"
-1"[down]loading wine-quality data set";
.ut.download[winequality.b;;"";""] each winequality.f;
.winequality.load:{[f]
YX:value flip t:`quality xcols .Q.id (12#"F";1#";")0:f;
d:`X`Y`y`t!(1_YX;1#YX;YX 0;t);
d}
winequality,:.winequality.load each `$winequality.f
================================================================================
FILE: funq_zoo.q
SIZE: 550 characters
================================================================================
zoo.f:"zoo.data"
zoo.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
zoo.b,:"zoo/"
-1"[down]loading zoo data set";
.ut.download[zoo.b;;"";""] zoo.f;
zoo.c:`animal`hair`feathers`eggs`milk`airborne`aquatic`predator`toothed
zoo.c,:`backbone`breathes`venomous`fins`legs`tail`domestic`catsize`typ
zoo.typ:``mammal`bird`reptile`fish`amphibian`insect`invertebrate
zoo.t:`typ xcols flip zoo.c!("SBBBBBBBBBBBBHBBBJ";",") 0: `$zoo.f
update `zoo.typ!typ from `zoo.t;
zoo.y:first first zoo[`Y`X]: 0 1 cut value flip[zoo.t] _ `animal
zoo.X:"f"$zoo.X
================================================================================
FILE: kdb-common_boot.q
SIZE: 3,543 characters
================================================================================
// Simple Process Initialisation - kdb-common
// Copyright (c) 2021 Jaskirat Rajasansir
// Provides a simple initialisation of kdb-common with require and core libraries
/ Environment variable to change the root that 'require' will initialise and search for libraries from. If this is not
/ set, it will be the folder that 'boot.q' is specified from. If it is specified, 'require' is assumed to be
/ available at *boot.q-dir*/src/require.q
.boot.cfg.appRootEnvVar:`KDB_APPLICATION_ROOT;
/ Modes to start a kdb-common based kdb process.
/ Key is the expected command line argument, the value is the function to perform the initialisation
/ If there are no matches, the process starts with just the core libraries loaded
/ - '--load-libs': Loads comma-separated libraries into the process
/ - '--script': Loads the specified file (not via 'require'), optionally execute a function (via '--script-func') and then exit
.boot.cfg.bootModes:(`symbol$())!`symbol$();
.boot.cfg.bootModes[`$"load-libs"]: `.boot.inits.lib;
.boot.cfg.bootModes[`script]: `.boot.inits.script;
/ The libraries that are always loaded
.boot.cfg.coreLibs:`log`cargs`ns;
/ The root path of the kdb-common libraries
.boot.root.kdbCommon:`:.
/ The root path of the application. This will only be different from '.boot.root.kdbCommon' if $KDB_APPLICATION_ROOT is set
.boot.root.app:`:.;
/ The command line arguments parsed with '.cargs.getWithInternal'
.boot.args:(`symbol$())!();
/ If true, '--debug' was specified on the command line. Currently this enables:
/ * Enables error trap mode 1
/ * Stops the process from exiting when running a '--script'
.boot.debug:0b;
.boot.init:{
.boot.root[`kdbCommon`app]:first ` vs hsym .z.f;
envRoot:getenv .boot.cfg.appRootEnvVar;
if[0 < count envRoot;
.boot.root.app:`$":",envRoot;
];
.boot.debug:0 < count ss[" " sv .z.x; "-debug "];
if[.boot.debug;
system "e 1";
];
-1 "Application root: ",(1_ string .boot.root.app)," | kdb-common root: ",1_ string .boot.root.kdbCommon;
require:` sv .boot.root.kdbCommon,`src`require.q;
system "l ",1_ string require;
.require.init .boot.root.app;
.require.lib each .boot.cfg.coreLibs;
.boot.args,:.cargs.getWithInternal[];
bootMode:first key[.boot.cfg.bootModes] where key[.boot.cfg.bootModes] in key .boot.args;
if[not null bootMode;
.log.info ("Running boot mode function [ Mode: {} ] [ Arg: {} ]"; bootMode; .boot.args bootMode);
get[.boot.cfg.bootModes bootMode] .boot.args bootMode;
];
};
.boot.inits.lib:{[additionalLibs]
additionalLibs:`$"," vs additionalLibs;
if[0 < count additionalLibs except `;
.require.lib each additionalLibs;
];
};
.boot.inits.script:{[qScript]
qScript:hsym `$qScript;
if[not .type.isFile qScript;
.log.error ("Script specified via '--script' is invalid. Cannot load [ File: {} ]"; qScript);
"BootScriptDoesNotExistException";
];
.log.info ("Loading script specified via '--script' [ File: {} ]"; qScript);
system "l ",1_ string qScript;
scriptFunc:`$.boot.args `$"script-func";
if[.ns.isSet scriptFunc;
.log.info ("Executing script function [ Function: {} ]"; scriptFunc);
get[scriptFunc] (`symbol$())!();
];
if[.boot.debug;
.log.info "Script execution completed. Not exiting process as running in DEBUG mode";
:(::);
];
.log.info "Script execution completed. Exiting process";
exit 0;
};
.boot.init[];
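/ Illustrative command-line usage (library and script names below are hypothetical):
/   q boot.q --load-libs util,http
/   q boot.q --script /path/to/job.q --script-func .job.run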
================================================================================
FILE: kdb-common_src_cargs.q
SIZE: 1,932 characters
================================================================================
// Enhanced Command Line Argument Parser
// Copyright (c) 2019 Sport Trades Ltd
// Documentation: https://github.com/BuaBook/kdb-common/wiki/cargs.q
.require.lib`type;
================================================================================
summarystats:{
/- table with compressionratio for each file
statstab::`compressionratio xdesc (update compressionratio:?[algo=0; neg uncompressedLength%compressedLength; uncompressedLength%compressedLength] from statstab);
compressedfiles: select from statstab where not algo = 0;
uncompressedfiles:select from statstab where algo = 0;
/- summarytable
memorysavings: ((sum compressedfiles`uncompressedLength) - sum compressedfiles`compressedLength) % 2 xexp 20;
totalcompratio: (sum compressedfiles`uncompressedLength) % sum compressedfiles`compressedLength;
memoryusage:((sum uncompressedfiles`uncompressedLength) - sum uncompressedfiles`compressedLength) % 2 xexp 20;
totaldecompratio: neg (sum uncompressedfiles`compressedLength) % sum uncompressedfiles`uncompressedLength;
.lg.o[`compression;"Memory savings from compression: ", .Q.f[2;memorysavings], "MB. Total compression ratio: ", .Q.f[2;totalcompratio],"."];
.lg.o[`compression;"Additional memory used from de-compression: ",.Q.f[2;memoryusage], "MB. Total de-compression ratio: ", .Q.f[2;totaldecompratio],"."];
.lg.o[`compression;"Check .cmp.statstab for info on each file."];}
compress:{[filetoCompress;algo;blocksize;level;sizeuncomp]
compressedFile: hsym `$(string filetoCompress),"_kdbtempzip";
/ compress or decompress as appropriate:
cmp:$[algo=0;"de";""];
$[((0 = count -21!filetoCompress) & not 0 = algo)|((not 0 = count -21!filetoCompress) & 0 = algo);
[.lg.o[`compression;cmp,"compressing ","file ", (string filetoCompress), " with algo: ", (string algo), ", blocksize: ", (string blocksize), ", and level: ", (string level), "."];
/ perform the compression/decompression
if[0=algo;comprL:(-21!filetoCompress)`compressedLength];
-19!(filetoCompress;compressedFile;blocksize;algo;level);
/ check the compressed/decomp file and move if appropriate; else delete compressed file and log error
$[((get compressedFile)~sf:get filetoCompress) & (count -21!compressedFile) or algo=0;
[.lg.o[`compression;"File ",cmp,"compressed ","successfully; matches original. Deleting original."];
system "r ", (last ":" vs string compressedFile)," ", last ":" vs string filetoCompress;
/ move the hash files too.
hashfilecheck[compressedFile;filetoCompress;sf];
/-log to the table if the algo wasn't 0
statstab,:$[not 0=algo;(filetoCompress;algo;(-21!filetoCompress)`compressedLength;sizeuncomp);(filetoCompress;algo;comprL;sizeuncomp)]];
[$[not count -21!compressedFile;
[.lg.o[`compression; "Failed to compress file ",string[filetoCompress]];hdel compressedFile];
[.lg.o[`compression;cmp,"compressed ","file ",string[compressedFile]," doesn't match original. Deleting new file"];hdel compressedFile]]]]
];
/ if already compressed/decompressed, then log that and skip.
.lg.o[`compression; "file ", (string filetoCompress), " is already ",cmp,"compressed",". Skipping this file"]]}
hashfilecheck:{[compressedFile;filetoCompress;sf]
/ if running 3.6 or higher, account for anymap type for nested lists
/ check for double hash file if nested data contains symbol vector/atom
$[3.6<=.z.K;
if[77 = type sf; system "r ", (last ":" vs string compressedFile),"# ", (last ":" vs string filetoCompress),"#";
.[{system "r ", (last ":" vs string x),"## ", (last ":" vs string y),"##"};(compressedFile;filetoCompress);.lg.o[`compression;"File does not have enumeration domain"]]];
/ if running below 3.6, nested list types will be 77h+t and will not have double hash file
if[78 <= type sf; system "r ", (last ":" vs string compressedFile),"# ", (last ":" vs string filetoCompress),"#"]]}
================================================================================
FILE: TorQ_code_common_dataaccess.q
SIZE: 3,120 characters
================================================================================
//- common script to enable remote data access via generic get data function
\d .dataaccess
//set the default tableproperties path to ` to allow initialisation intra-process
tablepropertiespath:@[value;`.dataaccess.tablepropertiespath;`]
// to set table properties path passed from -dataaccess parameter
settablepropertiespath:{[]
if[not`dataaccess in key .proc.params;.lg.e[`.dataaccess.settablepropertiespath;"No table properties passed by -dataaccess parameter"]];
resettablepropertiespath hsym`$first .proc.params`dataaccess;
};
// to set table properties path from given path
resettablepropertiespath:{[tablepropertiespath]`.dataaccess.tablepropertiespath set tablepropertiespath};
// to check if table properties config file exists at given path
validtablepropertiespath:{[].dataaccess.tablepropertiespath~key .dataaccess.tablepropertiespath};
// get path to config file for checking data api input parameters
checkinputspath:first .proc.getconfigfile"dataaccess/checkinputs.csv";
// instantiate table for holding table metas of current process
metainfo:([tablename:`$()]partfield:`$();metas:();proctype:`$());
//- init function takes:
//- (i) tablepropertiespath - either from a valid -dataaccess path from cmd line, or passed explicitly
//- otherwise a user can call "init[`:/path/to/tableproperties.csv]"
//- init will:
//- - load data access code from code/common dataaccess
//- - validate table properties config file exists + load it
//- - load config for checking input parameters
//- - write meta info for tables in current process to .dataaccess.metainfo
init:{[tablepropertiespath]
if[@[value;`.aqrest.loadexecute;0b];.aqrest.execute:{[req;props] @[value;req;{(neg .z.w)(.gw.formatresponse[0b;0b;"error: ",x])}]}];
.lg.o[`.dataaccess.init;"running .dataaccess.init"];
.proc.loaddir getenv[`KDBCODE],"/dataaccess";
if[not validtablepropertiespath[];resettablepropertiespath tablepropertiespath];
if[()~key`.checkinputs.tablepropertiesconfig;`.checkinputs.tablepropertiesconfig set .checkinputs.readtableproperties tablepropertiespath];
if[()~key`.checkinputs.checkinputsconfig;`.checkinputs.checkinputsconfig set .checkinputs.readcheckinputs checkinputspath];
`.dataaccess.metainfo upsert .checkinputs.getmetainfo[];
// Check if all the tables presented in tableproperties.csv have been loaded into the current process
if[.proc.proctype=`hdb;
if[not all(exec tablename from .checkinputs.readtableproperties[.dataaccess.tablepropertiespath] where proctype in `hdb`all`) in exec tablename from .dataaccess.metainfo;
.lg.e[`.dataaccess.init;"Missing table from HDB in schema"]]];
.lg.o[`.dataaccess.init;"running .dataaccess.init - finished"];
};
connectcustom:{[f;connectiontab]
@[f;connectiontab;()];
@[.dataaccess.init;.dataaccess.tablepropertiespath;()];
}@[value;`.servers.connectcustom;{{[x]}}]
\d .
if[`dataaccess in key .proc.params;
// set table properties path
.dataaccess.settablepropertiespath[];
// re-initialize on new connections
if[.dataaccess.validtablepropertiespath[];.servers.connectcustom:.dataaccess.connectcustom];
];
================================================================================
FILE: TorQ_code_common_datadog.q
SIZE: 2,749 characters
================================================================================
// DATADOG CHECKS
//Create datadog namespace
\d .dg
//default to disabled - .lg.ext will not be overwritten
enabled:@[value;`enabled;0b]
//default to disabled - datadog agent used
webreq:@[value;`webreq;0b]
//define dogstatsd_port
dogstatsd_port:@[value;`dogstatsd_port;getenv[`DOGSTATSD_PORT]]
//define dogstatsd_apikey
dogstatsd_apikey:@[value;`dogstatsd_apikey;getenv[`DOGSTATSD_APIKEY]]
//define dogstatsd_url
dogstatsd_url:":https://api.datadoghq.com/api/v1/"
//Functions are set to return 1b or 0b based on the health of the process
//Default check (isok) returns 1b from each process to indicate process is up and can be queried.
handlers:(`$())!()
isok:{$[.proc.proctype in key .dg.handlers;.dg.handlers .proc.proctype;1b]}
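/ Illustrative only: a process can register its own health check keyed by proctype
/ (the proctype and check below are hypothetical); isok[] then returns its result
/   .dg.handlers[`rdb]:{0 < count tables[]}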
//define sendmetric and sendevent functions using datadog agent
.dg.sendevent:{[event_title;event_text;tags;alert_type]
$[.z.o like "l*";
system"event_title=",event_title,"; event_text=","\"",event_text,"\"","; tags=","\"#",$[0h=type tags;","sv tags;tags],"\"",";alert_type=",alert_type,"; ","echo \"_e{${#event_title},${#event_text}}:$event_title|$event_text|#$tags|t:$alert_type\" |nc -4u -w0 127.0.0.1 ",dogstatsd_port;
.lg.w[`sendevent;"Currently only linux operating systems are supported to send events"]]
}
.dg.sendmetric:{[metric_name;metric_value]
$[.z.o like "l*";
system"bash -c \"echo -n '",metric_name,":",(string metric_value),"|g|#shell' > /dev/udp/127.0.0.1/",dogstatsd_port,"\"";
.lg.w[`sendmetric;"Currently only linux operating systems are supported to send metrics"]]
}
//define sendmetric and sendevent functions using web request
.dg.sendevent_webreq:{[event_title;event_text;tags;alert_type]
url:`$dogstatsd_url,"events?api_key=",dogstatsd_apikey;
.Q.hp[url;.h.ty`json]
.j.j`title`text`priority`tags`alert_type!(event_title;event_text;"normal";$[0h=type tags;","sv tags;tags];alert_type)
}
.dg.sendmetric_webreq:{[metric_name;metric_value]
url:`$dogstatsd_url,"series?api_key=",dogstatsd_apikey;
unix_time:floor((`long$.z.p)-1970.01.01D00:00)%1e9;
.Q.hp[url;.h.ty`json]
.j.j (enlist `series)!enlist(enlist (`metric`points`host`tags!(metric_name;enlist (unix_time;metric_value);.z.h;"shell")))
}
//Option to override default .lg.ext functionality to send error and warning events to datadog
enablelogging:{[]
.lg.ext:{[olddef;loglevel;proctype;proc;id;message;dict]
olddef[loglevel;proctype;proc;id;message;dict];
if[loglevel in `ERR`WARN;.dg.sendevent[string proc;message;string proctype;]$[loglevel=`ERR;"error";"warning"]]}[@[value;`.lg.ext;{{[loglevel;proctype;proc;id;message;dict]}}]]
}
\d .
if[.dg.enabled;.dg.enablelogging[]]
if[.dg.webreq;.dg.sendevent:.dg.sendevent_webreq;.dg.sendmetric:.dg.sendmetric_webreq]
================================================================================
FILE: TorQ_code_common_dataloader.q
SIZE: 7,100 characters
================================================================================
// A generic dataloader library
// generalisation of http://code.kx.com/wiki/Cookbook/LoadingFromLargeFiles
// will read in a directory of input files and write them out to an HDB
// files are read in chunks using .Q.fsn
// main function to call is loadallfiles
// loadallfiles takes a directory of files to read, and a dictionary
// headers = names of headers in the file e.g. `sym`time`price`size`condition
// types = data types e.g. "SPFIC"
// separator = separator field. enlist it if the first row in the file is header data (same as standard q way) e.g. enlist","
// tablename = name of table to load to, e.g. `trade
// dbdir = database directory to write to e.g. `:hdb
// symdir [optional] = directory to enumerate against; default is to enumerate against dbdir
// dataprocessfunc [optional] = dyadic function to use to further process data before saving.
// Parameters passed in are loadparams dict and data to be modified. Default is {[x;y] y}
// partitiontype [optional] = the partition type - one of `date`month`year`int. Default is `date
// partitioncol [optional] = the name of the column to cast to the partition type to work out which partition the data should go in. default is `time
// chunksize [optional] = size of data chunks in bytes to read at a time. default is 100MB
// compression [optional] = compression parameters to use. list of 3 integers e.g. 17 2 6.
// These are only set when the data is sorted on disk (in the finish function) to save on writing the data compressed, reading in and uncompressing, sorting, and writing out compressed again
// filepattern [optional] = specify pattern used to filter files
// gc [optional] = boolean flag to turn garbage collection on and off. Default is 0b
// e.g.
// .loader.loadallfiles[`headers`types`separator`tablename`dbdir!(`sym`time`price`volume`mktflag`cond`exclude;"SPFICHB";",";`tdc;`:hdb); `:TDC/toload]
\d .loader
================================================================================
Machine learning:
Using embedPy to apply LASSO regression¶
From its deep roots in financial technology KX is expanding into new fields. It is important for q to communicate seamlessly with other technologies. The embedPy interface allows this to be done with Python.
The interface allows the kdb+ interpreter to manipulate Python objects, call Python functions, and load Python libraries. Developers can fuse the technologies, allowing seamless application of q’s high-speed analytics and Python’s extensive libraries.
This white paper introduces embedPy, covering both a range of basic tutorials as well as a comprehensive solution to a machine-learning project.
EmbedPy is available on GitHub for use with kdb+ V3.5+ and
- Python 3.5+ on macOS or Linux
- Python 3.6+ on Windows
The installation directory also contains a README.txt
about embedPy, and a
directory of detailed examples.
EmbedPy basics¶
In this section, we introduce some core elements of embedPy that will be used in the LASSO regression problem that follows.
Installing embedPy in kdb+¶
Download the embedPy package from GitHub: KxSystems/embedPy
Follow the instructions in README.md
for installing the interface.
Load p.q
into kdb+ through either:
- the command line:
$ q p.q
- the q session:
q)\l p.q
Executing Python code from a q session¶
Python code can be executed in a q session by either using the p)
prompt, or the .p
namespace:
q)p)def add1(arg1): return arg1+1
q)p)print(add1(10))
11
q).p.e"print(add1(5))"
6
q).p.qeval"add1(7)" / print Python result
8
Interchanging variables¶
Python objects live in the embedded Python memory space. In q, these are
foreign objects that contain pointers to the Python objects. They can be
stored in q as variables, or as parts of lists, tables, and
dictionaries, and will display foreign
when inspected in the q
console. Foreign objects cannot be serialized by kdb+ or passed over
IPC; they must first be converted to q.
This example shows how to convert a Python variable into a foreign object
using .p.pyget
. A foreign object can be converted into q by using
.p.py2q
:
q)p)var1=2
q).p.pyget`var1
foreign
q).p.py2q .p.pyget`var1
2
Foreign objects¶
Whilst foreign objects can be passed back and forth between q and Python and operated on by Python, they cannot be operated on by q directly. Instead, they must be converted to q data.
To make it easy to convert back and forth between q and Python representations a foreign object can be wrapped as an embedPy object using .p.wrap
q)p:.p.wrap .p.pyget`var1
q)p
{[f;x]embedPy[f;x]}[foreign]enlist
Given an embedPy object representing Python data, the underlying data can be returned as a foreign object or q item:
q)x:.p.eval"(1,2,3)" / Define Python object
q)x
{[f;x]embedPy[f;x]}[foreign]enlist
q)x`. / Return the data as a foreign object
foreign
q)x` / Return the data as q
1 2 3
Edit Python objects from q¶
Python objects are retrieved and executed using .p.get
. This will return
either a q item or foreign object. There is no need to keep a copy of a
Python object in the q memory space; it can be edited directly.
The first parameter of .p.get
is the Python object name, and the second
parameter is either <
or >
, which will return q or foreign objects
respectively. The following parameters will be the input parameters to
execute the Python function.
This example shows how to call a Python function with one input parameter, returning q:
q)p)def add2(x): res = x+2; return(res);
q).p.get[`add2;<] / < returns q
k){$[isp x;conv type[x]0;]x}.[code[code;]enlist[;;][foreign]]`.p.q2pargsenlist
q).p.get[`add2;<;5] / get and execute func, return q
7
q).p.get[`add2;>;5] / get execute func, return foreign object
foreign
q)add2q:.p.get`add2 / define as q function, return an embedPy object
q)add2q[3]` / call function, and convert result to q
5
Python keywords¶
EmbedPy allows keyword arguments to be specified, in any order, using
pykw
:
q)p)def times_args(arg1, arg2): res = arg1 * arg2; return(res)
q).p.get[`times_args;<;`arg2 pykw 10; `arg1 pykw 3]
30
Importing Python libraries¶
To import an entire Python library, use .p.import
and call individual
functions:
q)np:.p.import`numpy
q)v:np[`:arange;12]
q)v`
0 1 2 3 4 5 6 7 8 9 10 11
Individual packages or functions are imported from a Python library by specifying them during the import command.
q)arange:.p.import[`numpy]`:arange / Import function
q)arange 12
{[f;x]embedPy[f;x]}[foreign]enlist
q)arange[12]`
0 1 2 3 4 5 6 7 8 9 10 11
q)p)import numpy as np # Import package using Python syntax
q)p)v=np.arange(12)
q)p)print(v)
[ 0 1 2 3 4 5 6 7 8 9 10 11]
q)stats:.p.import[`scipy.stats] / Import package using embedPy syntax
q)stats[`:skew]
{[f;x]embedPy[f;x]}[foreign]enlist
Applying LASSO regression for analyzing housing prices¶
This analysis uses LASSO regression to determine the prices of homes in Ames, Iowa. The dataset used in this demonstration is the Ames Housing Dataset, compiled by Dean De Cock for use in data-science education. It contains 79 explanatory variables describing various aspects of residential homes which influence their sale prices.
The Least Absolute Shrinkage and Selection Operator (LASSO) method was used for the data analysis. LASSO is a method that improves the accuracy and interpret-ability of multiple linear regression models by adapting the model fitting process to use only a subset of relevant features.
It performs L1 regularization, adding a penalty equal to the absolute value of the magnitude of coefficients, which reduces the less-important features’ coefficients to zero. This leaves only the most relevant feature vectors to contribute to the target (sale price), which is useful given the high dimensionality of this dataset.
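In its standard form (stated here for context; the notation is not taken from the original paper), LASSO chooses coefficients by minimizing the penalized least-squares objective

$$\hat{\beta} = \underset{\beta}{\arg\min}\; \lVert y - X\beta \rVert_2^2 + \alpha \lVert \beta \rVert_1$$

where alpha >= 0 controls the strength of the penalty: the larger alpha is, the more coefficients are shrunk exactly to zero.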
A kdb+ Jupyter notebook on GitHub accompanies this paper.
Cleaning and pre-processing data in kdb+¶
Load data¶
As the data are stored in CSVs, the standard kdb+ method of loading a CSV is used. The raw dataset has column names beginning with numbers, which kdb+ will not allow in queries, so the columns are renamed on loading.
ct:"IISIISSSSSSSSSSSSIIIISSSSSISSSSSSSISIII" / column types
ct,:"SSSSIIIIIIIIIISISISSISIISSSIIIIIISSSIIISSF"
train:(ct;enlist csv) 0: `:train.csv
old:`1stFlrSF`2ndFlrSF`3SsnPorch / old column names
new:`firFlrSF`secFlrSF`threeSsnPorch / new column names
train:@[cols train; where cols[train] in old; :; new] xcol train
Log-transform the sale price¶
The sale price is log-transformed to obtain a simpler relationship of data to the sale price.
update SalePrice:log SalePrice from `train
y:train.SalePrice
Clean data¶
Cleaning the data involves several steps. First, ensure that there are no duplicated data, and then remove outliers as suggested by the dataset’s author.
q)count[train]~count exec distinct Id from train
1b
q)delete Id from `train
q)delete from `train where GrLivArea > 4000
Next, null data points within the features are assumed to mean that the house does not have that feature. Any `NA
values are filled with `No
or `None
, depending on category.
updateNulls:{[t]
noneC:`Alley`MasVnrType;
noC:`BsmtQual`BsmtCond`BsmtExposure`BsmtFinType1`BsmtFinType2`Fence`FireplaceQu;
noC,:`GarageType`GarageFinish`GarageQual`GarageCond`MiscFeature`PoolQC;
a:raze{y!{(?;(=;enlist`NA;y);enlist x;y)}[x;]each y}'[`None`No;(noneC;noC)];
![t;();0b;a]}
train:updateNulls train
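For a single column, the functional amendment built above is equivalent to a conditional qSQL update such as the following (shown for the Alley column only):
update Alley:?[Alley=`NA;`None;Alley] from train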
Convert some numerical features into categorical features, such as mapping months and sub-classes. This is done for one-hot encoding later.
monthDict:(1+til 12)!`Jan`Feb`Mar`Apr`May`Jun`Jul`Aug`Sep`Oct`Nov`Dec
@[`train;`MoSold;monthDict]
subclDict:raze {enlist[x]!enlist[`$"SC",string[x]]}
each 20 30 40 45 50 60 70 75 80 85 90 120 160 180 190
@[`train;`MSSubClass;subclDict]
Convert some categorical features into numerical features, such as assigning grading to each house quality, encoding as ordered numbers. These fields were selected for numerical conversion as their categorical values are easily mapped intuitively, while other fields are less so.
@[`train;;`None`Grvl`Pave!til 3] each `Alley`Street
quals: `BsmtCond`BsmtQual`ExterCond`ExterQual`FireplaceQu
quals,:`GarageCond`GarageQual`HeatingQC`KitchenQual
@[`train;;`No`Po`Fa`TA`Gd`Ex!til 6] each quals
@[`train;`BsmtExposure;`No`Mn`Av`Gd!til 4]
@[`train;;`No`Unf`LwQ`Rec`BLQ`ALQ`GLQ!til 7] each `BsmtFinType1`BsmtFinType2
@[`train;`Functional;`Sal`Sev`Maj2`Maj1`Mod`Min2`Min1`Typ!1+til 8]
@[`train;`LandSlope;`Sev`Mod`Gtl!1+til 3]
@[`train;`LotShape;`IR3`IR2`IR1`Reg!1+til 4]
@[`train;`PavedDrive;`N`P`Y!til 3]
@[`train;`PoolQC;`No`Fa`TA`Gd`Ex!til 5]
@[`train;`Utilities;`ELO`NoSeWa`NoSewr`AllPub!1+til 4]
Feature engineering¶
To increase the model’s accuracy, some features are simplified and combined based on similarities. This is done in three steps demonstrated below.
Simplification of existing features¶
Some numerical features’ scopes are reduced, and several categorical features are mapped to become simple numerical features.
ftrs:`OverallQual`OverallCond`GarageCond`GarageQual`FireplaceQu`KitchenQual
ftrs,:`HeatingQC`BsmtFinType1`BsmtFinType2`BsmtCond`BsmtQual`ExterCond`ExterQual
rng:(1+til 10)!1 1 1 2 2 2 3 3 3 3
{![`train;();0b;enlist[`$"Simpl",string[x]]!enlist (rng;x)]} each ftrs
rng:(1+til 8)!1 1 2 2 3 3 3 4
{![`train;();0b;enlist[`$"Simpl",string[x]]!enlist (rng;x)]} each `PoolQC`Functional
Combination of existing features¶
Some of the features are very similar and can be combined into one. For
example, Fireplaces
and FireplaceQual
can become one overall feature
of FireplaceScore
.
gradeFuncPrd:{[t;c1;c2;cNew]![t;();0b;enlist[`$string[cNew]]!enlist (*;c1;c2)]}
combineFeat1:`OverallQual`GarageQual`ExterQual`KitchenAbvGr,
`Fireplaces`GarageArea`PoolArea`SimplOverallQual`SimplExterQual,
`PoolArea`GarageArea`Fireplaces`KitchenAbvGr
combineFeat2:`OverallCond`GarageCond`ExterCond`KitchenQual,
`FireplaceQu`GarageQual`PoolQC`SimplOverallCond`SimplExterCond,
`SimplPoolQC`SimplGarageQual`SimplFireplaceQu`SimplKitchenQual
combineFeat3:`OverallGrade`GarageGrade`ExterGrade`KitchenScore,
`FireplaceScore`GarageScore`PoolScore`SimplOverallGrade`SimplExterGrade,
`SimplPoolScore`SimplGarageScore`SimplFireplaceScore`SimplKitchenScore;
train:train{gradeFuncPrd[x;]. y}/flip(combineFeat1; combineFeat2; combineFeat3)
update TotalBath:BsmtFullBath+FullBath+0.5*BsmtHalfBath+HalfBath,
AllSF:GrLivArea+TotalBsmtSF,
AllFlrsSF:firFlrSF+secFlrSF,
AllPorchSF:OpenPorchSF+EnclosedPorch+threeSsnPorch+ScreenPorch,
HasMasVnr:((`BrkCmn`BrkFace`CBlock`Stone`None)!((4#1),0))[MasVnrType],
BoughtOffPlan:((`Abnorml`Alloca`AdjLand`Family`Normal`Partial)!((5#0),1))[SaleCondition]
from `train
Use correlation (cor
) to find the features that have a positive
relationship with the sale price. These will be the most important
features relative to the sale price, as they become more prominent with
an increasing sale price.
q)corr:desc raze {enlist[x]!enlist train.SalePrice cor ?[train;();();x]}
each exec c from meta[train] where not t="s"
q)10#`SalePrice _ corr / Top 10 most relevant features
OverallQual | 0.8192401
AllSF | 0.8172716
AllFlrsSF | 0.729421
GrLivArea | 0.7188441
SimplOverallQual| 0.7079335
ExterQual | 0.6809463
GarageCars | 0.6804076
TotalBath | 0.6729288
KitchenQual | 0.6671735
GarageScore | 0.6568215
Polynomials on the top ten existing features¶
Create new polynomial features from the top ten most relevant features. These mathematically derived features will improve the model by increasing flexibility. Polynomial regression is used as it describes the relationship between the data and sale price most accurately.
polynom:{[t;c]
a:raze(!).'(
{`$string[x] ,/:("_2";"_3";"_sq")};
{((xexp;x;2);(xexp;x;3);(sqrt;x))}
)@\:/:c;
![t;();0b;a]}
train:polynom[train;key 10#`SalePrice _ corr]
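As an illustration (the single-column call below is not part of the paper's pipeline), polynom adds a squared, a cubed and a square-root variant of each listed column:
q)cols polynom[([] OverallQual:1 2 3f); enlist `OverallQual]
`OverallQual`OverallQual_2`OverallQual_3`OverallQual_sq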
Handling categorical and numerical features separately¶
Split the dataset into numerical features (minus the sale price) and categorical features.
.feat.categorical:?[train;();0b;]{x!x}
exec c from meta[train] where t="s"
.feat.numerical:?[train;();0b;]{x!x}
(exec c from meta[train] where not t="s") except `SalePrice
Numerical features¶
Fill nulls with the median value of the column:
![`.feat.numerical;();0b;{x!{(^;(med;x);x)}each x}cols .feat.numerical]
Outliers in the numerical features are assumed to have a skewness of >0.5. These are log-transformed to reduce their impact:
skew:.p.import[`scipy.stats;`:skew] / import Python skew function
skewness:{skew[x]`}each flip .feat.numerical
@[`.feat.numerical;where abs[skewness]>0.5;{log[1+x]}]
Categorical features¶
Create dummy features via one-hot encoding, then join with numerical results for complete dataset.
oneHot:{[pvt;t;clm]
t:?[t;();0b;{x!x}enlist[clm]];
prePvt:![t;();0b;`name`true!(($;enlist`;((/:;,);string[clm],"_";($:;clm)));1)];
pvtCol:asc exec distinct name from prePvt;
pvtTab:0^?[prePvt;();{x!x}enlist[clm];(#;`pvtCol;(!;`name;`true))];
pvtRes:![t lj pvtTab;();0b;enlist clm];$[()~pvt;pvtRes;pvt,'pvtRes]}
train:.feat.numerical,'()oneHot[;.feat.categorical;]/cols .feat.categorical
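The effect of one-hot encoding can be seen on a toy column (the table and names below are illustrative only):
t:([] col:`a`b`a`c)                            / toy symbol column
d:asc exec distinct col from t                 / `a`b`c
flip (`$"col_",/:string d)!"j"$t[`col]=/:d     / three 0/1 dummy columns: col_a, col_b, col_c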
Modeling¶
Splitting data¶
Partition the dataset into training sets and test sets by extracting random rows. The training set will be used to fit the model, and the test set will be used to provide an unbiased evaluation of the final model.
trainIdx:-1019?exec i from train // training indices
X_train:train[trainIdx]
yTrain:y[trainIdx]
X_test:train[(exec i from train) except trainIdx]
yTest:y[(exec i from train) except trainIdx]
Standardize numerical features¶
Standardization is done after the partitioning of training and test sets to apply the standard scalar independently across both. This is done to produce more standardized coefficients from the numerical features.
stdSc:{(x-avg x) % dev x}
@[`X_train;;stdSc] each cols .feat.numerical
@[`X_test;;stdSc] each cols .feat.numerical
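As a quick check, a vector with mean 5 and (population) standard deviation √5 is mapped to:
q)stdSc 2 4 6 8f
-1.341641 -0.4472136 0.4472136 1.341641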
Transform kdb+ tables into Python-readable matrices¶
xTrain:flip value flip X_train
xTest:flip value flip X_test
Analysis using embedPy¶
This section analyses the data using several Python libraries from inside the q process: pandas, NumPy, sklearn and Matplotlib.
Import Python libraries¶
pd:.p.import`pandas
np:.p.import`numpy
cross_val_score:.p.import[`sklearn.model_selection;`:cross_val_score]
qLassoCV:.p.import[`sklearn.linear_model;`:LassoCV]
Train the LASSO model¶
Create NumPy arrays of kdb+ data:
arrayTrainX:np[`:array][0^xTrain]
arrayTrainY:np[`:array][yTrain]
arrayTestX: np[`:array][0^xTest]
arrayTestY: np[`:array][yTest]
Use pykw
to set alphas, maximum iterations, and cross-validation generator.
qLassoCV:qLassoCV[
`alphas pykw (.0001 .0003 .0006 .001 .003 .006 .01 .03 .06 .1 .3 .6 1);
`max_iter pykw 50000;
`cv pykw 10;
`tol pykw 0.1]
Fit linear model using training data, and determine the amount of
penalization chosen by cross-validation (sum of absolute values of
coefficients). This is defined as alpha
, and is expected to be close to
zero given LASSO’s shrinkage method:
q)qLassoCV[`:fit][arrayTrainX;arrayTrainY]
q)alpha:qLassoCV[`:alpha_]`
q)alpha
0.01
Define the error measure for official scoring: Mean squared error (MSE)¶
The MSE is commonly used to analyze the performance of statistical
models utilizing linear regression. It measures the accuracy of the
model and is capable of indicating whether removing some explanatory
variables is possible without impairing the model’s predictions. A value
of zero measures perfect accuracy from a model. The MSE will measure the
difference between the values predicted by the model and the values
actually observed. MSE scoring is set in the qLassoCV
function using
pykw
, which allows individual keywords to be specified.
crossValScore:.p.import[`sklearn;`:model_selection;`:cross_val_score]
mseCV:{crossValScore[qLassoCV;x;y;`scoring pykw `neg_mean_squared_error]`}
The average of the MSE results shows that there are relatively small error measurements from this model.
q)avg mseCV[np[`:array][0^xTrain];np[`:array][yTrain]]
-0.1498252
Find the most important coefficients¶
q)impCoef:desc cols[train]!qLassoCV[`:coef_]`
q)count where value[impCoef]=0
284
q)(5#impCoef),-5#impCoef
TotalBsmtSF | 0.05086288
GrLivArea | 0.02549123
OverallCond | 0.02080637
TotalBath_sq| 0.01637903
PavedDrive | 0.01415822
LandSlope | -0.009958406
BsmtFinType2| -0.01045939
KitchenAbvGr| -0.01527232
Street | -0.01618361
LotShape | -0.02050625
As seen above, LASSO eliminated 284 features, and therefore only used
one-tenth of the features. The most influential coefficients show that
LASSO gives higher weight to the overall size and condition of the
house, as well as some land and street characteristics, which
intuitively makes sense. The total square foot of the basement area
(TotalBsmtSF
) has a large positive impact on the sale price, which seems
unintuitive, but could be correlated to the overall size of the house.
Prediction results¶
lassoTest:qLassoCV[`:predict][arrayTestX]
The image lassopred.png
illustrates the predicted results, plotted on a scatter graph using Matplotlib:
qplt:.p.import[`matplotlib.pyplot];
ptrain:qLassoCV[`:predict][arrayTrainX];
ptest: qLassoCV[`:predict][arrayTestX];
qplt[`:scatter]
[ptrain`;
yTrain;
`c pykw "blue";
`marker pykw "s";
`label pykw "Training Data"];
qplt[`:scatter]
[ptest`;
yTest;
`c pykw "lightgreen";
`marker pykw "s";
`label pykw "Validation Testing Data"];
qplt[`:title]"Linear regression with Lasso regularization";
qplt[`:xlabel]"Predicted values";
qplt[`:ylabel]"Real values";
qplt[`:legend]`loc pykw "upper left";
bounds:({floor min x};{ceiling max x})@\:/:raze
each((ptrain`;ptest`);(yTrain;yTest));
bounds:4#bounds first idesc{abs x-y}./:bounds;
qplt[`:axis]bounds;
qplt[`:savefig]"lassopred.png";
Conclusion¶
In this white paper, we have shown how easily embedPy allows q to
communicate with Python and its vast range of libraries and packages. We
saw how machine-learning libraries can be coupled with q’s high-speed
analytics to significantly enhance the application of solutions across
big data stored in kdb+. Python’s keywords can be easily communicated
across functions using the powerful pykw
, and visualization tools such
as Matplotlib can create useful graphics of kdb+ datasets and analytics
results. Also demonstrated was how q’s vector-oriented nature is optimal
for cleaning and pre-processing big datasets.
This interface is useful across many institutions that are developing in both languages, allowing for the best features of both technologies to fuse into a powerful tool. Further machine-learning techniques powered by kdb+ can be found under Featured Resources at kx.com/machine-learning.
Author¶
Samantha (Gallagher) Devlin has worked on implementing kdb+ solutions for financial institutions globally.
================================================================================
C/C++ quick guide¶
Use Cases¶
There are three cases in which to use the C API for kdb+:
- Dynamically-loaded library called by q, e.g. OS, math, analytics. Using C functions
- Dynamically-loaded library doing callbacks into q, e.g. feedhandlers (e.g. Bloomberg client)
- C/C++ clients talking to kdb+ servers (standalone applications), e.g. feedhandlers and clients. Links with
c.o
/c.dll
.
Two sets of files¶
To minimize dependencies for existing projects, there are now two sets of files available.
The e
set of files, those with SSL/TLS support, contain all the functionality of the c
files.
Do not link with both c
and e
files; just choose one set.
Linux¶
| capability | dependencies | 32-bit | 64-bit |
|---|---|---|---|
| no SSL/TLS | | l32/c.o | l64/c.o l64arm/c.o |
| SSL/TLS | OpenSSL | l32/e.o | l64/e.o l64arm/e.o |
macOS¶
| capability | dependencies | 32-bit | 64-bit |
|---|---|---|---|
| no SSL/TLS | | m32/c.o (Intel) | m64/c.o (Intel and ARM) |
| SSL/TLS | OpenSSL | m32/e.o (Intel) | m64/e.o (Intel and ARM) |
Windows¶
c.lib
is a stub library which loads c.dll
and resolves the functions dynamically; e.lib
does the same for e.dll
.
We no longer ship c.obj
or cst.obj
; they have been replaced by c_static.lib
and cst_static.lib
, and are complemented by e_static.lib
and est_static.lib
– these static libraries have no dependency on the aforementioned DLLs.
cst
continues to represent ‘single-threaded’ apps, those which on Windows have issues due to the LoadLibrary
API.
Overview¶
The best way to understand the underpinnings of q, and to interact with it from C, is to start with the header file available from KxSystems/kdb/c/c/k.h.
This is the file you will need to include in your C or C++ code to interact with q from a low level.
Let’s explore the basic types and their synonyms that you will commonly encounter when programming at this level. First though, it is worth noting the size of data types in 32- versus 64-bit operating systems to avoid a common mistake.
To provide succinct composable names, the q header defines synonyms for the common types as in the following table:
| type | synonym |
|---|---|
| 16-bit int | H |
| 32-bit int | I |
| 64-bit int | J |
| char* | S |
| unsigned char | G |
| char | C |
| 32-bit float | E |
| 64-bit double | F |
| void | V |
With this basic knowledge, we can now tackle the types available in q and their matching C types and accessor functions provided in the C interface. We will see shortly how the accessor functions are used in practice.
| q type name | q type number | encoded type name | C type | size in bytes | interface list accessor function |
|---|---|---|---|---|---|
| mixed list | 0 | - | K | - | kK |
| boolean | 1 | KB | char | 1 | kG |
| guid | 2 | UU | U | 16 | kU |
| byte | 4 | KG | char | 1 | kG |
| short | 5 | KH | short | 2 | kH |
| int | 6 | KI | int | 4 | kI |
| long | 7 | KJ | int64_t | 8 | kJ |
| real | 8 | KE | float | 4 | kE |
| float | 9 | KF | double | 8 | kF |
| char | 10 | KC | char | 1 | kC |
| symbol | 11 | KS | char* | 4 or 8 | kS |
| timestamp | 12 | KP | int64_t | 8 | kJ |
| month | 13 | KM | int | 4 | kI |
| date | 14 | KD | int | 4 | kI (days from 2000.01.01) |
| datetime | 15 | KZ | double | 8 | kF (days from 2000.01.01) |
| timespan | 16 | KN | int64_t | 8 | kJ (nanoseconds) |
| minute | 17 | KU | int | 4 | kI |
| second | 18 | KV | int | 4 | kI |
| time | 19 | KT | int | 4 | kI (milliseconds) |
| table/flip | 98 | XT | - | - | x->k |
| dict/table with primary keys | 99 | XD | - | - | kK(x)[0] for keys and kK(x)[1] for values |
| error | -128 | - | char* | 4 or 8 | x->s |
Note that the type numbers given are for vectors of that type. For example, 9 for vectors of the q type float. By convention, the negative value is an atom: -9 is the type of an atom float value.
The K object structure¶
The q types are all encapsulated at the C level as K objects.
(Recall that k is the low-level language underlying the q language.)
K objects are all instances of the following structure (note this is technically defining K objects as pointers to the k0
structure but we’ll conflate the terms and refer to K objects as the actual instance).
- for V3.0 and later
typedef struct k0{
signed char m,a; // m,a are for internal use.
signed char t; // The object's type
C u; // The object's attribute flags
I r; // The object's reference count
union{
// The atoms are held in the following members:
G g;H h;I i;J j;E e;F f;S s;
// The following members are used for more complex data.
struct k0*k;
struct{
J n; // number of elements in vector
G G0[1];
};
};
}*K;
- prior to V3.0 it is defined as
typedef struct k0 {
I r; // The object's reference count
H t, u; // The object's type and attribute flags
union { // The data payload is contained within this union.
// The atoms are held in the following members:
G g;H h;I i;J j;E e;F f;S s;
// The following members are used for more complex data.
struct k0*k;
struct {
I n; // number of elements in vector
G G0[1];
};
};
}*K;
As an exercise, it is instructive to count the minimum and the maximum number of bytes a K object can use on your system, taking into account any padding or alignment constraints.
Given a K object x
, we can use the accessors noted in the table above to access elements of the object.
For example, given a K object containing a vector of floats, we can access kF(x)[42]
to get the 42nd element of the vector.
For accessing atoms, use the following accessors:
| type | accessor | additional types |
|---|---|---|
| byte | x->g | boolean, char |
| short | x->h | |
| int | x->i | month, date, minute, second, time |
| long | x->j | timestamp, timespan |
| real | x->e | |
| float | x->f | datetime |
| symbol | x->s | error |
Changes in V3.0
The k struct changed with the release of V3.0, and if you are compiling using the C library (c.o/c.dll) stamped on or after 2012.06.25 you should ensure you use the correct k struct by defining KXVER accordingly, e.g.
gcc -D KXVER=3 …
If you need to link against earlier releases of the C library, you can obtain those files from the earlier version of 2011.04.20.
Examining K objects¶
Whether you know beforehand the type of the K objects, or you are writing a function to work with different types, it is useful to dispatch based on the type flag x->t
for a given K object x
.
Where x->t
is:
- negative, the object is an atom, and we should use the atom accessors noted above.
- greater than zero, we use the vector accessors as all the elements are of the same type (eg.
x->t == KF
for a vector of q floats). - exactly zero, the K object contains a mixed list of other K objects.
Each item in the list is a pointer to another K object.
To access each item of
x
we use the kK
object accessor. For example: kK(x)[42]
to access the 42nd element of the mixed list.
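A minimal sketch of this dispatch (the describe helper below is illustrative, not part of the C API):
#include "k.h"
#include <stdio.h>
// Illustrative sketch: decide how to read a K object by inspecting x->t.
void describe(K x){
    if(x->t < 0)                              // atom: use the atom accessors, e.g. x->j for a long
        printf("atom of type %d\n", x->t);
    else if(x->t == KF && x->n > 0)           // simple vector: all items share one type
        printf("float vector, first item %f\n", kF(x)[0]);
    else if(x->t == 0){                       // mixed list: each item is itself a K object
        J i;
        for(i = 0; i < x->n; i++)
            describe(kK(x)[i]);
    }
}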
Nulls and infinities¶
The next table provides the null and infinite immediate values for the q types. These are constants defined in k.h.
| type | null | infinity |
|---|---|---|
| short | 0xFFFF8000 (nh) | 0x7FFF (wh) |
| int | 0x80000000 (ni) | 0x7FFFFFFF (wi) |
| long | 0x8000000000000000 (nj) | 0x7FFFFFFFFFFFFFFF (wj) |
| float | log(-1.0) on Windows or (0/0.0) on Linux (nf) | -log(0.0) in Windows or (1/0.0) on Linux (wf) |
Null objects can be created using ks(""),kh(nh),ki(ni),kj(nj),kc(" ")
, etc. A null guid can be created with U g={0};ku(g);
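For example, a null int atom can be created and tested against ni (a minimal sketch):
K x = ki(ni);                                  // null int atom
if(x->i == ni) printf("x holds a null int\n");
r0(x);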
Managing memory and reference counting¶
Although memory in q is managed for the programmer implicitly, when interfacing from C or C++ we must (as is usual in those languages) manage memory explicitly. The following functions are provided to interface with the q memory manager.
| purpose | function |
|---|---|
| Increment the object‘s reference count | r1(K) |
| Decrement the object‘s reference count | r0(K) |
| Free up memory allocated for the thread‘s pool | m9() |
| Set whether interning symbols uses a lock | setm(I) |
A reference count indicates the usage of an object, allowing the same object to be used by more than one piece of code.
If you create a K object through one of the ‘generator’ functions (ki
, kj
, knk
, etc), you automatically have a reference to that object.
Once you have finished using that object, you should call r0
.
r0(ki(5));
creates and immediately destroys an integer object.
Initialize the kdb+ memory system
Before calling any 'generator' functions in a standalone application, you must initialize the kdb+ internal memory system. (It is done automatically when you open a connection to other kdb+ processes.) Without making a connection, use khp("",-1);
In the case of a function being called from q
K myfunc(K x)
{
return ki(5);
}
the object is returned to q, and q will eventually decrement the reference count.
In this scenario, the arg x
from q is passed to the C function. If it is to be returned to q, the reference count must be incremented with r1
.
K myfunc(K x)
{
return r1(x);
}
It is vital to increment and decrement when adding or removing references to values that should be managed by the q runtime, to avoid memory leaks or access faults due to double frees.
Note that K objects must be freed from the thread they are allocated within, and m9()
should be called when the thread is about to complete, freeing up memory allocated for that thread's pool.
Furthermore, to allow symbols to be created in other threads, setm(1)
should be called from the main thread before any other threads are started.
When a K object is created, it usually has a reference count of 0 – exceptions are common constants such as (::)
which may vary in their current reference count, as they may be used by other areas of the C API library or q.
If r0
happens to be passed a K object with a reference count of 0, that object’s memory is freed (returned to an internal pool).
Be aware that if a reference count is >0, you should very likely not change the data stored in that object as it is being referenced by another piece of code which may not expect the change.
In this case, create a new copy of the object, and change that.
If in doubt, the current reference count can be seen in C with
printf("Reference count for x is %d\n",x->r);
and in q with
-16!x
The function k
, as in
K r=k(handle,"functionname",params,(K)0);
requires a little more explanation.
If the handle is
- ≥0, it is a generator function, and can return 0 (indicating a network error) or a pointer to a k object.
If that object has type -128, it indicates an error, accessible as a null-terminated string in
r->s
. When you have finished using this object, it should be freed by callingr0(r)
. - <0, this is for async messaging, and the return value can be either 0 (network error) or non-zero (success). This result should not be passed to
r0
.
K objects passed as parameters to the k
function call have their reference counts decremented automatically on the return from that call.
(To continue to use the object later in that C function, after the k
call, increment the reference count before the call.)
K r=k(handle,"functionname",r1(param),(K)0);
Creating atom values¶
To create atom values the following functions are available. Function ka
creates an atom of the given type, and the rest create an atom with the given value:
| purpose | call |
|---|---|
| Create an atom of type | K ka(I); |
| Create a boolean | K kb(I); |
| Create a guid | K ku(U); |
| Create a byte | K kg(I); |
| Create a short | K kh(I); |
| Create an int | K ki(I); |
| Create a long | K kj(J); |
| Create a real | K ke(F); |
| Create a float | K kf(F); |
| Create a char | K kc(I); |
| Create a symbol | K ks(S); |
| Create a timestamp | K ktj(-KP,J); |
| Create a time | K kt(I); |
| Create a date | K kd(I); |
| Create a timespan | K ktj(-KN,J); |
| Create a datetime | K kz(F); |
An example of creating an atom:
K z = ka(-KI);
z->i = 42;
Equivalently:
K z = ki(42);
Creating lists¶
To create
- a simple list
K ktn(I type,J length);
- a mixed list
K knk(I n,...);
where length
is a non-negative, non-null integer.
Limit of length
Before V3.0, length had to be in the range 0…2147483647, and was of type I. See KXVER sections in k.h.
For example, to create an integer list of 5 we say ktn(KI,5)
. A mixed list of 5 items can be created with ktn(0,5)
but note that each element must be initialized before further usage.
A convenient shortcut to creating a mixed list when all items already exist at the creation of the list is to use knk
, e.g. knk(2,kf(2.3),ktn(KI,10))
.
As we've noted, the type of a mixed list is 0, and the elements are pointers to other K objects – hence it is mandatory to initialize those n elements either via knk
params, or explicitly setting each item when created with ktn(0,n)
.
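For example (a minimal sketch), a mixed list created with ktn(0,n) can be populated slot by slot:
K mixed = ktn(0, 3);            // mixed list of 3 items; every slot must be set before use
kK(mixed)[0] = ki(1);           // int atom
kK(mixed)[1] = kf(2.5);         // float atom
kK(mixed)[2] = kp((S)"three");  // char vector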
To join
- an atom to a list:
K ja(K*,V*);
- a string to a list:
K js(K*,S);
- another K object to a list:
K jk(K*,K);
- another K list to the first:
K jv(K*,K);
The join functions assume there are no other references to the list, as the list may need to be reallocated during the call.
In case of reallocation, the passed K* pointer will be updated to refer to the new K object, which is also returned from the function.
K x=ki(42);
K list=ktn(0,0);
jk(&list,x); // append a k object to a list
K vector=ktn(KI,0);
int i=2;
ja(&vector,&i); // append a primitive int to an int vector
K syms=ktn(KS,0);
S sym=ss("IBM");
js(&syms,sym); // append an interned symbol to a symbol vector
K more=ktn(KS,2);
kS(more)[0]=ss("INTC");
kS(more)[1]=ss("GOOG");
jv(&syms,more); // append a vector with two symbols to syms
Strings and datetimes¶
Strings and datetimes are special cases and extra utility functions are provided:
| purpose | function |
|---|---|
| Create a char array from string | K kp(string); |
| Create a char array from string of length n | K kpn(string, n); |
| Intern a string | S ss(string); |
| Intern n chars from a string | S sn(string,n); |
| Convert q date to yyyymmdd integer | I dj(date); |
| Encode a year/month/day as q date (0==ymd(2000,1,1)) | I ymd(year,month,day); |
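A quick worked example (values follow from the 2000.01.01 epoch):
I d = ymd(2020, 1, 1);   // 7305, days from 2000.01.01
I n = dj(d);             // 20200101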
Recall that Unix time is the number of seconds since 1970.01.01D00:00:00
while q time types have an epoch of 2000.01.01D00:00:00
.
q)`long$`timestamp$2000.01.01
0
q)`int$2000.01.01
0i
Utilities to convert between Unix and q temporal types may be defined as below.
F zu(I u){return u/8.64e4-10957;} // kdb+ datetime from unix
I uz(F f){return 86400*(f+10957);} // unix from kdb+ datetime
J pu(J u){return 1000000LL*(u-10957LL*86400000LL);} // kdb+ timestamp from unix, use ktj(Kj,n) to create timestamp from n
I up(J f){return (f/8.64e13+10957)*8.64e4;} // unix from kdb+ timestamp
struct tm* lt(int kd) { time_t t = uz(kd); return localtime(&t); }
struct tm* lt_r(int kd, struct tm* res) { time_t t = uz(kd); return localtime_r(&t, res); }
struct tm* gt(int kd) { time_t t = uz(kd); return gmtime(&t); }
struct tm* gt_r(int kd, struct tm* res) { time_t t = uz(kd); return gmtime_r(&t, res); }
char* fdt(struct tm* ptm, char* d) { strftime(d, 10, "%Y.%m.%d", ptm); return d; }
void tsms(unsigned ts,char*h,char*m,char*s,short*mmm) {*h=ts/3600000;ts-=3600000*(*h);*m=ts/60000;ts-=60000*(*m);*s=ts/1000;ts-=1000*(*s);*mmm=ts;}
char* ftsms(unsigned ts, char* d){char h, m, s; short mmm; tsms(ts, &h, &m, &s, &mmm); sprintf(d, "%02d:%02d:%02d.%03d", h, m, s, mmm); return d;}
What’s the difference between a symbol and a char vector?¶
A symbol is a pointer to a location in an internal map of strings; that is, symbols are interned zero-terminated strings. In contrast, a char vector is similar to an int vector and is instead a counted K vector as usual.
When a symbol is created it is automatically interned and stored in the internal map of strings.
K someSymbol = ks("some symbol"); // "some symbol" is placed into internal map
K nullSymbol = ks("");
When storing strings in a symbol vector, they should be interned manually using the ss function, e.g.
kS(v)[i] = ss("some symbol");
Creating dictionaries and tables¶
To create
- a dict:
K xD(K,K);
- a table from a dict:
K xT(K);
- a simple table from a keyed table:
K ktd(K);
- a keyed table:
K knt(J,K);
A dictionary is a K object of type 99. It contains a list of two K objects; the keys and the values. We can use kK(x)[0]
and kK(x)[1]
to get these contained data.
A simple table (a ‘flip’) is a K object of type 98. In terms of the K object, this is an atom that points to a dictionary. This means that to access the columns we can use the kK(x->k)[0]
accessor and the kK(x->k)[1]
for the values.
A keyed table is a dictionary where keys and values are both simple tables. A keyed table has type 99.
The following example shows the steps to create a keyed table:
K maketable(){
K c,d,e,v,key,val;
/* table of primary keys */
c=ktn(KS,1);kS(c)[0]=ss("sid");
d=ktn(KS,3);kS(d)[0]=ss("ibm");kS(d)[1]=ss("gte");kS(d)[2]=ss("kvm");
v=knk(1,d);
key=xT(xD(c,v));
/* table of values */
c=ktn(KS,2);kS(c)[0]=ss("amt");kS(c)[1]=ss("date");
d=ktn(KI,3);kI(d)[0]=100;kI(d)[1]=300;kI(d)[2]=200;
e=ktn(KD,3);kI(e)[0]=2;kI(e)[1]=3;kI(e)[2]=5;
v=knk(2,d,e);
val=xT(xD(c,v));
return xD(key,val);
}
Although we can thus access the data using the accessors already introduced, you may find it easier to first convert it to a simple table before manipulating it in C.
// Get a keyed table
K x = maketable();
// Convert the result to a simple table.
K y=ktd(x);
/*
Note that if the ktd conversion fails for any reason,
it returns 0 and x is not freed.
since 2011-01-27, ktd always decrements ref count of input.
*/
if (!y)
printf("x is still a keyed table because the conversion failed.");
else
printf("y is a simple table and x has been deallocated.");
Connecting to a q server¶
We use the int khpu(host, port,username)
function to connect to a q server.
Note you must call khpu
before generating any q data, and the very first call to khpu
must not be concurrent to other khpu
calls.
To initialize memory without making a connection, use khp("",-1);
It is highly recommended to use khpu
and supply a meaningful username, as this will help server administrators identify a user’s connection.
The khp
,khpu
, khpun
and khpunc
functions are for use in stand-alone applications only; they are not for use within a q server via a shared library. Hence, to avoid potential confusion, these functions have been removed from more recent releases of q.
A timeout can be specified with function khpun
.
int c=khpun("localhost",1234,"myname:mypassword",1000); // timeout in mS
Return values for khp
/khpu
/khpun
are:
>0 - active handle
0 - authentication error
-1 - error
-2 - timeout(khpun case)
Note that with the release of c.o
with V2.6, c.o
now tracks the connection type (pre-V2.6, or V2.6+). Hence to close the connection you must call kclose
(instead of close
or closeSocket
) – this will clean up the connection tracking and close the socket.
The k
function is used to send messages over the connection. If a positive handle is used then the call is synchronous, otherwise it is an asynchronous call.
// Connect to a q server on the localhost port 1234.
int c = khpu("localhost", 1234,"myusername:mypassword");
if(c<=0) {perror("Connection error");return;}
K r = k(-c,"a:2+2",(K)0); // Asynchronously set a to be 4 on the server.
r = k(c,"b:til 1000000",(K)0); // Synchronously set b to be a list up to 1000000.
r = k(c,(S)0); // Read incoming data (blocking call)
Note that the object returned from an async set call must not be passed to r0
.
There is no timeout argument for the k(handle,…,(K)0)
call, but you can use socket timeouts as described below.
Unix domain sockets¶
A Unix domain socket may be requested via the IP address 0.0.0.0
, e.g.
int handle=khpu("0.0.0.0",5000,"user:password");
SSL/TLS¶
To use this feature, you must link with one of the e
libs.
Encrypted connections may be requested via the capability parameter of the new khpunc
function, e.g.
extern I khpunc(S hostname,I port,S usernamepassword,I timeout,I capability);
// capability is a bit field (1 - 1TB limit, 2 - use TLS)
int handle=khpunc("remote host",5000,"user:password",timeout,2);
There’s an additional return value for TLS connections, -3
, which indicates the openssl init
failed. This can be checked via
extern K sslInfo(K x); // returns an error if init fails, or a dict of settings similar to -26!x
if(handle==-3){
K x=ee(sslInfo((K)0));
printf("Init error %s\n",xt==-128?x->s:"unknown");
r0(x);
}
The lib is sensitive to the same environment variables as kdb+, noted at Knowledge Base: SSL/TLS.
Using khpunc
for SSL/TLS connections can be used from the initialization thread only, see SSL/TLS thread support for more details.
The OpenSSL libs are loaded dynamically, the first time a TLS connection is requested. It may be forced on startup with
int h=khpunc("",-1,"",0,2); // remember to test the return value for -3
Socket timeouts¶
There are a number of reasons not to specify or implement timeouts. Typically these will be hit at the least convenient of times when under load from e.g. a sudden increase in trading volumes. Cascading timeouts can rapidly bring systems down and/or waste server resources. But if you are convinced they are the only solution for your problem scenario, the following code may help you. (Note that in the event of a timeout, you must close the connection.)
#if defined(_WIN32) || defined(__WIN32__)
V sst(I d,I sendTimeout,I recvTimeout){
setsockopt(d,SOL_SOCKET,SO_SNDTIMEO,(char*)&sendTimeout,sizeof(I));
setsockopt(d,SOL_SOCKET,SO_RCVTIMEO,(char*)&recvTimeout,sizeof(I));}
#else
V sst(I d,I sendTimeout,I recvTimeout){
struct timeval tv;tv.tv_sec=sendTimeout/1000;tv.tv_usec=1000*(sendTimeout%1000);
setsockopt(d,SOL_SOCKET,SO_SNDTIMEO,(char*)&tv,sizeof(tv));
tv.tv_sec=recvTimeout/1000;tv.tv_usec=1000*(recvTimeout%1000);
setsockopt(d,SOL_SOCKET,SO_RCVTIMEO,(char*)&tv,sizeof(tv));}
#endif
// usage
int c=khpun("localhost",1234,"myname:mypassword",1000); // connect timeout 1000mS
if(c>0) sst(c,30000,45000); // timeout sends with 30s, receives with 45s
Bulk transfers¶
A kdb+tick feed handler can send one record at a time, like this
I kdbSocketHandle = khpu("localhost", 5010, "username");
if (kdbSocketHandle > 0)
{
K row = knk(3, ks((S)"ibm"), kf(93.5), ki(300));
K r = k(-kdbSocketHandle, ".u.upd", ks((S)"trade"), row, (K)0);
if(!r) { perror("network error"); return;}
kclose(kdbSocketHandle);
}
or send multiple records at a time:
int n = 100;
S sid[] = {"ibm","gte","kvm"};
K x = knk(3, ktn(KS, n), ktn(KF, n), ktn(KI, n));
for(int i=0; i<n ; i++) {
kS(kK(x)[0])[i] = ss(sid[i%3]);
kF(kK(x)[1])[i] = 0.1*i;
kI(kK(x)[2])[i] = i;
}
K r = k(-kdbSocketHandle, ".u.upd", ks((S)"trade"), x, (K)0);
if(!r) perror("network");
This example assumes rows with three fields: symbol, price and size.
Error signaling and catching¶
Note the two different directions of error flow below.
- To signal an error from your C code to kdb+ use the function krr(S). A utility function orr(S) can be used to signal system errors. It is similar to krr(S), but it appends a system error message to the user-provided string before passing it to krr.
- To catch an error code from the results of a call to r=k(h, …), check the return value and type. If the result is NULL, then a network error has occurred. If it has type -128, then r->s will point to the error string. Note that a K object with type -128 acts as a marker only and other uses are not supported (i.e. passing it to other C API or kdb+ functions).
K r=k(handle, "f", arg1, arg2, (K)0);
if(r && -128==r->t)
printf("error string: %s\n", r->s);
Under some network-error scenarios, errno can be used to obtain the details of the error, e.g. perror("network");
Return values¶
If your C function, called from q, has nothing to return to q, it can return (K)0.
K doSomething(K x)
{
// do something with x;
return (K)0;
}
From a standalone C application, it can sometimes be convenient to return the identity function (::). This atom can be created with
K identity(){
K id=ka(101);
id->g=0;
return id;
}
Callbacks¶
The void sd0(I) and K sd1(I, K(*)(I)) functions are for use with callbacks and are available only within q itself, i.e. used from a shared library loaded into q.
The value of the file descriptor passed to sd1 must be 0 < fd < 1024, and 1021 happens to be the maximum number of supported connections (recalling 0, 1, 2 are used for stdin, stdout and stderr).
sd1(d,f); puts the function K f(I d){…} on the q main event loop given a socket d (or -d for non-blocking). The function f should return (K)0 or a pointer to a K object, and its reference count will be decremented.
sd0(d);
sd0x(d,1);
Each of the above calls removes the callback on d and calls kclose(d). sd0x(I d,I f) was introduced in V3.0 2013.04.04: its second argument indicates whether to call kclose(d).
On Linux, eventfd can be used with sd1 and sd0. Given a file efd.c
// compile with
// gcc -shared -m64 -DKXVER=3 efd.c -o efd.so -fPIC
// or
// g++ -shared -m64 -DKXVER=3 efd.cpp -o efd.so -fPIC
#include<stdio.h>
#include<sys/eventfd.h>
#include<unistd.h>
#include"k.h"
#ifdef __cplusplus
extern"C"{
#endif
K callback(I d){K r;J a;R -1!=read(d,&a,8)?r=k(0,(S)"onCallback",ki(d),kj(a),(K)0),r->t==-128?krr(r->s),r0(r),(K)0:r:(sd0(d),orr((S)"read"));}
K newFd(K x){I d;R x->t!=-KJ?krr((S)"type"):(d=eventfd(x->j,0))==-1?orr((S)"eventfd"):sd1(d,callback);}
K writeFd(K x,K y){R x->t!=-KI||y->t!=-KJ?krr((S)"type"):-1!=write(x->i,&y->j,8)?0:(sd0(x->i),orr((S)"write"));}
#ifdef __cplusplus
}
#endif
and combined with appropriate q code
q)newFd:(`$"./efd")2:(`newFd;1)
q)writeFd:(`$"./efd")2:(`writeFd;2)
q)fd:newFd 0 / arg is start value of eventfd counter
q)onCallback:{0N!(x;y)}
q)writeFd[fd;3] / increments the eventfd counter by 3, triggering the callback later
This demonstrates the deferred invocation of onCallback until q has at least finished processing the current handle or script.
In situations where you can’t hook a feedhandler’s callbacks directly into sd1, on Linux eventfd may be a viable option for you.
Callbacks from sd1 are executed on the main thread of q.
Windows developers may be interested in ncm/selectable-socketpair.
Callbacks from sd1 are executed on the main thread of q, in the handle context (.z.w) of the registered handle, and hence are also subject to permissions checks:
- read-only (Command-line option -b)
- access-controlled path (Command-line option -u)
- reval
Serialization and deserialization¶
The K b9(I,K) and K d9(K) functions serialize and deserialize K objects.
b9 will generate a K byte vector that contains the serialized data. Since V3.0, for shared libraries loaded into q the value for mode must be -1. For standalone applications binding with c.o/c.dll, or shared libraries prior to V3.0, the values for mode can be viewed here.
d9 will deserialize the provided byte stream, returning a new K object. The byte stream passed to d9 is not altered in any way.
If you are concerned that the byte vector that you wish to deserialize may be corrupted, call okx to verify it is well formed first.
unsigned char bytes[]={0x01,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0xf5,0x68,0x65,0x6c,0x6c,0x6f,0x00}; // -8!`hello
K r,x=ktn(KG,sizeof(bytes));
memcpy(kG(x),bytes,x->n);
int ok=okx(x);   // verify the byte vector is well formed before deserializing
if(ok){
r=d9(x);
r0(x);
}
else
perror("bad data");
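For reference, the byte literal above is exactly what q itself produces for -8!`hello; a quick sanity check from a q session (not part of the C example) is:
q)-8!`hello
0x010000000f000000f568656c6c6f00
q)-9!-8!`hello       / round-trip deserialization
`hello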
Miscellaneous¶
The K dot(K x, K y) function is the same as the q function .[x;y].
q).[{x+y};(1 2;3 4)]
4 6
The dynamic-link function K dl(V* f, I n) takes a C function that would take n K objects as arguments and return a new K object, and returns a q function. It is useful, for example, to expose more than one function from an extension module.
#include "k.h"
Z K1(f1){R r1(x);}
Z K2(f2){R r1(y);}
K1(lib){
K y=ktn(0,2);
x=ktn(KS,2);
xS[0]=ss("f1");
xS[1]=ss("f2");
kK(y)[0]=dl(f1,1);
kK(y)[1]=dl(f2,2);
R xD(x,y);
}
Alternatively, for simpler editing of your lib API:
#define sdl(f,n) (js(&x,ss(#f)),jk(&y,dl(f,n)))
K1(lib){
K y=ktn(0,0);
x=ktn(KS,0);
sdl(f1,1);
sdl(f2,2);
R xD(x,y);
}
With the above compiled into lib.so:
q).lib:(`:lib 2:(`lib;1))`
q).lib.f1 42
42
q).lib.f2 . 42 43
43
Debugging with gdb¶
It can be a struggle printing q values from a debugger, but you can call the handy k.h macros in gdb, like xt, xC, xK, …
If your client is a shared library, you might get away with p k(0,"show",r1(x),(K)0)
GDB Manual: §12. C Preprocessor Macros
Now, we compile the program using the GNU C compiler, gcc. We pass the -gdwarf-2 and -g3 flags to ensure the compiler includes information about preprocessor macros in the debugging information.
$ gcc -gdwarf-2 -g3 sample.c -o sample
$
Now, we start gdb
on our sample program:
$ gdb -nw sample
GNU gdb 2002-05-06-cvs
Copyright 2002 Free Software Foundation, Inc.
GDB is free software, ...
(gdb)
And all you need is
gcc -g3 client.c -o client
gdb ./client
… get signal, go up stack frame with up:
Thread 1 "sdl" received signal SIGSEGV, Segmentation fault.
0x000000000040711b in nx ()
(gdb) up
#1 0x0000000000407411 in nx ()
(gdb)
#2 0x0000000000407411 in nx ()
(gdb)
#3 0x0000000000408a15 in b9 ()
(gdb)
#4 0x0000000000409ac2 in ww ()
(gdb)
#5 0x0000000000409d33 in k ()
(gdb)
#6 0x000000000040410d in main (n=1, v=0x7fffffffdf68) at sdl.c:108
108 }else if(e.type==SDL_USEREVENT){K x=e.user.data1;A(!xt);A(xn==2);k(-c,"{value[x]y}",xK[0]->s,xK[1],(K)0);}
Now use k.h macros!
(gdb) p xt
$20 = 0 '\000'
(gdb) p xn
$21 = 2
so it’s a q list. Show two elements:
(gdb) p xK[0]->t
$23 = -11 '\365'
(gdb) p xK[0]->s
$24 = (S) 0x2078b98 `"blink"
(gdb) p xK[1]->t
$25 = -7 '\371'
(gdb) p xK[1]->j
$27 = 0
which is a bit easier than:
(gdb) p *(((K*)(x->G0))[0])
$14 = {m = 0 '\000', a = 1 '\001', t = -11 '\365', u = 0 '\000', r = 0, {g = 152 '\230', h = -29800, i = 34048920, j = 34048920,
e = 9.95829503e-38, f = 1.6822401649996936e-316, s = 0x2078b98 "blink", k = 0x2078b98, {n = 34048920, G0 = ""}}}
(gdb) p *(((K*)(x->G0))[1])
$13 = {m = 0 '\000', a = 0 '\000', t = -7 '\371', u = 0 '\000', r = 0, {g = 0 '\000', h = 0, i = 0, j = 0, e = 0, f = 0, s = 0x0, k = 0x0,
{n = 0, G0 = "\002"}}}
Windows and the LoadLibrary API¶
The q multithreaded C library (c.dll) uses static thread-local storage (TLS), and is incompatible with the LoadLibrary Win32 API.
If you are writing an Excel plugin, this point is relevant to you, as loading of the plugin uses this mechanism.
Microsoft Knowledge Base: PRB: Calling LoadLibrary() to Load a DLL That Has Static TLS
When trying to use the library, the problem manifests itself as a crash during the khpu() call.
Hence KX also provides at KxSystems/kdb a single-threaded version of this library as w32/cst.dll and w64/cst.dll, which do not use TLS.
To use this library:
- download cst.dll and cst.lib
- rename them to c.dll/c.lib
- relink and ensure that c.dll is in your path
If in doubt whether the c.dll you have uses TLS, run
dumpbin /EXPORTS c.dll
and look for a .tls entry under the summary section. If it is present, the library uses TLS and is the wrong one to link with for use with Excel add-ins.
...
Summary
4000 .data
1000 .rdata
1000 .reloc
1000 .rsrc
7000 .text
1000 .tls
Troubleshooting: loading a library¶
In some cases 2: may fail because of missing dependencies. Sadly, OS error messages are not always helpful. You can check dependencies using the methods described at qt.io.
Example¶
KxSystems/cookbook/c/csv.c – CSV export example in C
|
// if version is correct, gettablerow is iterated over each datapacket, extracting data
data:1_ last each {[filebinary] // initial x and binary starting numbers removed from array list to make table
filebytesize: count filebinary;
// x here is a list containing starting binary point for packet (x[0]), the most recent win scaling factor (x[1]) and row of data for that packet (x[2])
gettablerow[filebinary;]\[{y>(first x[0])+40}[;filebytesize];(),0,1]
} read1 file;
data: update sym:` from data;
`time`sym xcols data
}
// returns starting point of next packet and row data
gettablerow:{[filebinary;x] // data for a single row
// x is a list containing starting binary point for packet (x[0]), the most recent win scaling factor (x[1]) and row of data for that packet (x[2])
time: gettime[filebinary;x];
protocol: getprotocol[filebinary;x];
ips: getips[filebinary;x];
flags: getflags[filebinary;x];
winscalefactor: $[`SYN in flags;"i"$ 2 xexp filebinary[globheader+packetheader+x[0]+75];x[1]];
info: getinfo[filebinary;x;flags;winscalefactor];
totallength: 0x0 sv datafromfile[filebinary;x;18;2];
IPheader: 4*"J"$last string filebinary[x[0]+globheader+packetheader+16];
TCPheader: 4* first "0123456789abcdef"?/:string filebinary[x[0]+globheader+packetheader+48];
len: (first first totallength - IPheader + TCPheader) mod 65536;
length: (0x0 sv reverse filebinary[x[0]+36 37]) mod 65536;
data: datafromfile[filebinary;x;length - len;len];
// array containing starting point for next byte and dictionary of data for current packet
(x[0] + length + 16;winscalefactor;`time xcols ips,info,`time`flags`protocol`length`len`data!(time;flags;protocol;length;len;data))
}
gettime:{[filebinary;x]
first linuxtokdbtime ("iiii";4 4 4 4)1: packetheader#(globheader+x[0]) _ filebinary
}
linuxtokdbtime:{[time]
// converts time in global header to nanoseconds then accounts for difference in epoch dates in kdb and linux
// time[0] is in seconds, time[1] is microseconds offset to time[0]
"p"$1000*time[1]+1000000*time[0]-10957*86400
}
datafromfile:{[filebinary;x;start;numofbytes]
numofbytes#(globheader+packetheader+x[0]+start) _ filebinary
}
getflags:{[filebinary;x]
// flag data stored at 49th byte
bools: 2 vs filebinary[globheader+packetheader+x[0]+49];
`CWR`ECE`URG`ACK`PSH`RST`SYN`FIN where ((8 - count bools)#0), bools
}
getprotocol:{[filebinary;x]
// code number is stored at 25th byte of packet
code: "i"$filebinary[globheader+packetheader+x[0]+25];
protocol: $[code in key allcodes; allcodes[code]; code]
}
getinfo:{[filebinary;x;flags;windowscale]
// grabs multiple sets of data starting at 36th byte
elements: first each ((2 2 4 4 2 2 8 4 4;"hhii h ii")1: datafromfile[filebinary;x;36;32]);
// elements must be less than the max of their respective types, so mod needs to be applied
elements[0 1 4]: elements[0 1 4] mod 65536; // 65536 = max of unsigned short + 1
elements[2 3 5 6]: elements[2 3 5 6] mod 4294967296; // 4294967296 = max of unsigned int + 1
//multiplies window by window scaling from most recent SYN packet
if[not `SYN in flags;"j"$elements[4]:elements[4] * windowscale];
`srcport`destport`seq`ack`win`tsval`tsecr!elements
}
getips:{[filebinary;x]
// ip data starts at 28th byte
elements: `$"." sv ' string 4 cut "i"$datafromfile[filebinary;x;28;8];
`src`dest!elements
}
================================================================================
FILE: TorQ_code_gateway_daqrest.q
SIZE: 1,667 characters
================================================================================
\d .dataaccess
// .gw.formatresponse:{[status;sync;result]$[not[status]and sync;'result;result]}};
//Gets the json and converts to input dict before executing .dataaccess.getdata on the input
qrest:{
// Set the response type
.gw.formatresponse:{[status;sync;result] $[sync and not status; 'result; `status`result!(status;result)]};
// Run the function
:getdata jsontodict x};
// Converts json payload to .dataaccess input dictionary
jsontodict:{
// convert the input to a dictionary
dict:.j.k x;
k:key dict;
// Change the type of `tablename`instruments`grouping`columns to symbols
dict:@[dict;`tablename`instruments`grouping`columns inter k;{`$x}];
// Change the Type of `start/end time to timestamps (altering T -> D and - -> . if applicable)
dict:@[dict;`starttime`endtime inter k;{x:ssr[x;"T";"D"];x:ssr[x;"-";"."];value x}];
// retrieve aggregations
if[`aggregations in k;dict[`aggregations]:value dict[`aggregations]];
// Convert timebar
if[`timebar in k;dict[`timebar]:@[value dict[`timebar];1+til 2;{:`$x}]];
// Convert the filters key
if [`filters in k;dict[`filters]:filterskey dict`filters];
//output
:dict};
quotefinder:{y[2#where y>x]}
filterskey:{[filtersstrings]
likelist:ss[filtersstrings;"like"];
if[0=count likelist; :value filtersstrings];
// Get the location of all the backticks
apostlist:ss[filtersstrings;"'"];
// Get the location of all the likes
swaplist:raze {y[2#where y>x]}[;apostlist] each likelist;
// Swap the ' to "
filtersstrings:@[filtersstrings;swaplist;:;"\""];
// Convert the string to a dict
:value filtersstrings
};
================================================================================
FILE: TorQ_code_gateway_dataaccess.q
SIZE: 10,611 characters
================================================================================
\d .dataaccess
forceservers:0b;
// dictionary containing aggregate functions needed to calculate map-reducable
// values over multiple processes
aggadjust:(!). flip(
(`avg; {flip(`sum`count;2#x)});
(`cor; {flip(`wsum`count`sum`sum`sumsq`sumsq;@[x;(enlist(0;1);0;0;1;0;1)])});
(`count; `);
(`cov; {flip(`wsum`count`sum`sum;@[x;(enlist(0;1);0;0;1)])});
(`dev; {flip(`sumsq`count`sum;3#x)});
(`distinct;`);
(`first; `);
(`last; `);
(`max; `);
(`min; `);
(`prd; `);
(`sum; `);
(`var; {flip(`sumsq`count`sum;3#x)});
(`wavg; {flip(`wsum`sum;(enlist(x 0;x 1);x 0))});
(`wsum; {enlist(`wsum;enlist(x 0;x 1))}));
// function to make symbols strings with an upper case first letter
camel:{$[11h~type x;@[;0;upper]each string x;@[string x;0;upper]]};
// function that creates aggregation where X(X1,X2)=X(X(X1),X(X2)) where X is
// the aggregation and X1 and X2 are non overlapping subsets of a list
absagg:{enlist[`$x,y]!enlist(value x;`$x,y)};
// functions to calculate avg, cov and var in mapaggregate dictionary
avgf:{(%;(sum;`$"sum",x);scx y)};
covf:{(-;(%;swsum[x;y];scx x);(*;avgf[x;x];avgf[y;x]))};
varf:{(-;(%;(sum;`$"sumsq",y);scx x);(xexp;avgf[y;x];2))};
// functions to sum counts and wsums in mapaggregate dictionary
scx:{(sum;`$"count",x)};
swsum:{(sum;`$"wsum",x,y)}
// dictionary containing the functions needed to aggregate results together for
// map reducable aggregations
mapaggregate:(!). flip(
(`avg; {enlist[`$"avg",x]!enlist(%;(sum;`$"sum",x);scx x)});
(`cor; {enlist[`$"cor",x,w]!enlist(%;covf[x;w];(*;(sqrt;varf[x;x]);(sqrt;varf[(x:x 0);w:x 1])))});
(`count; {enlist[`$"count",x]!enlist scx x});
(`cov; {enlist[`$"cov",x,w]!enlist covf[x:x 0;w:x 1]});
(`dev; {enlist[`$"dev",x]!enlist(sqrt;varf[x;x])});
(`first; {enlist[`$"first",x]!enlist(*:;`$"first",x)});
(`last; {absagg["last";x]});
(`max; {absagg["max";x]});
(`min; {absagg["min";x]});
(`prd; {absagg["prd";x]});
(`sum; {absagg["sum";x]});
(`var; {enlist[`$"var",x]!enlist varf[x;x]});
(`wavg; {enlist[`$"wavg",x,w]!enlist(%;swsum[x:x 0;w:x 1];(sum;`$"sum",x))});
(`wsum; {enlist[`$"wsum",x,w]!enlist swsum[x:x 0;w:x 1]}));
// function to convert sorting
go:{if[`asc=x[0];:(xasc;x[1])];:(xdesc;x[1])};
// Full generality dataaccess function in the gateway
getdata:{[o]
// Input checking in the gateway
reqno:.requests.initlogger[o];
o:@[.checkinputs.checkinputs;o;.requests.error[reqno]];
// Get the Procs
if[not `procs in key o;o[`procs]:attributesrouting[o;partdict[o]]];
// Get Default process behavior
default:`timeout`postback`sublist`getquery`queryoptimisation`postprocessing!(0Wn;();0W;0b;1b;{:x;});
// Use upserting logic to determine behaviour
options:default,o;
if[`ordering in key o;options[`ordering]: go each options`ordering];
o:adjustqueries[o;partdict o];
options[`mapreduce]:0b;
gr:$[`grouping in key options;options`grouping;`];
if[`aggregations in key options;
if[all key[options`aggregations]in key aggadjust;
options[`mapreduce]:not`date in gr]];
// Execute the queries
if[options`getquery;
$[.gw.call .z.w;
:.gw.syncexec[(`.dataaccess.buildquery;o);options[`procs]];
:.gw.asyncexec[(`.dataaccess.buildquery;o);options[`procs]]]];
:$[.gw.call .z.w;
// if sync
.gw.syncexecjt[(`getdata;o);options[`procs];autojoin[options];options[`timeout]];
// if async
.gw.asyncexecjpt[(`getdata;o);options[`procs];autojoin[options];options[`postback];options[`timeout]]];
};
// join results together if from multiple processes
autojoin:{[options]
// if there is only one proc queried output the table
if[1=count options`procs;:first];
// if there is no need for map reducable adjustment, return razed results
:$[not options`mapreduce;razeresults[options;];mapreduceres[options;]];
};
// raze results and call process res to apply postprocessing and sublist
razeresults:{[options;res]
res:raze res;
processres[options;res]
};
//apply sublist and post processing to joined results
processres:{[options;res]
res:(options`postprocessing)res;
:$[(options`sublist)<>0W;(options`sublist) sublist res;res];
};
|
kdb+tick profiling for throughput optimization¶
kdb+ is seen as the technology of choice for many of the world’s top financial institutions when implementing a tick-capture system. kdb+ is capable of processing large amounts of data in a very short space of time, making it the ideal technology for dealing with the ever-increasing volumes of financial tick data. The core of a kdb+ tick capture system is the tickerplant.
KX’s source code for kdb+tick will form the basis of this paper. The purpose of this white paper is to discuss factors which influence messaging and throughput performance for a kdb+-based tick-capture system and to present a methodology with which this performance can be profiled to assist in optimizing the tick system configuration.
Some of the possible factors are:
- Number of rows in each update
- Size of the data in bytes
- Tickerplant publish frequency
- Number of subscribers
- Network latency and bandwidth
- Disk write speed
- TCP/IP tuning
- Version of kdb+
This paper examines the first four of these. All tests were performed on 64-bit Linux with eight CPUs, using kdb+ version 3.1 (2014.02.08).
Starting kdb+: Tick
Your mileage will vary
The results presented here are indicative. They will vary for each individual kdb+ system, especially when hardware specifications are taken into consideration.
Setup¶
We will run a feed simulator which will publish trade data to a tickerplant (TP) on a timer. The trade table has the following schema:
trade:([] time:"P"$(); sym:`g#"S"$(); price:"F"$(); size:"I"$(); cond:())
We will also run an RDB (real-time database) which subscribes to the tickerplant for trade messages and inserts into an in-memory table. All the processes will run on the same server and we will use taskset
to tie each process to a different CPU, e.g.
taskset -c 0 q tp.q
taskset -c 1 q rdb.q
taskset -c 2 q feedsim.q
Feed simulator code¶
/ connect to tickerplant
h:hopen 8099
/ number of extra columns to add for test 3.2
ex:0
/ number of rows to send in each update
r:10
/ number of updates to send per millisecond
u:1
/ timer frequency
t:1
/ timer function, sends data to the tickerplant
.z.ts:{
data:(r#.z.p;r?`3;100*r?1.0;10*r?100;r#" ");
if[ex>0; data,:ex#enlist r#1f];
if[r=1;data:first each data];
do[u;neg[h](`upd;`trade;data);neg[h][]]; }
system"t ",string t
/ stop sending data if connection to tickerplant is lost
.z.pc:{if[x=h; system"t 0"];}
Tickerplant code¶
/ listen on port 8099
\p 8099
/ dictionary to contain handles to publish to
subs:enlist[`trade]!()
/ function to subscribe to a table
sub:{[t] subs[t],:neg .z.w;}
/ remove subscriber if connection is lost
.z.pc:{subs::subs except \: neg x;}
/ create empty log file
logFile:`$":sym",string .z.D
logFile set ()
numMsgs:0
fileHandle:hopen logFile
/ write data to logFile and publish to subscribers, called by the feed sim
upd:{[t;x]
tm1:.z.p;
fileHandle@enlist(`upd;t;x);
numMsgs+:1;
tplog,:0.001*.z.p-tm1;
tm2:.z.p;
subs[t]@\:(`upd;t;x;tm2);
tppub,:0.001*.z.p-tm2; }
The tickerplant described above differs from a standard tickerplant in a number of ways. A standard tickerplant would:
- Check to see if the log file already exists on startup and then read the number of messages it contains
- The sub function could be enhanced to handle subscribing for certain syms only
- The upd function could add a time to the data if it is not present
- The upd function would only send (`upd;t;x) to the subscribers
- At end of day (EOD) it would send a message to any subscribers and roll the log file
In the tickerplant described above, we are capturing some extra timing metrics which will allow us to profile the messaging and throughput statistics for the tickerplant. These are described below in Tests.
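As a rough illustration only (this is not part of the test harness), the first and last of those standard behaviors might be sketched as follows, reusing the logFile, numMsgs, fileHandle and subs globals defined above; the init and endOfDay names and the .u.end message are assumptions borrowed from standard kdb+tick conventions:
/ sketch: initialize the log file the way a standard tickerplant would
init:{
  if[not type key logFile; logFile set ()];   / create the log only if it does not already exist
  numMsgs::-11!(-2;logFile);                  / count messages already present (assumes an uncorrupted log)
  fileHandle::hopen logFile }
/ sketch: end of day: notify subscribers, then roll the log file
endOfDay:{
  (distinct raze value subs)@\:(`.u.end;.z.D);
  hclose fileHandle;
  logFile::`$":sym",string .z.D;
  logFile set ();
  numMsgs::0;
  fileHandle::hopen logFile }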
RDB code¶
/ define trade table
trade:([]time:"P"$();sym:`g#"S"$();price:"F"$();size:"I"$();cond:())
/ number of extra columns to add for test 3.2
ex:0
if[ex>0; trade[`$"col",/:string til ex]:ex#enlist"F"$()]
/ define upd to insert to the table, called by tickerplant
upd:{[t;x;tm2]
tm3:.z.p;
insert[t;x];
tm4:.z.p;
rdbrecv,:0.001*tm3-tm2;
rdbupd,:0.001*tm4-tm3; }
/ connect to tickerplant and subscribe for trades
h:hopen 8099; h(`sub;`trade)
Again, the RDB described here differs from a standard RDB. A standard RDB would:
- Replay the tickerplant log file on startup to get trades from earlier in the day
- upd would only take 2 arguments: the table name and the data
- There would be an EOD function defined which would write intra-day tables to disk and then empty the tables
Similarly to the TP code, the code for the RDB records some timing metrics to measure throughput on the RDB. These are described next.
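Again purely for illustration (not used in the tests), log replay and end of day on a standard RDB could look like the sketch below. The two-argument upd, the log-file path and the `:hdb destination are assumptions for the sketch; .Q.dpft is the standard utility for writing a partitioned table:
/ sketch: replay the tickerplant log on startup
logFile:`$":sym",string .z.D                   / assumed path of today's tickerplant log
upd:insert                                     / plain 2-argument upd while replaying (the profiling upd above takes 3)
-11!logFile                                    / executes every (`upd;t;x) message stored in the log
/ sketch: end of day: write the day's trades to disk, then empty the table
.u.end:{[d]
  .Q.dpft[`:hdb;d;`sym;`trade];                / splay, enumerate and apply `p#sym; `:hdb is an assumed destination
  delete from `trade; }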
Tests¶
For each test we will vary certain parameters in the feed simulator code and record the median values for tplog, tppub, rdbrecv and rdbupd in the code above, where:
| variable | value |
|---|---|
| tplog | median time in microseconds for the tickerplant to write the data to the log file |
| tppub | median time in microseconds for the tickerplant to publish the data to the subscribers |
| rdbrecv | median time in microseconds for the RDB to receive the message from the tickerplant |
| rdbupd | median time in microseconds for the RDB to insert the data |
We will also record
| variable | value |
|---|---|
| Rows per upd | number of rows received by the tickerplant per update |
| Rows per sec | number of rows received by the tickerplant per second |
| TP CPU | CPU usage of the tickerplant seen using top (with the timing code in the upd function removed) |
| RDB CPU | CPU usage of the RDB seen using top (with the timing code in the upd function removed) |
Testing specifications:
- All tests were run on 64-bit Linux with eight CPUs, using kdb+ 3.1 2014.02.08.
- The tickerplant is writing to local disk and the write speed is 400MB/s.
- All the processes run on the same server and are run using taskset. Note that kdb+ does not implement IPC compression when publishing to localhost.
- Each test is run until the median times stop changing, which takes roughly 1 or 2 minutes.
Number of rows in each update¶
In this test we will vary the number of rows sent in each update by changing the values of r (number of rows per update), u (number of updates per timer frequency) and t (timer frequency in milliseconds) in the feed simulator code. The number of rows received by the tickerplant per second is calculated as:
Rows per second = r * u * 1000 % t
r u t rps | tplog tppub tpcpu rdbrecv rdbupd rdbcpu
----------------------|----------------------------------------
1 10 1 10,000 | 14 3 31% 71 4 12%
10 1 1 10,000 | 19 4 6% 80 10 2%
100 1 10 10,000 | 35 7 1% 106 46 1%
1 30 1 30,000 | 13 3 92% 80 4 24%
10 3 1 30,000 | 16 4 4% 85 7 3%
100 3 10 30,000 | 30 6 1% 99 44 1%
10 10 1 100,000 | 15 4 32% 82 7 17%
100 1 1 100,000 | 32 6 6% 103 46 4%
1000 1 10 100,000 | 121 23 2% 224 378 3%
100 5 1 500,000 | 28 6 32% 105 42 22%
Table 1: Microseconds taken and percent CPU by rows per update
As seen in Table 1, the results mainly depend on the rows per upd r
argument and not the other two arguments. When publishing the data in single rows (rows per upd = 1) we can only achieve about 30,000 rows per second before the TP CPU usage approaches 100%. If the data is in 10 rows per update we can handle over 100,000 rows per second.
Significant time is saved when processing 10 rows at a time. In fact, it takes only a little more time than processing one row. This goes to the heart of q: in a vector-based language bulk operations are more efficient than multiple single operations.
rows per upd (r) 1 10 100
tplog 13 17 31
tppub 3 4 6
rdbrecv 75 90 103
rdbupd 4 8 44
Table 2: Microseconds taken by rows per update
From these results we can conclude that feeds should read as many messages off the socket as possible and send bulk updates to the tickerplant if possible.
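A minimal sketch of such a feed handler (illustrative only; the buffer, addRow and flush names are not from the paper) could buffer incoming rows and send them to the tickerplant in bulk on a timer:
/ buffer rows read off the source socket and publish them in bulk
h:hopen 8099                                   / tickerplant handle
buf:()                                         / pending rows
addRow:{[row] buf::buf,enlist row}             / call once per message read from the feed
flush:{
  if[count buf;
    neg[h](`upd;`trade;flip buf);              / one bulk update instead of many single-row updates
    buf::()];
  neg[h][] }
.z.ts:{flush[]}
\t 10                                          / flush every 10 ms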
Size of row in bytes¶
In this test we will vary the number of columns in the trade table by changing ex (number of extra columns) in the feedsim and RDB code.
- The times for ex=0 have been found in the previous section.
- For each update message, we determine the size using count -8!, as shown in the snippet below.
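For example, the serialized size of one update message can be checked from a q session like this (illustrative; the random data mirrors the feed simulator):
data:(10#.z.p;10?`3;100*10?1.0;"i"$10*10?100;10#" ")   / one bulk update of 10 rows
count -8!(`upd;`trade;data)                            / size of the serialized message in bytes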
ex r rps | tplog tppub tpcpu rdbrecv rdbupd rdbcpu bytes
----------------|--------------------------------------------------
0 1 10,000 | 14 3 31% 82 4 12% 73
10 1 10,000 | 14 4 34% 87 6 16% 213
50 1 10,000 | 17 5 43% 79 12 38% 773
0 10 100,000 | 15 4 32% 86 7 17% 334
10 10 100,000 | 18 5 40% 78 8 19% 1,194
150 10 100,000 | 33 7 60% 92 14 40% 4,634
0 100 100,000 | 28 6 6% 106 41 4% 2,944
10 100 100,000 | 50 8 13% 187 46 8% 11,004
50 100 100,000 | 141 16 28% 336 61 23% 43,244
Table 3: Microseconds taken and percent CPU by size of row
Above, rps is rows per second, and bytes is the size of the update in bytes.
We can see the tplog, tppub and rdbupd times increase as the number of columns increases, as we would expect. The increase is more noticeable for large bulk updates: if there is only one row per update then adding 10 columns to the published data only increases the CPU usage of the tickerplant by 10%.
Publish frequency¶
In this test we will alter the tickerplant behavior so that it publishes to subscribers on a timer. Specifically, we will examine the following three scenarios:
- The tickerplant writes each update to disk individually and publishes each update to subscribers as soon as it is received (standard zero-latency tickerplant)
- The tickerplant writes each update to disk individually but publishes to subscribers on a timer
- The tickerplant both writes to disk and publishes to subscribers on a timer
By batching up updates we can reduce the load on the tickerplant and the RDB. However, this will also result in a delay to RDB receiving the data. Of course, publishing on a timer is not suitable if the subscribers need the data immediately.
We need to make the following adjustments to the tickerplant code in order to publish on a timer:
/ define trade table
trade:([]time:"P"$();sym:`g#"S"$();price:"F"$();size:"I"$();cond:())
/ write to disk and insert to the local table
upd:{[t;x]
tm:.z.p;
fileHandle@enlist(`upd;t;x);
numMsgs+:1;
insert[t;x];
tpupd,:0.001*.z.p-tm; }
/ publish the data in the local table and clear
.z.ts:{
tm:.z.p;
{[t]
if[0=count value t; :()];
subs[t]@\:(`upd;t;value t); .[t;();0#];
} each enlist`trade;
tpflush,:0.001*.z.p-tm; }
/ run .z.ts every 100 milliseconds
\t 100
To further reduce the load we will buffer the messages and only write to the on-disk log file on a timer as well. However, it should be noted that in the event of a tickerplant going down, more data will be lost if the data is being logged on a timer than if the data is being written on every update.
In order to write updates to disk on a timer, we need to make the following changes to the tickerplant code:
/ define trade table
trade:([]time:"P"$();sym:`g#"S"$();price:"F"$();size:"I"$();cond:())
/ insert to the local table
upd:{[t;x]
tm:.z.p;
insert[t;x];
tpupd,:0.001*.z.p-tm; }
/ publish the data in the local table, write to disk and clear
.z.ts:{
tm:.z.p;
{[t]
if[0=count value t; :()];
subs[t]@\:(`upd;t;value t);
fileHandle@(`upd;t;value t);
numMsgs+:1;
.[t;();0#];
} each enlist`trade;
tpflush,:0.001*.z.p-tm; }
/ run .z.ts every 100 milliseconds
\t 100
Rows per rows per timer pub on write on | tpupd tpflush TP CPU rdbupd RDB
upd sec freq timer timer | CPU
--------------------------------------------|------------------------------------
1 10,000 0 N N | 13 0 31% 3 12%
1 10,000 100 Y N | 13 36 22% 258 0.1%
1 10,000 100 Y Y | 3 169 9% 273 0.1%
Table 4: Microseconds taken and percent CPU by publish frequency
Where
| column | value |
|---|---|
| timer freq | frequency the timer in the tickerplant is run in milliseconds |
| tpupd | median time in microseconds to run upd in the tickerplant |
| tpflush | median time in microseconds to run the timer (.z.ts ) in the tickerplant |
In Table 4 above we can see that when publishing on the timer, the tickerplant upd
function still takes roughly the same time as in zero-latency mode, but we are only publishing data 10 times a second which reduces the overall load: the TP CPU usage has decreased from 31% to 22%. The RDB CPU usage decreases from 12% to 0.1% as it is only doing 10 bulk updates per second instead of 10,000 single updates per second. Writing to disk only 10 times a second reduces the load on the tickerplant further. The improvements will be greater the more updates the tickerplant receives per second.
Number of subscribers¶
In this test, we will examine how the number of processes that are subscribing to a tickerplant’s data can affect the throughput of the tickerplant. We will run multiple subscribers/RDBs and see the effect on the tickerplant and RDB receive time. We will collect the following metrics:
| variable | value |
|---|---|
| last rdbrecv | median time in microseconds for the last RDB in the tickerplant subscription list (subs dictionary) to receive the message from the tickerplant |
| first rdbrecv | median time in microseconds for the first RDB in the tickerplant subscription list (subs dictionary) to receive the message from the tickerplant |
| num subs | number of subscribers |
rows per upds per timer rows per num | tppub first last TP
upd timer freq sec subs | rdbrecv rdbrecv CPU
-------------------------------------------|------------------------------
1 1 1 1,000 1 | 3 85 85 3%
1 1 1 1,000 3 | 4 172 178 4%
1 1 1 1,000 5 | 6 148 296 6%
1 1 1 1,000 10 | 10 265 343 10%
10 1 1 10,000 1 | 3 88 88 3%
10 1 1 10,000 3 | 5 175 181 5%
10 1 1 10,000 5 | 6 155 318 7%
10 1 1 10,000 10 | 11 224 540 12%
100 1 1 100,000 1 | 21 97 97 6%
100 1 1 100,000 3 | 58 177 324 10%
100 1 1 100,000 5 | 95 257 330 15%
100 1 1 100,000 10 | 185 449 682 30%
Table 5: Microseconds taken by number of subscribers
We can see that increasing the number of subscribers increases the tickerplant publish time, first RDB receive time and last RDB receive time. The first RDB receive time increases because the data is written to each internal message queue and the queues are not flushed until the tickerplant publish function returns.
If there are multiple subscribers to a tickerplant it might be worth considering a chained tickerplant to reduce the number of subscribers. Only the chained tickerplant and the subscribers which need the data as quickly as possible would subscribe to the main tickerplant. Then other subscribers would subscribe to the chained tickerplant to get the data.
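A chained tickerplant can be sketched in a few lines (illustrative only; note that the profiling tickerplant above publishes an extra timestamp argument, so the upd here assumes the standard 3-element (`upd;t;x) message):
/ chained tickerplant: subscribe to the main tickerplant, re-publish to local subscribers
\p 8199
subs:enlist[`trade]!()
sub:{[t] subs[t],:neg .z.w;}
.z.pc:{subs::subs except\:neg x;}
upd:{[t;x] subs[t]@\:(`upd;t;x);}              / fan each update out to the chained subscribers
h:hopen 8099; h(`sub;`trade)                   / register with the main tickerplant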
Conclusion¶
This white paper examined some key factors which can influence the performance and throughput of a kdb+ tickerplant. We established a methodology to profile the performance of a kdb+ tickerplant and used it to focus on four key areas which can affect the tickerplant’s throughput:
- Number of rows per update: Feeds should read as many messages off the socket as possible and send bulk updates to the tickerplant if possible. Bulk updates will greatly increase the maximum throughput achievable. For example, we found processing a bulk update of 10 messages took only slightly longer than processing an update of 1 message, and hence the CPU usage was nearly 10 times lower for the same number of messages per second.
- Size of each update in bytes: Reducing the size of the data can improve throughput, as writing to disk and sending the data will be faster. The improvement is more noticeable on bulk updates.
- Publish frequency: Buffering the messages in the tickerplant and publishing on a timer improves throughput. The CPU usage of the RDB also decreases as the data is being batched up into fewer updates per second. However, it is not suitable if the subscribers need the data immediately. Writing the messages to disk on a timer improves throughput further, but more data could be lost if the tickerplant dies. The improvement is more noticeable as the number of updates per second increases.
- Number of subscribers: Adding more subscribers increases the load on the tickerplant. To reduce this, consider using a chained tickerplant. Even existing subscribers will be affected if more subscribers are added, since the internal message queues for each subscriber are all written to before the queues are flushed.
These are not the only factors that contribute to a tickerplant’s performance. A complete analysis would also examine the effects that network latency and bandwidth, disk write-speed and TCP/IP tuning have on tickerplant performance.
The results shown here are not representative of kdb+ systems as a whole. Results for each individual kdb+ system will vary due to various hardware and software considerations. However, the code and methodology used in this paper could serve as a starting point for any developers wishing to profile and optimize their own systems.
All tests were run using kdb+ version 3.1 (2014.02.08)
Author¶
Ian Kilpatrick has worked on several kdb+ systems. Based in Belfast, Ian is a technical architect for high-performance data-management, event-processing and trading platforms.
|
// @private
// @kind function
// @category featureDescriptionUtility
// @desc Generate a list of functions to be applied to the dataset for
// non-numeric data
// @param typ {fn} A function returning as its argument the name to be
// associated with the rows being described
// @return {fn[]} List of functions to be applied to relevant data
featureDescription.i.nonNumeric:{[typ]
(count;{count distinct x};{};{};{};{};typ)
}
================================================================================
FILE: ml_automl_code_nodes_featureSignificance_featureSignificance.q
SIZE: 1,246 characters
================================================================================
// code/nodes/featureSignificance/featureSignificance.q - Feature Significance
// Copyright (c) 2021 Kx Systems Inc
//
// Apply feature significance logic to data post feature extraction, returning
// the original dataset and a list of significant features to be used both
// for selection of data from new runs and within the current run.
\d .automl
// @kind function
// @category node
// @desc Apply feature significance logic to data post feature
// extraction
// @param config {dictionary} Information related to the current run of AutoML
// @param features {table} Feature data as a table
// @param target {number[]} Numerical vector containing target data
// @return {dictionary} List of significant features and the feature data post
// feature extraction
featureSignificance.node.function:{[config;features;target]
sigFeats:featureSignificance.applySigFunc[config;features;target];
config[`logFunc]utils.printDict[`totalFeat],string count sigFeats;
sigFeats:featureSignificance.correlationCols sigFeats#features;
`sigFeats`features!(sigFeats;features)
}
// Input information
featureSignificance.node.inputs:`config`features`target!"!+F"
// Output information
featureSignificance.node.outputs:`sigFeats`features!"S+"
================================================================================
FILE: ml_automl_code_nodes_featureSignificance_funcs.q
SIZE: 2,433 characters
================================================================================
// code/nodes/featureSignificance/funcs.q - Feature significance functions
// Copyright (c) 2021 Kx Systems Inc
//
// Definitions of the main callable functions used in the application of
// .automl.featureSignificance
\d .automl
// @kind function
// @category featureSignificance
// @desc Extract feature significance tests and apply to feature data
// @param config {dictionary} Information related to the current run of AutoML
// @param features {table} Feature data as a table
// @param target {number[]} Numerical vector containing target data
// @return {symbol[]} Significant features or error if function does not exist
featureSignificance.applySigFunc:{[config;features;target]
sigFunc:utils.qpyFuncSearch config`significantFeatures;
sigFunc[features;target]
}
// @kind function
// @category featureSignificance
// @desc Apply feature significance function to data post feature
// extraction
// @param features {table} Feature data as a table
// @param target {number[]} Numerical vector containing target data
// @return {symbol[]} Significant features
featureSignificance.significance:{[features;target]
BHTest:.ml.fresh.benjhoch .05;
percentile:.ml.fresh.percentile .25;
sigFeats:.ml.fresh.significantFeatures[features;target;BHTest];
if[0=count sigFeats;
sigFeats:.ml.fresh.significantFeatures[features;target;percentile]
];
sigFeats
}
// @kind function
// @category featureSignificance
// @desc Find any correlated columns and remove them
// @param sigFeats {table} Significant data features
// @return {symbol[]} Significant columns
featureSignificance.correlationCols:{[sigFeats]
thres:.95;
sigCols:cols sigFeats;
corrMat:abs .ml.corrMatrix sigFeats;
boolMat:t>\:t:til count first sigFeats;
sigCols:featureSignificance.threshVal[thres;sigCols]'[corrMat;boolMat];
raze distinct 1#'asc each key[sigCols],'value sigCols
}
// @kind function
// @category featureSignificance
// @desc Find any correlated columns within threshold
// @param thres {float} Threshold value to search within
// @param sigCols {symbol[]} Significant columns
// @param corr {float[]} Correlation values
// @param bool {float[]} Lower triangle booleans
// @return {symbol[]} Columns within threshold
featureSignificance.threshVal:{[thres;sigCols;corr;bool]
$[any thres<value[corr]idx:where bool;sigCols idx;()]
}
================================================================================
FILE: ml_automl_code_nodes_featureSignificance_init.q
SIZE: 281 characters
================================================================================
// code/nodes/featureSignificance/init.q - Load featureSignificance node
// Copyright (c) 2021 Kx Systems Inc
//
// Load code for featureSignificance node
\d .automl
loadfile`:code/nodes/featureSignificance/featureSignificance.q
loadfile`:code/nodes/featureSignificance/funcs.q
================================================================================
FILE: ml_automl_code_nodes_labelEncode_init.q
SIZE: 192 characters
================================================================================
// code/nodes/labelEncode/init.q - Load labelEncode node
// Copyright (c) 2021 Kx Systems Inc
//
// Load code for labelEncode node
\d .automl
loadfile`:code/nodes/labelEncode/labelEncode.q
================================================================================
FILE: ml_automl_code_nodes_labelEncode_labelEncode.q
SIZE: 883 characters
================================================================================
// code/nodes/labelEncode/labelEncode.q - Label encoding node
// Copyright (c) 2021 Kx Systems Inc
//
// Apply label encoding on symbolic data returning an encoded version of the
// data in this instance or the original dataset in the case that does not
// require this modification
\d .automl
// @kind function
// @category node
// @desc Encode target data if target is a symbol vector
// @param target {number[]|symbol[]} Numerical or symbol target vector
// @return {dictionary} Mapping between symbol encoding and encoded target data
labelEncode.node.function:{[target]
symMap:()!();
if[11h~type target;
encode:.ml.labelEncode.fit target;
symMap:encode`modelInfo;
target:encode[`transform] target
];
`symMap`target!(symMap;target)
}
// Input information
labelEncode.node.inputs:"F"
// Output information
labelEncode.node.outputs:`symMap`target!"!F"
================================================================================
FILE: ml_automl_code_nodes_modelGeneration_funcs.q
SIZE: 2,922 characters
================================================================================
// code/nodes/modelGeneration/funcs.q - Model generation functions
// Copyright (c) 2021 Kx Systems Inc
//
// Definitions of the main callable functions used in the application of
// .automl.modelGeneration
\d .automl
// @kind function
// @category modelGeneration
// @desc Extraction of an appropriately valued dictionary from a JSON
// file
// @param config {dictionary} Information relating to the current run of AutoML
// @return {table} Models extracted from JSON file
modelGeneration.jsonParse:{[config]
typ:$[`class~config`problemType;`classification;`regression];
modelPath:path,"/code/customization/models/modelConfig/models.json";
jsonPath:hsym`$modelPath;
// Read in JSON file and select models based on problem type
modelTab:.j.k[raze read0 jsonPath]typ;
// Convert to desired structure and convert all values to symbols
modelCols:`model`lib`fnc`seed`typ`apply;
modelTab:modelCols xcol([]model:key modelTab),'value modelTab;
// Convert seed to either `seed or (::)
seed:modelTab`seed;
toSeed:{@[x;y;:;z]}/[count[seed]#();(where;where not::)@\:seed;(`seed;::)];
modelTab:update seed:toSeed from modelTab;
// Convert rest of table to symbol values
modelTab:{![x;();0b;enlist[y]!enlist($;enlist`;y)]}/[modelTab;`lib`fnc`typ];
select from modelTab where apply
}
// @kind function
// @category modelGeneration
// @desc Extract appropriate models based on the problem type
// @param config {dictionary} Information relating to the current run of AutoML
// @param modelTab {table} Information on applicable models based on problem
// type
// @param target {number[]|symbol[]} Numerical or symbol target vector
// @return {table} Appropriate models based on target and problem type
modelGeneration.modelPrep:{[config;modelTab;target]
if[`class=config`problemType;
// For classification tasks remove inappropriate classification models
modelTab:$[2<count distinct target;
delete from modelTab where typ=`binary;
delete from modelTab where lib=`keras,typ=`multi
]
];
// Add a column with appropriate initialized models for each row
update minit:.automl.modelGeneration.modelFunc .'flip(lib;fnc;model)from
modelTab
}
// @kind function
// @category modelGeneration
// @desc Build up the model to be applied based on naming convention
// @param library {symbol} Library which forms the basis for the definition
// @param func {symbol} Function name if keras or module from which model is
// derived for non-keras models
// @param model {symbol} Model being applied within the library
// @return {<} Appropriate function or projection in the case of sklearn
modelGeneration.modelFunc:{[library;func;model]
$[library in key models;
get".automl.models.",string[library],".fitScore";
// Projection used for sklearn models eg '.p.import[`sklearn.svm][`:SVC]'
{[x;y;z].p.import[x]y}[` sv library,func;hsym model]
]
}
================================================================================
FILE: ml_automl_code_nodes_modelGeneration_init.q
SIZE: 257 characters
================================================================================
// code/nodes/modelGeneration/init.q - Load modelGeneration node
// Copyright (c) 2021 Kx Systems Inc
//
// Load code for modelGeneration node
\d .automl
loadfile`:code/nodes/modelGeneration/funcs.q
loadfile`:code/nodes/modelGeneration/modelGeneration.q
================================================================================
FILE: ml_automl_code_nodes_modelGeneration_modelGeneration.q
SIZE: 1,042 characters
================================================================================
// code/nodes/modelGeneration/modelGeneration.q - Model generation node
// Copyright (c) 2021 Kx Systems Inc
//
// Based on the problem type being solved and user defined configuration
// retrieve the full list of models which can be applied in the running of
// AutoML. The list of models to be run may be reduced following the
// processing of the data and splitting to comply with the model requirements
\d .automl
// @kind function
// @category node
// @desc Create table of appropriate models for the problem type being
// solved
// @param config {dictionary} Information related to the current run of AutoML
// @param target {number[]|symbol[]} Numerical or symbol target vector
// @return {table} Information needed to apply appropriate models to data
modelGeneration.node.function:{[config;target]
modelTable:modelGeneration.jsonParse config;
modelGeneration.modelPrep[config;modelTable;target]
}
// Input information
modelGeneration.node.inputs:`config`target!"!F"
// Output information
modelGeneration.node.outputs:"+"
================================================================================
FILE: ml_automl_code_nodes_optimizeModels_funcs.q
SIZE: 7,955 characters
================================================================================
// code/nodes/optimizeModels/funcs.q - Optimize models functions
// Copyright (c) 2021 Kx Systems Inc
//
// Definitions of the main callable functions used in the application of
// .automl.optimizeModels
\d .automl
// @kind function
// @category optimizeModels
// @desc Optimize models using hyperparmeter search procedures if
// appropriate, otherwise predict on test data
// @param modelDict {dictionary} Data related to model retrieval and various
// configuration associated with a run
// @param modelInfo {table} Information about models applied to the data
// @param bestModel {<} Fitted best model
// @param config {dictionary} Information relating to the current run of AutoML
// @return {dictionary} Score, prediction and best model
optimizeModels.hyperSearch:{[modelDict;modelInfo;bestModel;config]
tts:modelDict`tts;
scoreFunc:modelDict`scoreFunc;
modelName:modelDict`modelName;
modelLib:modelDict`modelLib;
custom:modelLib in key models;
exclude:modelName in utils.excludeList;
predDict:$[custom|exclude;
optimizeModels.scorePred[custom;modelDict;bestModel;config];
optimizeModels.paramSearch[modelInfo;modelDict;config]
];
score:get[scoreFunc][predDict`predictions;tts`ytest];
printScore:utils.printDict[`score],string score;
config[`logFunc]printScore;
predDict,`modelName`testScore!(modelName;score)
}
// @kind function
// @category optimizeModels
// @desc Predict sklearn and custom models on test data
// @param custom {boolean} Whether it is a custom model or not
// @param modelDict {dictionary} Data related to model retrieval and various
// configuration associated with a run
// @param bestModel {<} Fitted best model
// @param tts {dictionary} Feature and target data split into training/testing
// sets
// @param config {dictionary} Information relating to the current run of AutoML
// @return {float[]|boolean[]|int[]} Predicted values
optimizeModels.scorePred:{[custom;modelDict;bestModel;config]
tts:modelDict`tts;
config[`logFunc]utils.printDict`modelFit;
pred:$[custom;
optimizeModels.scoreCustom modelDict;
optimizeModels.scoreSklearn
][bestModel;tts];
`bestModel`hyperParams`predictions!(bestModel;()!();pred)
}
// @kind function
// @category optimizeModels
// @desc Predict custom models on test data
// @param modelDict {dictionary} Data related to model retrieval and various
// configuration associated with a run
// @param bestModel {<} Fitted best model
// @param tts {dictionary} Feature and target data split into training/testing
// sets
// @return {float[]|boolean[]|int[]} Predicted values
optimizeModels.scoreCustom:{[modelDict;bestModel;tts]
customName:"."sv string modelDict`modelLib`modelFunc;
get[".automl.models.",customName,".predict"][tts;bestModel]
}
|
pT:(.ml.em[1b;lf;mf;x]//) pT
.ut.assert[1 0 0 1 0] .ml.imax .ml.likelihood[0b;lf;x] . pT
.ut.assert[1 0 0 1 0] .ml.imax .ml.likelihood[1b;.ml.binll[n];x] . pT
/ multinomial example
n:100000
k:30
X:flip raze .ml.rmultinom[1;k] each (6#1f%6;.5,5#.1;(2#.1),4#.2)y:n?3
lf:.ml.mmml
mf:.ml.wmmmmle[k;1e-8]
mu:flip .ml.prb 3?/:X
phi:3#1f%3
.ml.em[1b;lf;mf;X] . pT:(phi;flip enlist mu)
show pT:(.ml.em[0b;lf;mf;X]//) pT
p:.ml.imax .ml.likelihood[1b;.ml.mmmll;X] . pT
show m:.ml.mode each y group p
avg y=m p
-1"what does the confusion matrix look like?";
show .ut.totals[`TOTAL] .ml.cm[y;m p]
/ Gaussian mixtures
/ http://mccormickml.com/2014/08/04/gaussian-mixture-models-tutorial-and-matlab-code/
/ 1d gauss
mu0:10 20 30 / distribution's mu
s20:s0*s0:1 3 2 / distribution's variance
m0:100 200 150 / number of points per distribution
X:raze X0:mu0+s0*(.ml.bm ?[;1f]::) each m0 / build dataset
show .ut.plt raze each (X0;0f*X0),'(X0;.ml.gaussl'[mu0;s20;X0]) / plot 1d data and gaussian curves
k:count mu0
phi:k#1f%k; / guess that distributions occur with equal frequency
mu:neg[k]?X; / pick k random points as centers
s2:k#var X; / use the whole dataset's variance
lf:.ml.gaussl / likelihood function
mf:.ml.wgaussmle / maximum likelihood estimator function
pT:(.ml.em[1b;lf;mf;X]//) (phi;flip (mu;s2)) / returns best guess for (phi;mu;s)
group .ml.imax .ml.likelihood[1b;.ml.gaussll;X] . pT
/ let's use the iris data for multivariate gauss
`X`y set' iris`X`y;
k:count distinct y / 3 clusters
phi:k#1f%k / equal prior probability
mu:X@\:/:neg[k]?count y / pick k random points for mu
SIGMA:k#enlist X cov\:/: X / sample covariance
lf:.ml.gaussmvl
mf:.ml.wgaussmvmle
pT:(.ml.em[1b;lf;mf;X]//) (phi;flip (mu;SIGMA))
/ how well did it cluster the data?
p:.ml.imax .ml.likelihood[1b;.ml.gaussmvll;X] . pT
show m:.ml.mode each y group p
avg y=m p
-1"what does the confusion matrix look like?";
show .ut.totals[`TOTAL] .ml.cm[y;m p]
-1 value .ut.plt .ml.append[0;X 0 2],'.ml.append[1] flip[pT[1;;0]] 0 2;
-1"let's cluster hand written numbers into groups";
-1"assuming each pixel of a black/white image is a Bernoulli distribution,";
-1"we can model each picture as a Bernoulli mixture model";
`X`y set' mnist`X`y;
-1"shrinking training set";
X:1000#'X;y:1000#y;
-1"convert the grayscale image into black/white";
X>:128
plt:value .ut.plot[28;14;.ut.c10;avg] .ut.hmap flip 28 cut
k:10
-1"let's use ",string[k]," clusters";
-1"we first initialize phi to be equal weight across all clusters";
phi:k#1f%k / equal prior probability
-1"then we use the Hamming distance to pick different prototypes";
mu:flip last k .ml.kpp[.ml.hdist;X]// 2#() / pick k distant proto
-1"and finally we add a bit of noise without 'pathological' extreme values";
mu:.5*mu+.15+count[X]?/:k#.7 / randomly disturb around .5
-1"display a few initial prototypes";
-1 (,'/) plt each 4#mu;
lf:.ml.bmml[1]
mf:.ml.wbmmmle[1;1e-8]
pT:(phi;flip enlist mu)
-1"0-values in phi or mu will create null values.";
-1"to prevent this, we need to use dirichlet smoothing";
pT:.ml.em[1b;lf;mf;X] . pT
-1"after the first em round, the number prototypes are much clearer";
-1 (,'/) (plt first::) each pT 1;
-1"let's run 10 more em steps";
pT:10 .ml.em[1b;lf;mf;X]// pT
-1"grouping the data and finding the mode identifies the clusters";
p:.ml.imax .ml.likelihood[0b;.ml.bmml[1];X] . pT
show m:.ml.mode each y group p
avg y=m p
-1"what does the confusion matrix look like?";
show .ut.totals[`TOTAL] .ml.cm[y;m p]
================================================================================
FILE: funq_emma.q
SIZE: 301 characters
================================================================================
/ emma
emma.f:"158.txt"
emma.b:"https://www.gutenberg.org/files/158/old/"
-1"[down]loading emma text";
.ut.download[emma.b;;"";""] emma.f;
emma.txt:{x where not x like "VOLUME*"} read0 `$emma.f
emma.chapters:1_"CHAPTER" vs "\n" sv 39_-373_emma.txt
emma.s:{(3+first x ss"\n\n\n")_x} each emma.chapters
================================================================================
FILE: funq_etl9b.q
SIZE: 601 characters
================================================================================
etl9b.f:"ETL9B"
etl9b.b:"http://etlcdb.db.aist.go.jp/etlcdb/data/"
-1"[down]loading handwritten-kanji data set";
.ut.download[etl9b.b;;".zip";.ut.unzip] etl9b.f;
-1"loading etl9b ('binalized' dataset)";
etl9b.x:.ut.etl9b read1 `:ETL9B/ETL9B_1
-1"extracting the X matrix and y vector";
etl9b.h:0x24,/:"x"$0x21+0x01*til 83 / hiragana
/ etl9b.h:0x25,/:"x"$0x21+0x01*til 83 / katakana (missing)
etl9b.y:flip etl9b.x 1 2
etl9b.w:where etl9b.y in etl9b.h / find hiragana
etl9b.y:etl9b.h?etl9b.y etl9b.w
/ extract 0 1 from bytes
etl9b.X:"f"$flip (raze $[3.5>.z.K;-8#';::] 0b vs/:) each (1_etl9b.x 4) etl9b.w
================================================================================
FILE: funq_fmincg.q
SIZE: 7,405 characters
================================================================================
/ Minimize a continuous differentiable multivariate function. Starting point
/ is given by "X" (D by 1), and the function named in the string "f", must
/ return a function value and a vector of partial derivatives. The Polack-
/ Ribiere flavour of conjugate gradients is used to compute search
/ directions, and a line search using quadratic and cubic polynomial
/ approximations and the Wolfe-Powell stopping criteria is used together with
/ the slope ratio method for guessing initial step sizes. Additionally a
/ bunch of checks are made to make sure that exploration is taking place and
/ that extrapolation will not be unboundedly large. "n" gives the length of
/ the run: if it is positive, it gives the maximum number of line searches,
/ if negative its absolute gives the maximum allowed number of function
/ evaluations. You can (optionally) give "n" a second component, which will
/ indicate the reduction in function value to be expected in the first
/ line-search (defaults to 1.0). The function returns when either its length
/ is up, or if no further progress can be made (ie, we are at a minimum, or
/ so close that due to numerical problems, we cannot get any closer). If the
/ function terminates within a few iterations, it could be an indication that
/ the function value and derivatives are not consistent (ie, there may be a
/ bug in the implementation of your "f" function). The function returns the
/ found solution "X", a vector of function values "fX" indicating the
/ progress made and "i" the number of iterations (line searches or function
/ evaluations, depending on the sign of "n") used.
/ Usage: (X; fX; i) = .fmincg.fmincg[n; f; X]
/ See also: checkgrad
/ Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13
/ (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen
/ Permission is granted for anyone to copy, use, or modify these programs and
/ accompanying documents for purposes of research or education, provided this
/ copyright notice is retained, and note is made of any changes that have
/ been made.
/ These programs and documents are distributed without any warranty, express
/ or implied. As the programs were written for research purposes only, they
/ have not been tested to the degree that would be advisable in any important
/ application. All use of these programs is entirely at the user's own risk.
/ [ml-class] Changes Made:
/ 1) Function name and argument specifications
/ 2) Output display
/ [nick psaris] changes made:
/ 1) ported to q
/ a) renamed "length" as "n"
/ b) placed within .fmincg namespace
/ c) moved constants out of function and into namespace
/ d) refactored to overcome 'locals and 'branch parse errors
/ e) pass/return variables as dict to overcome 8 function parameter limit
/ f) introduced BREAK variable to overcome q's lack of break statement
/ 2) max length "n" is now mandatory
\d .fmincg / function minimize nonlinear conjugate gradient
RHO:.01 / a bunch of constants for line searches
SIG:.5 / RHO and SIG are the constants in the Wolfe-Powell conditions
INT:.1 / don't reevaluate within 0.1 of the limit of the current bracket
EXT:3f / extrapolate maximum 3 times the current bracket
MAX:20 / max 20 function evaluations per line search
RATIO:100 / maximum allowed slope ratio
REALMIN:2.2251e-308
dot:$ / override for performance
wolfepowell:{[d1;d2;f1;f2;z1]$[d2>d1*neg SIG;1b;f2>f1+d1*RHO*z1]}
polackribiere:{[df1;df2;s](s*((dot[df2]df2)-dot[df1]df2)%dot[df1]df1)-df2}
quadfit:{[f2;f3;d2;d3;z3]z3-(.5*d3*z3*z3)%(f2-f3)+d3*z3}
cubicfit:{[f2;f3;d2;d3;z3]
A:(6f*(f2-f3)%z3)+3f*d2+d3;
B:(3f*f3-f2)-z3*d3+2f*d2;
z2:(sqrt[(B*B)-A*d2*z3*z3]-B)%A; / numerical error possible-ok!
z2}
cubicextrapolation:{[f2;f3;d2;d3;z3]
A:(6f*(f2-f3)%z3)+3f*d2+d3;
B:(3f*f3-f2)-z3*d3+2f*d2;
z2:(z3*z3*neg d2)%(B+sqrt[(B*B)-A*d2*z3*z3]); / numerical error possible-ok!
z2}
minimize:{[F;v]
v[`z2]:$[v[`f2]>v`f1;quadfit;cubicfit] . v`f2`f3`d2`d3`z3;
if[v[`z2] in 0n -0w 0w;v[`z2]:.5*v`z3]; / if numerical problem then bisect
v[`z2]:(v[`z3]*1f-INT)|v[`z2]&INT*v`z3; / don't accept too close to limits
v[`z1]+:v`z2;
v[`X]+:v[`z2]*v`s;
v[`f2`df2]:F v`X;
v[`d2]:dot . v`df2`s;
v[`z3]-:v`z2; / z3 is now relative to the location of z2
v}
Accumulators¶
| | Scan | | | Over | | |
|---|---|---|---|---|---|---|
| Converge | (v1\)x | v1\[x] | v1 scan x | (v1/)x | v1/[x] | v1 over x |
| Do | n v1\x | v1\[n;x] | | n v1/x | v1/[n;x] | |
| While | t v1\x | v1\[t;x] | | t v1/x | v1/[t;x] | |
| | (v2\)x | v2\[x] | (v2)scan x | (v2/)x | v2/[x] | (v2)over x |
| | x v2\y | v2\[x;y] | | x v2/y | v2/[x;y] | |
| | v3\[x;y;z] | x y\z | | v3/[x;y;z] | | |

v1, v2, v3: applicable value (rank 1-3); n: integer ≥ 0; t: unary truth map; x, y: arguments/indexes of v
An accumulator is an iterator that takes an applicable value as argument and derives a function that evaluates the value, first on its entire (first) argument, then on the results of successive evaluations.
There are two accumulators, Scan and Over. They have the same syntax and perform the same computation. But where the Scan-derived functions return the result of each evaluation, those of Over return only the last result.
Over resembles map reduce in some other programming languages.
q)(+\)2 3 4 / Scan
2 5 9
q)(+/)2 3 4 / Over
9
Debugging
If puzzled by the result of using Over, replace it with Scan and examine the intermediate results. They are usually illuminating.
Scan, Over and memory
While Scan and Over perform the same computation, in general, Over requires less memory, because it does not store intermediate results.
The number of successive evaluations is determined differently for unary and for higher-rank values.
The domain of the accumulators is functions, lists, and dictionaries that represent finite-state machines.
q)yrp / a European tour
from to wp
----------------
London Paris 0
Paris Genoa 1
Genoa Milan 1
Milan Vienna 1
Vienna Berlin 1
Berlin London 0
q)show route:yrp[`from]!yrp[`to] / finite-state machine
London| Paris
Paris | Genoa
Genoa | Milan
Milan | Vienna
Vienna| Berlin
Berlin| London
Unary values¶
(v1\)x (v1/)x / unary application
x v1\y x v1/y / binary application
The function an accumulator derives from a unary value is variadic. The result of the first evaluation is the right argument for the second evaluation. And so on.
The value is evaluated on the entire right argument, not on items of it.
When applied as a binary, the number of evaluations the derived function performs is determined by its left argument, or (when applied as a unary) by convergence.
| syntax | name | number of successive evaluations |
|---|---|---|
| (v1\)x, (v1/)x | Converge | until two successive evaluations match, or an evaluation matches x |
| i v1\x, i v1/x | Do | i, a non-negative integer |
| t v1\x, t v1/x | While | until unary value t, evaluated on the result, returns 0 |
Converge¶
q)(neg\)1 / Converge
1 -1
q)l:-10?10
q)(l\)iasc l
4 0 8 5 7 2 6 3 1 9
0 1 2 3 4 5 6 7 8 9
1 8 5 7 0 3 6 4 2 9
8 2 3 4 1 7 6 0 5 9
2 5 7 0 8 4 6 1 3 9
5 3 4 1 2 0 6 8 7 9
3 7 0 8 5 1 6 2 4 9
7 4 1 2 3 8 6 5 0 9
q)(rotate[1]\)"abcd"
"abcd"
"bcda"
"cdab"
"dabc"
q)({x*x}\)0.1
0.1 0.01 0.0001 1e-08 1e-16 1e-32 1e-64 1e-128 1e-256 0
q)(route\)`Genoa / a circular tour
`Genoa`Milan`Vienna`Berlin`London`Paris
q)(not/) 42 / never returns!
Matching is governed by comparison tolerance.
Do¶
q)dbl:2*
q)3 dbl\2 7 / Do
2 7
4 14
8 28
16 56
q)5 enlist\1
1
,1
,,1
,,,1
,,,,1
,,,,,1
q)5(`f;)\1
1
(`f;1)
(`f;(`f;1))
(`f;(`f;(`f;1)))
(`f;(`f;(`f;(`f;1))))
(`f;(`f;(`f;(`f;(`f;1)))))
q)/first 10+2 numbers of Fibonacci sequence
q)10{x,sum -2#x}/0 1 / derived binary applied infix
0 1 1 2 3 5 8 13 21 34 55 89
q)/first n+2 numbers of Fibonacci sequence
q)fibonacci:{x,sum -2#x}/[;0 1] / projection of derived function
q)fibonacci 10
0 1 1 2 3 5 8 13 21 34 55 89
q)m:(0 1f;1 1f)
q)10 (m mmu)\1 1f / first 10 Fibonacci numbers
1 1
1 2
2 3
3 5
5 8
8 13
13 21
21 34
34 55
55 89
89 144
q)3 route\`London / 3 legs of the tour
`London`Paris`Genoa`Milan
A form of the conditional:
q)("j"$a=b) foo/bar / ?[a=b;foo bar;bar]
While¶
q)(10>)dbl\2 / While
2 4 8 16
q){x<1000}{x+x}\2
2 4 8 16 32 64 128 256 512 1024
q)inc:1+
q)inc\[105>;100]
100 101 102 103 104 105
q)inc\[105>sum@;84 20]
84 20
85 21
q)(`Berlin<>)route\`Paris / Paris to Berlin
`Paris`Genoa`Milan`Vienna`Berlin
q)waypoints:(!/)yrp`from`wp
q)waypoints route\`Paris / Paris to the end
`Paris`Genoa`Milan`Vienna`Berlin
In the last example, both applicable values are dictionaries.
Binary values¶
x v\y x v/y
The function an accumulator derives from a binary value is variadic. Functions derived by Scan are uniform; functions derived by Over are aggregates. The number of evaluations is the count of the right argument.
Unary and binary application of f/
Binary application¶
When the derived function is applied as a binary, the first evaluation applies the value to the function’s left argument and the first item of its right argument, i.e. m[x;first y]. The result becomes the left argument in the next evaluation, for which the right argument is the second item of the right argument. And so on.
q)1000+\2 3 4
1002 1005 1009
q)m / finite-state machine
1 6 4 4 2
2 7 2 0 5
7 5 6 7 0
2 1 8 1 0
7 3 3 6 8
2 3 8 9 0
1 1 9 6 9
7 8 4 3 0
4 5 8 0 4
9 8 0 3 9
q)c / columns of m
4 1 3 3 1 4
q)7 m\c
0 6 6 6 1 5
Items of x must be in the left domain of the value, and items of y in its right domain.
Unary application¶
When the derived function is applied as a unary, and the value is a function with a known identity element \(I\), then \(I\) is taken as the left argument of the first evaluation.
q)(,\)2 3 4 / I is ()
,2
2 3
2 3 4
In such cases (I f\x)~(f\)x and (I f/x)~(f/)x, and items of x must be in the right domain of the value.
Otherwise, the first item of the right argument is taken as the result of the first evaluation.
q){x,y}\[2 3 4] / () not known as I
2
2 3
2 3 4
q)42{[x;y]x}\2 3 4 / 42 is the first left argument
42 42 42
q)({[x;y]x}\)2 3 4 / 2 is the first left argument
2 2 2
q)(m\)c / c[0] is the first left argument
4 3 1 0 6 9
In this case, for (v\)x and (v/)x:
- x[0] is in the left domain of v
- items 1_x are in the right domain of v
- but x[0] need not be in the range of v
q)({count x,y}\)("The";"quick";"brown";"fox")
"The"
8
6
4
Keywords scan and over¶
Mnemonic keywords scan and over can be used to apply a binary value to a list or dictionary. Parenthesize an infix to pass it as a left argument.
q)(+) over til 5 / (+/)til 5
10
q)(+) scan til 5 / (+\)til 5
0 1 3 6 10
q)m scan c / (m\)c
4 3 1 0 6 9
Ternary values¶
v\[x;y;z] v/[x;y;z]
The function an accumulator derives from a value of rank >2 has the same rank as the value. Functions derived by Scan are uniform; functions derived by Over are aggregates. The number of evaluations is the maximum of the count of the right arguments.
For v\[x;y;z] and v/[x;y;z]:
- x is in the left domain of v
- y and z are atoms or conforming lists or dictionaries in the right domains of v
The first evaluation is v[x;first y;first z]. Its result becomes the left argument of the second evaluation. And so on. For r:v\[x;y;z]:
r[0]: v[x ; y 0; z 0]
r[1]: v[r 0; y 1; z 1]
r[2]: v[r 1; y 2; z 2]
…
The result of v/[x;y;z] is simply the last item of the above:
v/[x;y;z] is v[ v[… v[ v[x;y0;z0] ;y1;z1]; … yn-2;zn-2]; yn-1;zn-1]
q){x+y*z}\[1000;5 10 15 20;2 3 4 5]
1010 1040 1100 1200
q){x+y*z}\[1000 2000;5 10 15 20;3]
1015 2015
1045 2045
1090 2090
1150 2150
q)// Chinese whispers
q)s:"We are going to advance. Send reinforcements."
q)ssr\[s;("advance";"reinforcements");("a dance";"three and fourpence")]
"We are going to a dance. Send reinforcements."
"We are going to a dance. Send three and fourpence."
The above description of functions derived from ternary values applies by extension to values of higher ranks.
Alternative syntax¶
As of V3.1 2013.07.07, Scan has a built-in function for the following.
q)a:1000f;b:1 2 3 4f;c:5 6 7 8f
q){z+x*y}\[a;b;c]
1005 2016 6055 24228f
q)a b\c
1005 2016 6055 24228f
Note that the built-in version is for floats.
Empty lists¶
Accumulators can change datatype
In iterating through an empty list the value is not evaluated. The result might not be in the range of the value.
Allow for a possible change of type to 0h
when scanning or reducing lists of unknown length.
q)mt:0#0
q)type each (mt;*/[mt];{x*y}/[mt]) / Over can change type
7 -7 0h
q)type each (mt;*\[mt];{x*y}\[mt]) / so can Scan
7 -7 0h
Scan¶
The function that Scan derives from a non-unary value is a uniform function: for empty right argument(s) it returns the generic empty list. It does not evaluate the value.
q)()~{x+y*z}\[`foo;mt;mt] / lambda is not evaluated
1b
Over¶
The function that Over derives from a non-unary value is an aggregate: it reduces lists and dictionaries to atoms.
For empty right argument/s the atom result depends on the value and, if the derived function is variadic, on how it is applied.
If the value is a binary function with a known identity element \(I\), and the derived function is applied as a unary, the result is \(I\).
q)(+/)mt / 0 is I for +
0
q)(*/)mt / 1 is I for *
1
If the value is a binary function with no known identity element, and the derived function is applied as a unary, the result is ()
, the generic empty list.
q)()~({x+y}/)mt
1b
If the value is a list and the derived function is applied as a unary, the result is an empty list of the same type as the list.
q)type 1 0 3h/[til 0]
5h
q)type (3 4#til 12)/[0#0]
0h
Otherwise, the result is the left argument.
q)42+/mt
42
q){x+y*z}/[42;mt;mt]
42
q)42 (3 4#til 12)/[0#0]
42
The value is not evaluated.
q)`foo+/mt
`foo
q){x+y*z}/[`foo;mt;mt]
`foo
Q for Mortals §6.7.6 Over (/) for Accumulation
Data At Rest Encryption (DARE)¶
Data security is an ever-evolving domain, especially in recent years as storage-devices become increasingly more portable, or accessible remotely, and the environment they operate within becomes more hostile. The increased demand for BYOD, remote-working, cloud, and mobile devices increases the possibility of a user-account breach, and/or the theft or loss of physical assets. Ideally, companies will have taken precautions to encrypt their files, so that even in the event that they fall into the wrong hands, they cannot be read without authorization.
Full disk encryption (FDE) has been available on multiple operating systems for several years. Unfortunately, FDE often doesn’t satisfy all requirements for Data At Rest Encryption (DARE), hence there is also demand for Transparent Disk Encryption (TDE). This is now available in kdb+ 4.0.
Transparent Disk Encryption (TDE)¶
TDE solves the problem of protecting data at rest, by encrypting database files on the hard drive and consequently also on backup media.
TDE, like file compression, is fully transparent to queries in kdb+; queries require no change to operate on compressed or encrypted data.
What advantage does TDE have over Full Disk Encryption?¶
Examples of FDE products are cryptsetup + LUKS on Linux, BitLocker on Windows, or FileVault on macOS. These encrypt the entire disk with a symmetric cipher, using a key protected by a passphrase.
Enterprises typically employ TDE to solve compliance issues such as PCI-DSS, which require the protection of data at rest.
- As TDE decrypts the data inside the kdb+ process, rather than at the OS/storage level, data remains encrypted when it comes across the wire from remote storage.
- Encryption is selective – encrypt only the files that need encrypting.
- Files can be archived, or copied, across environments without going through a decryption and encryption cycle.
- kdb+ is multi-platform, and as the file format is platform-agnostic, the same encrypted files can be accessed from multiple platforms.
- Maintain key and process ownership and separation of responsibilities: the DBA holds TDE keys, the server admin holds FDE keys.
Availability¶
All editions of kdb+ 4.0 support TDE.
Prerequisites¶
Although kdb+ encryption requires at least OpenSSL library v1.0.2, we recommend using the latest available version. The openssl version loaded into kdb+ is reported via
(-26!)[]`SSLEAY_VERSION
However, to generate the master key, OpenSSL 1.1.1 is required due to the additional PBKDF2 functionality. The version is reported at the OS shell command line via
$ openssl version
OpenSSL 1.1.1d 10 Sep 2019
AES-NI¶
The Intel Advanced Encryption Standard (AES) New Instructions (AES-NI) engine is available for certain Intel processors, and allows for extremely fast hardware encryption and decryption using AES. The AES-NI engine in OpenSSL is automatically enabled if the detected processor has AES-NI. The following test reveals whether your processor has AES-NI in its instruction set:
$ grep -m1 -o aes /proc/cpuinfo
aes
To compare the performance of AES-NI versus no AES-NI, run the following commands and compare their outputs. (The outputs below have been abbreviated.)
The numbers reported are in 1000s of bytes per second processed.
$ openssl speed aes-128-cbc
type 16 bytes 64 bytes 256 bytes 1024 bytes 8192 bytes
aes-128 cbc 93572.13k 101100.84k 102865.41k 103882.57k 103697.07k
$ openssl speed -evp aes-128-cbc
type 16 bytes 64 bytes 256 bytes 1024 bytes 8192 bytes
aes-128-cbc 562725.00k 596856.68k 608495.90k 608907.26k 609640.45k
Significantly better performance of the -evp option indicates that AES-NI is enabled.
Additionally, once you have created a master key, you can verify that the OpenSSL library has AES-NI support via a benchmark test in q, comparing the default against disabled AES-NI support, e.g. with a script ebench.q as
-36!(`:testkek.key;"mypassword")
(`:etest;20;16;0)set 100000000?10000
system"ts max get`:etest"
Execute the default (AES-NI enabled if detected) as
$ q ebench.q
Compare to AES-NI disabled, using the OPENSSL_ia32cap environment variable, as
$ OPENSSL_ia32cap="~0x200000200000000" q ebench.q
The performance difference between AES and AES-NI was observed to be around 400% for this test.
Configuration¶
A password-protected master key is required. Choose a unique, high-entropy password which would withstand a dictionary attack; a poorly chosen password can become the weakest link in encryption.
Use a cryptographically secure pseudorandom number generator (CSPRNG) to generate a random 256-bit AES key, and password protect it.
OpenSSL uses a CSPRNG for its rand command. The master key can be generated using standard command-line tools using:
$ openssl rand 32 | openssl aes-256-cbc -md SHA256 -salt -pbkdf2 -iter 50000 -out testkek.key # Prompts for the new password
Back up this key file and the associated password
Back up this key file and the password to open it, but keep them separate and in a secure location – perhaps even deposit them in escrow for key recovery. If either of these is lost, any data encrypted using it will become inaccessible.
Place this password-protected key file above or outside of your HDB base directory; it will need to be loaded into kdb+ to enable encryption and decryption.
Take precautions to restrict remote users from accessing this file directly. This can be done, for example, by ensuring all remote queries execute through reval by setting the message handlers, e.g.
.z.pg:{reval(value;enlist x)}
Ensure all the other message handlers are initialized accordingly.
Then, load the key file using
-36!(`:pathtokeyfile;"passwordforkeyfile")
Encryption¶
Files can be encrypted using the same command as for file compression, with AES256CBC encryption as algo 16.
The master key must already be loaded, via internal function -36!
.
Recall that the left-hand arguments for encoded set are
(target filename; logical block size; compression/encryption algorithm; compression level)
Individual files can be encrypted as e.g.
(`:ztest;17;2;6) set asc 10000?`3 / compress to an individual file
(`:ztest;17;2+16;6) set asc 10000?`3 / compress and encrypt to an individual file
(`:ztest;17;16;6) set asc 10000?`3 / encrypt an individual file
Or use .z.zd
for a process-wide default setting for all qualifying files.
.z.zd:17 2 6 / zlib compression
.z.zd:17 16 6 / aes256cbc encryption only
.z.zd:(17;2+16;6) / zlib compression, with aes256cbc encryption
Arguments to set override those from .z.zd.
When using the global setting .z.zd, files which do not qualify for encryption are filenames with an extension, e.g. abc.bin, .d.
Encryption adds a small amount of data, depending on the logical block size chosen, amounting to less than 2% of the overall size for typical DB files. The encoded size is reported via the command -21!filename.
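For example, a minimal sketch (the file name is illustrative and assumes the master key has already been loaded with -36! as above):
(`:etest;17;16;6) set 1000?100   / write an AES256CBC-encrypted file (algo 16)
-21!`:etest                      / report size statistics for the encoded file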
File locking¶
Encrypted enumeration domain files are locked for writing during an append, e.g.
q)`:sym?`new`symbols`here
Warning
They must not be read from during the append.
Decryption¶
The master key must already be loaded, via the -36! command.
Decryption is transparent to a query, just as is decompression.
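For example, with the master key loaded, reading back the encrypted file written above needs no special handling (a sketch):
get`:ztest   / decrypts transparently, returning the original sorted symbol list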
Performance¶
We strongly recommend that users of encryption use AES-NI-capable CPUs; this is the most likely scenario for modern systems. When compression is already in use, encryption adds an overhead of only a few percent.
Changing the password for the master key¶
Sometimes compliance requires passwords to be changed at regular intervals. The password for the master key can be changed as follows:
# Change password for master key:
# Prompt for existing password
key=`openssl aes-256-cbc -md SHA256 -d -iter 50000 -in testkek.key`
# Prompt for a new password
echo $key | openssl aes-256-cbc -md SHA256 -salt -pbkdf2 -iter 50000 -out newtestkek.key
# Remove the raw key from the environment
unset key
Confirm that newtestkek.key works by loading it into kdb+ and decrypting the existing data with it. Then archive testkek.key, rename newtestkek.key to testkek.key, and update the password to be used in the -36! call.
This does not change the encryption key itself. To change that, a more involved process is required, which would then re-encrypt all the data.
Limitations¶
The schema in kdb+ is not encrypted, as it is visible in the directory and file names. The column-name file for a splayed table, .d, is also not encrypted.
Technical details¶
Compressed files have the 8-byte header "kxzipped"; encrypted files, which may also be compressed, have the header "kxzippEd".
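As a quick check from q, read the first 8 bytes of a file (a sketch, assuming the `:ztest file written in the Encryption examples above):
`char$read1(`:ztest;0;8)   / "kxzipped" if compressed, "kxzippEd" if encrypted (and possibly compressed)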
kdb+ uses a symmetric cipher, AES256CBC – Advanced Encryption Standard (AES), with a 256-bit key size, in Cipher-Block-Chaining (CBC) mode.
The metadata of a file is encrypted using the master key; it contains the encrypted data-encryption key and is authenticated via HMAC-SHA256, using Encrypt-then-MAC (EtM).
There is one master key, which is password-protected, and a unique data encryption key is used per file.
The master key is encrypted with a symmetric cipher with a key produced from a passphrase using PBKDF2 (Password-Based Key Derivation Function 2), a key-derivation function with a sliding computational cost, used to reduce vulnerabilities to brute-force attacks. The important factor on the computation complexity of PBKDF2 is key-stretching, here, the number of hash-iterations used. High values increase the time required to brute-force the resulting file. The higher the number of iterations, the fewer the number of challenges that can be performed per second, thereby impeding a brute-force attack.
Choosing high-entropy passwords could significantly extend the amount of time required to crack the password. Remember high entropy is not enough – don’t reuse passwords, or passwords that are easily guessed by dictionary attacks. Human errors, such as choosing a weak password, or storing the key file and password in insecure areas, can leave encryption much weaker than desired, resulting in a false sense of security.
Password entropy is a measurement of how unpredictable a password is. Aim for an entropy of >80.
Compression with encryption¶
Due to the nature of encryption, in that the encrypted data must be indistinguishable from random data, there is little to gain from attempting to compress encrypted data. Hence kdb+ offers the combination of ‘compress then encrypt’.
Depending on your threat model, combining compression and encryption can introduce security issues, and you should be aware that information can be leaked through a compression-ratio side channel. If you’re not sure whether you’re leaking information through compression, do not combine it with encryption.
Roadmap¶
These are some of the points of research on our encryption roadmap.
Authenticated Encryption (AE) Ciphers¶
kdb+ presently provides for confidentiality but not integrity beyond the meta block.
Multiple keys per process¶
One of the most profound challenges related to encryption is key management due to its associated complexity and cost. Unclear key management function, lack of skilled professionals, and fragmented Key Management Systems (KMS) increase the overheads for enterprises.
Integration with¶
- Microsoft Azure Key Vault
- AWS Key Management Service
- Google Cloud Key Management Service
- Key Management Interoperability Protocol (KMIP)
Further reading¶
Understanding Cryptography: A Textbook for Students and Practitioners
An Introduction to Mathematical Cryptography
Cryptography Made Simple
Serious Cryptography: A Practical Introduction to Modern Encryption
coursera.org
Deferred response¶
Overview¶
Ideally, for concurrency, all messaging would be async. However, sync messaging is a convenient paradigm for client apps.
You can use -30!x
to allow processing of a sync message to be ‘suspended’, by indicating the response for the currently-executing sync message to be sent explicitly later.
This allows other messages to be processed prior to sending a response message.
You can use -30!(::)
at any place in the execution path of .z.pg
, start up some work, allow .z.pg
to complete without sending a response, and then when the workers complete the task, send the response explicitly.
Example¶
Below is a simple script to demonstrate the mechanics of -30!x
in a gateway. Further error checking, .z.pc
, timeouts, sequence numbers, load-balancing, etc., are left as an exercise for the reader.
workerHandles:hopen each 6000 6001 / open handles to worker processes
pending:()!() / keep track of received results for each clientHandle
/ this example fn joins the results when all are received from the workers
reduceFunction:raze
/ each worker calls this with (0b;result) or (1b;errorString)
callback:{[clientHandle;result]
pending[clientHandle],:enlist result; / store the received result
/ check whether we have all expected results for this client
if[count[workerHandles]=count pending clientHandle;
/ test whether any response (0|1b;...) included an error
isError:0<sum pending[clientHandle][;0];
result:pending[clientHandle][;1]; / grab the error strings or results
/ send the first error or the reduced result
r:$[isError;{first x where 10h=type each x};reduceFunction]result;
-30!(clientHandle;isError;r);
pending[clientHandle]:(); / clear the temp results
]
}
.z.pg:{[query]
remoteFunction:{[clntHandle;query]
neg[.z.w](`callback;clntHandle;@[(0b;)value@;query;{[errorString](1b;errorString)}])
};
neg[workerHandles]@\:(remoteFunction;.z.w;query); / send the query to each worker
-30!(::); / defer sending a response message i.e. return value of .z.pg is ignored
}
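From the client’s point of view this remains an ordinary synchronous call; the deferred result arrives as the reply to the blocking request. A sketch (the gateway port and query are illustrative):
h:hopen 5000               / connect to the gateway
h"select count i from t"   / blocks until the gateway sends the result via -30!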
Blog: Deferred Response
Open-source libraries¶
Featured¶
Repositories by kdb+ users
| hypertree | Recursive aggregating treetable and 3-D pivot table for hypergrid. |
| phrases | The Q Phrasebook |
| qprof | Code profiler |
| studio | A rapid-development environment for q. |
Repositories listed here are maintained by their owners.
Awesome Q A collection of useful or interesting repositories curated by q devs.
Please tell the KX documentation team about new repositories.
GitHub topic queries:
q
kdb
kdb-q
Applications¶
| 3dsim | 3-D world simulation using kdb+/k/q WebSocket server and three.js. |
| betfair-data-capture | Data capture and analysis system built on TorQ for sports exchange data from Betfair. |
| FX-AlgorithmTrading | FX algorithm trading system. |
| kdb-VNC | Implementation of simple VNC server. |
| MarketDataClient | Web scraper for quotes from external financial sites. |
| mserve | Enhanced mserve load-balanced solution. |
| mm | A q implementation of the classic Mastermind game. |
| qex | An equity exchange. |
| q-chess | Simple chess engine. |
| surv-cloud | Small market surveillance application for cloud/kubernetes. |
| tickrecover | Recover from tickerplant crash. |
| VD_KDB | Forex tick database. |
Cryptography and cryptocurrency¶
| cryptopals | Matasano Crypto Challenges. |
| cryptoq | Cryptographic functions and binary operation. |
| qMachineTrader | Reinforcement learning method for incrementally estimating the optimal action-value function. |
| qMonitor | Real-time directional trade volume for bitcoin to fiat markets on Binance, Bitfinex, Bitstamp, Kraken and CoinbasePro. |
| qMiner | Experimental Bitcoin mining project to combat inequality and climate change. |
| qOrderBook | OrderBook snapshot for Bitcoin / USD market across Binance, Kraken, Coinbase, Bitstamp, Bitfinex, bitFlyer, Poloniex, Gemini, itBit and Bittrex. |
| qSignals | Live-trade signal-generation for crypto market. |
| qTrends | Bitcoin trend analysis. |
Development tools¶
| compress | File compression. |
| con | Qcon replacement. |
| csvguess | Guess a reasonable loadstring for a CSV file (V2.4+). |
| debug4 | Debugging tools. |
| dbmaint | Database maintenance utilities. |
| diskdelete | Delete data from disk directly, loading one col at a time rather than whole table. Preserves attributes. Goes to ridiculous lengths to avoid writing. |
| dotz | Control external (.z.p* ) access to a q session, log access errors to file.
|
| dpy | General object display with type and structure |
| dqweb | Simple web interface for kdb+/q processes |
| help | Create a helpfile from a directory of TXTs |
| io | Benchmark for I/O speed. |
| k4unit | K4 unit testing, loads tests from CSVs, runs+logs to database. |
| kdb-stuff | ServerChecker: how to execute commands on a remote box via SSH from within a q process and parse Linux system info (cpuinfo/meminfo/df). |
| kwsrepl | kdb+/k/q live REPL over web sockets. |
| miQ | Divide an application into modules |
| nexusQ | Visualizer for q process network. |
| paste.q | Script for allowing pasting of multi-line q commands into the q REPL. |
| persist-state.q | Save the state of your q session when you exit, and restore it when you start again. |
| q-build | Scripts to allow the standard q binary to be built into RPM and DEB packages. |
| q-doc | Javadoc-inspired documentation generator. |
| q-unit | Unit testing framework. |
| qac | Provides shared memory atomic counter to multiple q processes on Linux. These processes can be either parent/children or unrelated. |
| qcon2 | Console application to query q. |
| qconsole | An IDE for q, written in J/GTK. |
| qp | A package manager for q. |
| qprof | Code profiler |
| qprofiler | Simple code profiler |
| qsparkline | Sparkline plots as UTF-8 character vectors. |
| qspec | A testing framework lightly inspired by Behavior Driven Development and the rspec testing framework. |
| qstudioopen | JDBC driver and authenticator. |
| qtb | Unit testing framework. |
| QUnit | Unit testing framework. |
| reservedwords | Lists q’s reserved words. |
| sphinxQ | A Sphinx documentation tool for q. |
| studio | A rapid-development environment for q. |
| tick | Use instead of an RDB to append data to disk partition during day and use that to build historical partition at day end. |
| tickYahoo | Jobs to download tick data from Yahoo! and save in q database. |
| unparse | Unparse parse tree for q. |
| ws | Workspace utilities |
| yATF.q | CI and test runners |
Editor integrations¶
| Atom |
derekwisong/atom-q quintanar401/atom-charts quintanar401/connect-kdb-q |
| Emacs |
eepgwde/kdbp-mode geocar/kq-mode indiscible/emacs psaris/q-mode |
| Evolved | simongarland/Syntaxhighlighter-for-q |
| Heroku | gargraman/heroku-buildpack-kdb |
| IntelliJ IDEA |
a2ndrade/k-intellij-plugin kdbinsidebrains/plugin shupakabras/kdb-intellij-plugin |
| Jupyter |
jvictorchen/IKdbQ newtux/KdbQ_kernel |
| Linux, macOS, Unix | enlnt/kdb-magic |
| Pygments | jasraj/q-pygments |
| Sublime Text |
smbody-mipt/kdb kimtang/QStudio kimtang/sublime-q kimtang/Q komsit37/sublime-q |
| TextMate | psaris/KX.tmbundle |
| vim |
katusk/vim-qkdb-syntax patmok/qvim simongarland/vim |
| Visual Studio Code |
kdb+/q extension lwshang/vscode-q |
| WordPress | simongarland/Syntaxhighlighter-for-q |
Examples¶
| game-of-life | Conway’s Game of Life. |
| hypercalc | From a q table, create a view with calculated columns. |
| hypertree | Recursive aggregating treetable and 3-D pivot table for hypergrid. |
| JsonRestApi | Server-as-a-function interface |
| kdb | kdb+ database examples. |
| kdb-euler | Solutions to Euler’s problems. |
| kdblib | Q scripts. |
| klondike | Klondike solitaire |
| kxl | Experimental spreadsheet UI. |
| phrases | The Q Phrasebook |
| projecteuler | Solutions to Project Euler problems. |
| proto | Implementing EDSL. |
| q4q | Source code for “Q for Quants”. |
| qca | Simple cellular automata. |
| qmandel | Mandelbrot. |
| qtest | Test-driven development |
| qtips | Source files for “Q Tips: Fast, Scalable and Maintainable kdb+”. |
| q_practice | Quick reference guide for some q tasks. |
| secret-dubstep | Time series and statistics. |
| symfun | Studies and classic problems. |
| telescope | Simulate lexically scoped local variables in q functions. |
| ticker-planto | Simplified version of kdb+tick. |
| vivas | Charting: kdb+ WebSocket drives JavaScript client |
Foreign functions¶
| Boost math library | kimtang/bml |
| C/C++ |
enlnt/ffiq felixlungu/c |
| Fortran | johnanthonyludlow/kdb/docs/fortran.pdf |
| gnuplot | kxcontrib/zuoqianxu/qgnuplot |
| Google Charts | kxcontrib/zuoqianxu/qgooglechart |
| LAPACK, Cephes and FDLIBM | althenia.net/qml |
| Mathematica | kxcontrib/zuoqianxu/qmathematica |
| Matlab | kxcontrib/zuoqianxu/qmatlab |
| Perl | kxcontrib/zuoqianxu/qperl |
| Python |
kxcontrib/serpent.speak kxcontrib/zuoqianxu/qpython |
| Non-linear least squares | brogar/nls |
| R | kimtang/rinit rwinston/kdb-rmathlib |
| Rust |
adwhit/krust redsift/rkdb redsift/kdb-rs-hash |
| TA-Lib | kxcontrib/zuoqianxu/qtalib |
| ZeroMQ | wjackson/qzmq |
Interfaces: q clients¶
| Betfair | picoDoc/betfair-data-capture |
| Bitcoin |
bitmx/btceQ |
| BosonNLP | FlyingOE/q_BosonNLP |
| COMTRADE | diamondrod/q_comtrade |
| Expat XML parser | felixlungu/qexpat |
| ForexConnect | mortensorensen/qfxcm |
| gRPC | diamondrod/qrpc |
| Interactive Brokers | mortensorensen/QInteractiveBrokers |
| IEX | himoacs/iex_q |
| J | Q client for J |
| JDBC | CharlesSkelton/babel |
| Kafka | ajayrathore/krak |
| MQTT | himoacs/mqtt-q |
| ODBC | johnanthonyludlow/kdb/docs/odbc.pdf |
| Philips Hue | jparmstrong/qphue |
| Reuters | KxSystems/kdb/c/feed/rfa.zip |
| TSE FLEX | Naoki-Yatsu/TSE-FLEX-Converter |
| Twitter |
gartinian/kdbTwitter timeseries/twitter-kdb |
| Wind资讯 | FlyingOE/q_Wind |
| Yahoo! | fdeleze/tickYahoo |
Interfaces to kdb+¶
Machine learning¶
| funq | Functional Introduction to Machine Learning in q. |
| lstmq | LSTM (Long short-term memory) neural network. |
| ml.q | Machine-learning examples for q |
| mlq | Machine Learning for q |
| qAutomatedTrading | Automated trading platform based on Machine Learning algorithm. |
| qnn | Simple neural network |
| qPoliticalSentimentAnalysis | Political Sentiment Analysis of Facebook comments using Boosting Algorithm. |
| tf | Tensorflow q wrapper |
Mathematics¶
| options | Option-pricing functions. |
| q-math | Library of math functions. |
| ql.q | Quantitative finance library. |
| qml | A library for statistics, linear algebra, and optimization in q. It provides an interface between the q programming language and numerical libraries such as LAPACK. |
Utilities¶
| anim.q | In-browser 3D animation streaming from q |
| broq | Brotli file decompression. |
| jwt.q | JSON Web Token signing and parsing for q. |
| kdb-jdbc | Packaged dependencies of the JDBC driver and protocol/connection classes as provided at KxSystems/kdb/c. |
| kdb-jfx-viewer | (Making) q realtime viewer by JavaFX. |
| kdb-protocol | Packaged dependencies of the IPC protocol classes as provided at KxSystems/kdb/c. |
| kdb-scripts | Utilities. |
| kdbreport | Convert a table to present as reports as email body. |
| kx | Collection of code from code.kx.com and the lists. r is a small parser for RPN. g is to g.k as x is to xterm . sc dynamically calls functions by their address on AMD_64.
|
| log4q | A concise logger for q/kdb+ applications. |
| Q-GeneticAlgo | Functions for defining and running genetic algorithms for fixed length binary chromosomes. |
| q-fmt | Format strings in q style as s-expressions or m-expressions. |
| q-fn | General higher-order functions library |
| q-memo | Memoization tooling for functions in the q programming language. |
| qBigInt | C library for Big Integer KDB+ Arithmetic |
| qchart | Plot data directly from q (using JavaScript). Works well with sublime-q. Transform your data into JSON and renders it into a HTML/JavaScript template. |
| qgz | GZip decompression. |
| qjson | JSON output. |
| qng | Simple PNG tools. |
| qrapidjson | Rapid JSON serialiser. |
| qutil | Provides several different common utility functions. These currently include: an option-parsing facility as an alternative to .Q.opt; and a file-loading facility based on a pathlist and supporting version numbers. |
| strQ | String helper functions. |
| ws.q | Simple library for websockets. |
| xls | Format cells in an Excel document |
Web¶
| dash | Really fast (>100kq/sec) webserver for q and dashboard-making bits. |
| iver-tree | Fast virtual grid renderer. |
| qdash | A port to q of the JavaScript lodash utilities. |
| qmvp | Q Minimum Viable Product is a barebones boiler-plate webserver that includes templating, serving from html/, index files, and logging. |
| qqq | Useful functions for creating websites. |
| quagga | An experiment in building a web-based development environment for q. |
| qwa | Microservice that performs analytics duties for your website. |
kxcontrib¶
Salvaged repositories
kxcontrib contains repositories salvaged from the former Subversion server for which we have been unable to identify current versions on GitHub. These repositories are not maintained.
| kxblog | Code shared on the KX blog |
KX libraries¶
Fusion interfaces¶
Our Fusion interfaces are
- written for non-q programmers to use
- well documented, with understandable and useful examples
- maintained and supported by KX on a best-efforts basis, at no cost to customers
- released under the Apache 2 license
- free for all use cases, including 64-bit and commercial use
Message and data formats¶
| arrowkdb | Read and write Arrow and Parquet data |
| avrokdb | Read and write Avro data |
| hdf5 | Read and write HDF5 data |
| jdbc | JDBC driver for kdb+ |
| kafka | Q client for Apache Kafka |
| ldap | Q client for LDAP |
| mqtt | Q client for MQTT |
| prometheus-kdb-exporter | Export kdb+ metrics to Prometheus |
| protobufkdb | Read and write Protocol Buffers data |
| solace | Query kdb+ from a Solace event broker |
Other repos maintained by KX¶
| analyst-training | Learn KX Analyst and KX Developer |
| automl | Automate machine learning in kdb+ |
| cookbook | Companion files to the Knowledge Base |
| help | Online help for q |
| jupyterq | Jupyter kernel for kdb+ |
| kdb | Companion files to kdb+ |
| kdb-taq | Processing trade-and-quote data |
| kdb-tick | Tickerplant |
| man | man-style reference |
| ml | Machine Learning Toolkit |
| mlnotebooks | Jupyter notebooks with ML examples |
| nlp | Natural Language Processing in q |
Interprocess communication
A kdb+ process can communicate with other processes through TCP/IP, which is baked into the q language.
General index of open-source repositories
Using R with kdb+¶
kdb+ and R are complementary technologies. kdb+ is the world’s leading timeseries database and incorporates a programming language called q. R is a programming language and environment for statistical computing and graphics. Both are tools used by data scientists to interrogate and analyze data. Their feature sets overlap in that they both:
- are interactive development environments
- incorporate vector languages
- have a built-in set of statistical operations
- can be extended by the user
- are well suited for both structured and ad-hoc analysis
They do however have several differences:
- q can store and analyze petabytes of data directly from disk whereas R is limited to reading data into memory for analysis
- q has a larger set of datatypes, including extensive temporal types (timestamp, timespan, time, second, minute, date, month) which make temporal arithmetic straightforward
- R contains advanced graphing capabilities whereas q does not
- built-in routines in q are generally faster than R
- R has a more comprehensive set of pre-built statistical routines
When used together, q and R provide an excellent platform for easily performing advanced statistical analysis and visualization on large volumes of data.
R can be called as a server from q, and vice-versa.
Q and R working together¶
Given the complementary characteristics of the two languages, it is important to utilize their respective strengths. All the analysis could be done in q; the q language is sufficiently flexible and powerful to replicate any of the pre-built R functions. Below are some best practice guidelines, although where the line is drawn between q and R will depend on both the system architecture and the strengths of the data scientists using the system.
- Do as much of the analysis as possible in q. Q analyzes the data directly from the disk and it is always most efficient to do as much work as possible as close to the data as possible. Whenever possible avoid extracting large raw datasets from q. When extractions are required, use q to create smaller aggregated datasets
- Do not re-implement tried and tested R routines in q unless they either
- can be written more efficiently in q and are going to be called often
- require more data than is feasible to ship to the R installation
- Use R for data visualization
There are four ways to interface q with R:
- R can connect to kdb+ and extract data – loads a shared library into R, connects to kdb+ via TCP/IP
- Embed R inside q and invoke R routines – loads the R library into q, instantiates R
- Q can connect to a remote instance of R via TCP/IP and invoke R routines remotely
- Q can load the R maths library and invoke the R math routines locally
The first and second methods of interfacing between q and R are covered by the Fusion interfaces rkdb and embedR. The remaining methods are not supported or owned by KX but are described below: the packages and methods outlined are kdb-Rmath, Rserve and RODBC.
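As a brief orientation, the sketch below drives R from q with the Rset/Rcmd/Rget helpers that also appear in the Timezones example later in this section; the helper names and the variables used are assumptions about whichever R interface has been loaded.
q)Rset["x";til 10]        / copy a q vector into the R workspace
q)Rcmd "y <- cumsum(x)"   / run arbitrary R code
q)Rget "y"                / pull the result back into q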
A number of considerations will affect which of the above interfaces are used.
Considering the potential size of the data, it is probably more likely that the kdb+ installation containing the data will be hosted remotely from the user. Points to consider when selecting the integration method are:
- if interactive graphing is required, either interface (1) or (2) must be used
- interface (2) can only be used if the q and R installations are installed on the same server
- interfaces (2) and (4) require less data transfer between (possibly remote) processes
- interfaces (2) and (3) both require variables to be copied from kdb+ to R for processing, meaning that at some point in time two copies of the variable will exist, increasing total memory requirements
Examples¶
The following examples make use of the Fusion interfaces between q/kdb+ and R and show their versatility.
Extract aggregated data into R¶
This approach extracts aggregated statistics from q to R. The required statistics in this case are the price returns between consecutive time buckets for each instrument. The following q function extracts time-bucketed data:
timebucketedstocks:{[startdate; enddate; symbols; timebucket]
/ extract the time-bucketed data
data:select last price by date,sym,time:timebucket xbar date+time
from trade
where date within (startdate;enddate),sym in symbols;
/ calculate returns between prices in consecutive buckets
/ and return the results unkeyed
() xkey update return:1^price%prev price by sym from data }
An example is:
q)timebucketedstocks[2014.01.09;2014.01.13;`GOOG`IBM;0D00:05]
date sym time price return
----------------------------------------------------------------
2014.01.09 GOOG 2014.01.09D04:00:00.000000000 1142 1
2014.01.09 GOOG 2014.01.09D04:05:00.000000000 1142.5 1.000438
2014.01.09 GOOG 2014.01.09D04:10:00.000000000 1142 0.9995624
2014.01.09 GOOG 2014.01.09D04:30:00.000000000 1143.99 1.001743
2014.01.09 GOOG 2014.01.09D04:35:00.000000000 1144 1.000009
2014.01.09 GOOG 2014.01.09D04:55:00.000000000 1144 1
..
Once the data is in R it needs to be aligned and correlated. To align the data we will use a pivot function defined in the reshape package.
# Reduce the dataset as much as possible
# only extract the columns we will use
> res <- execute(h,"select time,sym,return from timebucketedstocks[2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:05]")
> head(res)
time sym return
1 2014-01-09 09:30:00 GOOG 1.0000000
2 2014-01-09 09:35:00 GOOG 0.9975051
3 2014-01-09 09:40:00 GOOG 0.9966584
4 2014-01-09 09:45:00 GOOG 1.0005061
5 2014-01-09 09:50:00 GOOG 1.0004707
6 2014-01-09 09:55:00 GOOG 0.9988128
> install.packages('reshape')
> library(reshape)
# Pivot the data using the re-shape package
> p <- cast(res, time~sym)
# Using return as value column.
# Use the value argument to cast to override this choice
> head(p)
time GOOG IBM MSFT
1 2014-01-09 09:30:00 1.0000000 1.0000000 1.0000000
2 2014-01-09 09:35:00 0.9975051 1.0006143 1.0002096
3 2014-01-09 09:40:00 0.9966584 1.0001588 1.0001397
4 2014-01-09 09:45:00 1.0005061 0.9998941 0.9986034
5 2014-01-09 09:50:00 1.0004707 0.9965335 1.0019580
6 2014-01-09 09:55:00 0.9988128 0.9978491 1.0022334
# And generate the correlation matrix
> cor(p)
GOOG IBM MSFT
GOOG 1.0000000 0.2625370 0.1577429
IBM 0.2625370 1.0000000 0.2568469
MSFT 0.1577429 0.2568469 1.0000000
An interesting consideration is the timing for each of the steps and how that changes when the dataset gets larger.
> system.time(res <- execute(h,"select time,sym,return from timebucketedstocks[2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:05]"))
user system elapsed
0.001 0.001 0.145
> system.time(replicate(10,p<-cast(res,time~sym)))
user system elapsed
0.351 0.012 0.357
> system.time(replicate(100,cor(p)))
user system elapsed
0.04 0.00 0.04
We can see that:
- the data extract to R takes 145 ms. Much of this time is taken up by q producing the dataset; there is minimal transport cost, as the processes are on the same host:
q)\t select time,sym,return from timebucketedstocks[2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:05]
134
- the pivot takes approximately 36 ms
- the correlation time is negligible
We can also analyze how these figures change as the dataset grows. If we choose a more granular time period for bucketing the data set will be larger. In our case we will use 10-second buckets rather than 5-minute buckets, meaning the result data set will be 30× larger.
> system.time(res <- execute(h,"select time,sym,return from timebucketedstocks[2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:00:10]"))
user system elapsed
0.015 0.008 0.234
Using return as value column. Use the value argument to cast to override this choice
> system.time(p<-cast(res,time~sym))
user system elapsed
0.950 0.048 0.998
We can see that the time to extract the data increases by ~90 ms. The q query time increases by 4 ms, so the majority of the increase is due to shipping the larger dataset from q to R.
q)\t select time,sym,return
from timebucketedstocks[2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:00:10]
138
The pivot time on the larger data set grows from 40 ms to ~1000 ms giving a total time to do the analysis of approximately 2300 ms. As the dataset grows, the time to pivot the data in R starts to dominate the overall time.
Align data in q¶
Given the pivot performance in R, an alternative is to pivot the data on the q side.
This has the added benefit of reducing the volume of data transported, because the time and sym identification columns can be dropped once the data is aligned. The q function below pivots the data.
timebucketedpivot:{[startdate; enddate; symbols; timebucket]
/ Extract the time bucketed data
data:timebucketedstocks[startdate;enddate;symbols;timebucket];
/ Get the distinct list of column names (the instruments)
colheaders:value asc exec distinct sym from data;
/ Pivot the table, filling with 1 because if no value,
/ the price has stayed the same and return the results unkeyed
() xkey 1^exec colheaders#(sym!return) by time:time from data }
An example is:
q)timebucketedpivot[2014.01.09;2014.01.13;`GOOG`IBM;0D00:05]
time GOOG IBM
-------------------------------------------------
2014.01.09D09:30:00.000000000 1 1
2014.01.09D09:35:00.000000000 0.9975051 1.000614
2014.01.09D09:40:00.000000000 0.9966584 1.000159
2014.01.09D09:45:00.000000000 1.000506 0.9998941
2014.01.09D09:50:00.000000000 1.000471 0.9965335
2014.01.09D09:55:00.000000000 0.9988128 0.9978491
2014.01.09D10:00:00.000000000 1.000775 0.9992017
..
Using the larger dataset example, we can then do
> system.time(res <- execute(h,"delete time from timebucketedpivot [2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:00:10]"))
user system elapsed
0.003 0.004 0.225
> cor(res)
GOOG IBM MSFT
GOOG 1.0000000 0.15336531 0.03471400
IBM 0.1533653 1.00000000 0.02585773
MSFT 0.0347140 0.02585773 1.00000000
thus reducing the total query time from 2300 ms to 860 ms and also reducing the network usage.
Correlations in q¶
A final approach is to calculate the correlations in q, meaning that R is not used for any statistical analysis.
The function below invokes the previously defined functions and creates the correlation matrix, utilizing the function timebucketedpivot defined above:
correlationmatrix:{[startdate; enddate; symbols; timebucket]
/ Extract the pivoted data
data:timebucketedpivot[startdate;enddate;symbols;timebucket];
/ Make sure the symbol list is distinct
/ and contains only values present in the data
symbols:asc distinct symbols inter exec distinct sym from data;
/ Calculate the list of pairs to correlate
pairs:raze {first[x],/:1 _ x}each {1 _ x}\[symbols];
/ Calculate the correlation for each pair
/ Two rows are produced for each pair, with the same value in each
correlatepair:{[data;pair]
([]s1:pair;s2:reverse pair;correlation:cor[data pair 0;data pair 1])};
paircor:raze correlatepair[flip delete time from data] each pairs;
/ Pivot the data to give a matrix
pivot:exec symbols#s1!correlation by sym:s2 from paircor;
/ fill the diagonal with 1 and unkey the result
() xkey 1f^pivot }
which can be run like this:
q)correlationmatrix[2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:00:10]
sym GOOG IBM MSFT
------------------------------------
GOOG 1 0.1533653 0.034714
IBM 0.1533653 1 0.02585773
MSFT 0.034714 0.02585773 1
q)\t correlationmatrix[2014.01.09; 2014.01.15; `GOOG`IBM`MSFT; 0D00:00:10]
181
This solution executes quickest and with the least network usage, as the resultant correlation matrix returned to the user is small.
Example: working with smart-meter data¶
To demonstrate the power of q, an example using randomly-generated smart-meter data has been developed. This can be downloaded from KxSystems/cookbook/tutorial. By following the instructions in the README, an example database can be built. The default database contains information on 100,000 smart-meter customers from different sectors and regions over 61 days. The default database contains 9.6M records per day, 586M rows in total. A set of example queries are provided, and a tutorial to step through the queries and test the performance of q. Users are encouraged to experiment with:
- using secondary processes to boost performance
- running queries with different parameters
- modifying or writing their own queries
- compression to reduce the size of on-disk data
- changing the amount of data generated – more days, more customers, different customer distributions etc.
The data can be extracted from R for further analysis or visualisation.
As an example, the code below will generate an average daily usage profile for each customer type (res = residential, com = commercial, ind = industrial) over a 10-day period.
# load the xtsExtra package
# this will overwrite some of the implementations
# loaded from the xts package (if already loaded)
> install.packages("xtsExtra", repos="http://r-forge.r-project.org") # for R 3.1 you may need an additional parameter type="source"
> library(xtsExtra)
# load the connection library
> library(rkdb)
> h <- open_connection("127.0.0.1",9998,NULL)
# pull back the profile data
# customertypeprofiles takes 3 parameters
# [start date; end date; time bucket size]
> d<-execute(h,"customertypeprofiles[2013.08.01;2013.08.10;15]")
> dxts<-xts(d[,-1],order.by=d[,'time'])
# plot it
> plot.xts(dxts, screens=1, ylim=c(0,500000), auto.legend=TRUE, main=" Usage Profile by Customer Type")
which produces the plot in Figure 5:
Figure 5: Customer usage profiles generated in q and drawn in R
Timezones¶
Note that R’s timezone setting affects date transfers between R and q. In R:
> Sys.timezone() # reads current timezone
> Sys.setenv(TZ = "GMT") # sets GMT ("UTC" is the same)
For example, in the R server:
q)Rcmd "Sys.setenv(TZ='GMT')"
q)Rget "date()"
"Fri Feb 3 06:33:43 2012"
q)Rcmd "Sys.setenv(TZ='EST')"
q)Rget "date()"
"Fri Feb 3 01:33:57 2012"
Knowledge Base: Timezones and Daylight Saving Time
Other methods¶
Outside the Fusion interfaces to R, a number of interfaces provide extremely useful functionality to a q instance from R and vice-versa.
Q in R¶
RODBC with kdb+¶
Although it is not the recommended method, if R is running on Windows, the q ODBC3 driver can be used to connect to kdb+ from R.
The RODBC package should be installed in R. An example is given below.
# install RODBC
> install.packages("RODBC")
# load it
> library(RODBC)
# create a connection to a predefined DSN
> ch <- odbcConnect("localhost:5000") # run a query
# s.k should be installed on the q server to enable standard SQL
# However, all statements can be prefixed with q) to run standard q.
> res <- sqlQuery(ch, paste('q)select count i by date from trade'))
R in q¶
Embedded R maths library¶
R contains a maths library which can be compiled standalone. The functions can then be exposed to q by wrapping them in C code which handles the mapping between R datatypes and q datatypes (K objects). See rwinston/kdb-rmathlib for an example of integrating q with the R API (i.e. making use of some statistical functions from q).
q)\l rmath.q
q)x:rnorm 1000 / create 1000 normal variates
q)summary x / simple statistical summary of x
q)hist[x;10] / show histogram (bin count) with 10 bins
q)y:scale x / x = (x - mean(x))/sd(x)
q)quantile[x;.5] / calculate the 50% quantile
q)pnorm[0;1.5;1.5] / cdf value for 0 for a N(1.5,1.5) distribution
q)dnorm[0;1.5;1.5] / normal density at 0 for N(1.5;1.5) distribution
See also Andrey’s althenia.net/qml for an embedded maths library.
Remote R: Rserve¶
Rserve allows applications to connect remotely to an R instance over TCP/IP. The methods are the same as those outlined above, the difference being that all data is passed over TCP/IP rather than existing in the same memory space.
Every connection to Rserve has a separate workspace and working directory, which means user-defined variables and functions with name clashes will not overwrite each other. This differs from the previous method where, if two users are using the same q process, they can overwrite each other’s variables in both the q and R workspaces.
Origins¶
The q language derives from the notation devised by Harvard mathematician Kenneth E. Iverson.
Many powerful techniques for programming in q originally appeared in earlier Iversonian languages, which can reward study by q programmers.
- Try APL: APL (A Programming Language) was the first language derived from Iverson notation. At this site you can see it with its original elegant symbols.
- Jsoftware wiki: J was Iverson’s from-the-ground-up reboot of APL in 1990.
- no stinking loops: Stevan Apter’s personal website is a treasure chest for k programmers.
- Vector: The journal of the British APL Association has recorded work in the Iversonian languages since 1984.
History¶
- AKQJ大师
- Kx Systems: A Historical Need for Speed, Datanami, October 2020
- APL Since 1978, Proc. ACM Program. Lang., Vol. 4, No. HOPL, Article 69. June 2020
- Stages of Denial, John Earnest, March 2020
- Vector processing languages: the future of Big Data analytics and real-time business intelligence, Conceptual Origami, 2010
- A conversation with Arthur Whitney, ACM, 2009
- An interview with Arthur Whitney, KX, 2004
Archive¶
- Abridged q language manual, Arthur Whitney, 2009
- Abridged kdb+ database manual, Arthur Whitney, 2006
- kdb+ database reference manual, Don Orth, 2006
- Q language reference manual, Don Orth, 2006
- Abridged kdb+tick manual, Arthur Whitney, 2005
- Abridged kdb+taq manual, Arthur Whitney, 2005
- kdb+ database and language primer, Dennis Shasha, 2005
Technical articles¶
A selection of technical articles of interest to kdb+ developers
Programming in q¶
- Real-world examples of iterators revised
- Monte-Carlo methods with kdb+, by Arman Tadjrishi
- Memory mapping in kdb+, by Adam Bonham
- Logging best practices, by Jamie McKeown
- Advanced pattern matching and text manipulation in kdb+ with embedPy, Jun Bing Neo
- Single-page applications and kdb+: AngularJS, Angular, React, by Stephen Trainor
- Partitioning data in kdb+, by Rian Ó Cuinneagáin
- Tables with calculated columns, by Stevan Apter, Vector 25:1
- Treetable: a case-study in q, by Stevan Apter, Vector 24:4
- Parsing data in kdb+, by Rian Ó Cuinneagáin
- Server-as-a-Function: Providing RESTful JSON APIs in q, by Rob Moore
- Deferred response, by Gopala Bhat
- Scripting with q, by David Crossey
Machine learning¶
- A comparison of Python and q for data analysis
- Natural Language Processing
- A comparison of Python and q for handling lists
- Feature extraction and selection in kdb+
- Neural networks in kdb+
- Using embedPy to apply LASSO regression
- Machine-Learning techniques featured in JupyterQ notebooks
- Decision trees
Performance¶
Applications and case studies¶
- Data visualization with kdb+ using ODBC: A Tableau case study
- Combining high-frequency cryptocurrency venue data using kdb+
- Template of Fortnite gamer visualizations using Dashboards
- KX for Love!!
- Storing and exploring the Bitcoin blockchain
- eFX: Data and analytics are the next arms race
- IIoT for predictive maintenance and Big Data
- Quantile Technologies
- MIT Motorsports’ kdb+ Vehicle Telemetry System
- Consolidated audit trail go-live is now a certainty
- Signal processing with kdb+
- Web scraping
- The exploration of space weather at NASA FDL
- Edge computing on a low-profile device
// timeserie request on graph panel w/ no preference on sym separation
graphnosym:{[coln;rqt]
// return columns with json number type only
coln:-1_coln where`number=types (0!meta rqt)`t;
colname:coln cross`msec;
:nosymresponse[rqt;colname];
};
// timeserie request on table panel w/ no preference on sym separation
tablenosym:{[coln;rqt]
coltype:types -1_(0!meta rqt)`t;
:tabresponse[coln;coltype;rqt];
};
// timeserie request on non-specific panel w/ data for one sym returned
othersym:{[args;rqt]
// specify what columns data to return, taken from drop down input
outcol:args[2],`msec;
data:flip value flip?[rqt;enlist(=;sym;enlist args 3);0b;outcol!outcol];
:.j.j enlist `target`datapoints!(args 3;data);
};
// timeserie request on graph panel w/ data for each sym returned
graphsym:{[colname;rqt]
// return columns with json number type only
syms:`$string ?[rqt;();1b;{x!x}enlist sym]sym;
// specify what columns data to return, taken from drop down input
outcol:colname,`msec;
build:{[outcol;rqt;x;y]data:flip value flip?[rqt;enlist(=;sym;enlist y);0b;outcol!outcol];x,`target`datapoints!(y;data)};
:.j.j build[outcol;rqt]\[();syms];
};
// timeserie request on table panel w/ single sym specified
tablesym:{[coln;rqt;symname]
coltype:types -1_(0!meta rqt)`t;
// select data for requested sym only
rqt:?[rqt;enlist(=;sym;enlist symname);0b;()];
:tabresponse[coln;coltype;rqt];
};
================================================================================
FILE: TorQ_code_common_heartbeat.q
SIZE: 6,804 characters
================================================================================
// Heartbeating
// All processes can publish heartbeats. This allows downstream processes to check they are available and not blocked
// Even if the connection is still valid, the process may be unavailable
// This script handles both publishing of heartbeats, and functions for checking if the required heartbeats are received in a timely manner
// The pubsub is reliant on a pubsub implementation e.g. u.[q|k]
// Override the processwarning and processerror functions to implement required behaviour when warnings or errors are encountered
// Override warningperiod and errorperiod functions to have bespoke warning and error periods for different process types
// Use storeheartbeat function in the upd function to process heartbeats
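// Example (hypothetical names, not part of this file): seed the processes you expect
// heartbeats from and override the error hook, e.g.
//   .hb.addprocs[`rdb`rdb;`rdb1`rdb2]
//   .hb.processerror:{[t] .lg.e[`hb;"no heartbeat from ",", " sv string exec procname from t]}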
\d .hb
enabled:@[value;`enabled;1b] // whether the heartbeating is enabled
subenabled:@[value;`subenabled;0b]      // whether subscriptions to heartbeats are enabled
debug:@[value;`debug;1b] // whether to print debug information
publishinterval:@[value;`publishinterval;0D00:00:30] // how often heartbeats are published
checkinterval:@[value;`checkinterval;0D00:00:10] // how often heartbeats are checked
warningtolerance:@[value;`warningtolerance;1.5f] // a process will move to warning state when it hasn't heartbeated in warningtolerance*checkinterval
errortolerance:@[value;`errortolerance;2f] // and to an error state when it hasn't heartbeated in errortolerance*checkinterval
CONNECTIONS:@[value;`CONNECTIONS;()];     // processes that heartbeat subscriptions are received from (as a subset of .servers.CONNECTIONS)
subscribedhandles:0 0Ni
// table for publishing heartbeats
// sym = proctype
heartbeat:([]time:`timestamp$(); sym:`symbol$(); procname:`symbol$(); counter:`long$(); pid:`int$(); host:`$(); port:`int$())
// process ID, hostname and port
pid:.z.i
host:.z.h
port:system"p"
// create a keyed version of the heartbeat table to store the incoming heartbeats
hb:update warning:0b, error:0b from `sym`procname xkey heartbeat
// functions to get the warning and error tolerances
// to have different warnings or errors for different process types, modify these functions
warningperiod:{[processtype] `timespan$warningtolerance*publishinterval}
errorperiod:{[processtype] `timespan$errortolerance*publishinterval}
// heartbeat counter
hbcounter:@[value;`hbcounter;0j]
// publish a heartbeat
publishheartbeat:{
if[@[value;`.ps.initialised;0b];
.ps.publish[`heartbeat;enlist `time`sym`procname`counter`pid`host`port!(.proc.cp[];.proc.proctype;.proc.procname;hbcounter;pid;host;port)];
hbcounter+::1]}
// add a set of process names and types to seed the heartbeat table
addprocs:{[proctypes;procnames] .hb.hb:(2!([]sym:proctypes,();procname:procnames,();time:.proc.cp[];counter:0Nj;pid:0Nj;host:`;port:0Nj;warning:0b;error:0b)),.hb.hb}
// store an incoming heartbeat
storeheartbeat:{[hb]
// store the heartbeat
`.hb.hb upsert update warning:0b,error:0b from select by sym,procname from hb};
// check if any of the heartbeats are in error or warning
checkheartbeat:{
now:.proc.cp[];
// calculate which processes haven't heartbeated recently enough
stats:update status+`short$2*now>time+.hb.errorperiod[sym] from
update status:`short$now>time+.hb.warningperiod[sym] from .hb.hb;
warn[select from stats where status=1,not warning];
err[select from stats where status>1,not error];
}
// process warnings and errors
warn:{
if[debug;
{.lg.o[`heartbeat;"processtype ",(string x`sym),", processname ",(string x`procname)," has not heartbeated since ",(string x`time)," and has status WARNING"]} each 0!x];
update warning:1b from `.hb.hb where ([]sym;procname) in key x;
processwarning[x]}
err:{
if[debug;
{.lg.e[`heartbeat;"processtype ",(string x`sym),", processname ",(string x`procname)," has not heartbeated since ",(string x`time)," and has status ERROR"]} each 0!x];
update error:1b from `.hb.hb where ([]sym;procname) in key x;
// key x is .hb.hb table with an extra status column
// It is empty most of the time but will contain a row if the process has not heartbeated
processerror[x]}
// override these functions to implement bespoke functionality on heartbeat errors and warnings
processwarning:{[processtab]
if[1<=count processtab;
.html.pub[`heartbeat;0!select from .hb.hb where procname in exec procname from processtab]]}
processerror:{[processtab]
if[1<=count processtab;
.html.pub[`heartbeat;0!select from .hb.hb where procname in exec procname from processtab]]}
// subscribe to heartbeats and log messages on a handle
subscribe:{[handle]
@[{x(`.ps.subscribe;`heartbeat;`); subscribedhandles,::x};handle;{.lg.e[`hbsub;"failed to subscribe to heartbeat on handle ",(string x),": ",y]}[handle]];
}
hbsubscriptions:{[]
getheartbeats .hb.CONNECTIONS
}
getheartbeats:{[proctype]
if[`ALL in proctype; proctype:`];
// only get handles that have not been subscribed to
handles:(.servers.getservers[`proctype;proctype;()!();0b;0b]`w) except subscribedhandles;
if[count handles;
.lg.o[`hbsub;"subscribing to new handle(s) ",", " sv string handles];
subscribe each handles]
}
\d .
// set the heartbeat table to the top level namespace, to allow it to be initialised in the pub/sub routine
heartbeat:.hb.heartbeat;
if[(not @[value;`.proc.lowpowermode;0b]) & @[value;`.hb.enabled;1b];
// add the checkheartbeat function to the timer
$[@[value;`.timer.enabled;0b] and `publish in key `.ps;
[.lg.o[`init;"adding heartbeat functions to the timer"];
.timer.repeat[.proc.cp[];0Wp;.hb.publishinterval;(`.hb.publishheartbeat;`);"publish heartbeats"];
.timer.repeat[.proc.cp[];0Wp;.hb.checkinterval;(`.hb.checkheartbeat;`);"check the heartbeats have been received in a timely manner"]];
.lg.e[`init;"heartbeating is enabled, but the timer and/or pubsub code is not enabled"]]];
if[.hb.subenabled;
upd:{[f;t;x] if[t=`heartbeat; .hb.storeheartbeat[x]]; f . (t;x)}@[value;`upd;{{[t;x]}}];
.dotz.set[`.z.pc;{if[y;.hb.subscribedhandles::.hb.subscribedhandles except y]; x@y}@[value;.dotz.getcommand[`.z.pc];{{[x]}}]];
.timer.rep[.z.p;0wp;0D00:01:00;(`.hb.hbsubscriptions;`);0h;"subscribe to heartbeats";0b];
.servers.connectcustom:{[func;connectiontab]
// only return servers specified by .hb.CONNECTIONS
// if `ALL is specified then all servers are returned
connectiontab:$[`ALL in .hb.CONNECTIONS;
connectiontab;
select from connectiontab where proctype in .hb.CONNECTIONS];
// only select records with unsubscribed handles
connectiontab:select from connectiontab where not w in .hb.subscribedhandles;
// subscribe to new handles
if[count connectiontab;
.hb.subscribe each connectiontab`w];
func@connectiontab
}@[value;`.servers.connectcustom;{{[x]}}]
]
================================================================================
FILE: TorQ_code_common_help.q
SIZE: 16,228 characters
================================================================================
|
Amazon Web Services (AWS)¶
The solution described here was developed and deployed on Amazon Web Services (AWS). In this section the AWS resources that were used are described, each of which should be transferable to the other big cloud platforms like Microsoft Azure and Google Cloud.
Amazon Machine Image (AMI)¶
In the solution multiple servers are launched, all needing the same code and software packages installed in order to run our kdb+ system. Instead of launching the servers and then installing the needed software on each one, it is best practice to do it on one server, create an Amazon Machine Image (AMI) of that server and then use that image to launch all of the servers. This will keep the software and code consistent across the deployment.
To create our AMI a regular EC2 instance was launched using Amazon’s Linux 2 AMI, kdb+ was installed, our code was deployed and an image of the instance was taken.
An example script of how to do this
Once available this AMI was used along with Cloudformation to deploy a stack.
Cloudformation¶
Using AWS Cloudformation means we can easily deploy and manage our system’s resources with a JSON or YAML file. The resources needed for our stack are outlined below.
- AWS Elastic File System (EFS).
- EC2 launch templates.
- Auto Scaling groups.
An example YAML file to deploy this stack
AWS Elastic File System (EFS)¶
In our system the RDB and tickerplant will be on different servers but both processes will need access to the tickerplant’s logs. For simplicity we will write the logs to EFS, a network file system which all of our EC2 instances can mount.
In high-volume systems EFS will not be fast enough for the tickerplant, so it will need to write its logs to local disk. A separate process will then be needed on the server to read and stream the logs to the RDB in the case of RDB replay. The code snippet below can be used to do so.
.u.stream:{[tplog;start;end]
.u.i:0;
.u.start:start;
-11!(tplog;end);
delete i, start from `.u; }
upd: {if[.u.start < .u.i+:1; neg[.z.w] @ (`upd;x;y); neg[.z.w][]]};
EC2 launch template¶
We will use launch templates to configure details for EC2 instances ahead of time (e.g. instance type, root volume size, AMI ID). Our Auto Scaling groups will then use these templates to launch their servers.
Auto Scaling group (ASG)¶
AWS EC2 Auto Scaling groups (ASG) can be used to maintain a given number of EC2 instances in a cluster.
Recovery¶
The first Auto Scaling group we deploy will be for the tickerplant. Even though there will only ever be one instance for the tickerplant we are still putting it in an ASG for recovery purposes. If it goes down the ASG will automatically start another one.
Scalability¶
There are a number of ways an ASG can scale its instances on AWS:
| scaling | method |
|---|---|
| Scheduled | Timeframes are set to scale in and out. |
| Predictive | Machine learning is used to predict demand. |
| Dynamic | Cloudwatch metrics are monitored to follow the flow of demand (e.g. CPU and memory usage). |
| Manual | Adjusting the ASG’s DesiredCapacity attribute. |
Dynamic Scaling¶
We could conceivably publish memory usage statistics as Cloudwatch metrics from our RDBs and allow AWS to manage scaling out.
If the memory across the cluster rises to a certain point the ASG will increment its DesiredCapacity
attribute and launch a new server.
Sending custom Cloudwatch metrics is relatively simple:
Examples using either Python’s boto3 library or the AWS CLI
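As a rough sketch (the function name, Cloudwatch namespace and metric name here are assumptions, not from the original), an RDB could publish its heap usage from .Q.w[] through the AWS CLI, reusing the .util.sys.runWithRetry and .util.aws.getInstanceId helpers defined later in this article:
.util.aws.putHeapMetric:{[]
    heapMB:`long$.Q.w[][`heap]%1024*1024;                 / current heap usage in MB
    .util.sys.runWithRetry "aws cloudwatch put-metric-data",
        " --namespace rdbCluster --metric-name HeapMB",
        " --dimensions InstanceId=",.util.aws.getInstanceId[],
        " --value ",string heapMB; }
Publishing this on a timer would give Cloudwatch the data it needs to drive a dynamic scaling policy.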
Manual scaling¶
Another way to scale the instances in an ASG, and the method more suitable for our use case, is to manually adjust the ASG’s DesiredCapacity
.
This can be done via the AWS console or the AWS CLI.
As it can be done using the CLI we can program the RDBs to scale the cluster in and out. Managing the Auto Scaling within the application is preferable because we want to be specific when scaling in.
If scaling in was left up to AWS it would choose which instance to terminate based on certain criteria (e.g. instance count per availability zone, time to the next billing hour). However, if all of the criteria have been evaluated and there are still multiple instances to choose from, AWS will pick one at random.
Under no circumstance do we want AWS to terminate an instance running an RDB process which is still holding live data.
So we will need to keep control of the Auto Scaling group’s DesiredCapacity
within the application.
As with publishing Cloudwatch metrics, adjusting the DesiredCapacity
can be done with Python’s boto3 library or the AWS CLI.
Examples with Python and the AWS CLI
Auto Scaling in q¶
As the AWS CLI simply uses Unix commands we can run them in q
using the system
command.
By default the CLI will return json
so we can parse the result using .j.k
.
It will be useful to wrap the aws
system commands in a retry loop as they may timeout when AWS is under load.
.util.sys.runWithRetry:{[cmd]
n: 0;
while[not last res:.util.sys.runSafe cmd;
system "sleep 1";
if[10 < n+: 1; 'res 0];
];
res 0 }
.util.sys.runSafe: .Q.trp[{(system x;1b)};;{-1 x,"\n",.Q.sbt[y];(x;0b)}]
To adjust the DesiredCapacity
of an ASG we first need to find the correct group.
To do this we will use the aws ec2
functions to find the AutoScalingGroupName
that the RDB server belongs to.
.util.aws.getInstanceId: {last " " vs first system "ec2-metadata -i"};
.util.aws.describeInstance:{[instanceId]
res: .util.sys.runWithRetry
"aws ec2 describe-instances --filters \"Name=instance-id,Values=",instanceId,"\"";
res: (.j.k "\n" sv res)`Reservations;
if[() ~ res; 'instanceId," is not an instance"];
flip first res`Instances }
.util.aws.getGroupName:{[instanceId]
tags: .util.aws.describeInstance[instanceId]`Tags;
res: first exec Value from raze[tags] where Key like "aws:autoscaling:groupName";
if[() ~ res; 'instanceId," is not in an autoscaling group"];
res }
To increment the capacity we can use the aws autoscaling
functions to find the current DesiredCapacity
.
Once we have this we can increment it by one and set the attribute.
The ASG will then automatically launch a server.
.util.aws.describeASG:{[groupName]
res: .util.sys.runWithRetry
"aws autoscaling describe-auto-scaling-groups --auto-scaling-group-name ",groupName;
res: flip (.j.k "\n" sv res)`AutoScalingGroups;
if[() ~ res; 'groupName," is not an autoscaling group"];
res }
.util.aws.getDesiredCapacity:{[groupName]
first .util.aws.describeASG[groupName]`DesiredCapacity }
.util.aws.setDesiredCapacity:{[groupName;n]
.util.sys.runWithRetry
    "aws autoscaling set-desired-capacity --auto-scaling-group-name ",
groupName," --desired-capacity ",string n }
.util.aws.scale:{[groupName]
.util.aws.setDesiredCapacity[groupName] 1 + .util.aws.getDesiredCapacity groupName; }
To scale in, the RDB will terminate its own server.
When doing this it must make an aws autoscaling
call, the ASG will then know not to launch a new instance in its place.
.util.aws.terminate:{[instanceId]
.j.k "\n" sv .util.sys.runWithRetry
"aws autoscaling terminate-instance-in-auto-scaling-group --instance-id ",
instanceId," --should-decrement-desired-capacity" }
|
/ cluster the iris data
sm:.5<.ml.gaussk[iris.X;.5] each flip iris.X / similarity matrix based on Gaussian kernel
show .ml.interpret .ml.mcl[2;1.5;10] over .ml.inflate[1;0f] sm
/ are there 4 species: http://www.siam.org/students/siuro/vol4/S01075.pdf
================================================================================
FILE: funq_mlens.q
SIZE: 1,200 characters
================================================================================
mlens.f:("ml-latest";"ml-latest-small") 1 / pick the smaller dataset
mlens.b:"http://files.grouplens.org/datasets/movielens/" / base url
-1"[down]loading latest movielens data set";
.ut.download[mlens.b;;".zip";.ut.unzip] mlens.f;
-1"loading movie definitions: integer movieIds and enumerated genres";
mlens.movie:1!("I**";1#",") 0: `$mlens.f,"/movies.csv"
-1"removing movies without genres";
update 0#'genres from `mlens.movie where genres like "(no genres listed)";
-1"converting unicode in titles to ascii";
update .ut.sr[.ut.ua] peach rtrim title from `mlens.movie;
-1"extracting the movie's year from the title";
update year:"I"$-1_/:-5#/:title from `mlens.movie;
update -7_/:title from `mlens.movie where not null year;
-1"adding `u on movieId and splitting genres";
update `u#movieId,`$"|"vs'genres from `mlens.movie;
-1"adding the decade as a genre";
update genres:(genres,'`$string 10 xbar year) from `mlens.movie;
-1"enumerating genres";
mlens.movie:update `genre?/:genres from mlens.movie
-1"loading movie ratings";
mlens.rating:("IIFP";1#",") 0:`$mlens.f,"/ratings.csv"
-1"adding `p on userId and linking movieId to movie table";
update `p#userId,`mlens.movie$movieId from `mlens.rating;
================================================================================
FILE: funq_mnist.q
SIZE: 638 characters
================================================================================
mnist.zf:(
"train-labels.idx1-ubyte";
"train-images.idx3-ubyte";
"t10k-labels.idx1-ubyte";
"t10k-images.idx3-ubyte")
mnist.f:ssr[;".";"-"] each mnist.zf
mnist.b:"http://yann.lecun.com/exdb/mnist/"
-1"[down]loading handwritten-digit data set";
.ut.download[mnist.b;;".gz";.ut.gunzip] mnist.f;
/ rename unzipped file to match zipped file
mnist.zf {[zf;f]if[zfs~key zfs:`$":",zf;system "r ",zf," ",f]}' mnist.f;
mnist.Y:enlist mnist.y:"i"$.ut.mnist read1 `$mnist.f 0
mnist.X:flip "f"$raze each .ut.mnist read1 `$mnist.f 1
mnist.Yt:enlist mnist.yt:"i"$.ut.mnist read1 `$mnist.f 2
mnist.Xt:flip "f"$raze each .ut.mnist read1 `$mnist.f 3
================================================================================
FILE: funq_moby.q
SIZE: 284 characters
================================================================================
/ moby-dick
moby.f:"2701.txt"
moby.b:"https://www.gutenberg.org/files/2701/old/"
-1"[down]loading moby-dick text";
.ut.download[moby.b;;"";""] moby.f;
moby.txt:read0 `$moby.f
moby.chapters:"\nCHAPTER" vs "\n" sv 298_-373_ moby.txt
moby.s:{(3+first x ss"\n\n\n")_x} each moby.chapters
================================================================================
FILE: funq_nb.q
SIZE: 3,895 characters
================================================================================
\l funq.q
\l iris.q
\l stopwords.q
\l smsspam.q
/ https://en.wikipedia.org/wiki/Naive_Bayes_classifier
X:(6 5.92 5.58 5.92 5 5.5 5.42 5.75; / height (feet)
180 190 170 165 100 150 130 150f; / weight (lbs)
12 11 12 10 6 8 7 9f) / foot size (inches)
y:`male`male`male`male`female`female`female`female / classes
Xt:(6 7f;130 190f;8 12f) / test data
-1"assuming Gaussian distribution";
-1"analyzing mock dataset";
-1"building classifier";
show pT:.ml.fnb[.ml.wgaussmle/:;::;y;X] / build classifier
-1"confirming accuracy";
.ut.assert[`female`male] .ml.pnb[0b;.ml.gaussl;pT] Xt / make classification predictions
.ut.assert[`female`male] .ml.pnb[1b;.ml.gaussll;pT] Xt / use log likelihood
/ iris
-1"analyzing iris data set";
-1"building classifier";
pT:.ml.fnb[.ml.wgaussmle/:;::;iris.y;iris.X] / build classifier
-1"confirming accuracy";
.ut.assert[.96f] avg iris.y=.ml.pnb[0b;.ml.gaussl;pT] iris.X / how good is classification
.ut.assert[.96f] avg iris.y=.ml.pnb[1b;.ml.gaussll;pT] iris.X / how good is classification
/ inf2b-learn-note07-2up.pdf
X:(2 0 0 1 5 0 0 1 0 0 0; / goal
0 0 1 0 0 0 3 1 4 0 0; / tutor
0 8 0 0 0 1 2 0 1 0 1; / variance
0 0 1 8 0 1 0 2 0 0 0; / speed
1 3 0 0 1 0 0 0 0 0 7; / drink
1 1 3 8 0 0 0 0 1 0 0; / defence
1 0 5 0 1 6 1 1 0 0 1; / performance
1 0 0 1 9 1 0 2 0 0 0) / field
Xt:flip(8 0 0 1 7 1 0 1;0 1 3 0 3 0 1 0)
y:(6#`sport),5#`informatics
-1"assuming Bernoulli distribution";
-1"analyzing mock dataset";
/ Bernoulli
-1"building classifier";
show pT:.ml.fnb[.ml.wbinmle[1;0]/:;::;y;0<X] / build classifier
-1"confirming accuracy";
.ut.assert[`sport`informatics] .ml.pnb[0b;.ml.binl[1];pT] Xt / make classification prediction
.ut.assert[`sport`informatics] .ml.pnb[1b;.ml.binll[1];pT] Xt / make classification prediction
/ Bernoulli - add one smoothing
-1"testing Bernoulli add one smoothing";
show pT:.ml.fnb[.ml.wbinmle[2;0]/:;::;y;1+0<X]
.ut.assert[`sport`informatics] .ml.pnb[0b;.ml.binl[2];pT] Xt
.ut.assert[`sport`informatics] .ml.pnb[1b;.ml.binll[2];pT] Xt / use log likelihood
/ multinomial - add one smoothing
-1"testing multinomial add one smoothing";
show pT:.ml.fnb[.ml.wmultimle[1];::;y;X]
.ut.assert[`sport`informatics] .ml.pnb[0b;.ml.multil;pT] Xt
.ut.assert[`sport`informatics] .ml.pnb[1b;.ml.multill;pT] Xt / use log likelihood
/ https://www.youtube.com/watch?v=km2LoOpdB3A
X:(2 2 1 1; / chinese
1 0 0 0; / beijing
0 1 0 0; / shanghai
0 0 1 0; / macao
0 0 0 1; / tokyo
0 0 0 1) / japan
y:`c`c`c`j
-1"analyzing another mock dataset";
-1"testing multinomial add one smoothing";
Xt:flip enlist 3 0 0 0 1 1
/ multinomial - add one smoothing
-1"building classifier";
show flip pT:.ml.fnb[.ml.wmultimle[1];::;y;X]
-1"confirming accuracy";
.ut.assert[1#`c] .ml.pnb[0b;.ml.multil;pT] Xt
.ut.assert[1#`c] .ml.pnb[1b;.ml.multill;pT] Xt
-1"modeling spam/ham classifier";
-1"remove unicode and punctuation characters from sms text";
t:update .ut.sr[.ut.ua,.ut.ha,.ut.pw] peach text from smsspam.t
-1"tokenizing and removing stop words from sms text";
t:update (except[;stopwords.xpo6] " " vs) peach lower text from t
-1"user porter stemmer to stem sms txt";
t:update (.porter.stem') peach text from t
-1"partitioning sms messages between training and test";
d:.ut.part[`train`test!3 1;0N?] t
c:d . `train`text
y:d . `train`class
-1"generating vocabulary and term document matrix";
X:.ml.tdm[c] v:asc distinct raze c
ct:d . `test`text
yt:d . `test`class
Xt:.ml.tdm[ct] v
-1 "fitting multinomial naive bayes classifier";
pT:.ml.fnb[.ml.wmultimle[1];::;y;flip X]
-1"confirming accuracy";
avg yt=p:.ml.pnb[0b;.ml.multil;pT] flip Xt
-1 "sorting model by strong spam signal";
show select[>spam] from ([]word:v)!flip last pT
-1 "sorting model by strong spam relative signal";
show select[>spam%ham] from ([]word:v)!flip last pT
================================================================================
FILE: funq_nn.q
SIZE: 7,342 characters
================================================================================
\c 20 100
\l funq.q
\l mnist.q
\l winequality.q
/ digit recognition
-1"referencing mnist data from global namespace";
`X`Xt`y`yt set' mnist`X`Xt`y`yt;
-1"shrinking training set";
X:1000#'X;y:1000#y;
-1"normalize data set";
X%:255f;Xt%:255f
-1"define a plot function that includes the empty space character";
plt:value .ut.plot[28;14;.ut.c10;avg] .ut.hmap flip 28 cut
-1"visualize the data";
-1 (,'/) plt each X@\:/: -4?count X 0;
-1"we first generate a matrix of y values where each row only has a single 1 value";
-1"the location of which corresponds the the digit in the dataset";
show Y:.ml.diag[(1+max y)#1f]@\:y
-1"neural networks include multiple layers";
-1"where the first and last are visible, but all others are hidden";
-1"to cost and gradient functions, compute over a list of THETA matrices";
-1"we first define a network topology (the size of each layer)";
-1"it has been proven that a single hidden layer (with enough nodes)";
-1"can approximate any function. in addition, extra layers add marginal value.";
-1"we present an example with a single hidden layer";
-1"the size of the first and last layer are fixed.";
-1"a good size for the middle layer is the average of the first and last";
n:0N!"j"$.ut.nseq[2;count X;count Y]
-1"correctly picking the initial THETA values is important.";
-1"instead of setting them all to a 0 (or any constant value),";
-1"we must set them to random values to 'break the symmetry'.";
-1"additionally, we must chose values that ensure the gradient";
-1"of the sigmoid function is not too small. .ml.glorotu does this";
0N!theta:2 raze/ THETA:.ml.glorotu'[1+-1_n;1_n];
rf:.ml.l2[1f]; / regularization function
-1"the neural network cost function feeds the X values through the network,";
-1"then backpropagates the errors and gradient for each layer.";
-1"the cost and gradient calculations are expensive but share intermediate values";
-1"it is therefore important to compute both simultaneously";
hgolf:`h`g`o`l!`.ml.sigmoid`.ml.dsigmoid`.ml.sigmoid`.ml.logloss
show .ml.nncostgrad[rf;n;hgolf;Y;X;theta]
|
Word wheels¶
| N | D | E |
| O | K | G |
| E | L | W |
Find all the words containing K that can be composed from the letters in the grid. Use letters no more times than they appear in the grid.
Multiple levels of iteration and nested indexes
Test whether a word can be composed from a grid by examining the difference of their letter counts.
Minimize the number of tests by making a matrix of all the results, then indexing into it.
Compose iterated functions to iterate through the lists of indexes.
Use simple syntactic substitution to parallelize the key computation.
Six code lines for Part 1; twelve for Part 2
No loops, no counters, no control structures.
Write a program to solve the word-wheel puzzle
Words must contain at least three letters and appear in the National Puzzlers’ League dictionary.
Write programs to
- Solve a grid: find all the words that can be composed from it that contain its middle letter
- Find the grids with the longest solutions that include a 9-letter word
from Rosetta Code
Get a vocabulary¶
Dictionaries and vocabularies
To avoid confusion between dictionary as a q data structure, and as a list of words, we refer here to the latter as a vocabulary.
Reading the vocabulary is straightforward enough, but we discover words we do not want.
q)show vocab:"\n"vs .Q.hg "http://wiki.puzzlers.org/pub/wordlists/unixdict.txt"
"10th"
"1st"
"2nd"
"3rd"
"4th"
"5th"
"6th"
"7th"
"8th"
"9th"
,"a"
"a&m"
"a&p"
"a's"
"aaa"
"aaas"
"aarhus"
"aaron"
"aau"
"aba"
"ababa"
"aback"
..
We want only words with 3-9 letters composed of the letters a-z. (We see the vocabulary is already in lower case.)
Sometimes we display lists of strings as symbols just to save space
q).Q.A in raze vocab / any upper-case letters here?
00000000000000000000000000b
q)ce:count each
q)`$v39:{x where(ce x)within 3 9}{x where all each x in .Q.a}vocab
`aaa`aaas`aarhus`aaron`aau`aba`ababa`aback`abacus`abalone`abandon`abase`abash..
q)count v39
20664
List v39
is all the words we might find from a word wheel.
Match on the mid letter¶
The example grid is "ndeokgelw"
. Its mid letter is k, so only words containing k are candidates.
q)grid:"ndeokgelw"
q)grid 4
"k"
q)show c:v39 where grid[4]in'v39
"aback"
"ackerman"
"ackley"
"adkins"
"aiken"
"airlock"
"airpark"
..
Test composable¶
A simple test for whether a word w
can be composed from a grid g
is to examine the difference of their letter counts.
q)lc:ce group@
q)lc grid
n| 1
d| 1
e| 2
o| 1
k| 1
g| 1
l| 1
w| 1
q)(lc grid)-lc "alaska"
n| 1
d| 1
e| 2
o| 1
k| 0
g| 1
l| 0
w| 1
a| -3
s| -1
The negative numbers in the difference show "alaska"
cannot be composed from "ndeokgelw"
.
The complete test:
q)all 0<=(lc grid)-lc "alaska"
0b
Test all the candidates¶
Subtract is not atomic in the domain of dictionaries, so we use Each Right to subtract from the grid letter count the letter count of each candidate.
q)c where all each 0<=(lc grid)-/:lc each c
"eke"
"elk"
"keel"
"keen"
"keg"
"ken"
"keno"
"knee"
"kneel"
"knew"
"know"
"knowledge"
"kong"
"leek"
"week"
"wok"
"woke"
Putting that all together:
ce:count each
lc:ce group@
vocab:"\n"vs .Q.hg "http://wiki.puzzlers.org/pub/wordlists/unixdict.txt"
v39:{x where(ce x)within 3 9}{x where all each x in .Q.a}vocab
solve:{[g;v]
i:where(g 4)in'v;
v i where all each 0<=(lc g)-/:lc each v i }[;v39]
q)solve "ndeokgelw"
"eke"
"elk"
"keel"
"keen"
"keg"
"ken"
"keno"
"knee"
"kneel"
"knew"
"know"
"knowledge"
"kong"
"leek"
"week"
"wok"
"woke"
Find the best wheel¶
The solve
function makes possible a naïve solution to the second question: which grids have the longest solutions – i.e. the most words composable from them?
Every grid with a 9-letter word in its solution is a permutation of that word. But only the middle letter determines which candidate words need testing against the grid, so nine rotations exhaust the possibilities.
q)count grids:distinct raze(til 9)rotate\:/:v39 where(ce v39)=9
27810
Now we just need the length of their solutions and it will be easy to bust out the winners.
bust:{[v]
grids:distinct raze(til 9)rotate\:/:v where(ce v)=9;
wc:(count solve@)each grids;
grids where wc=max wc }
While we are waiting for the solutions to over 27,000 grids we have ample time to reflect on what is being done.
Each call to solve
finds the vocabulary words that contain the grid’s middle letters. Yet there are only 26 possible middle letters; only 26 of these searches are necessary. We could make a dictionary: the letters a-z keyed to the indexes of words that contain them.
q)`$v39 iaz:(.Q.a)!where each .Q.a in'\:v39
a| `aaa`aaas`aarhus`aaron`aau`aba`ababa`aback`abacus`abalone`abandon`abase`ab..
b| `aba`ababa`aback`abacus`abalone`abandon`abase`abash`abate`abater`abbas`abb..
c| `aback`abacus`abc`abdicate`abduct`abeyance`abject`abreact`abscess`abscissa..
..
Further reflection shows us bust
is performing the letter-count subtraction for each of 27,000+ grids. But rotation makes no difference to the letter count of the grid. There are only 3,088 distinct letter counts for the 27,000+ grids. And each word in the dictionary need be tested only once against each of the 3,088 9-letter words.
An efficient solution¶
Start again by listing the indexes of all the words that can be composed from each grid word.
q)vlc:lc each v39 / letter counts of vocabulary words
q)ig:where(ce v39)=9 / find grids (9-letter words)
q)igw:where each(all'')0<=(vlc ig)-/:\:vlc / find words composable from each grid word
List igw
corresponds to the 9-letter grid words. Each item is the vocabulary indexes for words composable from the corresponding grid word.
Each grid word has nine possible mid letters.
q)show ml:4 rotate'v39 ig / mid letters for each grid
"minalabdo"
"nathyaber"
"rrentabho"
"itionabol"
"inateabom"
"igineabor"
"issaeabsc"
..
Each mid letter selects a different word list from iaz
.
The first grid word is "abdominal"
. The first item of igw
lists the words that can be composed from it.
q)`$v39 igw 0
`aba`abdominal`abo`ada`adam`ado`aid`aida`ail`aim`ala`alai`alamo`alan`alb`alba..
The first item of ml
is the mid letters of the nine permutations of "abdominal"
.
Dictionary iaz
gives us the vocabulary indexes of the words that contain each mid letter.
q)ml 0
"minalabdo"
q)`$v39 iaz ml 0
`abdomen`abdominal`abnormal`abominate`abraham`abram`abramson`abysmal`academe`..
`abdicate`abdominal`abelian`abetting`abide`abidjan`abigail`ablution`abolish`a..
`aaron`abalone`abandon`abdomen`abdominal`abelian`abelson`aberdeen`abernathy`a..
`aaa`aaas`aarhus`aaron`aau`aba`ababa`aback`abacus`abalone`abandon`abase`abash..
`abalone`abdominal`abel`abelian`abelson`abigail`ablate`ablaze`able`ablution`a..
`aaa`aaas`aarhus`aaron`aau`aba`ababa`aback`abacus`abalone`abandon`abase`abash..
`aba`ababa`aback`abacus`abalone`abandon`abase`abash`abate`abater`abbas`abbe`a..
`abandon`abdicate`abdomen`abdominal`abduct`abed`aberdeen`abetted`abhorred`abi..
`aaron`abalone`abandon`abbot`abbott`abdomen`abdominal`abelson`abhorred`abhorr..
We just need to select from each list just the words that are composable from igw 0
, that is
q)`$v39 (igw 0)inter/:iaz ml 0
`abdominal`adam`aim`alamo`alma`almond`ama`ami`amid`amino`animal`balm`bam`bimo..
`abdominal`aid`aida`ail`aim`alai`ali`alia`ami`amid`amino`ani`animal`bail`bali..
`abdominal`alan`almond`amino`ana`and`ani`animal`ban`banal`band`bin`bind`bland..
`aba`abdominal`abo`ada`adam`ado`aid`aida`ail`aim`ala`alai`alamo`alan`alb`alba..
`abdominal`ail`ala`alai`alamo`alan`alb`alba`ali`alia`alma`almond`animal`bail`..
`aba`abdominal`abo`ada`adam`ado`aid`aida`ail`aim`ala`alai`alamo`alan`alb`alba..
`aba`abdominal`abo`alb`alba`bad`bail`bald`bali`balm`bam`ban`banal`band`bid`bi..
`abdominal`ada`adam`ado`aid`aida`almond`amid`and`bad`bald`band`bid`bimodal`bi..
`abdominal`abo`ado`alamo`almond`amino`bimodal`blond`boa`boil`bold`bon`bona`bo..
Iterating that through igw
and iaz
:
igw inter/:'iaz ml
It remains only to raze the lists and count the solution lengths to get the word count for each grid.
wc:ce raze igw inter/:'iaz ml
Those grids?
grids:raze(til 9)rotate\:/:v39 ig
Putting it all together:
best:{[v]
vlc:lc each v; / letter counts of vocabulary words
ig:where(ce v)=9; / find grids (9-letter words)
igw:where each(all'')0<=(vlc ig)-/:\:vlc; / find words composable from each grid
grids:raze(til 9)rotate\:/:v ig; / 9 permutations of each grid
iaz:(.Q.a)!where each .Q.a in'\:v; / find words containing a, b, etc
ml:4 rotate'v ig; / mid letters for each grid
wc:ce raze igw inter/:'iaz ml; / word counts for grids
distinct grids where wc=max wc } / grids with most words
q)show w:best v39
"ntclaremo"
"tspearmin"
q)ce solve each w
215 215
Parallelization¶
The heavy CPU lifting in best
is done by the dictionary subtractions in the definition of igw
: 3,088 × 20,664 tests.
This would be a good subject for parallelization, and the compact q code means there is not much to do.
We shall use the peach
keyword, which, like each
, applies a unary function.
But what unary function?
The expression to parallelize here is (vlc ig)-/:\:vlc. That has the syntax x f\:y, where f is -/: and x is vlc ig.
Iterators Each Left and Each Right are equivalent to applying Each to a unary. In other words
x f/:y <=> f[x;] each y
x f\:y <=> f[;y] each x
So
(vlc ig)-/:\:vlc <=> -/:[;vlc] each vlc ig
which gets us to
igw:where each(all'')0<=-/:[;vlc]peach vlc ig
On the author’s six-core machine with an allowance of four secondary tasks, this halves the execution time of the previous expression.
You can experiment with settings on your own machine, and see whether you get further improvements by extending the left argument of peach.
For example:
igw:{where all each 0<=x-/:y}[;vlc] peach vlc ig
Review¶
For the first program, solve
, the key move is to use the difference between letter-count dictionaries to determine whether a word is composable from a grid.
It helps that dictionaries are in the domain of the Subtract operator.
Having found the words containing the grid’s mid letter, it remained only to select those that are composable from the grid.
The solve
function suggests a naïve solution for the second program: bust
iterates solve
over all the possible grids, counts the solution lengths and picks the longest.
Waiting for bust
to complete left time to reflect on the work it was repeating unnecessarily.
In best
that work is refactored into
- a dictionary
iaz
that maps mid letters to words that contain them - a list
igw
of the words composable from each of the 9-letter grid words
In both iaz
and igw
, vocabulary words are represented by their indexes in v39
, the list of 3-9 letter words.
For the nine grids permuted from a single grid word, e.g. "abdominal"
, we take from igw
its list of composable words and intersect it with each of the word lists for the mid letters of its permutation.
Easier to express in q than English:
igw inter/:'iaz ml
Even the refactored best
entails over 63 million dictionary subtractions to find which words are composable from each of the 3,088 grid words.
This is a prime target for parallelization, and where the terse q code comes into its own.
The peach
keyword applies a unary function. The expression (vlc ig)-/:\:vlc
specifies two levels of iteration, but transforms easily into a unary applied by each
, for which we need only substitute peach
.
Still only a single line of code, it is light work to experiment with moving other elements of the calculation within the ambit of peach
.
|
Compacting the HDB sym file¶
Under some scenarios, the sym enum file sitting in the root of the HDB folder can become bloated with symbols no longer used since earlier parts of a HDB were archived.
To compact the file requires re-enumeration of all enumerated columns against a new empty sym file. That can take some time to execute; nothing else should read or write to the HDB whilst this is running.
The code below is for a simple HDB, with
- date partitions
- a single sym list
- only splayed tables
Use at your own risk
This is an all-or-nothing approach. Run the code below at your own risk.
Ensure you understand what it does, and test it against a dev HDB you are happy to destroy in the event of an error.
This should only ever be a one-time process. If you find your sym file growing beyond a reasonable size, you very likely have non-repeating strings which would be better stored as char vectors than symbols.
This process is not a fix for a poor choice of schema!
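As a rough illustration (the table here is hypothetical, not part of the original text), a column with a small repeating domain enumerates cheaply, whereas effectively unique strings stored as symbols would add a new sym entry for almost every row:
q)t:([] sym:100000?`IBM`MSFT`AAPL; id:string 100000?0Ng)   / hypothetical trade data
q)(count distinct t`sym; count distinct t`id)              / 3 enum entries vs ~100,000
3 100000
A column like id is better kept as char vectors; only columns like sym belong in the enumeration.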
/cd hdb
/q
system "mv sym zym";
`:sym set `symbol$(); / create a new empty sym file
files:key `:.;
dates:files where files like "????.??.??";
{[d]
root:":",string d;
tableNames:string key `$root;
tableRoot:root,/:"/",/:tableNames;
files:raze {`$x,/:"/",/:string key `$x}each tableRoot;
files:files where not files like "*#";
types:type each get each files;
enumeratedFiles:files where types within 20 76h;
/ if we have more than one enum better get help
if[any types within 21 76h;'"too difficult"];
{
`sym set get `:zym;
s:get x;
a:attr s;
s:value s;
`sym set get `:sym;
s:a#.Q.en[`:.;([]s:s)]`s;
x set s;
-1 "re-enumerated ", string x;
}each enumeratedFiles;
}each dates
Remember to rm
the zym file at the end of processing.
Back up the sym file¶
The sym file is found in the root of your HDB. It is the key to the default enums.
Regularly back up the sym file outside the HDB.
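A minimal sketch of automating this, assuming q is started from the HDB root and that the backup directory (a made-up path) exists:
/ copy the sym file to a dated backup outside the HDB
system "cp sym /backup/hdb/sym_",string .z.d;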
Multi-threaded sym rewrite code¶
Here is some multi-threaded (it can also run single-threaded), more memory-intensive but much faster sym-file rewrite code that handles partitioned and splayed tables and par.txt.
Note you lose the `g# attribute, which isn’t supported in threads, so you have to apply it later.
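One hedged way to restore it once the rewrite has finished (outside the threads) is to rewrite the affected column files; gFiles below is a hypothetical list of the column files that previously carried the attribute:
/ reapply `g# to each affected column file (gFiles is assumed to have been recorded beforehand)
{x set `g#get x} each gFiles;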
system"l ." /load the HDB - can change this if you don't start q from your HDB root
allpaths:{[dbdir;table]
/ from dbmaint.q + an extra check for paths that exist (to support .Q.bv)
files:key dbdir;
if[any files like"par.txt";
:raze allpaths[;table]each hsym each`$read0(`)sv dbdir,`par.txt];
files@:where files like"[0-9]*";
files:(`)sv'dbdir,'files,'table;
files where 0<>(count key@)each files}
sym:oldSym:get`:sym /to unenumerate
/sym files from parted tables
symFiles:raze` sv/:/:raze
{allpaths[`:.;x],/:\:exec c from meta[x] where t in "s"} peach tables[]
where {1b~.Q.qp value x}each tables[]
/sym files from splayed tables
symFiles,:raze{` sv/: hsym[x],/:exec c from meta x where t in "s"}each tables[]
where {0b~.Q.qp value x}each tables[]
/symbol files we're dealing with - memory intensive
allsyms:distinct raze {[file] distinct @[value get@;file;`symbol$()]} peach symFiles
.Q.gc[] /memory intensive so gc
/
The preceding code makes no changes to the HDB.
You can estimate the savings with count[allsyms]%count sym.
The rest of the script makes changes; there is no going back once you start.
Let nothing write to the HDB while the script runs.
\
system"mv sym zym" / make backup of sym file
`:sym set `symbol$() / reset sym file - scary part
`sym set get`:sym
.Q.en[`:.;([]allsyms)] / enumerate all syms at once
{[file]
s:get file; / file contents
a:first `p`s inter attr s; / attributes - due to no`g# error in threads
/ can be just a:attr s if your version of kdb+
/ supports setting `g# in threads
s:oldSym`int$s; / unenumerate against old sym file
file set a#`sym$s; / enumerate against new sym; add attrib; write
0N!"re-enumerated ", string file;
} peach symFiles
Multi-threaded sym rewrite code
Take backups!
Error writing file?
In the multi-threaded script, a 'cast
could happen if this line fails on a file:
allsyms:distinct raze{[file] :distinct @[value get@;file;`symbol$()] } peach symFiles;
/symbol files we're dealing with - memory intensive
So perhaps check the integrity of your HDB (perhaps change the above to help debug):
allsyms:distinct raze{[file]
:distinct @[value get@; file; {0N!(x;y); `symbol$()}[file;]]
} peach symFiles;
would print the file and error.
It’s important to understand what’s going on, not just run the whole thing blindly.
Corporate actions¶
Even routine corporate actions can have a significant impact on prices, volume and volatility. With q one typically captures raw tick data; should a corporate action influence a previously captured price, an adjustment factor is applied to that raw data. This can be done on the fly, so you can also be selective about which types of corporate actions are applied.
Q is data-vendor agnostic, and as such you are free to choose which vendor to source corporate actions data from, one being ActionsExchange who provide corporate action updates several times per day via an FTP site in a well-documented fixed-width ASCII format, or ISO15022 MT564. Telekurs and bme are other such vendors.
If your vendor happens to provide adjustment factors, that is a nice-to-have; otherwise you’ll be tasked with calculating the adjustment factor from first principles. That is not difficult, but you’ll need further data such as close prices. Also, your corporate-action vendor may provide each action with a confidence measure.
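For example, a cash-dividend factor is commonly derived from the closing price on the day before the ex-date. A minimal sketch, assuming a hypothetical keyed table of close prices and a 2.00 dividend going ex on 2000.02.01:
q)closes:([date:2000.01.30 2000.01.31;sym:`ABC`ABC] close:101.5 100f)
q)div:2f; exdate:2000.02.01
q)prevclose:closes[(exdate-1;`ABC)]`close    / close on the day before the ex-date
q)(prevclose-div)%prevclose
0.98
This matches the 0.98 dividend factor in the ca table below.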
Future-looking actions
Future-looking corporate actions can prepare traders for some upcoming unusual activities, e.g. special dividends.
Given a table that contains the raw corporate actions for a security, e.g.
q)ca
date sym caType factor
------------------------------
2000.01.01 ABC split 0.5
2000.02.01 ABC dividend 0.98
2000.03.01 ABC bonus 0.8
2000.04.01 ABC dividend 0.97
and a table of trades
q)t
date sym price size
-------------------------
1995.01.01 ABC 100 100
2000.01.02 ABC 100 100
2000.02.02 ABC 100 100
2000.03.02 ABC 100 100
2000.04.02 ABC 100 100
2000.05.01 ABC 100 100
we can write a function adjust
to apply the relevant adjustment factors for a date and sym
getCAs:{[caTypes]
/ handles multiple corporate actions on one date
t:0!select factor:prd factor by date-1,sym from ca where caType in caTypes;
t,:update date:1901.01.01,factor:1.0 from ([]sym:distinct t`sym);
t:`date xasc t;
t:update factor:reverse prds reverse 1 rotate factor by sym from t;
:update `g#sym from 0!t;
};
adjust:{[t;caTypes]
t:0!t;
factors:enlist 1.0^aj[`sym`date;([] date:t`date;sym:t`sym);getCAs caTypes]`factor;
mc:c where (lower c:cols t) like "*price"; / find columns to multiply
dc:c where lower[c] like "*size"; / find columns to divide
:![t;();0b;(mc,dc)!((*),/:mc,\:factors),((%),/:dc,\:factors)]; / multiply or divide out the columns
};
/ get the adjustment factors considering all corporate actions
q)getCAs exec distinct caType from ca
date sym factor
----------------------
1901.01.01 ABC 0.38024
2000.01.01 ABC 0.76048
2000.02.01 ABC 0.776
2000.03.01 ABC 0.97
2000.04.01 ABC 1
q)adjust[t;`dividend] / adjust trades for dividends only
date sym price size
-----------------------------
1995.01.01 ABC 95.06 105.1967
2000.01.02 ABC 95.06 105.1967
2000.02.02 ABC 97 103.0928
2000.03.02 ABC 97 103.0928
2000.04.02 ABC 100 100
2000.05.01 ABC 100 100
CPU affinity¶
Kdb+ can be constrained to run on specific cores through the setting of CPU affinity.
Typically, you can set the CPU affinity for the shell you are in, and then processes started within that shell will inherit the affinity.
.Q.w (memory stats), command-line parameter -w, system command \w
Linux¶
Non-Uniform Access Memory (NUMA)
Detecting NUMA¶
The following commands will show if NUMA is active.
$ grep NUMA=y /boot/config-`uname -r`
CONFIG_NUMA=y
CONFIG_AMD_NUMA=y
CONFIG_X86_64_ACPI_NUMA=y
CONFIG_ACPI_NUMA=y
Or test for the presence of NUMA maps.
$ find /proc -name numa_maps
/proc/12108/numa_maps
/proc/12109/task/12109/numa_maps
/proc/12109/numa_maps
...
Q and NUMA¶
Until Linux kernels 3.x, q and NUMA did not work well together.
When activating NUMA, substitute parameter settings according to the recommendations for different Linux kernels.
Activating NUMA¶
When NUMA is
- not active, use the taskset command, e.g.
taskset -c 0,1,2 q
will run q on cores 0, 1 and 2. Or
taskset -c 0,1,2 bash
and then all processes started from within that new shell will automatically be restricted to those cores.
- active, use numactl instead of taskset
numactl --interleave=all --physcpubind=0,1,2 q
and set
echo 0 > /proc/sys/vm/zone_reclaim_mode
You can change zone_reclaim_mode without restarting q.
Other ways to limit resources¶
On Linux systems, administrators might prefer cgroups as a way of limiting resources.
On Unix systems, memory usage can be constrained using ulimit
, e.g.
ulimit -v 262144
limits virtual address space to 256MB.
Solaris¶
Use psrset
psrset -e 2 q
which will run q using processor set 2. Or, to start a shell restricted to those cores:
psrset -e 2 bash
Windows¶
Start q.exe
with the OS command start
with the /affinity
flag set
start /affinity 3 c:\q\w64\q.exe
will run q on cores 0 and 1.
Running a kdb+ daemon¶
Here’s a simple way to daemonize kdb+ on Linux. Supports redirecting stderr and stdout. It closes stdin and writes a pid to a pidfile.
Shell features
Remember shell features, e.g.
nohup q -p 5000 < /dev/null > /tmp/stdoe 2>&1&
echo $! > /tmp/pidfile
Sample use:
saturn:tmp> gcc daemonize.c -o daemonize
saturn:tmp> ./daemonize -e /tmp/stderr -o /tmp/stdout -p /tmp/pidfile
~/q/l64/q -p 5000
saturn:tmp> cat /tmp/pidfile
32139
saturn:tmp> q
KDB+ 2.4t 2007.05.04 Copyright (C) 1993-2007 Kx Systems
l64/ 4(8)core 3943MB niall saturn 127.0.0.1 prod 2012.01.01 niall
q)h:hopen `:localhost:5000
q)h"2+2"
4
q)h"0N!`hello"
`hello
q)\\
saturn:tmp> cat /tmp/stdout
`hello
The code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <getopt.h>
int open_or_die(char *path) {
int file, flags = O_CREAT | O_WRONLY | O_APPEND;
if ((NULL == path) || (-1 == (file = open (path, flags, 0666)))) {
(void) fprintf (stderr, "Failed to open file \"%s\": %s\n", path,
strerror(errno));
exit (EXIT_FAILURE);
}
return file;
}
int main (int argc, char *argv[]) {
// Check for some args.
if (8 > argc) {
(void) puts(
"Usage: daemonize options path [args]\n"
"\t-e <filename> Redirect stderr to file <filename>\n"
"\t-o <filename> Redirect stdout to file <filename>\n"
"\t-p <filename> Write pid to <filename>\n");
exit (EXIT_FAILURE);
}
// Parse args.
int option;
    char **command = NULL, *pid_filename = NULL, *stdout_filename = NULL,
        *stderr_filename = NULL;
while (-1 != (option = getopt (argc, argv, "+e:o:p:"))) {
switch (option) {
case 'e': stderr_filename = optarg; break;
case 'o': stdout_filename = optarg; break;
case 'p': pid_filename = optarg; break;
default: (void) fprintf (stderr, "Unknown option: -%c\n",
optopt);
}
}
// Assume the command to daemonize is the rest of the arguments
command = &argv[optind];
// Make a token attempt to see if we'll be able to exec the command.
if (-1 == access (command[0], F_OK)) {
(void) fprintf (stderr, "Can't access %s, exiting.", command[0]);
exit (EXIT_FAILURE);
}
// Try to open some files for pid, stdin, stdout, stderr.
FILE *pid_file = fopen (pid_filename, "w+");
int stdin_file = open_or_die("/dev/null");
int stderr_file = open_or_die(stderr_filename);
int stdout_file = open_or_die(stdout_filename);
// Nuke stdin and redirect stderr, stdout.
close (STDIN_FILENO);
dup2 (stdin_file, STDIN_FILENO);
close (STDOUT_FILENO);
dup2 (stdout_file, STDOUT_FILENO);
close (STDERR_FILENO);
dup2 (stderr_file, STDERR_FILENO);
// Now daemonize..
if (0 != daemon (0, 1)) {
(void) fprintf (stderr, "Can't daemonize: %s\nExiting.",
strerror(errno));
exit (EXIT_FAILURE);
}
// Write the pid
fprintf (pid_file, "%d\n", getpid ());
fclose (pid_file);
// And away we go..
execvp (command[0], command);
}
|
-1"in addition, it is important to confirm that the analytic gradient we compute";
-1"is the same (at least to a few significant digits)";
-1"as a discrete (and slower to calculate) gradient.";
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;.ml.l2[.1];3 5 10 50 2;hgolf]
-1"confirming gradient of a few different activation and loss functions";
hgolf:`h`g`o`l!`.ml.relu`.ml.drelu`.ml.sigmoid`.ml.logloss
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;();3 5 10 50 2;hgolf]
hgolf:`h`g`o`l!`.ml.relu`.ml.drelu`.ml.softmax`.ml.celoss
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;();3 5 10 50 2;hgolf]
hgolf:`h`g`o`l!`.ml.lrelu`.ml.dlrelu`.ml.sigmoid`.ml.logloss
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;();3 5 10 50 2;hgolf]
hgolf:`h`g`o`l!`.ml.tanh`.ml.dtanh`.ml.sigmoid`.ml.logloss
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;();3 5 10 50 2;hgolf]
hgolf:`h`g`o`l!`.ml.tanh`.ml.dtanh`.ml.softmax`.ml.celoss
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;();3 5 10 50 2;hgolf]
hgolf:`h`g`o`l!`.ml.tanh`.ml.dtanh`.ml.linear`.ml.mseloss
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;();3 5 10 50 2;hgolf]
hgolf:`h`g`o`l!`.ml.linear`.ml.dlinear`.ml.linear`.ml.mseloss
.ut.assert . a:.ut.rnd[1e-6] .ml.checknngrad[1e-5;();3 5 10 50 2;hgolf]
hgolf:`h`g`o`l!`.ml.sigmoid`.ml.dsigmoid`.ml.softmax`.ml.celoss
-1"we can now run (batch) gradient descent across the whole data set.";
-1"this will always move along the steepest gradient, but makes slow progress";
-1"and is prone to finding local minima";
first .fmincg.fmincg[5;.ml.nncostgrad[rf;n;hgolf;Y;X];theta];
/ NOTE: qml throws a `limit error (too many elements)
/.qml.minx[`quiet`full`iter,1;.ml.nncostgradf[rf;n;hgolf;Y;X];enlist theta]
-1"we can, alternatively, perform stochastic gradient descent (SGD).";
-1"by taking a subset of the data on each iteration, we can analyze all the data";
-1"without holding it all in memory simultaneously. in addition, the parameters will";
-1"jump around and therefore increasing the chance we find a global minima.";
-1"SGD converges faster, but might never stop iterating";
-1"";
/https://www.quora.com/Whats-the-difference-between-gradient-descent-and-stochastic-gradient-descent
-1"when the batch size is equal to the size of the data set,";
-1"SGD is equal to batch gradient descent.";
-1"at the other extreme, we can analyze one observation at a time.";
-1"this is called 'on-line learning'";
-1"we first define a minimization projection:";
cf:first .ml.nncostgrad[rf;n;hgolf;Y;X]::
gf:last .ml.nncostgrad[rf;n;hgolf]::
-1"we then have a few choices to randomize the dataset.";
-1"A: permutate, then run n non-permuted epochs";
i:0N?count X 0
X:X[;i];Y:Y[;i];y@:i
theta:first .ml.iter[1;2;cf;.ml.sgd[.1;gf;til;5;Y;X]] theta
-1"B: run n permuted epochs";
theta:first .ml.iter[1;2;cf;.ml.sgd[.1;gf;0N?;5;Y;X]] theta
-1"C: run n random (with replacement) epochs (aka bootstrap)";
theta:first .ml.iter[1;2;cf;.ml.sgd[.1;gf;{x?x};5;Y;X]] theta
-1"we can run any above example with cost threshold.";
theta:first .ml.iter[1;.01;cf;.ml.sgd[.1;gf;{x?x};5;Y;X]] theta
-1"what is the final cost?";
cf theta
-1"how well did we learn on the training data set?";
avg y=p:.ml.imax .ml.pnn[hgolf;X] .ml.nncut[n] theta
-1"we can visualize the hidden features";
-1 plt 1_ rand first .ml.nncut[n] theta
-1"or view a few mistakes";
p w:where not y=p
do[2;-1 plt X[;i:rand w];show ([]p;y) i]
-1"how well can we predict unseen data";
avg yt=p:.ml.imax .ml.pnn[hgolf;Xt] .ml.nncut[n] theta
-1"or view a few mistakes";
p w:where not yt=p
do[2;-1 plt Xt[;i:rand w];show ([]p;yt) i]
-1"we can view the confusion matrix as well";
show .ut.totals[`TOTAL] .ml.cm[yt;"i"$p]
-1"neural networks are not limited to classification problems.";
-1"using a linear activation function on the output layer";
-1"along with a means squared (aka quadratic) error loss function";
-1"our feed forward neural network can be used for non-linear regression.";
-1"we split the wine quality data into train and test partitions";
d:.ut.part[`train`test!3 1;0N?] winequality.red.t
`Y`X set' 0 1 cut value flip d.train
`Yt`Xt set' 0 1 cut value flip d.test
-1"and then create a z-score function";
zsf:.ml.zscoref each X
-1"to normalized the train and test data with the same values";
X:zsf @' X
Xt:zsf @' Xt
-1"next we define the topology";
n:"j"$.ut.nseq[2;count X;count Y];
-1"add some regularization";
rf:.ml.l2[l2:10f];
-1"add initialize the THETA coefficients";
theta:2 raze/ .ml.heu'[1+-1_n;1_n];
-1"using the (leaky) rectified linear unit prevents vanishing gradients";
hgolf:`h`g`o`l!`.ml.lrelu`.ml.dlrelu`.ml.linear`.ml.mseloss
theta:first r:.fmincg.fmincg[1000;.ml.nncostgrad[rf;n;hgolf;Y;X];theta]
-1"before revealing how our non-linear neural network fared,";
-1"lets review the mse resulting from ridge regression on the train data";
THETA:.ml.ridge[0f,count[X]#l2;Y;.ml.prepend[1f]X]
.ml.lincost[();Y;X] THETA
-1"and the test data";
.ml.lincost[();Yt;Xt] THETA
-1"now we check for a reduction in the mse using the neural network";
.ml.nncost[();hgolf;Y;X] .ml.nncut[n] theta
-1"and the test data";
.ut.assert[0.2] .ut.rnd[.1] .ml.nncost[();hgolf;Yt;Xt] .ml.nncut[n] theta
.ut.assert[0.1] .ut.rnd[.1] sum 2 raze/ .ml.nngrad[();hgolf;Yt;Xt] .ml.nncut[n] theta
================================================================================
FILE: funq_northanger.q
SIZE: 363 characters
================================================================================
/ northanger abbey
northanger.f:"20080515-121.txt"
northanger.b:"https://www.gutenberg.org/files/121/old/"
-1"[down]loading northanger abbey text";
.ut.download[northanger.b;;"";""] northanger.f;
northanger.txt:read0 `$northanger.f
northanger.chapters:1_"CHAPTER" vs "\n" sv 57_-373_northanger.txt
northanger.s:{(3+first x ss"\n\n\n")_x} each northanger.chapters
================================================================================
FILE: funq_onevsall.q
SIZE: 1,122 characters
================================================================================
\c 20 100
\l funq.q
\l mnist.q
/ digit recognition
-1"referencing mnist data from global namespace";
`X`Xt`Y`y`yt set' mnist`X`Xt`Y`y`yt;
-1"shrinking training set";
X:1000#'X;Y:1000#'Y;y:1000#y;
X%:255f;Xt%:255f
-1"define a plot function that includes the empty space character";
plt:value .ut.plot[28;14;.ut.c10;avg] .ut.hmap flip 28 cut
-1"visualize the data";
-1 (,'/) plt each X@\:/: -4?count X 0;
lbls:"i"$til 10
rf:.ml.l2[1] / regularization function
theta:(1+count X)#0f / initial theta coefficients
f:first .fmincg.fmincg[5;;theta] .ml.logcostgrad[rf;;X]@
-1"to run one-vs-all",$[count rf;" with regularization";""];
-1"we perform multiple runs of logistic regression (one for each digit)";
-1"this trains one set of parameters for each number";
-1 .ut.box["**"] "for performance, we peach across digits";
THETA:.ml.fova[f;Y;lbls]
-1"checking accuracy of parameters";
avg yt=p:lbls .ml.imax .ml.plog[Xt] THETA
-1"view a few confused characters";
w:where not yt=p
do[2;-1 plt Xt[;i:rand w];show ([]p;yt) i]
-1"view the confusion matrix";
show .ut.totals[`TOTAL] .ml.cm[yt;p]
================================================================================
FILE: funq_onp.q
SIZE: 375 characters
================================================================================
onp.f:"OnlineNewsPopularity"
onp.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
onp.b,:"00332/"
-1"[down]loading online news popularity data set";
.ut.download[onp.b;;".zip";.ut.unzip] onp.f;
onp.t:(" efefebebfi" where 2 2 3 4 1 1 6 12 8 21 1;1#",") 0: `$onp.f,"/",onp.f,".csv"
onp.t:`popular xcols delete shares from update popular:shares>=1400 from onp.t;
================================================================================
FILE: funq_optdigits.q
SIZE: 343 characters
================================================================================
optdigits.f:("optdigits.tra";"optdigits.tes")
optdigits.b:"http://archive.ics.uci.edu/ml/machine-learning-databases/"
optdigits.b,:"optdigits/"
-1"[down]loading optdigits data set";
.ut.download[optdigits.b;;"";""] each optdigits.f;
optdigits[`X`Y`Xt`Yt]:raze (64 cut (65#"H";",") 0: `$) each optdigits.f
optdigits[`y`yt]:optdigits[`Y`Yt][;0]
================================================================================
FILE: funq_pagerank.q
SIZE: 2,194 characters
================================================================================
\c 20 100
\l funq.q
\l cloud9.q
\l berkstan.q
/ http://ilpubs.stanford.edu:8090/422/
/ http://infolab.stanford.edu/~backrub/google.html
/ https://ahrefs.com/blog/google-pagerank/
/ https://en.wikipedia.org/wiki/Google_matrix
/ https://en.wikipedia.org/wiki/PageRank
/ http://www.cs.princeton.edu/~chazelle/courses/BIB/pagerank.htm
/ http://www.mathworks.com/help/matlab/examples/use-page-rank-algorithm-to-rank-websites.html
/ https://www.mathworks.com/moler/exm/chapters/pagerank.pdf
-1 "given a list of page links,";
i:1 1 2 2 3 3 3 4 6
j:2 6 3 4 4 5 6 1 1
show l:(i;j)
node:asc distinct raze l
l:node?l
-1 "we can transform the sparse connectivity matrix";
show S:(1 2#1+max over l), .ml.prepend[1f] l
-1 "into a full matrix";
show A:.ml.full S
-1 "using matrix inversion, we can algebraically compute the pagerank";
-1 "it is commonly understood that the odds of clicking on a link are 85%";
-1 "while the odds of randomly going to another page are 15%";
d:.85
show node[i]!r i:idesc r:.ml.pageranka[d;A]
-1 "ranks don't change drastically over time";
-1 "so perhaps an iterative approach is better";
show node[i]!r i:idesc r:.ml.pageranki[d;A] over r:n#1f%n:count A
S:.ml.sparse A / sparse matrix
show node[i]!r i:idesc r:.ml.pageranks[d;S] over r:n#1f%n:S[0;0]
-1 "this can be optimized by using the power method";
-1 "first compute the Google matrix, then iteratively multiply until convergence";
show node[i]!r i:idesc r:$[;.ml.google[d;A]] over r:n#1f%n:count A
|
// Generate standardised timestamp string for log names
gentimeformat:{(raze string "dv"$x) except ".:"};
// Tabperiod mode - TP log rolled periodically (default 1 hr), 1 log per table (default setting)
.stplg.logname.tabperiod:{[dir;tab;p] ` sv (hsym dir;`$raze string (.proc.procname;"_";tab),.stplg.gentimeformat[p]) };
// Standard TP mode - write all tables to single log, roll daily
.stplg.logname.singular:{[dir;tab;p] ` sv (hsym dir;`$raze string .proc.procname,"_",.stplg.gentimeformat[p]) };
// Periodic-only mode - write all tables to single log, roll periodically intraday
.stplg.logname.periodic:{[dir;tab;p] ` sv (hsym dir;`$raze string .proc.procname,"_periodic",.stplg.gentimeformat[p]) };
// Tabular-only mode - write tables to separate logs, roll daily
.stplg.logname.tabular:{[dir;tab;p] ` sv (hsym dir;`$raze string (.proc.procname;"_";tab),.stplg.gentimeformat[p]) };
// Custom mode - mixed periodic/tabular mode
// Tables are defined as periodic, tabular, tabperiod or none in config file stpcustom.csv
// Tables not specified in csv are not logged
.stplg.logname.custom:{[dir;tab;p] .stplg.logname[.stplg.custommode tab][dir;tab;p] };
// If in error mode, create an error log name using .stplg.errorlogname
.stplg.logname.error:{[dir;ename;p] ` sv (hsym dir;`$raze string (.proc.procname;"_";ename),.stplg.gentimeformat[p]) };
// Update and timer functions in three batch modes ////////////////////////////////////
// preserve pre-existing definitions
upd:@[value;`.stplg.upd;enlist[`]!enlist ()];
zts:@[value;`.stplg.zts;enlist[`]!enlist ()];
// Functions to add columns on updates
updtab:@[value;`.stplg.updtab;enlist[`]!enlist {(enlist(count first x)#y),x}]
// If set to memorybatch, publish and write to disk will be run in batches
// insert to table in memory, on a timer flush the table to disk and publish, update counts
upd[`memorybatch]:{[t;x;now]
t insert updtab[t] . (x;now);
};
zts[`memorybatch]:{
{[t]
if[count value t;
`..loghandles[t] enlist (`upd;t;value flip value t);
@[`.stplg.msgcount;t;+;1];
@[`.stplg.rowcount;t;+;count value t];
.stpps.pubclear[t]];
}each .stpps.t;
};
// Standard batch mode - write to disk immediately, publish in batches
upd[`defaultbatch]:{[t;x;now]
t insert x:.stplg.updtab[t] . (x;now);
`..loghandles[t] enlist(`upd;t;x);
// track tmp counts, and add these after publish
@[`.stplg.tmpmsgcount;t;+;1];
@[`.stplg.tmprowcount;t;+;count first x];
};
zts[`defaultbatch]:{
// publish and clear all tables, increment counts
.stpps.pubclear[.stpps.t];
// after data has been published, updated the counts
.stplg.msgcount+:.stplg.tmpmsgcount;
.stplg.rowcount+:.stplg.tmprowcount;
// reset temp counts
.stplg.tmpmsgcount:.stplg.tmprowcount:()!();
};
// Immediate mode - publish and write immediately
upd[`immediate]:{[t;x;now]
x:updtab[t] . (x;now);
`..loghandles[t] enlist(`upd;t;x);
x:$[0h>type last x;enlist;flip] .stpps.tabcols[t]!x;
@[`.stplg.msgcount;t;+;1];
@[`.stplg.rowcount;t;+;count x];
.stpps.pub[t;x]
};
zts[`immediate]:{}
//////////////////////////////////////////////////////////////////////////////////////
// Functions to obtain logs for client replay ////////////////////////////////////////
// replaylog called from client-side, returns nested list of logcounts and lognames
replaylog:{[t]
getlogs[replayperiod][t]
}
// alternative replay allows for 'pass through logging'
// if SCTP not producing logs, subscribers replay from STP log files
if[.sctp.loggingmode=`parent;
replaylog:{[t]
.sctp.tph (`.stplg.replaylog; t)
}
]
getlogs:enlist[`]!enlist ()
// If replayperiod set to `period, only replay logs for current logging period
getlogs[`period]:{[t]
distinct flip (.stplg.msgcount;exec tbl!logname from `..currlog where tbl in t)@\:t
};
// If replayperiod set to `day, replay all of today's logs
getlogs[`day]:{[t]
// set the msgcount to 0Wj for all logs which have closed
lnames:select seq,tbls,logname,msgcount:0Wj from .stpm.metatable where any each tbls in\: t;
// Meta table does not store counts for live logs, so these are populated here
lnames:update msgcount:sum each .stplg.msgcount[tbls] from lnames where seq=.stplg.i;
flip value exec `long$msgcount,logname from lnames
};
//////////////////////////////////////////////////////////////////////////////////////
// Open log for a single table at start of logging period
openlog:{[multilog;dir;tab;p]
lname:logname[multilog][dir;tab;p];
.lg.o[`openlog;"opening logfile: ",string lname];
h:$[(notexists:not type key lname)or null h0:exec first handle from `..currlog where logname=lname;
[if[notexists;.[lname;();:;()]];hopen lname];
h0
];
`..currlog upsert (tab;lname;h);
};
// Error log for failed updates in error mode
openlogerr:{[dir]
lname:.[.stplg.logname.error;(dir;.stplg.errorlogname;.z.p+.eodtime.dailyadj);{.lg.e[`openlogerr;"failed to make error log: ",x]}];
if[not type key lname;.[lname;();:;()]];
h:@[{hopen x};lname;{.lg.e[`openlogerr;"failed to open handle to error log with error: ",x]}];
`..currlog upsert (errorlogname;lname;h);
};
// Log failed message and error type in error mode
badmsg:{[e;t;x]
.lg.o[`upd;"Bad message received, error: ",e];
`..loghandles[errorlogname] enlist(`upderr;t;x);
};
closelog:{[tab]
if[null h:`..currlog[tab;`handle];.lg.o[`closelog;"no open handle to log file"];:()];
.lg.o[`closelog;"closing log file ",string `..currlog[tab;`logname]];
@[hclose;h;{.lg.e[`closelog;"handle already closed"]}];
update handle:0N from `..currlog where tbl=tab;
};
// Roll all logs at end of logging period
rolllog:{[multilog;dir;tabs;p]
.stpm.updmeta[multilog][`close;tabs;p];
closelog each tabs;
@[`.stplg.msgcount;tabs;:;0];
{[m;d;t]
.[openlog;(m;d;t;currperiod);
{.lg.e[`stp;"failed to open log for table ",string[y],": ",x]}[;t]]
}[multilog;dir;]each tabs;
.stpm.updmeta[multilog][`open;tabs;p];
};
// Creates dictionary of process data to be used at endofday/endofperiod - configurable but default provided
endofdaydata:@[value;`.stplg.endofdaydata;{ {`proctype`procname`tables!(.proc.proctype;.proc.procname;.stpps.t)} }];
// endofperiod function defined in SCTP
// passes on eop messages to subscribers and rolls logs
endofperiod:{[currentpd;nextpd;data]
.lg.o[`endofperiod;"flushing remaining data to subscribers and clearing tables"];
.stpps.pubclear[.stplg.t];
.lg.o[`endofperiod;"executing end of period for ",.Q.s1 `currentperiod`nextperiod!(currentpd;nextpd)];
.stpps.endp[currentpd;nextpd;data]; // sends endofperiod message to subscribers
currperiod::nextpd; // increments current period
if[.sctp.loggingmode=`create;periodrollover[data]] // logs only rolled if in create mode
};
// stp runs function to send out end of period messages and roll logs
// eop log roll is stopped if eod is also going to be triggered (roll is not stopped in SCTP)
stpeoperiod:{[currentpd;nextpd;data;rolllogs]
.lg.o[`endofperiod;"flushing remaining data to subscribers and clearing tables"];
.stpps.pubclear[.stplg.t];
.lg.o[`stpeoperiod;"passing on endofperiod message to subscribers"];
.stpps.endp[currentpd;nextpd;data]; // sends endofperiod message to subscribers
currperiod::nextperiod; // increments current period
if[(data`p)>nextperiod::multilogperiod+currperiod;
system"t 0";'"next period is in the past"]; // timer off
getnextendUTC[]; // grabs next end time
if[rolllogs;periodrollover[data]]; // roll if appropriate
.lg.o[`stpeoperiod;"end of period complete, new values for current and next period are ",.Q.s1 (currentpd;nextpd)];
}
// common eop log rolling logic for STP and SCTP
periodrollover:{[data]
i+::1; // increments log seq number
rolllog[multilog;dldir;rolltabs;data`p];
}
// common eod for STP and SCTP to send out eod messages and roll logs
endofday:{[date;data]
.lg.o[`endofday;"flushing remaining data to subscribers and clearing tables"];
.stpps.pubclear[.stplg.t];
.lg.o[`endofday;"executing end of day for ",.Q.s1 .eodtime.d];
.stpps.end[date;data]; // sends endofday message to subscribers
dayrollover[data];
}
// common eod log rolling logic for STP and SCTP
dayrollover:{[data]
if[(data`p)>.eodtime.nextroll:.eodtime.getroll[data`p];
system"t 0";'"next roll is in the past"]; // timer off
getnextendUTC[]; // grabs next end time
.eodtime.d+:1; // increment current day
.stpm.updmeta[multilog][`close;logtabs;(data`p)+.eodtime.dailyadj]; // update meta tables
.stpm.metatable:0#.stpm.metatable;
closelog each logtabs; // close current day logs
init[string .proc.procname]; // reinitialise process
.lg.o[`dayrollover;"end of day complete, new value for date is ",.Q.s1 .eodtime.d];
}
// get the next end time to compare to
getnextendUTC:{nextendUTC::-1+min(.eodtime.nextroll;nextperiod - .eodtime.dailyadj)}
checkends:{
// jump out early if don't have to do either
if[nextendUTC > x; :()];
// check for endofperiod
if[nextperiod < x1:x+.eodtime.dailyadj; stpeoperiod[.stplg`currperiod;.stplg`nextperiod;.stplg.endofdaydata[],(enlist `p)!enlist x1;not .eodtime.nextroll < x]];
// check for endofday
if[.eodtime.nextroll < x;if[.eodtime.d<("d"$x)-1;system"t 0";'"more than one day?"]; endofday[.eodtime.d;.stplg.endofdaydata[],(enlist `p)!enlist x]];
};
|
Debugging¶
| command | effect |
|---|---|
| q)) | extra right parens mark suspended execution/s |
| 'myerror | signal an error, cut back the stack |
| :r | exit suspended function with r as result |
| \ | abort execution and exit debugger |
| .Q.bt | dump backtrace |
| & | current frame information |
| .Q.trp | extends Trap At to collect backtrace |
| -e, \e | error-trap mode |
Errors¶
Uncaught errors are printed as follows (without the comments). Since V3.5.
q)2+"hi"
'type / error string
[0] 2+"hi" / stack frame index and source code
^ / caret indicates the primitive that failed
This will be augmented with file:line and function name, if such information is available.
q)myfun"hi" / myfun defined in test.q and loaded with \l
'type
[1] /kdb+3.5/test.q:5: myfun:{2+x} / note the full path name
^
Nested anonymous lambdas will inherit their enclosing function's name with the @ suffix.
q)f0:{{("hi";x+y)}[x*2;"there"]}
q)f0[2]
'type
[2] f0@:{("hi";x+y)}
^
q)\
A name error (a global used as a local) is detected by the bytecode compiler and has location info.
q){a::1;a:1}
'a
[0] {a::1;a:1}
^
Debugger¶
Usually when an error happens inside a lambda the execution is suspended and you enter the debugger, as indicated by the additional ) following the normal q) prompt.
q)f:{g[x;2#y]}
q)g:{a:x*2;a+y}
q)f[3;"hello"]
'type
[2] g:{a:x*2;a+y}
^
q))
The debug prompt allows operating on values defined in the local scope.
q))a*4
24
You can use ` and . freely to navigate up and down the stack.
q))` / up
[1] f:{g[x;2#y]}
^
q))`
[0] f[3;"hello"]
^
q)). / down
[1] f:{g[x;2#y]}
^
q))
In a debugger session, .z.ex and .z.ey are set to the failed primitive and its argument list.
q)).z.ex
+
q)).z.ey
6
"he"
Signal¶
'err will signal err from the deepest frame available, destroying it.
q))'myerror
'myerror
[1] f:{g[x;2#y]}
^
q))
Resume¶
When execution is suspended, :e resumes with e as the result of the failed operation. e defaults to null (::).
q)read0`:test.q
"/ test script"
"a:b:0"
"func:{1+x}"
"a:func`a"
"b:1"
q)\l test.q
'type
[3] <full path to file>/test.q:3: func:{1+x}
^
q)):42 / result of 1+x
q)a
42
q)b
1
Note that resume does not return from enclosing function
q){0N!"x+1 is ",string x+1;x}`asd
'type
[1] {0N!"x+1 is ",string x+1;x}
^
q)):17
"x+1 is 17"
`asd
Abort¶
Use \ to exit the debugger and abort execution.
q))\
q)
Debuggers may nest if an expression entered into a debug prompt signals an error. Nesting level is indicated by appending further parentheses to the q)) prompt. Each \ exits a single debug level.
q)){x+y}[a;y]
'type
[5] {x+y}
^
q)))x
6
q)))\ / exit the inner debugger
q))\ / exit the outer debugger
q)
Stack frames¶
Backtrace¶
.Q.bt[] will dump the backtrace to stdout at any point during execution or debug. It will highlight the current stack frame with >>. (Since V4.0 2020.03.17.)
q)g:{a:x*2;a+y}
q)f:{{.Q.bt[];x*2}x+1}
q)f 4
[2] f@:{.Q.bt[];x*2}
^
[1] f:{{.Q.bt[];x*2}x+1}
^
[0] f 4
^
10
q)g[3;"hello"]
'type
[1] g:{a:x*2;a+y}
^
q)).Q.bt[]
>>[1] g:{a:x*2;a+y}
^
[0] g[3;"hello"]
^
The debugger itself occupies a stack frame, but its source is hidden.
Where¶
Debugger command & displays current frame information. (Since V4.0 2020.03.17.)
q))&
'type
[1] g:{a:x*2;a+y}
^
Context¶
The debugger restores the original namespace and language (q or k) setting for each frame.
View calculations and system commands, including \l, correspond to individual debug stack frames.
.d1 ).Q.bt`
>>[3] t0.k:8: va::-a
^
[2] t1.q:8: vb::va*3
^
[1] t1.q:7: vc::vb+2
^
[0] 2+vc
^
Trap¶
.Q.trp[f;x;g] extends trap at (@[f;x;g]) to collect backtrace. Along with the error string, g gets called with the backtrace object as a second argument. You can format it with .Q.sbt to make it legible.
q)f:{`hello+x}
q) / print the formatted backtrace and error string to stderr
q).Q.trp[f;2;{2@"error: ",x,"\nbacktrace:\n",.Q.sbt y;-1}]
error: type
backtrace:
[2] f:{`hello+x}
^
[1] (.Q.trp)
[0] .Q.trp[f;2;{2@"error: ",x,"\nbacktrace:\n",.Q.sbt y;-1}]
^
-1
q)
.Q.trp can be used for remote debugging.
q)h:hopen`::5001 / f is defined on the remote
q)h"f `a"
'type / q's ipc protocol can only get the error string back
[0] h"f `a"
^
q) / a made up protocol: (0;result) or (1;backtrace string)
q)h".z.pg:{.Q.trp[(0;)@value@;x;{(1;.Q.sbt y)}]}"
q)h"f 3"
0 / result
,9 9 9
q)h"f `a"
1 / failure
" [4] f@:{x*y}\n ^\n [3..
q)1@(h"f `a")1; / output the backtrace string to stdout
[4] f@:{x*y}
^
[3] f:{{x*y}[x;3#x]}
^
[2] f `a
^
[1] (.Q.trp)
[0] .z.pg:{.Q.trp[(0;)@enlist value@;x;{(1;.Q.sbt y)}]}
^
Errors thrown by parse show up in .Q.trp with location information.
q).Q.trp[parse;"2+2;+2";{2@.Q.sbt 2#y}];
[3] 2+2;+2
^
[2] (.q.parse)
Error trap modes¶
At any point during execution, the behavior of Signal (') is determined by the internal error-trap mode:
0 abort execution (set by Trap: @ or .)
1 suspend execution and run the debugger
2 collect stack trace and abort (set by .Q.trp)
Mode 2 (dump stack trace) is now the default for loading scripts non-interactively (e.g. with -q).
During abort, the stack is unwound up to the nearest trap (@ or . or .Q.trp). The error-trap mode is always initially set to 1 for console input and to 0 for sync message processing.
\e sets the mode applied before async and HTTP callbacks run. Thus, \e 1 will cause the relevant handlers to break into the debugger, while \e 2 will dump the backtrace either to the server console (for async), or into the socket (for HTTP).
q)\e 2
q)'type / incoming async msg signals 'type
[2] f@:{x*y}
^
[1] f:{{x*y}[x;3#x]}
^
[0] f `a
^
q)\e 1
q)'type
[2] f@:{x*y}
^
q)) / the server is suspended in a debug session
Keywords
Q is an embedded domain-specific language. Many of its keywords are defined as lambdas or projections, and can suspend as described.
See also: Display, show; Q for Mortals 3: §10.2 Debugging
Dictionaries¶
Lists and dictionaries¶
A list is a mapping from its indexes to its items: v:1040 59 27
maps
0 -> 1040
1 -> 59
2 -> 27
A dictionary is a mapping from a list of keys to a list of values.
q)show d:`tom`dick`harry!1040 59 27
tom | 1040
dick | 59
harry| 27
The indexes of v are 0 1 2. The indexes of d are `tom`dick`harry.
The values of v and d are the same.
q)value d
1040 59 27
q)value `v
1040 59 27
Construction¶
Use Dict to make a dictionary from a list of keys and a list of values.
q)show d:`a`b`c!1 2 3
a| 1
b| 2
c| 3
The lists must be the same length. The keys should be unique (no duplicates) but no error is signalled if duplicates are present.
Avoid duplicating keys in a dictionary or (column names in a) table.
Q does not reject duplicate keys, but operations on dictionaries and tables with duplicate keys are undefined.
If you know the keys are unique you can set the u attribute on them.
(`u#`a`b`c)!100 200 300
The dictionary will then function as a hash table – and indexing will be faster.
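A quick way to see the difference yourself (an illustrative comparison added here, not from the original page; timings are machine-dependent):
q)ks:neg[1000000]?`8               / a million distinct random keys
q)plain:ks!til 1000000             / ordinary dictionary
q)hashed:(`u#ks)!til 1000000       / same mapping with hashed keys
q)\ts:10000 plain last ks          / lookup in the plain dictionary
q)\ts:10000 hashed last ks         / hash lookup is typically much faster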
Items of the key and value lists can be of any datatype, including dictionaries or tables.
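For instance (a small illustration added here), the values can themselves be dictionaries, and such a structure can be indexed at depth:
q)show nested:`x`y!((`a`b!1 2);(`c`d!3 4))
x| `a`b!1 2
y| `c`d!3 4
q)nested[`x;`b]
2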
Keys and values¶
q)key d
`a`b`c
q)value d
1 2 3
Keywords key and value return the key and value lists respectively.
Indexing¶
A dictionary is a mapping from its key items to its value items.
A list is a mapping from its indexes to its items. If the indexes of a list are its keys, it is unsurprising to find a dictionary is indexed by its keys.
q)k:`a`b`c`d`e
q)v:10 20 30 40 50
q)show dic:k!v
a| 10
b| 20
c| 30
d| 40
e| 50
q)dic[`d`b]
40 20
q)v[3 1]
40 20
Nor is it surprising that we can omit index brackets the same way.
q)dic `d`b
40 20
q)v 3 1
40 20
Indexing out of the domain works as for lists, returning a null of the same type as the first value item.
q)v 5
0N
q)dic `x
0N
But unlike a list, indexed assignment to a dictionary has upsert semantics.
q)v[5 1]:42 100
'length
[0] v[5 1]:42 100
^
q)dic[`x`b]:42 100
q)dic
a| 10
b| 100
c| 30
d| 40
e| 50
x| 42
Dictionary indexing uses Find to search the keys.
q)d:k!v
q)d[x] ~ v[k?x]
1b
where and Find¶
Find and where both return indexes from lists. Also from dictionaries.
q)d:`a`b`c`d!10 20 30 10
q)where d=10
`a`d
q)d?30
`c
Reverse dictionary lookup: use Find for the key of the first matching value, or where for all of them.
q)dns:`netbox`google`apple!`$("104.130.139.23";"216.58.212.206";"17.172.224.47")
q)dns `apple
`17.172.224.47
q)dns?`$"17.172.224.47"
`apple
q)where dns=`$"17.172.224.47"
,`apple
Order¶
Dictionaries are ordered.
q)first dic
10
q)last dic
42
q)k:`a`b`c
q)v:1 2 3
q)(k!v) ~ reverse[k]!reverse v
0b
Taking and dropping from a dictionary¶
Dictionaries are ordered, so you can take and drop items from either end of them.
q)d
a| 10
b| 20
c| 30
d| 10
q)-2#d
c| 30
d| 10
q)-1 _ d
a| 10
b| 20
c| 30
You can also take and drop selected items.
q)`b`d#d
b| 20
d| 10
q)`b`x _ d
a| 10
c| 30
d| 10
Joining dictionaries¶
Join on dictionaries has upsert semantics.
q)(`a`b`c!10 20 30),`c`d!400 500
a| 10
b| 20
c| 400
d| 500
Empty and singleton dictionaries¶
Just like a list, a dictionary may be empty or have a single item. But its key and value must still be lists.
q)()!() / general empty dictionary
q)(`symbol$())!`float$() / typed empty dictionary
q)sd:(enlist `a)!enlist 1 / singleton dictionary
a| 1
q)key sd
,`a
q)value sd
,1
Column dictionaries¶
When a dictionary’s value items are all same-length lists, it is a column dictionary.
q)show bd:`name`dob`sex!(`jack`jill`john;1982.09.15 1984.07.05 1990.11.16;`m`f`m)
name| jack jill john
dob | 1982.09.15 1984.07.05 1990.11.16
sex | m f m
Flip it and we see a table.
q)flip bd
name dob sex
-------------------
jack 1982.09.15 m
jill 1984.07.05 f
john 1990.11.16 m
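Since flip of a table is the column dictionary (and vice versa), the two forms round-trip; a quick check, added for illustration:
q)bd ~ flip flip bd
1b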
See also: Step dictionaries, Tables, Q for Mortals §5. Dictionaries
Enumerations¶
For a long list containing few distinct values, an enumeration can reduce storage requirements. The ‘manual’ way to create an enum (for understanding, not recommended):
q)y:`a`b`c`b`a`b`c`c`c`c`c`c`c
q)x:`a`b`c
q)show e:"i"$x?y;
0 1 2 1 0 1 2 2 2 2 2 2 2i /these values are what we store instead of y.
q)x e /get back the symbols any time from x and e.
`a`b`c`b`a`b`c`c`c`c`c`c`c
q)`x!e / same result as `x$y
`x$`a`b`c`b`a`b`c`c`c`c`c`c`c
Create, extend and resolve enumerations using these operators:
| operator | name | semantics |
|---|---|---|
| $ | Enumerate | create an enumeration |
| ? | Enum Extend | extend an enumeration |
| ! | Enumeration | resolve values from an enumeration |
Q for Mortals §7.5 Enumerations
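As a brief worked illustration (added here) of the three operators, reusing the list x defined above:
q)x
`a`b`c
q)`x$`a`b`a          / Enumerate: every item must already be in x
`x$`a`b`a
q)`x?`a`d            / Enum Extend: appends `d to x because it is missing
`x$`a`d
q)x
`a`b`c`d
q)`x!0 1 2           / Enumeration: resolve indexes against x
`x$`a`b`c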
|
// @kind function
// @category saveReport
// @desc Generate a report using the Python package 'reportlab'.
// This report outlines the results from a timed + dated run of automl.
// @param params {dictionary} All data generated during the process
// @param filePath {string} Location to save report
// @return {::} Associated pdf report saved to disk
saveReport.reportlabGenerate:{[params;filePath]
// Main variables
bestModel:params`modelName;
modelMeta:params`modelMetaData;
config:params`config;
pdf:saveReport.i.canvas[`:Canvas]pydstr filePath,".pdf";
ptype:$[`class~config`problemType;"classification";"regression"];
plots:params`savedPlots;
// Report generation
// Title
f:saveReport.i.title[pdf;775;0;"kdb+/q AutoML Procedure Report";
"Helvetica-BoldOblique";15];
// Summary
f:saveReport.i.text[pdf;f;40;"This report outlines the results for a ",ptype,
" problem achieved through running kdb+/q AutoML.";"Helvetica";11];
f:saveReport.i.text[pdf;f;30;"This run started on ",string[config`startDate],
" at ",string[config`startTime],".";"Helvetica";11];
// Input data
f:saveReport.i.text[pdf;f;30;"Description of Input Data";"Helvetica-Bold";
13];
f:saveReport.i.text[pdf;f;30;"The following is a breakdown of information",
" for each of the relevant columns in the dataset:";"Helvetica";11];
ht:saveReport.i.printDescripTab params`dataDescription;
f:saveReport.i.makeTable[pdf;ht`t;f;ht`h;10;10];
f:saveReport.i.image[pdf;plots`target;f;250;280;210];
f:saveReport.i.text[pdf;f;25;"Figure 1: Distribution of input target data";
"Helvetica";10];
// Feature extraction and selection
f:saveReport.i.text[pdf;f;30;"Breakdown of Pre-Processing";"Helvetica-Bold";
13];
numSig:count params`sigFeats;
f:saveReport.i.text[pdf;f;30;@[string config`featureExtractionType;0;upper],
" feature extraction and selection was performed with a total of ",
string[numSig],
" feature",$[1~numSig;;"s",]" produced.";"Helvetica";11];
f:saveReport.i.text[pdf;f;30;"Feature extraction took ",
string[params`creationTime], " time in total.";"Helvetica";11];
// Cross validation
f:saveReport.i.text[pdf;f;30;"Initial Scores";"Helvetica-Bold";13];
xvalFunc:string config[`crossValidationFunction];
xvalSize:config[`crossValidationArgument];
xvalType:`$last"."vs xvalFunc;
xval:$[xvalType in`mcsplit`pcsplit;
"Percentage based cross validation, ",xvalFunc,
", was performed with a testing set created from ",
string[100*xvalSize],"% of the training data.";
string[xvalSize],"-fold cross validation was performed on the training",
" set to find the best model using ",xvalFunc,"."
];
f:saveReport.i.text[pdf;f;30;xval;"Helvetica";11];
f:saveReport.i.image[pdf;plots`data;f;90;500;100];
f:saveReport.i.text[pdf;f;25;"Figure 2: The data split used within this",
" run of AutoML, with data split into training, holdout and testing sets";
"Helvetica";10];
f:saveReport.i.text[pdf;f;30;"The total time taken to carry out cross",
" validation for each model on the training set was ",
string[modelMeta`xValTime];"Helvetica";11];
f:saveReport.i.text[pdf;f;15;"where models were scored and optimized using ",
string[modelMeta`metric],".";"Helvetica";11];
f:saveReport.i.text[pdf;f;30;"Model scores:";"Helvetica";11];
// Feature impact
f:saveReport.i.printKDBTable[pdf;f;modelMeta`modelScores];
f:saveReport.i.image[pdf;plots`impact;f;250;280;210];
f:saveReport.i.text[pdf;f;25;"Figure 3: Feature impact of each significant",
" feature as determined by the training set";"Helvetica";10];
// Run models
f:saveReport.i.text[pdf;f;30;"Model selection summary";"Helvetica-Bold";13];
f:saveReport.i.text[pdf;f;30;"Best scoring model = ",string bestModel;
"Helvetica";11];
f:saveReport.i.text[pdf;f;30;"The score on the holdout set for this model",
" was = ", string[ modelMeta`holdoutScore],".";"Helvetica";11];
f:saveReport.i.text[pdf;f;30;"The total time taken to complete the running",
" of this model on the holdout set was: ",
string[modelMeta`holdoutTime],".";"Helvetica";11];
// Hyperparameter search
srch:config`hyperparameterSearchType;
hptyp:$[srch=`grid;
"grid";
srch in`random`sobol;
"random";
'"inappropriate type"
];
hpFunc:string config`$hptyp,"SearchFunction";
hpSize:string config`$hptyp,"SearchArgument";
hpMethod:`$last"."vs hpFunc;
f:saveReport.i.text[pdf;f;30;"Best Model";"Helvetica-Bold";13];
if[not bestModel in utils.excludeList;
f:saveReport.i.text[pdf;f;30;;"Helvetica";11]$[hpMethod in`mcsplit`pcsplit;
"The hyperparameter search was completed using ",hpFunc,
" with a percentage of ",hpSize,"% of training data used for validation";
"A ",hpSize,"-fold ",lower[hptyp]," search was performed on the",
" training set to find the best model using, ",hpFunc,"."];
f:saveReport.i.text[pdf;f;30;"The following are the hyperparameters",
" which have been deemed optimal for the model:";"Helvetica";11];
f:saveReport.i.printKDBTable[pdf;f;params`hyperParams];
];
// Final results
f:saveReport.i.text[pdf;f;30;"The score for the best model fit on the",
" entire training set and scored on the testing set was = ",
string params`testScore;"Helvetica";11];
$[ptype like"*class*";
[f:saveReport.i.image[pdf;plots`conf;f;300;250;250];
saveReport.i.text[pdf;f;25;"Figure 4: This is the confusion matrix",
" produced for predictions made on the testing set";"Helvetica";10]
];
[f:saveReport.i.image[pdf;plots`reg;f;300;250;250];
saveReport.i.text[pdf;f;25;"Figure 4: Regression analysis plot produced",
" for predictions made on the testing set";"Helvetica";10]
];
];
pdf[`:save][];
}
================================================================================
FILE: ml_automl_code_nodes_saveReport_reportlab_utils.q
SIZE: 3,513 characters
================================================================================
// code/nodes/saveReport/reportlab/utils.q - Utilities reportlab
// Copyright (c) 2021 Kx Systems Inc
//
// Utilities used for the generation of a reportlab PDF
\d .automl
// Python imports
saveReport.i.canvas:.p.import`reportlab.pdfgen.canvas
saveReport.i.table:.p.import[`reportlab.platypus]`:Table
saveReport.i.np:.p.import`numpy
// @kind function
// @category saveReportUtility
// @desc Convert kdb description table to printable format
// @param tab {table} kdb table to be converted
// @return {dictionary} Table and corresponding height
saveReport.i.printDescripTab:{[tab]
dti:10&count tab;
h:dti*27-dti%2;
tab:value d:dti#tab;
tab:.ml.df2tab .ml.tab2df[tab][`:round]3;
t:enlist[enlist[`col],cols tab],key[d],'flip value flip tab;
`h`t!(h;t)
}
// @kind function
// @category saveReportUtility
// @desc Convert kdb table to printable format
// @param pdf {<} PDF gen module used
// @param f {int} The placement height from the bottom of the page
// @param tab {table} kdb table to be converted
// @return {int} The placement height from the bottom of the page
saveReport.i.printKDBTable:{[pdf;f;tab]
dd:{(,'/)string(key x;count[x]#" ";count[x]#"=";count[x]#" ";value x)}tab;
cntf:first[count dd]{[m;h;s]ff:saveReport.i.text[m;h 0;15;s h 1;
"Helvetica";11];
(ff;1+h 1)}[pdf;;dd]/(f-5;0);
first cntf
}
// @kind function
// @category saveReportUtility
// @desc Add text to report
// @param m {<} Pdf gen module used
// @param h {int} The placement height from the bottom of the page
// @param i {int} How far below is the text
// @param txt {string} Text to include
// @param f {string} Font name
// @param s {int} Font size
// @return {int} The placement height from the bottom of the page
saveReport.i.text:{[m;h;i;txt;f;s]
if[(h-i)<100;h:795;m[`:showPage][];];
m[`:setFont][pydstr f;s];
m[`:drawString][30;h-:i;pydstr txt];
h
}
// @kind function
// @category saveReportUtility
// @desc Add title to report
// @param m {<} Pdf gen module used
// @param h {int} The placement height from the bottom of the page
// @param i {int} How far below is the text
// @param txt {string} Text to include
// @param f {string} Font name
// @param s {int} Font size
// @return {int} The placement height from the bottom of the page
saveReport.i.title:{[m;h;i;txt;f;s]
if[(h-i)<100;h:795;m[`:showPage][]];
m[`:setFont][pydstr f;s];
m[`:drawString][150;h-:i;pydstr txt];
h
}
// @kind function
// @category saveReportUtility
// @desc Add image to report
// @param m {<} Pdf gen module used
// @param fp {string} Filepath
// @param h {int} The placement height from the bottom of the page
// @param i {int} How far below is the text
// @param wi {int} Image width
// @param hi {int} Image height
// @return {int} The placement height from the bottom of the page
saveReport.i.image:{[m;fp;h;i;wi;hi]
if[(h-i)<100;h:795;m[`:showPage][]];
m[`:drawImage][pydstr fp;40;h-:i;wi;hi];
h
}
// @kind function
// @category saveReportUtility
// @desc Add table to report
// @param m {<} Pdf gen module used
// @param t {<} Pandas table
// @param h {int} The placement height from the bottom of the page
// @param i {int} How far below is the text
// @param wi {int} Image width
// @param hi {int} Image height
// @return {int} The placement height from the bottom of the page
saveReport.i.makeTable:{[m;t;h;i;wi;hi]
if[(h-i)<100;h:795;m[`:showPage][]];
t:saveReport.i.table saveReport.i.np[`:array][t][`:tolist][];
t[`:wrapOn][m;wi;hi];
t[`:drawOn][m;30;h-:i];
h
}
================================================================================
FILE: ml_automl_code_nodes_saveReport_saveReport.q
SIZE: 745 characters
================================================================================
// code/nodes/saveReport/saveReport.q - Save report node
// Copyright (c) 2021 Kx Systems Inc
//
// Save report summarizing automl pipeline results
\d .automl
// @kind function
// @category node
// @desc Save a Python generated report summarizing the process of
// reaching the users final model via pyLatex/reportlab
// @param params {dictionary} All data generated during the preprocessing and
// prediction stages
// @return {::} Report saved to a location defined by run date and time
saveReport.node.function:{[params]
if[2<>params[`config]`saveOption;:()];
params:saveReport.reportDict params;
saveReport.saveReport params
}
// Input information
saveReport.node.inputs:"!"
// Output information
saveReport.node.outputs:"!"
================================================================================
FILE: ml_automl_code_nodes_selectModels_funcs.q
SIZE: 2,666 characters
================================================================================
// code/nodes/selectModels/funcs.q - Functions called in selectModels node
// Copyright (c) 2021 Kx Systems Inc
//
// Definitions of the main callable functions used in the application of
// .automl.selectModels
\d .automl
|
Four is magic¶
Convergence and finite-state machines
The Scan form of the Converge iterator generates the converging sequence of string lengths.
Modifying the iterated function to return strings and their lengths yielded a list from the convergence from which the strings could be selected by Index.
A vector of pre-calculated string lengths constitutes a finite-state machine that generates the sequence three orders of magnitude faster than stringifying the integers as required.
7 code lines: no loops, counters, or control structures.
Generate a converging integer sequence
Another simple children’s game – and a popular programming challenge.
Start with a positive integer. Write it in English as a string, and count the length of the string. Print the result as e.g. “eleven is six” and use the count as the next number in the sequence. When you reach four, print “four is magic”. For example:
Eleven is six, six is three, three is five, five is four, four is magic.
Rosetta Code for full task details
We are going to need the small cardinal numbers, eventually as strings, but to start with, as symbols.
C:``one`two`three`four`five`six`seven`eight`nine`ten,
`eleven`twelve`thirteen`fourteen`fifteen`sixteen`seventeen`eighteen`nineteen
Finite-state machine¶
Experimenting with just these numbers yields useful insights.
q)show sl:count each string C
0 3 3 5 4 4 3 5 5 4 3 6 6 8 8 7 7 9 8 8
Every item of the vector is an index of it: the vector of string lengths constitutes a finite-state machine.
One and only one item matches its index: 4. Running the machine will converge there.
Four really is magic.
q)sl 11
6
q)sl 6
3
q)sl 3
5
q)sl 5
4
q)sl 4
4
q)sl\[11]
11 6 3 5 4
q)C sl\[11]
`eleven`six`three`five`four
The same syntax applies a function to an argument or a list to its indexes.
That means a finite-state machine represented as a list of its indexes can be iterated through its states without writing any further logic.
That applies not just to vectors of indexes but to any dictionary d for which all(value d)in key d is true.
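For example, any self-mapping dictionary converges the same way (a small illustration, not from the original article):
q)d:`a`b`c`d!`b`c`d`d
q)all(value d)in key d
1b
q)d\[`a]
`a`b`c`d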
We can see here the core of a solution, at least for as many cardinals as we have string versions. But whether we cache strings for all the cardinals we need, or generate them at need, either way we need a function that will stringify a number.
Outline solution¶
Expressing numbers on the short scale means we can analyze this problem into three parts. We shall work in symbols and cast to string only when everything else has been done.
st - Stringify a number below a thousand.
s - Break a number >1000 into 3-digit groups, and stringify each with st.
fim - Generate the converging sequence and format it.
Stringify a small number¶
Define st first. We shall need strings for multiples of ten.
/ tens
T:``ten`twenty`thirty`forty`fifty`sixty`seventy`eighty`ninety
If x<20 then st x is simply C x; otherwise, if x<100 then st x is (T;C)@'10 vs x; otherwise we use C to stringify the hundreds and st on the remainder.
st:{ / stringify <1000
$[x<20; C x;
x<100; (T;C)@'10 vs x;
{C[y],`hundred,$[z=0;`;x z]}[.z.s] . 100 vs x] }
q)st 456
`four`hundred`fifty`six
q)st 400
`four`hundred`
q)st 35
`thirty`five
Stringify a large number¶
The first move is st each 1000 vs x to break the number into 3-digit groups and stringify them.
q)1000 vs 12345678
12 345 678
q)st each 1000 vs 12345678
`twelve
`three`hundred`forty`five
`six`hundred`seventy`eight
Next, magnitudes.
/ magnitudes
M:``thousand`million`billion`trillion`quadrillion`quintillion`sextillion`septillion
q){x{$[x~`;x;x,y]}'M reverse til count x} st each 1000 vs 12345678
`twelve`million
`three`hundred`forty`five`thousand
`six`hundred`seventy`eight`
Finish it off.
s:{$[x=0; "zero";
{" "sv string except[;`]raze x{$[x~`;x;x,y]}'M reverse til count x}st each 1000 vs x]}
q)s 12345678
"twelve million three hundred forty five thousand six hundred seventy eight"
Iterate¶
Generating the convergence is now easy: stringify, count, repeat.
q)(count s@)\[12345678]
12345678 74 12 6 3 5 4
The composition (count s@) is equivalent to the lambda {count s x}.
Stringify the sequence and use Each Prior to format in pairs.
q)raze 1_{y," is ",x,", "}prior s each(count s@)\[12345678]
"twelve million three hundred forty five thousand six hundred seventy eight is seventy..
Finish off.
fim:{@[;0;upper],[;"four is magic.\n"]
raze 1_{y," is ",x,", "}prior s each(count s@)\[x]}
And test.
q)1 raze fim each 0 4 8 16 25 89 365 2586 25865 369854 40000000001;
Zero is four, four is magic.
Four is magic.
Eight is five, five is four, four is magic.
Sixteen is seven, seven is five, five is four, four is magic.
Twenty five is eleven, eleven is six, six is three, three is five, five is four, four is magic.
Eighty nine is eleven, eleven is six, six is three, three is five, five is four, four is magic.
Three hundred sixty five is twenty four, twenty four is eleven, eleven is six, six is three, three is five, five is four, four is magic.
Two thousand five hundred eighty six is thirty six, thirty six is ten, ten is three, three is five, five is four, four is magic.
Twenty five thousand eight hundred sixty five is forty five, forty five is ten, ten is three, three is five, five is four, four is magic.
Three hundred sixty nine thousand eight hundred fifty four is fifty eight, fifty eight is eleven, eleven is six, six is three, three is five, five is four, four is magic.
Forty billion one is seventeen, seventeen is nine, nine is four, four is magic.
Test your understanding¶
All those integers got stringified by (count s@)\ but we did not keep the strings. Instead we stringified the numbers a second time.
Replace s each(count s@)\ with an expression that applies s once only to each number.
Answer
The sequence is generated by the Converge iterator with the syntactic form f\[x]. In the solution above f is count s@. (The composition is equivalent to {count s x}.) That returns the count of the string.
All we have to do is include the string in the result without using it to generate the next iteration. Happily we are spared a test, because first on an atom is a no-op.
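A quick check of that claim (added for illustration): first applied to an atom returns the atom unchanged, while on a list it returns the first item.
q)first 42
42
q)first (5;"three")
5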
Our new f:
q)show q:{(count str;str:s first x)}\[12345678]
12345678
(74;"twelve million three hundred forty five thousand six hundred seventy eight")
(12;"seventy four")
(6;"twelve")
(3;"six")
(5;"three")
(4;"five")
(4;"four")
Note above that the 0th item of the result comes from applying f zero times to the argument 12345678, i.e. a no-op.
When you apply a function derived by the Scan form of the Converge, Do or While iterators, the first item of the result is always the derived function's argument unchanged. It follows that items of the resulting list might not have uniform type.
We can extract the strings with (1 _ q)[;1] but functional forms allow us to embed the extraction in the line without defining a variable.
{x[;1]} 1_{(count str;str:s first x)}\[12345678]
Of course, we do not need a lambda when we have Index.
.[;(::;1)]1_{(count str;str:s first x)}\[12345678]
Caching results¶
If you had to calculate a lot of these sequences you might prefer to cache some strings and stringify only numbers beyond the cache.
q)CL:count each CACHE:s each til 1000000
q)CL\[123456]
123456 56 9 4
q)CACHE CL\[123456]
"one hundred twenty three thousand four hundred fifty six"
"fifty six"
"nine"
"four"
q)\ts:100000 .[;(::;1)] 1_{(count str;str:s first x)}\[123456]
4573 3040
q)\ts:10000 CACHE CL\[123456]
5 800
We see using the vector of string lengths as a finite-state machine is three orders of magnitude faster than stringifying the four numbers.
Review¶
The solution is small and well modularized, with three constants and three functions.
/ small cardinal numbers
C:``one`two`three`four`five`six`seven`eight`nine`ten,
`eleven`twelve`thirteen`fourteen`fifteen`sixteen`seventeen`eighteen`nineteen
/ tens
T:``ten`twenty`thirty`forty`fifty`sixty`seventy`eighty`ninety
/ magnitudes
M:``thousand`million`billion`trillion`quadrillion`quintillion`sextillion`septillion
st:{ / stringify <1000
$[x<20; C x;
x<100; (T;C)@'10 vs x;
{C[y],`hundred,$[z=0;`;x z]}[.z.s] . 100 vs x] }
/ stringify
s:{$[x=0; "zero";
{" "sv string except[;`]raze x{$[x~`;x;x,y]}'M reverse til count x} st each 1000 vs x] }
/ four is magic
fim:{@[;0;upper],[;"four is magic.\n"] raze 1_{y," is ",x,", "} prior
.[;(::;1)] 1_{(count str;str:s first x)}\[x] }
The sequence is generated by applying a function with the Converge iterator. The function stringifies an integer argument and returns the string length.
Modifying the iterated function to return both the string and its length avoids having to stringify the numbers a second time.
Caching strings yields a vector of string lengths that can be used with Converge as a finite-state machine, three orders of magnitude faster than stringifying numbers on the fly.
|
/ Enables the 'ticking' cron mode
/ NOTE: Does not validate the configured ticking mode
/ @see .cron.cfg.timerInterval
.cron.mode.ticking:{
.log.if.info "Enabling cron job scheduler [ Mode: Ticking ] [ Timer Interval: ",string[.cron.cfg.timerInterval]," ms ] [ Historical Start Times: ",string[.cron.cfg.historicalStartTimes]," ]";
system "t ",string .cron.cfg.timerInterval;
};
/ Enables the 'tickless' cron mode
/ NOTE: Does not validate the configured ticking mode
/ @see .cron.i.setNextTick
.cron.mode.tickless:{
.log.if.info "Enabling cron job scheduler [ Mode: Tickless ] [ Historical Start Times: ",string[.cron.cfg.historicalStartTimes]," ]";
.cron.i.setNextTick[];
};
================================================================================
FILE: kdb-common_src_csv.q
SIZE: 2,239 characters
================================================================================
// CSV Parsing and Writing Functions
// Copyright (c) 2016 - 2017 Sport Trades Ltd
// Documentation: https://github.com/BuaBook/kdb-common/wiki/csv.q
.require.lib each `type`util`convert;
/ Attempts to load a CSV based on the specified column types, ignoring
/ empty lines and comment lines (lines beginning with a forward slash)
/ @param types (String) The types of each column
/ @param path (FilePath) The location of the file to load
/ @returns (Table) The CSV file as a table
/ @see .csv.parse
.csv.load:{[types;path]
if[not .type.isFilePath path;
'"IllegalArgumentException";
];
.log.if.info"Loading CSV file ",.convert.hsymToString path;
:.csv.parse[types;read0 path];
};
/ Parses CSV data based on the specified column types, ignoring empty lines
/ and comment lines (lines beginning with a forward slash)
/ @param types (String) The types of each column
/ @param csvData (List) String list of file lines
/ @returns (Table) The CSV data as a table
/ @throws CorruptCsvDataException If there are any column lengths of the CSV data that mismatch
/ @throws TypesMismatchException If there are any missing columns based on the expected types
.csv.parse:{[types;csvData]
s:trim csvData;
str:s where(0<count each s)&not"/"=s[;0];
if[not all w:count[types]=c:1+sum each","=str;
$[any w;
'"CorruptCsvDataException";
'"TypesMismatchException (",string[first c]," expected)"
];
];
(types;enlist",")0:str
};
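/ Illustrative usage (not part of the library source): the comment and blank lines
/ are removed, and the first remaining line supplies the column names, e.g.
/   .csv.parse["SJF"; ("sym,qty,px"; "/ generated file"; ""; "abc,100,1.5"; "def,200,2.5")]
/ returns a two-row table with columns sym, qty and px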
/ Writes the specified table to the specified path in CSV format
/ @param path (FilePath) The path to save the CSV file
/ @param table (Table) The table to convert to CSV
/ @throws UnsupportedColumnTypeException If the table contains nested list columns
.csv.write:{[path;data]
if[(not .type.isTable data) | not .type.isFilePath path ;
'"IllegalArgumentException";
];
if[not .util.isEmpty keys data;
data:0!data;
];
if[any unsupported:" "~/:.Q.ty each .Q.V data;
'"UnsupportedColumnTypeException (",.convert.listToString[where unsupported],")";
];
.log.if.info "Saving CSV file [ Target: ",string[path]," ] [ Table Length: ",string[count data]," ]";
:path 0: .q.csv 0: data;
};
================================================================================
FILE: kdb-common_src_env.q
SIZE: 4,086 characters
================================================================================
// Environment Variable Manager
// Copyright (c) 2020 - 2021 Jaskirat Rajasansir
// Documentation: https://github.com/BuaBook/kdb-common/wiki/env.q
.require.lib each `type`convert`os;
/ Environment variables that should be loaded on library initialisation and optionally parsed by the specified parse function
/ Specify null symbol to cache the environment variable value on library init as returned by 'getenv'
/ @see .os.sharedObjectEnvVar
.env.cfg.vars:(`symbol$())!`symbol$();
.env.cfg.vars[`QHOME]: `.convert.stringToHsym;
.env.cfg.vars[`QLIC]: `.convert.stringToHsym;
.env.cfg.vars[`PATH]: `.env.i.parsePathTypeVar;
.env.cfg.vars[.os.sharedObjectEnvVar]: `.env.i.parsePathTypeVar;
/ The cached and optionally parsed environment variables
.env.cache:1#.q;
.env.init:{
.env.loadAllEnvVars[];
};
/ Loads all the pre-configured environment variables from the current shell. This can be called at any point to update all environment
/ variables in the cache
/ @see .env.cfg.vars
/ @see .env.i.loadEnvVar
.env.loadAllEnvVars:{
.log.if.info "Loading all configured environment variables [ Total: ",string[count .env.cfg.vars]," ]";
.env.i.loadEnvVar ./: flip (key; value) @\: .env.cfg.vars;
};
/ Queries the specified environment variable either from the cache or directly via 'getenv' if not pre-configured
/ @param envVar (Symbol) The environment variable to query
/ @returns () The raw environment variable or the parsed result
/ @throws EnvironmentVariableNotDefinedException If the environment variable is not set in the cache and an empty value from 'getenv'
/ @see .env.cache
/ @see getenv
.env.get:{[envVar]
if[not envVar in key .env.cache;
envVal:getenv envVar;
if[0 = count envVal;
'"EnvironmentVariableNotDefinedException";
];
:envVal;
];
:.env.cache envVar;
};
/ @returns (FilePath) The full path of the specified command as it exists within '$PATH' (matching the Linux 'which' command)
/ @throws CommandNotFoundException If the command does not exist in '$PATH'
/ @see .env.i.findInPathTypeVar
.env.which:{[cmd]
cmdPath:.env.i.findInPathTypeVar[cmd; `PATH];
if[null cmdPath;
'"CommandNotFoundException";
];
:cmdPath;
};
/ Loads and optionally parses the specified environment variable with the specified parse function reference
/ @param envVar (Symbol) The environment variable to parse
/ @param parseFunc (Symbol) Function reference for the parse function. If null, no parse is performed
/ @see .env.cache
.env.i.loadEnvVar:{[envVar; parseFunc]
.log.if.debug "Loading environment variable [ Variable: ",string[envVar]," ] [ Parse Function: ",string[`none ^ parseFunc]," ]";
envVal:getenv envVar;
if[not null parseFunc;
envVal:get[parseFunc] envVal;
];
.env.cache[envVar]:envVal;
};
/ Parses a '$PATH'-type environment variable into a list of folder paths for use within kdb+
/ NOTE: Only valid folders will be returned from this function
/ @param rawPath (String) The environment variable output
/ @returns (FolderPathList) The list of valid folders from the '$PATH'-type environment variable or empty symbol list if environment variable not set
/ @see .type.isFolder
.env.i.parsePathTypeVar:{[rawPath]
paths:.os.envPathSeparator vs rawPath;
if[0 = count paths;
:`symbol$();
];
paths:`$":",/:paths;
paths@:where .type.isFolder each paths;
:paths;
};
/ Finds the specified file within the folders defined in a '$PATH'-type environment variable
/ @param file (String|Symbol) The name of the file to find
/ @param pathVar (Symbol) The '$PATH'-type variable to search
/ @returns (Symbol) The full path of the file or null symbol if not found
/ @see .env.get
.env.i.findInPathTypeVar:{[file; pathVar]
file:.type.ensureSymbol file;
paths:.env.get pathVar;
toFind:paths!key each paths;
toFind:where file in/: toFind;
if[0 = count toFind;
:`;
];
:` sv first[toFind],file;
};
================================================================================
FILE: kdb-common_src_event.q
SIZE: 5,224 characters
================================================================================
// Internal Event Management
// Copyright (c) 2017 Sport Trades Ltd
// Documentation: https://github.com/BuaBook/kdb-common/wiki/event.q
.require.lib each `ns`time`convert;
/ Event names and functions to bind to. These "core" handlers are specified on initialisation
/ if the functions are not already in use
/ @see .event.init
.event.cfg.coreHandlers:()!();
.event.cfg.coreHandlers[`port.open]:`.z.po;
.event.cfg.coreHandlers[`port.close]:`.z.pc;
.event.cfg.coreHandlers[`websocket.open]:`.z.wo;
.event.cfg.coreHandlers[`websocket.close]:`.z.wc;
.event.cfg.coreHandlers[`process.exit]:`.z.exit;
/ Allow the event system to add "process is exiting" logging when the process exits
/ @see .event.i.defaultExitHandler
.event.cfg.addDefaultExitHandler:1b;
/ The primary mapping of events to the listener functions that will be notified when the event
/ is fired
/ @see .event.fire
.event.handlers:(`symbol$())!();
.event.init:{
.event.installHandler ./: flip (key;value)@\:.event.cfg.coreHandlers;
if[.event.cfg.addDefaultExitHandler;
.event.addListener[`process.exit; `.event.i.defaultExitHandler];
];
};
/ "Fire" an event. This executes all listener functions assigned to that event. Listener functions
/ are executed in the order they were added into the event management library. If any listeners fail
/ to execute they will be logged after all listeners have completed.
/ @param event (Symbol) The event to fire
/ @param args () The arguments to pass to each listener function
/ @see .ns.protectedExecute
.event.fire:{[event;args]
listeners:.event.handlers event;
if[0=count listeners;
.log.if.debug "Event fired but no listeners [ Event: ",string[event]," ] [ Args: ",.Q.s1[args]," ]";
:(::);
];
.log.if.debug "Notifying listeners of event [ Event: ",string[event]," ] [ Args: ",.Q.s1[args]," ]";
listenRes:listeners!.ns.protectedExecute[;args] each listeners;
listenErr:where .ns.const.pExecFailure~/:first each listenRes;
if[0 < count listenErr;
.log.if.warn "One or more listeners failed to execute successfully [ Event: ",string[event]," ] [ Errored: ",.convert.listToString[listenErr]," ]";
.log.if.warn "Listener exception detail:\n",.Q.s listenErr#last each listenRes;
:(::);
];
};
/ Adds a listener function to the specified event
/ @param event (Symbol) The event to add the listener function to
/ @param listenFunction (Symbol) Reference to the function to execute when the event is fired
/ @throws IllegalArgumentException If the event is not a symbol or an empty symbol
/ @throws FunctionDoesNotExistException If the function reference does not exist
.event.addListener:{[event;listenFunction]
if[(not .type.isSymbol event) | `~event;
'"IllegalArgumentException";
];
if[not .ns.isSet listenFunction;
'"FunctionDoesNotExistException (`",string[listenFunction],")";
];
if[not event in key .event.handlers;
.log.if.info "New event type to be added for management [ Event: ",string[event]," ]";
];
if[listenFunction in .event.handlers event;
.log.if.debug "Listener already added for event. Will not re-add [ Event: ",string[event]," ] [ Listener: ",string[listenFunction]," ]";
:(::);
];
.event.handlers[event],:listenFunction;
.log.if.info "New listener added for event [ Event: ",string[event]," ] [ Listener: ",string[listenFunction]," ]";
};
/ Removes the listener from the specified event
/ @param event (Symbol) The event to remove the listener from
/ @param listenFunction (Symbol) Reference to the function to remove from the listener
.event.removeListener:{[event;listenFunction]
if[not event in key .event.handlers;
:(::);
];
if[not listenFunction in .event.handlers event;
:(::);
];
.event.handlers[event]:.event.handlers[event] except listenFunction;
.log.if.info "Removed listener from event [ Event: ",string[event]," ] [ Listener: ",string[listenFunction]," ]";
};
/ Binds an event to a specific function so the event management library can be used with it. This is generally
/ used for the core .z event notification functions but can be used anywhere. NOTE: It will not override an existing
/ function
/ @param event (Symbol) The event that will be fired when the bound function is executed
/ @param bindFunction (Symbol) Reference to the function that should be set
.event.installHandler:{[event;bindFunction]
if[.ns.isSet bindFunction;
.log.if.warn "Function to bind event management to is already set. Will not override [ Function: ",string[bindFunction]," ]";
:(::);
];
set[bindFunction;] .event.fire[event;];
if[not event in key .event.handlers;
.event.handlers[event]:`symbol$();
];
|
An introduction to graphical interfaces for kdb+ using C#¶
Over the course of fifteen years, C# has become one of the most common programming languages in the world. It has been used in applications ranging from computer games to medical systems to storage systems.
When deployed in an environment which requires database connections, it is traditional for C# to use a form of SQL for the back end, be it MySQL or SQLite to provide data storage and the ability to execute queries. Though functional, kdb+ offers substantial improvements in performance and processing power over this standard method of operation.
In this paper, we will explain the basics of using C# to open connections to kdb+ processes running on remote servers as well as setting up a basic API that will allow for authentication, error recovery and basic queries through an interface. In this case, all C# code will be pointing to the same kdb+ process.
C# is heavily integrated into Windows software. It allows for the implementation of the .NET environment into applications and can be utilized in the creation of websites using PHP and ASP.NET as well as stand-alone Windows applications.
The paper makes use of the standard c.cs file offered by KX to enable connections to C#. This can be found at KxSystems/kdb.
It is important to note that this paper does not aim to provide a full C# library, but instead give guidance on how to quickly and easily allow a C# application to connect to and run queries against a kdb+ process.
The C# source code for this paper can be found at kxcontrib/csharpgui.
Connecting kdb+ and C¶
C# socket to enable client connection¶
To connect from C# to a running kdb+ process, it is first necessary to import the c.cs file mentioned in the introduction. While it is possible to construct a bespoke plugin for use between kdb+ and C#, and may be required depending on the requirements of the project, for basic connections and queries, the default KX plug-in will be satisfactory. This must then be called by referencing the namespace provided (in this case, it is kx). After importing c.cs into a C# project, it can then be called via the using directive:
using kx;
This will allow all objects, classes and methods within the kx namespace provided by the c.cs file to be used throughout the project, allowing connections to the kdb+ process via TCP network sockets. It will also permit querying and updates to be performed on the same kdb+ process.
To open a connection between C# and the kdb+ process, an object of class c needs to be instantiated. This is a KX-provided class that will act as an interface between kdb+ and C#. It will be used in a method called OpenConnection. In the below example, a server and port will be hard-coded to private variables, though these could be modified to accept values from a configuration file. The methods have also been set up to accept a username and password if desired; this will be described in more detail later.
private static String HOST = "localhost";
private static int PRIMARY_PORT = 5010;
public static c GetConnection(String user, string password)
{
c connection;
if ((connPool.Count > 0) && (counter < MaxPoolSize))
{
connection = RetrieveFromPool(HOST, PRIMARY_PORT,user,password);
}
else
{
connection = OpenConnection(HOST, PRIMARY_PORT, user, password);
counter++;
}
return connection;
}
private static c OpenConnection(String host,int port,string user,string password)
{
try
{
c conn;
if ((user == null) || (password == null))
{
conn = new c(host, port); //Takes host and port
}
else
{
conn = new c(host, port, user, password);
}
if (conn == null) //Returns null if no connection was made
{
throw new Exception("Connection could not be established");
}
else
{
return conn; //If connection was made, return conn object
}
}
catch (Exception e)
{
System.Diagnostics.Debug.Write("An unexpected error occurred: " + e.ToString());
//Catch any unexpected errors and fail gracefully.
throw e;
}
}
The above code shows a simple, generic connection method which can be called when starting a C# application to create a connection to a running kdb+ process. We return the c object in this case as we will need it in the main method to execute queries.
Note that a lot of the method consists of error catching. This is to ensure that the application can recover in the event of the connection not being made successfully or another, unexpected error occurring. In this example, we have merely outputted a message to the Visual Studio console to say that we have an error but later, we will see how this error handling can be used to provide error recovery or business continuity.
This method will be used as part of a ConnectionPool which will be used to monitor and assign connections to users rather than having a haphazard collection of connections with each process. This will reduce the load and traffic on the kdb+ instance as it cuts down on the number of handles that could be attempting to query simultaneously.
private void button1_Click(object sender, EventArgs e)
{
c conn = ConnectionPool.GetConnection();
//This pulls an object of type c from the shared connection pool.
if (conn != null) {
textBox1.Text = "You have connected to localhost:5010 successfully";
} else {
textBox1.Text = "Error, you have not successfully connected to the server";
}
ConnectionPool.ReturnConnection(conn);
}
This is an example of the OpenConnection
method in operation (via the
ConnectionPool
) with a simple click event for a button. It will check
if the method has returned a c
object for further use and, if not, it
will throw an error.
Successful connection
Unsuccessful connection
Validation with passwords¶
As previously mentioned, a bonus of using kdb+ as the back end for a C# or Java application is that the code on the server side is independent of both languages. This greatly eases the development required to make such a service available and enables platforms operating either language to access the same services. This also enables developers to take advantage of kdb+ as a language and move much of the processing to the server side. An example of this is validation to prevent users from connecting to the server without permission.
In the below process, we have created a simple table with a user and their password. The password could be further encrypted using a hashing algorithm such as MD5 for extra security (this will not be shown in this white paper but is highly recommended).
q)user_table:([users:`mreynolds`user1`user2]password:("password";"password2";"password3"))
q)user_table
users | password
---------| -----------
mreynolds| "password"
user1 | "password2"
user2 | "password3"
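As an aside on the hashing suggestion above (an illustration added here, beyond the paper's scope), kdb+ provides an md5 keyword, so digests rather than plaintext could be stored and compared:
q)md5 "password"
0x5f4dcc3b5aa765d61d8327deb882cf99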
This involves changing the c.cs file provided by KX as this is not set up to accept customized usernames (it instead takes the username stored in Environment.UserName) or any passwords at all. We will also need to modify the OpenConnection method and define .z.pw.
First of all, the c
constructor for c.cs
:
public c(string h, int p, string u, int maxBufferSize)
{
serverName = h; //This is the hostname
serverPort = p; //This is the port number
_maxBufferSize = maxBufferSize;
Connect(h, p); //This is what makes the connection to kdb+
s = this.GetStream();
B = new byte[2 + u.Length];
//This defines the length of the byte stream as username + 2
// for termination characters
J = 0;
w(u + "\x3"); //This writes the username plus the terminating character '\x3' to the buffer
s.Write(B, 0, J); //This line sends data to kdb+ as stream of bytes;
if (1 != s.Read(B, 0, 1))
throw new KException("access"); vt = Math.Min(B[0], (byte)3);
//Throws error if connection is not accepted.
}
This works by opening a byte stream to the kdb+ process and then feeding user input as bytes to the handle.
A byte stream is an open connection that sends a collection of bytes from sender to receiver in a bidirectional format. This connection is reliable and the use of bytes allows the C# query to be sent to a kdb+ process and the kdb+ response to be returned and de-serialized.
As defined currently, along with the host h
and port p
, it will
take a further parameter for the username u
, but none for the
password. To make it accept passwords as well, we need to modify the c
constructor in the c.cs
file to the following:
//We have added pw argument to this to take in our password
public c(string h, int p, string u, string pw, int maxBufferSize)
{
serverName = h; //This is the hostname
serverPort = p; //This is the port number
_maxBufferSize = maxBufferSize;
Connect(h, p); //This is what makes the connection to kdb+
s = this.GetStream();
B = new byte[3 + u.Length + pw.Length];
//This differs from above as we have added the length of the password as well,
// plus an extra character to take account of the separator between u and pw ':'
J = 0;
w(u + ":" + pw + "\x3");
//We can now send through both username and password to the kdb+
// session for authentication.
s.Write(B, 0, J); //This line sends data to kdb+ as stream of bytes;
if (1 != s.Read(B, 0, 1)) throw new KException("access"); vt=Math.Min(B[0],(byte)3);
}
We have specified a new variable pw
, which is now being read into the byte stream along with u
. In particular, it is w(u + ":" + pw + "\x3")
that will be interpreted by .z.pw
or the -u
argument as a username and password. We can use a simple definition for .z.pw
to query the users table whenever a connection is made through C#.
This will return 0b
if the user does not have permission to access the
process. Within C#, this will throw a KException with the message
access
. This can be used to track if the user has been refused
access in the C# API.
See the white paper “Permissions with kdb+” for more detailed information on validation and authentication.
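One simple, illustrative definition of .z.pw (assuming the user_table shown above is defined on the kdb+ process; a production system would compare hashed rather than plain-text passwords) is:
q).z.pw:{[user;pwd] pwd~user_table[user;`password]}
q).z.pw[`mreynolds;"password"] / 1b - connection accepted
q).z.pw[`user1;"wrong"]        / 0b - connection refused, the client receives an 'access error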
private void button1_Click(object sender, EventArgs e)
{
try
{
c conn = OpenConnection("localhost",5010,usernameText.Text, pwText.Text);
label3.Text = "Hello " + usernameText.Text +
". You are permitted to make this connection";
}
catch (KException kEx)
{
if (kEx.Message == "access") //Permission denied
{
label3.Text = "Sorry "+usernameText.Text+". You may not make this connection";
}
else
{
label3.Text = "An unexpected kdb+ error occurred";
}
}
catch (Exception ex)
{
label3.Text = ex.Message;
}
}
Successful connection
Unsuccessful connection
Queries¶
Simple queries¶
With a connection established between the C# application and the kdb+
process, it is now possible to send queries from the front end and
receive data from the back end. This is done through the use of the c
object which is created after opening the connection. This is why it
is important to keep track of the object in methods such as
OpenConnection.
The method used to perform operations over the kdb+ connection is
simply called k
and is called as a child method of the c
class. The
same method is used to query, update and delete data from kdb+ and
accepts a string as a parameter.
conn.k("select from tab");
This is then fed into a method called c
, which breaks it into bytes
and passes it into kdb+. The result is then received by the C# client
as a stream of bytes which is de-serialized by the c
method into C#
compatible types. The result itself is a two-dimensional array when
used with select
or a one-dimensional array when used with exec
. This
can then be cast to the type c.Flip
, which mimics a table with similar
functionality and methods available.
In the below example, an app has been written with a hard-coded query
to a simple trade table. This will update the table in the application
by use of a DataGridView
every time the update button is clicked. To
push the data to a DataGridView
, it first needs two loops to copy the
data into the correct formats (one loop for rows and one loop for
columns).
private void button1_Click(object sender, EventArgs e)
{
if (conn == null)
{
conn = ConnectionPool.GetConnection();
}
object result = (object)conn.k("select from trade");
c.Flip table = (c.Flip)result;
QueryView.Columns.Clear();
//Clear columns first to allow clean population of table
foreach (string colName in table.getColumns())
{
QueryView.Columns.Add(colName, colName); //Add the columns to the Queryview
}
QueryView.Rows.Add(table.getNoRow());
for (int row = 0; row < table.getNoRow(); row++) {
for (int col = 0; col < (table.getColumns().Length); col++)
{
QueryView[col, row].Value = c.at(table.y[col], row);
//Populate each cell of the Queryview with its associated value
}
}
ConnectionPool.ReturnConnection(conn);
}
This produces the following output:
q)select from trade
symbol price size
-----------------
DEN.O 38 13
ASI.O 36 41
GOOG.O 94 11
APL.O 60 2
ASI.O 47 27
GOOG.O 40 10
APL.O 85 27
DEN.O 71 44
MSN.O 66 27
APL.O 33 38
APL.O 56 21
GOOG.O 24 30
Query output
Building dynamic queries¶
As the query consists of only a single string value, it is easy to modify and adjust dynamically based on user inputs, allowing easy creation of GUIs to interact with kdb+.
It should be noted that while it is possible to build a dynamic query as shown in the below example, it is vulnerable to injection attacks. Production systems should be more robust in dealing with these kinds of attacks, though this is beyond the scope of this white paper.
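One simple mitigation, sketched here purely as an illustration (the function and column names are hypothetical), is to keep a named query function on the kdb+ side and pass the user's inputs to it as arguments rather than splicing them into a query string:
q)getTrades:{[s;minSize] select from trade where sym=s, size>=minSize}
If the version of c.cs in use exposes the multi-argument k overloads, the C# client can supply the values as arguments to the call, so user input is never interpreted as q code.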
Below is an example of a class and GUI that has been constructed using simple dropdowns and text boxes yet creates a flexible and powerful editor we can use to query the trade table:
Example GUI
To carry this out, we use a new method called BuildQuery
(presented below) and replace the hard-coded query with:
conn.k(BuildQuery());
The BuildQuery
method takes the inputs of each textbox, checkbox and
combo box seen above and combines them to build a query to
send to kdb+. This allows those without much knowledge of kdb+ queries
or optimization of queries to view data stored on kdb+ processes
without exposing them to the qSQL language.
This is the BuildQuery
method, which takes all the available inputs and
creates a query string from them:
private string BuildQuery()
{
String check1 = "";
String check2 = "";
String check3 = "";
StringBuilder queryString = new StringBuilder();
if (checkBox1.Checked)
{
check1 = " not ";
}
if (checkBox2.Checked)
{
check2 = " not ";
}
if (checkBox3.Checked)
{
check3 = " not ";
}
if ((selectedCols.Text == null) || (selectedCols.Text == "*"))
{
queryString.Append("select from ");
}
else
{
queryString.Append("select " + selectedCols.Text + " from ");
}
queryString.Append(tableComboBox.SelectedItem);
if (argComboBox1.SelectedItem != null)
{
queryString.Append(" where (" + check1 + argComboBox1.SelectedItem +
signComboBox1.SelectedItem + argInput1.Text + ")");
//Append the above strings and the text boxes of the UI into
//a single string query that can be sent to the kdb+ process
}
if ((argComboBox2.SelectedItem != null) && (argComboBox1.SelectedItem != null))
{
queryString.Append(andor1.SelectedItem + " (" + check2 +
argComboBox2.SelectedItem + signComboBox2.SelectedItem +
argInput2.Text + ")");
}
if ((argComboBox2.SelectedItem != null) && (argComboBox1.SelectedItem != null) &&
(argComboBox3.SelectedItem != null))
{
queryString.Append(andor2.SelectedItem + " (" + check3 +
argComboBox3.SelectedItem + signComboBox3.SelectedItem +
argInput3.Text + ")");
}
return queryString.ToString();
}
Managing connections¶
A key requirement of a business application and particularly a trading-based application is continuity of service. Loss of business continuity can happen for a number of reasons including server crashes due to technical or environmental faults, a failure in the API or a loss of connection between the two components. In this regard, it is important that any C# application that connects to kdb+ be designed to handle these events and fail over if necessary to backup processes or inform the user of the problem and take measures to reconnect as soon as possible if this is desired.
On the client side, we will utilize the KException
and use this to
fail over to a secondary connection if a query cannot go through
successfully. This method will then attempt to run the query again
using the backup connection and publish the data as normal. This can
ensure continuity of business in the event that the kdb+ process is
rendered unable to respond.
catch (Exception ex)
{
if (ex.Message == "read" || ex.Message == "stop")
{
try
{
if (ex.Message == "read")
{
errorLabel.Text = "ALERT: using secondary connection";
}
else
{
errorLabel.Text = "ALERT: query timed out, using second connection";
}
conn = ConnectionPool.GetConnection();
c.Flip table = GetData(queryBox.Text);
PublishQuery(table);
}
catch (Exception ee)
{
errorLabel.Text = "ERROR - unable to connect: " + ee.Message;
}
}
else
{
errorLabel.Text = "ERROR: " + ex.Message;
}
}
In the above example, we are capturing any exception that is thrown
with the read
error. This means the GUI was unable to successfully
send the query to the kdb+ back-end. To reconnect to the secondary
connection, we call the ConnectionPool.GetConnection
method again and
re-send the query. The PublishQuery
method simply publishes the result
of the query into a DataGridView
as before. On the kdb+ side we have
two processes running the same functions and trade table but on
different ports.
We can expand this functionality to take account of a process being busy, such as when it is processing a large query or hanging for another reason. To test the code below, we set the server's query timeout to one second so that our test query exceeds it. Timeout errors are raised on the kdb+ side and returned as a stop error, which we can catch.
q)\T 1
The catch statement can then be modified to trap the stop error and rerun the query on another process:
catch (Exception ex)
{
if (ex.Message == "read" || ex.Message == "stop")
{
try
{
if (ex.Message == "read")
{
errorLabel.Text = "ALERT: using secondary connection";
}
else
{
errorLabel.Text = "ALERT: query timed out, using second connection";
}
conn = ConnectionPool.GetSecondaryConnection();
c.Flip table = GetData(query);
PublishQuery(table);
}
catch (Exception ee)
{
errorLabel.Text = "ERROR - unable to connect: " + ee.Message;
}
}
}
This is used by a method within the connection pool called
GetSecondaryConnection
, which will use a predefined port and the same
host to open a new connection. This will add the connection to the
same pool, preventing the application from exceeding its maximum
number of connections:
public static c GetSecondaryConnection(String user, string password)
{
c connection;
if ((connPool.Count > 0) && (counter < MaxPoolSize))
{
connection = RetrieveFromPool(HOST, SECONDARY_PORT, user, password);
}
else
{
connection = OpenConnection(HOST, SECONDARY_PORT, user, password);
counter++;
}
return connection;
}
Running analytics¶
Until now, we’ve been using kdb+ to deliver raw trade data to our C#
front end. While viewing raw data can be useful, many users will want
to see an enriched view of each trade. We can take the example from
Queries above and expand it so that each row in the DataGridView
will be
selectable, allowing us to drill into each trade to provide further
analysis on the product being traded. The analytics will include:
- Minimum daily price
- Maximum daily price
- Daily VWAP price
- Average price
We will also plot these on a line graph to allow users to better identify patterns and outliers throughout the day.
To calculate these, we will create a new function called analyzeData
on the kdb+ side, which will then be called from C#.
q) analyzeData:{[x]
0!select
minPrice:min price,
maxPrice:max price,
vwapPrice:size wavg price,
avgPrice:avg price,
totalTransactions:count i by 15 xbar time.minute
from trade where sym=x}
This will calculate the min, max, average and VWAP prices for a given
symbol in 15-minute buckets. We will also write a function called
getSummary
which provides the overall min, max, average, and VWAP for the
entire day. Note that we must unkey the table before returning it to
C# as c.Flip
would treat this keyed table (type 99) as a dictionary
and cause an error.
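As a quick illustration of the unkeying step (the column chosen here is arbitrary), 0! drops the key and changes the type from 99h (a dictionary) to 98h (a plain table):
q)type select minPrice:min price by sym from trade   / 99h - keyed table, treated as a dictionary
q)type 0!select minPrice:min price by sym from trade / 98h - unkeyed table, maps cleanly to c.Flip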
q) getSummary: {[x]
0!select
distinct sym,
minPrice:min price,
maxPrice:max price,
avgPrice:avg price,
vwap:size wavg price,
total:count i
from trade where sym=x}
If we perform these on the table trade for one symbol `FDP.O
in the
kdb+ session, we can see the results:
q)4#details
sym | fullName
------| -----------------------
FDP.O | "First Derivatives PLC"
BMW.O | "BMW AG"
MSFT.O| "Microsoft Corporation"
INTC.O| "Intel Corp"
q)getSummary[`FDP.O]
sym minPrice maxPrice avgPrice vwap total
--------------------------------------------
FDP.O 1.021 1.109 1.064 1.064 5082
q)10#analyzeData[`FDP.O]
minute minPrice maxPrice vwapPrice avgPrice totalTransactions
-------------------------------------------------------------
00:00 1.022 1.108 1.063 1.063 93
00:15 1.022 1.108 1.063 1.064 114
00:30 1.022 1.108 1.061 1.061 132
00:45 1.022 1.108 1.065 1.066 139
01:00 1.021 1.108 1.066 1.068 143
01:15 1.021 1.108 1.069 1.069 126
01:30 1.021 1.108 1.061 1.061 137
01:45 1.022 1.108 1.063 1.062 144
02:00 1.022 1.108 1.066 1.066 130
02:15 1.022 1.108 1.067 1.067 129
The methods to pull this data into a graph in the case of analyzeData
,
and text boxes in the case of getSummary
, are simple to implement,
involving query calls to kdb+ to collect the data and then using loops
to process it.
public Form3(String symbol, c conn)
{
InitializeComponent();
symLabel.Text = symbol;
this.conn = conn;
c.Flip details = GetData("select fullName from details where sym=`" + symbol);
compLabel.Text = c.at(details.y[0], 0).ToString();
//This will execute the above functions using the symbol taken from the UI
details = GetData("getSummary[`" + symbol + "]");
GetDaily(details);
details = GetData("analyzeData[`" + symbol + "]");
SetAxis(details);
PopulateChart(details); //Populates Example Chart
PopulateGrid(details); //Populates Example Grid
}
This calls the methods GetData
(which was used in the previous
section), GetDaily
, SetAxis
and PopulateChart
. Note that the form
takes as arguments a string to represent the symbol and the connection
object. This is to avoid opening up new connections with each
selection. These values are supplied from the parent form in a
CellEvent
for the DataGrid
, making it selectable:
private void QueryView_CellClick(object sender, DataGridViewCellEventArgs e)
{
if (e.RowIndex >= 0) //We need to check that user has not clicked column header
{
int rowIndex = e.RowIndex;
DataGridView senderGrid = (DataGridView)sender; //The cell selected
String selSym = senderGrid.Rows[rowIndex].Cells[1].Value.ToString();
Form3 frm3 = new Form3(selSym, conn); //Open new window with arguments
frm3.ShowDialog(this);
}
}
The GetDaily
method uses the getSummary
function on our kdb+
process to query the table and return values for that single symbol
over the entire day. We then use these to populate the boxes on the
left-hand side to provide a quick visual summary of the data:
private void GetDaily(c.Flip details)
{
min = (double)c.at(details.y[1], 0);
max = (double)c.at(details.y[2], 0);
dAvg = (double)c.at(details.y[3], 0);
dVwap = (double)c.at(details.y[4], 0);
transNo = (int)c.at(details.y[5], 0);
minBox.Text = min.ToString("#.#####");
maxBox.Text = max.ToString("#.#####");
avgBox.Text = dAvg.ToString("#.#####");
dailyVwap.Text = dVwap.ToString("#.#####");
transNoBox.Text = transNo.ToString();
}
The SetAxis
method is optional but provides a more pronounced set of
peaks and troughs in the data by setting the maximum and minimum
values of the Y axis depending on the data itself. This is done by
using a simple loop to find the maximum returned value and minimum
returned value from the subset of data. This does not include maximum
or minimum prices over the period as this would reduce the sensitivity
of the chart.
private void SetAxis(c.Flip details)
{
double min = 1000;
double max = 0;
for (int i = 0; i < details.getNoRow(); i++)
{
for (int j = 3; j < 5; j++) //Only the vwapPrice and avgPrice columns
{
double minVal = (double)c.at(details.y[j], i);
double maxVal = (double)c.at(details.y[j], i);
if (minVal < min)
{
min = minVal;
}
if (maxVal > max)
{
max = maxVal;
}
}
}
reportChart.ChartAreas[0].AxisY.Minimum = min - 0.0025; //Add margin
reportChart.ChartAreas[0].AxisY.Maximum = max + 0.0025; //Add Margin
}
Finally, we need to plot the graph itself. This is done with a
DataVisualization.Charting.Chart
object in the GUI, with the co-ordinates in each series being added via a loop:
private void PopulateChart(c.Flip details)
{
for (int i = 0; i < details.getNoRow(); i++)
{
reportChart.Series["vwap"].Points.AddXY((c.at(details.y[0], i)).ToString(),
(double)c.at(details.y[3], i));
reportChart.Series["avg"].Points.AddXY((c.at(details.y[0], i)).ToString(),
(double)c.at(details.y[4], i));
reportChart.Series["dailyAvg"].Points.AddXY((c.at(details.y[0], i)).ToString(),
dAvg);
reportChart.Series["dailyVwap"].Points.AddXY((c.at(details.y[0], i)).ToString(),
dVwap);
}
}
As clients may also wish to see the data in its grid form, a grid is populated along with the chart with a button to switch between the two views whenever required:
Example Chart
Example Grid
Though this has only been set up to query the trade table as it currently exists, it would not be difficult to implement a timer to periodically query kdb+ (every minute, for example) and retain up-to-date figures and charts. By the same measure, adding the ability to compare different symbols or different time frames would not take much more effort, nor would giving the user the ability to choose what time period they analyze. Furthermore, WebSockets could be used to deliver streaming data from the kdb+ back end to the C# GUI.
Author¶
Michael Reynolds works as a kdb+ consultant for one of the largest investment banks in the world. As part of his daily job, Michael is responsible for maintaining kdb+ databases as well as C# APIs and plug-ins.
// Check that the q model being set/retrieved from the model registry
// is of an appropriate type
//
// @param model {fn|proj|dictionary} The model to be saved to the registry.
// In the case this is a dictionary it is assumed that a 'predict' key
// exists such that the model can be used on retrieval
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.q:{[model;getOrSet]
if[not type[model]in 99 100 104h;
printString:$[getOrSet;"retrieved is not";"must be"];
'"model ",printString," a q function/projection/dictionary"
];
if[99h=type model;
if[not `predict in key model;
printString:$[getOrSet;"retrieved";"saved"];
'"q dictionaries being ",printString," must contain a 'predict' key"
];
];
}
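// Example behaviour (illustrative models; assumes the library is loaded under the .ml namespace):
//   .ml.mlops.check.q[{x+1};0b]                  / passes - lambda (type 100h)
//   .ml.mlops.check.q[`predict`data!({x};());0b] / passes - dictionary containing a `predict key
//   .ml.mlops.check.q[([]a:1 2);0b]              / signals "model must be a q function/projection/dictionary"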
// Check that the Python object model being set/retrieved from the model
// registry is of an appropriate type
//
// @param model {<} The model to be saved to the registry. This must be
// an embedPy or foreign object
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.python:{[model;getOrSet]
if[not type[model]in 105 112h;
printString:$[getOrSet;"retrieved is not";"must be"];
'"model ",printString," an embedPy object"
];
}
// Check that a model that is being added to or retrieved from the
// registry is an sklearn model with a predict method
//
// @param model {<} The model to be saved to or retrieved from the registry.
// This must be an embedPy or foreign object
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.sklearn:{[model;getOrSet]
mlops.check.python[model;getOrSet];
mlops.check.pythonlib[model;"sklearn"];
@[{x[`:predict]};model;{[x]'"model must contain a predict method"}]
}
// Check that a model that is being added to or retrieved from the
// registry is an xgboost model with a predict method
//
// @param model {<} The model to be saved to or retrieved from the registry.
// This must be an embedPy or foreign object
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.xgboost:{[model;getOrSet]
mlops.check.python[model;getOrSet];
mlops.check.pythonlib[model;"xgboost"];
@[{x[`:predict]};model;{[x]'"model must contain a predict method"}]
}
// Check that a model that is being added to or retrieved from the
// registry is a Keras model with a predict method
//
// @param model {<} The model to be saved to or retrieved from the registry.
// This must be an embedPy or foreign object
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.keras:{[model;getOrSet]
mlops.check.python[model;getOrSet];
mlops.check.pythonlib[model;"keras"];
@[{x[`:predict]};model;{[x]'"model must contain a predict method"}]
}
// Check that a model that is being added to or retrieved from the
// registry is a Theano model with a predict method
//
// @param model {<} The model to be saved to or retrieved from the registry.
// This must be an embedPy or foreign object
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.theano:{[model;getOrSet]
mlops.check.python[model;getOrSet];
mlops.check.pythonlib[model;"theano"]
}
// Check that a model that is being added to or retrieved from the
// registry is a PyTorch model
//
// TO-DO
// - Increase type checking on torch objects
//
// @param model {<} The model to be saved to or retrieved from the registry.
// This must be an embedPy or foreign object
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.torch:{[model;getOrSet]
mlops.check.python[model;getOrSet];
}
// Check that a DAG being saved/retrieved is appropriately formatted
//
// @param model {dictionary} The DAG to be saved/retrieved
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.graph:{[model;getOrSet]
if[not 99h=type model;
printString:$[getOrSet;"retrieved is not";"must be"];
'"graph ",printString," a q dictionary"
];
if[not `vertices`edges~key model;
'"graph does not contain 'vertices' and 'edges' keys expected"
];
}
// Check that a model that is being added to or retrieved from the
// registry is a pyspark pipeline with a transform method
//
// @param model {<} The model to be saved to or retrieved from the registry.
// This must be an embedPy or foreign object
// @param getOrSet {boolean} Is the model being retrieved or persisted, this
// modifies the error statement on issue with invocation
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.pyspark:{[model;getOrSet]
.ml.mlops.check.python[model;getOrSet];
@[{x[`:transform]};model;{[x]'"model/pipeline must contain a transform method"}]
}
// Check that the python object that is retrieved contains an appropriate
// indication that it comes from the library that it is expected to come
// from.
//
// @param model {<} The model to be saved to or retrieved from the registry.
// This must be an embedPy or foreign object
// @param library {string} The name of the library that is being checked
// against, this is sufficient in the case of fit sklearn/xgboost/keras models
// but may not be generally applicable
// @return {::} Function will error on unsuccessful invocation otherwise
// generic null is returned
mlops.check.pythonlib:{[model;library]
builtins:.p.import[`builtins];
stringRepr:builtins[`:str;<][builtins[`:type]model];
if[not stringRepr like "*",library,"*";
'"Model retrieved not a python object derived from the library '",
library,"'."
];
}
================================================================================
FILE: ml_ml_mlops_src_q_create.q
SIZE: 1,302 characters
================================================================================
\d .ml
// .ml.registry.util.create.binExpected - Separate the expected values into bins
// @param expected {float[]} The expected data
// @param nGroups {long} The number of groups
// @returns {dict} The splitting values and training distributions
mlops.create.binExpected:{[expected;nGroups]
expected:@["f"$;expected;{'"Cannot convert the data to floats"}];
splits:mlops.create.splitData[expected;nGroups],0w;
expectDist:mlops.create.percSplit[expected;splits];
(`$string splits)!expectDist
}
// .ml.registry.util.create.splitData - Split the data into equally distributed
// bins
// @param expected {float[]} The expected predictions
// @param nGroups {int} The number of data groups
// @return {float[]} The splitting points in the expected set
mlops.create.splitData:{[expected;nGroups]
n:1%nGroups;
mlops.percentile[expected;-1_n*1+til nGroups]
}
// .ml.registry.util.create.percSplit - Get the percentage of data points that
// are in each distribution bin
// @param data {float[]} The data to be split
// @param split {float[]} The splitting values defining how the data is to be
// distributed
// @return {float[]} The splitting values and training distributions
mlops.create.percSplit:{[data;splits]
groups:deltas 1+bin[asc data;splits];
groups%count data
}
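// Example (illustrative data; assumes the library is loaded under the .ml namespace):
//   .ml.mlops.create.binExpected["f"$til 100;4]
// should return a dictionary keyed by the bin upper edges (`24.75`49.5`74.25`0w)
// with the fraction of training observations falling in each bin (0.25 0.25 0.25 0.25)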
================================================================================
FILE: ml_ml_mlops_src_q_get.q
SIZE: 3,389 characters
================================================================================
\d .ml
// Retrieve a q model from disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {dict|fn|proj} The model previously saved to disk
// registry
mlops.get.q:{[filePath]
mlops.get.typedModel[`q;filePath;get]
}
// Retrieve a Python model from disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {<} The embedPy object associated with the model saved
mlops.get.python:{[filePath]
func:.p.import[`joblib]`:load;
mlops.get.typedModel[`python;filePath;func]
}
// Retrieve a sklearn model from disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {<} The embedPy object associated with the model saved
mlops.get.sklearn:{[filePath]
func:.p.import[`joblib]`:load;
mlops.get.typedModel[`sklearn;filePath;func]
}
// Retrieve a xgboost model from disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {<} The embedPy object associated with the model saved
mlops.get.xgboost:{[filePath]
func:.p.import[`joblib]`:load;
mlops.get.typedModel[`xgboost;filePath;func]
}
// Retrieve a Keras model from disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {<} The embedPy object associated with the model saved
mlops.get.keras:{[filePath]
func:.p.import[`keras.models]`:load_model;
mlops.get.typedModel[`keras;filePath;func]
}
// Retrieve a Theano model from disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {<} The embedPy object associated with the model saved
mlops.get.theano:{[filePath]
func:.p.import[`joblib]`:load;
mlops.get.typedModel[`theano;filePath;func]
}
// Retrieve a PyTorch model from disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {<} The embedPy object associated with the model saved
mlops.get.torch:{[filePath]
torch:.p.import`torch;
model:@[torch`:load;
filePath;
{[torch;filePath;err]
@[torch`:jit.load;
filePath;
{[x;y]'"Could not retrieve the requested model at ",x}[filePath]
]
}[torch;filePath]
];
mlops.check.torch[model;1b];
model
}
// Retrieve a DAG from a location on disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {dictionary} The dictionary defining a saved workflow
mlops.get.graph:{[filePath]
func:.dag.loadGraph;
mlops.get.typedModel[`graph;filePath;func]
}
// Retrieve a pyspark model from a location on disk
//
// @param filePath {string} The full path the model to be retrieved
// @return {<} The embedPy object associated with the model saved
.ml.mlops.get.pyspark:{[modelPath]
pipe:.p.import[`pyspark.ml]`:PipelineModel;
func:pipe`:load;
@[func;modelPath;{[x;y]'"Could not retrieve the requested model at ",x}[modelPath]]
};
// Retrieve a model from disk.
//
// @param typ {symbol} Type of model being retrieved
// @param filePath {string} The full path to the desired model
// @param func {function} Function used to retrieve model object
// @return {dict|fn|proj} The model previously saved to disk within the
// registry
mlops.get.typedModel:{[typ;filePath;func]
mdl:$[typ~`q;hsym`$filePath;filePath];
model:@[func;mdl;
{[x;y]'"Could not retrieve the requested model at ",x}[filePath]
];
mlops.check[typ][model;1b];
model
}
================================================================================
FILE: ml_ml_mlops_src_q_init.q
SIZE: 315 characters
================================================================================
// init.q - Initialise functionality related to MLOps Tools
// Copyright (c) 2021 Kx Systems Inc
// Functionality relating to MLOps Tools
// Load all functionality
mlops.loadfile`:src/q/create.q
mlops.loadfile`:src/q/check.q
mlops.loadfile`:src/q/get.q
mlops.loadfile`:src/q/misc.q
mlops.loadfile`:src/q/update.q
================================================================================
FILE: ml_ml_mlops_src_q_misc.q
SIZE: 5,811 characters
================================================================================
\d .ml
// .ml.registry.util.percentile - Functionality from the ml-toolkit. Percentile
// calculation for an array
// @param array {number[]} A numerical array
// @param perc {float} Percentile of interest
// @returns {float} The value below which `perc` percent of the observations
// within the array are found
mlops.percentile:{[array;perc]
array:array where not null array;
percent:perc*-1+count array;
i:0 1+\:floor percent;
iDiff:0^deltas asc[array]i;
iDiff[0]+(percent-i 0)*last iDiff
}
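// Example (illustrative data):
//   .ml.mlops.percentile[1 2 3 4 5f;0.5] / 3f - the median
//   .ml.mlops.percentile[til 100;0.25]   / 24.75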
// @kind function
// @category private
// @fileoverview Split HTTP response into headers & dict
// @param r {string} raw HTTP response
// @return {(dict;string;string)} (response header;response body;raw headers)
formatresp:{[r]
p:(0,4+first r ss 4#"\r\n") cut r; //split response headers & body
rh:p 0; //keep raw headers to return as text
p:@[p;0;"statustext:",]; //add key for status text line
d:trim enlist[`]_(!/)("S:\n")0:p[0]except"\r"; //create dictionary of response headers
d:lower[key d]!value d; //make headers always lower case
d[`status]:"I"$(" "vs r)1; //add status code
if[(`$"content-encoding")in key d;
p[1]:.req.decompress[d`$"content-encoding"]p[1]; //if compressed, decompress body based on content-encoding
];
:(d;p[1];rh); //return header dict, response body, raw headers string
}
// @kind function
// @category private
// @fileoverview Signal if not OK status, return unchanged response if OK
// @param v {boolean} verbose flag
// @param x {(dict;string)} HTTP response object
// @return {(dict;string)} HTTP response object
okstatus:{[v;x]
if[not[.req.SIGNAL]|v|x[0][`status] within 200 299;:x]; //if signalling disabled, in verbose mode or OK status, return
'string x[0]`status; //signal if bad status FIX: handle different status codes - descriptive signals
}
// @kind function
// @category public
// @fileoverview Send an HTTP request
// @param m {symbol} HTTP method/verb
// @param u {symbol|string|#hsym} URL
// @param hd {dict} dictionary of custom HTTP headers to use
// @param p {string} payload/body (for POST requests)
// @param v {boolean} verbose flag
// @return {(dict;string)} HTTP response (headers;body)
send:{[m;u;hd;p;v]
u:$[type u;enlist;]u; //allow URL to contain query dict as 2nd element of the list
q:@[.req.query;`method`url`headers`body;:;(m;.url.parse0[0]u 0;hd;p)]; //parse URL into URL object & build query
if[count u 1; q[`url;`path]:.url.format `path`query!(q[`url;`path];u 1)]; //if query dict supplied, incorporate it in the URL path
if[a:count q[`url]`auth;.auth.setcache . q[`url]`host`auth]; //cache credentials if set
if[not a;q[`url;`auth]:.auth.getcache q[`url]`host]; //retrieve cached credentials if not set
q:proxy q; //check if we need to use proxy & get proxy address
/nu:$[@[value;`.doh.ENABLED;0b];.doh.resolve;]u 0; //resolve URL via DNS-over-HTTPS if enabled
hs:.url.hsurl`$raze q ./:enlist[`url`protocol],$[`proxy in key q;1#`proxy;enlist`url`host]; //get hostname as handle
q:.cookie.addcookies[q]; //add cookie headers
q:addheaders[q]; //get dictionary of HTTP headers for request
r:hs d:buildquery[q]; //build query and execute
if[v;neg[`int$v]"-- REQUEST --\n",string[hs],"\n",d]; //if verbose, log request
r:formatresp r; //format response to headers & body
if[v;neg[`int$v]"-- RESPONSE --\n",r[2],"\n\n",r[1],("\n"<>last r[1])#"\n"]; //if verbose, log response
if[(sc:`$"set-cookie") in k:key r 0; //check for Set-Cookie headers
.cookie.addcookie[q[`url;`host]]'[value[r 0]where k=sc]]; //set any cookies necessary
if[r[0][`status]=401;:.z.s[m;.auth.getauth[r 0;u 0];hd;p;v]]; //if unauthorised prompt for user/pass FIX:should have some counter to prevent infinite loops
if[.status.class[r] = 3; //if status is 3XX, redirect
lo:$["/"=r[0][`location]0;.url.format[`protocol`auth`host#q`url],1_r[0]`location;r[0]`location]; //detect if relative or absolute redirect
:.z.s[m;lo;hd;p;v]]; //perform redirections if needed
:r;
}
// @kind function
// @category private
// @fileoverview Parse to kdb object based on Content-Type header. Only supports JSON currently
// @param r {(dict;string)} HTTP response
// @return {any} Parsed response
parseresp:{[r]
/ TODO - add handling for other data types? /
if[not .req.PARSE;:2#r]; //if parsing disabled, return "raw" response (incl. headers dict)
f:$[(`j in key`)&r[0][`$"content-type"]like .req.ty[`json],"*";.j.k;::]; //check for JSON, parse if so
:@[f;r[1];r[1]]; //error trap parsing, return raw if fail
}
// @kind function
// @category public
// @fileoverview Send an HTTP GET request
// @param x {symbol|string|#hsym} URL
// @param y {dict} dictionary of custom HTTP headers to use
// @return {(dict;string)|any} HTTP response (headers;body), or parsed if JSON
// @qlintsuppress RESERVED_NAME
.req.get:{parseresp okstatus[.req.VERBOSE] send[`GET;x;y;();.req.VERBOSE]}
// @kind function
// @category public
// @fileoverview Send an HTTP GET request (simple, no custom headers)
// @param x {symbol|string|#hsym} URL
// @return {(dict;string)|any} HTTP response (headers;body), or parsed if JSON
.req.g:.req.get[;()!()]
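// Example usage (added for illustration; the URL is hypothetical):
//   .req.g"https://httpbin.org/get"                                  / simple GET, no custom headers
//   .req.get["https://httpbin.org/get";enlist["Accept"]!enlist"*/*"] / GET with a custom header dictionary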
// @kind function
// @category public
// @fileoverview Send an HTTP POST request
// @param x {symbol|string|#hsym} URL
// @param y {dict} dictionary of custom HTTP headers to use
// @param z {string} body for HTTP request
// @return {(dict;string)|any} HTTP response (headers;body), or parsed if JSON
.req.post:{parseresp okstatus[.req.VERBOSE] send[`POST;x;y;z;.req.VERBOSE]}
// @kind function
// @category public
// @fileoverview Send an HTTP POST request (no custom headers)
// @param x {symbol|string|#hsym} URL
// @param y {dict} symbol of encoding to use (e.g. `` `json `csv``)
// @param z {string|any} body for HTTP request (if non-string, must be an encoder in .req.tx)
// @return {(dict;string)|any} HTTP response (headers;body), or parsed if JSON
.req.p:{[x;y;z]
if[10h<>type z;
if[not y in key .req.tx;'type];
z:.req.tx[y] z;
];
y:enlist["Content-Type"]!enlist .req.ty y;
parseresp okstatus[.req.VERBOSE] send[`POST;x;y;z;.req.VERBOSE]}
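// Example usage (illustrative; assumes `json is a supported encoding in .req.tx/.req.ty):
//   .req.p["https://httpbin.org/post";`json;`a`b!1 2] / dict encoded via .req.tx`json, Content-Type set from .req.ty`json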
// @kind function
// @category public
// @fileoverview Send an HTTP DELETE request
// @param x {symbol|string|#hsym} URL
// @param y {dict} dictionary of custom HTTP headers to use
// @param z {string} body for HTTP request
// @return {(dict;string)|any} HTTP response (headers;body), or parsed if JSON
// @qlintsuppress RESERVED_NAME
.req.delete:{parseresp okstatus[.req.VERBOSE] send[`DELETE;x;y;z;.req.VERBOSE]}
// @kind function
// @category public
// @fileoverview Send an HTTP DELETE request, no body
// @param x {symbol|string|#hsym} URL
// @param y {dict} dictionary of custom HTTP headers to use
// @return {(dict;string)|any} HTTP response (headers;body), or parsed if JSON
.req.del:.req.delete[;;()]
\d .
================================================================================
FILE: reQ_req_status.q
SIZE: 426 characters
================================================================================
\d .status
// @kind function
// @category private
// @fileoverview get status "class" from status code, header dict or return object
// @param x {int|dict|(dict;string)} status code, header dict or return object
// @return {int} status class
class:{c:div[;100];$[0=type x;.z.s[first x];99=type x;c x`status;c x]} //get class from status code, header dict or return object
/TODO: add dict of status codes
\d .
================================================================================
FILE: reQ_req_timeout.q
SIZE: 899 characters
================================================================================
\d .req
parse¶
Parse a string
parse x parse[x]
Where x
is a string representing
- a well-formed q expression, returns a parse tree (V3.4 can accept newlines within the string; earlier versions cannot.)
- a function, returns the function
q)parse "1 2 3 + 5" / the list 1 2 3 is parsed as a single item
+
1 2 3
5
q)parse "{x*x}"
{x*x}
Warning
Should not be used with input data over 2GB in length (0Wi). Returns domain error with this condition since 4.1 2022.04.15.
A parse tree can clarify order of execution.
q)parse "1 2 3 +/: 5 7" / Each Right has postfix syntax
(/:;+)
1 2 3
5 7
q)parse "1 2 3 +neg 5 7" / neg is applied before +
+
1 2 3
(-:;5 7)
A parse tree can be executed with eval
.
q)eval parse "1 2 3 +/: 5 7"
6 7 8
8 9 10
Explicit definitions in .q
are shown in full:
q)foo:{x+2}
q)parse "foo each til 5"
k){x'y}
`foo
(k){$[-6h=@x;!x;'`type]};5)
The composition of eval
after parse
is essentially the q interpreter.
QSQL¶
QSQL queries are parsed to the corresponding functional form.
Example using parse on a QSQL statement against table sp
(created using sp.q
):
q)\l sp.q
q)x:parse "select part:p,qty by sup:s from sp where qty>200,p=`p1"
q)x
?
`sp
,((>;`qty;200);(=;`p;,`p1))
(,`sup)!,`s
`part`qty!`p`qty
q)eval x
sup| part qty
---| --------
s1 | p1 300
s2 | p1 300
Views¶
Views are special in that they are not parsable (sensibly) with -5!x
(parse
).
q)eval parse"a::5"
5
q)a
5
q)views[]
`symbol$()
pj
¶
Plus join
x pj y pj[x;y]
Where
- x and y are tables (since 4.1t 2023.08.04, if x is the name of a table, it is updated in place)
- y is keyed
- the key column/s of y are columns of x
returns x and y joined on the key columns of y.
pj adds matching records in y to those in x, by adding common columns, other than the key columns. These common columns must be of appropriate types for addition.
For each record in x:
- if there is a matching record in y, it is added to the x record
- if there is no matching record in y, common columns are left unchanged, and new columns are zero
q)show x:([]a:1 2 3;b:`x`y`z;c:10 20 30)
a b c
------
1 x 10
2 y 20
3 z 30
q)show y:([a:1 3;b:`x`z]c:1 2;d:10 20)
a b| c d
---| ----
1 x| 1 10
3 z| 2 20
q)x pj y
a b c d
---------
1 x 11 10
2 y 20 0
3 z 32 20
In the example above, pj
is equivalent to x+0^y[`a`b#x]
(compute the value of y
on a
and b
columns of x
, fill the result with zeros and add to x
).
Joins
Q for Mortals
§9.9.6 Plus Join
prd
, prds
¶
Product/s
prd
¶
Product
prd x prd[x]
Where x
is a numeric list, returns its product.
Nulls are treated as 1s.
q)prd 7 / product of atom (returned unchanged)
7
q)prd 2 3 5 7 / product of list
210
q)prd 2 3 0N 7 / 0N is treated as 1
42
q)prd (1 2 3 4;2 3 5 7) / product of list of lists
2 6 15 28
q)prd 101b
0b
q)prd "abc"
'type
prd
is an aggregate function, equivalent to */
.
prds
¶
Products
prds x prds[x]
Where x
is a numeric list, returns the cumulative products of its items.
q)prds 7 / atom is returned unchanged
7
q)prds 2 3 5 7 / cumulative products of list
2 6 30 210
q)prds 2 3 0N 7 / 0N is treated as 1
2 6 6 42
q)prds (1 2 3;2 3 5) / cumulative products of list of lists
1 2 3 / same as (1 2 3;1 2 3 * 2 3 5)
2 6 15
q)prds "abc" / type error if list is not numeric
'type
prds
is a uniform function, equivalent to *\
.
Implicit iteration¶
prd
and prds
apply to dictionaries and tables.
q)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 21 3;4 5 6)
q)d
a| 10 21 3
b| 4 5 6
q)t
a b
----
10 4
21 5
3 6
q)k
k | a b
---| ----
abc| 10 4
def| 21 5
ghi| 3 6
q)prd d
40 105 18
q)prds d
a| 10 21 3
b| 40 105 18
q)prd t
a| 630
b| 120
q)prds t
a b
-------
10 4
210 20
630 120
q)prd k
a| 630
b| 120
q)prds k
k | a b
---| -------
abc| 10 4
def| 210 20
ghi| 630 120
Domains and ranges¶
domain: b g x h i j e f c s p m d z n u v t
range: i . i i i j e f i . p m d z n u v t
prior
¶
v2 prior x prior[v2;x]
(vv)prior x prior[vv;x]
Where
v2
is a binary applicable valuevv
is a variadic applicable value
applies v2
or vv
to each item of x
and the item preceding it, and returns a result of the same length.
That is, the projections prior[v2;]
and prior[vv;]
are uniform functions.
q)(+) prior til 10
0 1 3 5 7 9 11 13 15 17
q){x+y%10}prior til 10
0n 1 2.1 3.2 4.3 5.4 6.5 7.6 8.7 9.8
prior
is a wrapper for the Each Prior iterator.
See the iterator for how the first item of the result is determined.
It is good q style to use prior
rather than the iterator, except where iterators are composed and brevity helps.
rand
¶
Pick randomly
rand x rand[x]
Pick an item from a list¶
Where x
is a list returns one item chosen randomly from x
q)rand 1 30 45 32
32
q)rand("abc";"def";"ghi") / list of lists
"ghi"
Pick a value at random¶
Where x
is an atom returns an atom of the same type.
q)rand 100
10
q)rand each 20#6 /roll twenty 6-sided dice
2 5 4 5 1 0 5 2 4 5 1 2 0 1 1 2 1 0 0 5
q)rand 3.14159
1.277572
q)rand 2012.09.12
2008.02.04
q)rand `3
`afe
Right domain and range are as for Roll and Deal.
Returns a single item
rand
is exactly equivalent to {first 1?x}
.
If you need a list result, use Roll.
The following expressions all roll a million six-sided dice.
q)\ts rand each 1000000#6
264 41166192
q)\ts {first 1?x}each 1000000#6
210 41166496
q)\ts 1000000?6 / Roll
6 8388800
rank
¶
Position in the sorted list
rank x rank[x]
Where x
is a list or dictionary, returns for each item in x
the index of where it would occur in the sorted list or dictionary.
This is the same as calling iasc
twice on the list.
q)rank 2 7 3 2 5
0 4 2 1 3
q)iasc 2 7 3 2 5
0 3 2 4 1
q)iasc iasc 2 7 3 2 5 / same as rank
0 4 2 1 3
q)asc[2 7 3 2 5] rank 2 7 3 2 5 / identity
2 7 3 2 5
q)iasc idesc 2 7 3 2 5 / descending rank
3 0 2 4 1
ratios
¶
Ratios between items
ratios y ratios[y]
Where y
is a non-symbolic sortable list, returns the ratios of the underlying values of consecutive pairs of items of y
.
ratios
is a uniform function.
Examples: queries to get returns on prices:
update ret:ratios price by sym from trade
select log ratios price from trade
In a query to get price movements:
update diff:deltas price by sym from trade
With signum
to count the number of up/down/same ticks:
q)select count i by signum deltas price from trade
price| x
-----| ----
-1 | 247
0 | 3
1 | 252
Implicit iteration¶
ratios
applies to dictionaries and tables.
q)k:`k xkey update k:`abc`def`ghi from t:flip d:`a`b!(10 21 3;4 5 6)
q)ratios d
a| 10 21 3
b| 0.4 0.2380952 2
q)ratios t
a b
--------------
10 4
2.1 1.25
0.1428571 1.2
q)ratios k
k | a b
---| --------------
abc| 10 4
def| 2.1 1.25
ghi| 0.1428571 1.2
First predecessor¶
The predecessor of the first item is 1.
q)ratios 2000 2005 2007 2012 2020
2000 1.0025 1.000998 1.002491 1.003976
It may be more convenient to have 1 as the first item of the result.
q)ratios0:{first[x]%':x}
q)ratios0 2000 2005 2007 2012 2020
1 1.0025 1.000998 1.002491 1.003976
Divide Each Prior
The derived function %':
(Divide Each Prior) used to define ratios
is variadic and can be applied as either a unary or a binary.
However, ratios
is supported only as a unary function.
For binary application, use the derived function.
raze
¶
Return the items of x
joined, collapsing one level of nesting
raze x raze[x]
To collapse all levels, use Converge i.e. raze/[x]
.
q)raze (1 2;3 4 5)
1 2 3 4 5
q)b:(1 2;(3 4;5 6);7;8)
q)raze b / flatten one level
1
2
3 4
5 6
7
8
q)raze/[b] / flatten all levels
1 2 3 4 5 6 7 8
q)raze 42 / atom returned as a list
,42
Returns the flattened values from a dictionary.
q)d:`q`w`e!(1 2;3 4;5 6)
q)value d
1 2
3 4
5 6
q)raze d
1 2 3 4 5 6
Use only on items that can be joined
raze
is the extension ,/
(Join Over) and requires items that can be joined together.
q)d:`a`b!(1 2;3 5)
q)10,d / cannot join integer and dictionary
'type
q)raze (10;d) / raze will not work
'type
read0
¶
Read text from a file or process handle
read0 f read0[f]
read0 (f;o;n) read0[(f;o;n)]
read0 h read0[h]
read0 (fifo;n) read0[(fifo;n)]
where
f
is a file symbol(f;o;n)
is a file descriptorh
is a system or connection handlefifo
is a communication handle to a Fifon
is a non-negative integer
returns character data from the source as follows.
File symbol¶
Returns the lines of the file as a list of strings. Lines are assumed delimited by either LF or CRLF, and the delimiters are removed.
q)`:test.txt 0:("hello";"goodbye") / write some text to a file
q)read0`:test.txt
"hello"
"goodbye"
q)/ read 500000 lines, chunks of (up to) 100000 at a time
q)d:raze{read0(`:/tmp/data;x;100000)}each 100000*til 5
File descriptor¶
Returns n
chars from the file, starting from the position o
.
q)`:foo 0: enlist "hello world"
q)read0 (`:foo;6;5)
"world"
System or process handle¶
Returns a line of text from the source.
q)rl:{1">> ";read0 0}
q)rl`
>> xiskso
"xiskso"
Reading the console permits interactive input.
q)1">> ";a:read0 0
>> whatever
q)a[4+til 4]
"ever"
Fifo/named pipe¶
Returns n
characters from the pipe.
(Since V3.4 2016.05.31)
q)h:hopen`$":fifo:///etc/redhat-release"
q)read0(h;8)
"Red Hat "
q)read0(h;8)
"Enterpri"
Connection handles,
File system,
Interprocess communication
Q for Mortals
§11.4.1 Reading and Writing Text Files
read1
¶
Read bytes from a file or named pipe
read1 f read1[f]
read1 (f;o;n) read1[(f;o;n)]
read1 h read1[h]
read1 (fifo;n) read1[(fifo;n)]
Where
f
is a file symbolo
is an offset as a non-negative integer/long(f;o;n)
is a file descriptorh
is a system or process handlefifo
is a communication handle to a Fifon
is a length as a non-negative integer/long
returns bytes from the source, as follows.
File¶
Where the argument is
- a file symbol, returns the entire content of the file
- a file descriptor
(f;o;n)
, returns up ton
bytes fromf
starting ato
- a file descriptor
(f;o)
, returns the entire content off
fromo
onwards
q)`:test.txt 0:("hello";"goodbye") / write some text to a file
q)read1`:test.txt / read in as bytes
0x68656c6c6f0a676f6f646279650a
q)"c"$read1`:test.txt / convert from bytes to char
"hello\ngoodbye\n"
q)/ read 500000 lines, chunks of (up to) 100000 at a time
q)d:raze{read1(`:/tmp/data;x;100000)}each 100000*til 5
Named pipe¶
(Since V3.4.) Where x
is
- a list
(fifo;length)
, returnslength
bytes read fromfifo
- an atom
fifo
, blocks and returns bytes fromfifo
when EOF is encountered (0#0x
if immediate)
q)h:hopen`$":fifo:///etc/redhat-release"
q)"c"$read1(h;8)
"Red Hat "
q)"c"$read1(h;8)
"Enterpri"
q)system"mkfifo somefifo";h:hopen`fifo:somefifo; 0N!read1 h; hclose h
// @kind function
// @category dataCheckUtility
// @desc Construct date time string path in appropriate format
// @param strPath {string} Date time path string
// @return {string} Date and time path converted to appropriate format
dataCheck.i.dateTimeStr:{[strPath]
ssr[strPath;":";"."]
}
// @kind function
// @category dataCheckUtility
// @desc Check if directories to be created already exist
// @param config {dictionary} Configuration information assigned by the user
// and related to the current run
// @return {::|err} Error if logfile or savePath already exists
dataCheck.i.fileNameCheck:{[config]
ignore:utils.ignoreWarnings;
if[config`overWriteFiles;ignore:0];
mainFileExists:$[0<config`saveOption;count key hsym`$config`mainSavePath;0];
loggingExists :$[utils.logging;count key hsym`$config`printFile;0];
dataCheck.i.delFiles[config;ignore;mainFileExists;loggingExists];
dataCheck.i.printWarning[config;ignore;mainFileExists;loggingExists];
modelName:$[-11h=type config`savedModelName;string;]config`savedModelName;
if[not`~config`savedModelName;
h:hopen hsym`$path,"/outputs/timeNameMapping.txt";
h .Q.s enlist[sum config`startDate`startTime]!enlist modelName;
hclose h;
]
}
// @kind function
// @category dataCheckUtility
// @desc Delete any previous save paths and logging paths if warnings
// are to be ignored
// @param config {dictionary} Configuration information assigned by the user
// and related to the current run
// @param ignore {int} The ignoreWarnings option set i.e. 0, 1 or 2
// @param mainFileExists {boolean} Whether the savePath exists if saveOption
// is greater than 0
// @param loggingExists {boolean} Whether the logging path exists if logging
// option is chosen
// @return {::} Delete save paths and logging files
dataCheck.i.delFiles:{[config;ignore;mainFileExists;loggingExists]
if[ignore=2;:()];
if[mainFileExists;system"rm -rf ",config[`mainSavePath]];
if[loggingExists;system"rm -rf ",config[`printFile]];
}
// @kind function
// @category dataCheckUtility
// @desc If savePath and logging already exist, give warning or
// error out depending on ignoreWarning option
// @param config {dictionary} Configuration information assigned by the user
// and related to the current run
// @param ignore {int} The utils.ignoreWarnings options set i.e. 0, 1 or 2
// @param mainFileExists {boolean} Whether the savePath exists if saveOption
// is greater than 0
// @param loggingExists {boolean} Whether the logging path exists if logging
// option is chosen
// @return {::|err} Error if logfile or savePath already exists or give warning
dataCheck.i.printWarning:{[config;ignore;mainFileExists;loggingExists]
if[ignore=0;:()];
index:$[ignore=2;0;1];
if[mainFileExists;
dataCheck.i.warningOption[config;ignore]
utils.printWarnings[`savePathExists]index
];
if[loggingExists;
dataCheck.i.warningOption[config;ignore]
utils.printWarnings[`loggingPathExists]index
];
}
// @kind function
// @category dataCheckUtility
// @desc How the warning should be handled depending on the
// ignoreWarning option chosen
// @param config {dictionary} Configuration information assigned by the user
// and related to the current run
// @param ignore {int} The utils.ignoreWarnings options set i.e. 0, 1 or 2
// @return {err|string} Print warning to screen/log file or error out
dataCheck.i.warningOption:{[config;ignore]
$[ignore=2;{'x};ignore=1;config`logFunc;]
}
================================================================================
FILE: ml_automl_code_nodes_dataPreprocessing_dataPreprocessing.q
SIZE: 1,100 characters
================================================================================
// code/nodes/dataPreprocessing/dataPreprocessing.q - Data preprocessing node
// Copyright (c) 2021 Kx Systems Inc
//
// Preprocess the dataset prior to application of ML algorithms. This includes
// the application of symbol encoding, handling of null data/infinities and
// removal of constant columns
\d .automl
// @kind function
// @category node
// @desc Preprocess input data based on the type of problem being solved and
// the parameters supplied by the user
// @param config {dictionary} Information related to the current run of AutoML
// @param features {table} Feature data as a table
// @param symEncode {dictionary} Columns to symbol encode and their required
// encoding
// @return {table} Feature table with the data preprocessed appropriately
dataPreprocessing.node.function:{[config;features;symEncode]
symTable:dataPreprocessing.symEncoding[features;config;symEncode];
dataPreprocessing.featPreprocess[symTable;config]
}
// Input information
dataPreprocessing.node.inputs:`config`features`symEncode!"!+S"
// Output information
dataPreprocessing.node.outputs:"+"
================================================================================
FILE: ml_automl_code_nodes_dataPreprocessing_funcs.q
SIZE: 3,945 characters
================================================================================
// code/nodes/dataPreprocessing/funcs.q - Functions called by dataPreprocessing
// Copyright (c) 2021 Kx Systems Inc
//
// Definitions of the main callable functions used in the application of
// .automl.dataPreprocessing
\d .automl
// @kind function
// @category dataPreprocessing
// @desc Symbol encoding applied to feature data
// @param features {table} Feature data as a table
// @param config {dictionary} Information relating to the current run of AutoML
// @param symEncode {dictionary} Columns to symbol encode and their required
// encoding
// @return {table} Feature table encoded appropriately for the task
dataPreprocessing.symEncoding:{[features;config;symEncode]
typ:config`featureExtractionType;
// If no symbol columns, return table or empty encoding schema
if[all{not` in x}each value symEncode;
if[count symEncode`freq;
features:$[`fresh~typ;
[aggColData:0!config[`aggregationColumns]xgroup features;
raze .ml.freqEncode[;symEncode`freq]each flip each aggColData
];
.ml.freqEncode[features;symEncode`freq]
];
];
features:.ml.oneHot.fitTransform[0!features;symEncode`ohe];
// Extract symbol columns from dictionary
symbolCols:distinct raze symEncode;
:flip symbolCols _ flip features
];
features
}
// @kind function
// @category dataPreprocessing
// @desc Apply preprocessing depending on feature extraction type
// @param features {table} Feature data as a table
// @param config {dictionary} Information relating to the current run of AutoML
// @return {table} Feature table with appropriate feature preprocessing applied
dataPreprocessing.featPreprocess:{[features;config]
typ:config`featureExtractionType;
// For FRESH the aggregate columns need to be excluded from the preprocessing
// steps. This ensures that encoding is not performed on the aggregate
// columns if this is a symbol and/or this column is constant.
if[`fresh=typ;
aggData:(config[`aggregationColumns],())#flip features;
features:flip(cols[features]except config`aggregationColumns)#flip features
];
featTable:$[not typ in`nlp;
dataPreprocessing.nonTextPreprocess features;
dataPreprocessing.textPreprocess features
];
// Rejoin separated aggregate columns for FRESH
config[`logFunc]utils.printDict`preproc;
$[`fresh=typ;flip[aggData],';]featTable
}
// @kind function
// @category dataPreprocessing
// @desc Apply preprocessing for non NLP feature extraction type
// @param features {table} Feature data as a table
// @return {table} Feature table with appropriate feature preprocessing applied
dataPreprocessing.nonTextPreprocess:{[features]
features:dataPreprocessing.nullEncode[features;med];
features:.ml.dropConstant features;
.ml.infReplace features
}
// @kind function
// @category dataPreprocessing
// @desc Apply preprocessing for NLP feature extraction type
// @param features {table} Feature data as a table
// @return {table} Feature table with appropriate feature preprocessing applied
dataPreprocessing.textPreprocess:{[features]
if[count[cols features]>count charCol:.ml.i.findCols[features;"C"];
nonTextPreproc:dataPreprocessing.nonTextPreprocess charCol _features;
:?[features;();0b;charCol!charCol],'nonTextPreproc
];
features
}
// @kind function
// @category dataPreprocessingUtility
// @desc Null encoding of feature data
// @param features {table} Feature data as a table
// @param func {fn} Function to be applied to column from which the value
// to fill nulls is derived (med/min/max)
// @return {table} Feature table with null values filled if required
dataPreprocessing.nullEncode:{[features;func]
nullCheck:flip null features;
nullFeat:where 0<sum each nullCheck;
nullValues:nullCheck nullFeat;
names:`$string[nullFeat],\:"_null";
// 0 filling needed if return value also null
// Encoding maintained through added columns
$[0=count nullFeat;
features;
flip 0^(func each flip features)^flip[features],names!nullValues
]
}
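// Example usage (illustrative): nulls are replaced using the supplied function
// and a matching "_null" indicator column is appended for each affected column
//   q) .automl.dataPreprocessing.nullEncode[([]x:1 2 0N 4 5f);med]
// returns a table in which the null in x is filled with the column median and
// an additional x_null column flags the row that was filled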
================================================================================
FILE: ml_automl_code_nodes_dataPreprocessing_init.q
SIZE: 269 characters
================================================================================
// code/nodes/dataPreprocessing/init.q - Load dataPreprocessing node
// Copyright (c) 2021 Kx Systems Inc
//
// Load code for dataPreprocessing node
\d .automl
loadfile`:code/nodes/dataPreprocessing/funcs.q
loadfile`:code/nodes/dataPreprocessing/dataPreprocessing.q
================================================================================
FILE: ml_automl_code_nodes_featureCreation_featureCreation.q
SIZE: 1,278 characters
================================================================================
// code/nodes/featureCreation/featureCreation.q - Feature Creation node
// Copyright (c) 2021 Kx Systems Inc
//
// This function contains the logic required to generate appropriate default
// or custom features for each of the problem types supported by AutoML
\d .automl
// @kind function
// @category node
// @desc Apply feature creation based on problem type. Individual
// functions relating to this functionality are use case dependent and
// contained within [fresh/normal/nlp]/featureCreate.q
// @param config {dictionary} Information related to the current run of AutoML
// @param features {table} Feature data as a table
// @return {dictionary} Features with additional features created along with
// time taken and any saved models
featureCreation.node.function:{[config;features]
typ:config`featureExtractionType;
$[typ=`fresh;
featureCreation.fresh.create[features;config];
typ=`normal;
featureCreation.normal.create[features;config];
typ=`nlp;
featureCreation.nlp.create[features;config];
'"Feature extraction type is not currently supported"
]
}
// Input information
featureCreation.node.inputs:`config`features!"!+"
// Output information
featureCreation.node.outputs:`creationTime`features`featModel!"t+<"
================================================================================
FILE: ml_automl_code_nodes_featureCreation_fresh_featureCreate.q
SIZE: 1,346 characters
================================================================================
// code/nodes/featureCreation/fresh/featureCreate.q - FRESH feature creation
// Copyright (c) 2021 Kx Systems Inc
//
// Create features using the fresh algorithm
\d .automl
// @kind function
// @category featureCreate
// @desc Create features using the FRESH algorithm
// @param features {table} Feature data as a table
// @param config {dictionary} Information related to the current run of AutoML
// @return {table} Features created in accordance with the FRESH feature
// creation procedure
featureCreation.fresh.create:{[features;config]
aggCols:config`aggregationColumns;
problemFunctions:config`functions;
params:$[type[problemFunctions]in -11 11h;
get;
99h=type problemFunctions;
;
'"Inappropriate type for FRESH parameter data"
]problemFunctions;
// Feature extraction should be performed on all columns that are
// non-aggregate columns
cols2use:cols[features]except aggCols;
featExtractStart:.z.T;
// Apply feature creation and encode nulls with the median value of column
features:value .ml.fresh.createFeatures[features;aggCols;cols2use;params];
features:dataPreprocessing.nullEncode[features;med];
features:.ml.infReplace features;
features:0^.ml.dropConstant features;
featExtractEnd:.z.T-featExtractStart;
`creationTime`features`featModel!(featExtractEnd;features;())
}
================================================================================
FILE: ml_automl_code_nodes_featureCreation_fresh_init.q
SIZE: 218 characters
================================================================================
// code/nodes/featureCreation/fresh/init.q - Load fresh code
// Copyright (c) 2021 Kx Systems Inc
//
// Load code for fresh featureCreation node
\d .automl
loadfile`:code/nodes/featureCreation/fresh/featureCreate.q
================================================================================
FILE: ml_automl_code_nodes_featureCreation_init.q
SIZE: 390 characters
================================================================================
// code/nodes/featureCreation/init.q - Load featureCreation node
// Copyright (c) 2021 Kx Systems Inc
//
// Load code for featureCreation node
\d .automl
loadfile`:code/nodes/featureCreation/featureCreation.q
loadfile`:code/nodes/featureCreation/normal/init.q
loadfile`:code/nodes/featureCreation/fresh/init.q
if[not checkimport[3];
loadfile`:code/nodes/featureCreation/nlp/init.q
]
================================================================================
FILE: ml_automl_code_nodes_featureCreation_nlp_featureCreate.q
SIZE: 1,304 characters
================================================================================
// code/nodes/featureCreation/nlp/featureCreate.q - Feature creation
// Copyright (c) 2021 Kx Systems Inc
//
// Apply NLP specific feature extraction on string characters and normal
// preprocessing methods to remaining data
\d .automl
expecTypes:`test`fuzz`perf!("should";"it holds that";"performs")
output:()!()
output[`top]:{[specs]
raze output.spec each specs
}
output[`spec]:{[spec];
if[spec[`result] ~ `pass; :""]; / Never print passed specs
o: spec[`title],"::\n";
o,: raze output[`expectation] each spec[`expectations] $[.tst.output.mode ~ `describe;
(::);
where spec[`expectations;;`result] <> `pass];
o
}
output[`expectation]:{[e];
o: "- ",expecTypes[e`type]," ",e[`desc],$[.tst.output.mode ~ `describe;"";":"],"\n";
if[not .tst.output.mode ~ `describe;
o,:output[e`type][e];
];
o
}
output[`code]:{[e];
o:"";
if[not "{}" ~ last value e[`before];o,:"Before code: \n", (last value e[`before]),"\n"];
o,:"Test code: \n",(last value e[`code]),"\n";
if[not "{}" ~ last value e[`after];o,:"After code: \n", (last value e[`after]),"\n"];
o
}
output[`anyFailures]:{[t];(`failures in key t) and count t[`failures]}
output[`assertsRun]:{[t];
(string t[`assertsRun]), $[1 = t[`assertsRun];" assertion was";" assertions were"]," run.\n"
}
output[`error]:{[e];
o:$[count e[`errorText];"Error: ",(string e[`result]), " '", e[`errorText],"\n";""];
if[not output[`anyFailures] e;o,:output[`assertsRun] e];
o
}
output[`test]:{[t];
o:"";
o,:output.error[t];
if[output[`anyFailures] t;
o,:raze "Failure: ",/:t[`failures],\:"\n";
o,:output[`assertsRun] t;
];
o,:output.code[t];
o,"\n"
}
output[`fuzzLimit]:10;
output[`fuzz]:{[f];
o:"";
o,:output.error[f];
/ If the fuzz assertion errors out after tests have been run, but not all failure processing has completed, the output will not print correctly
/ Consider trying to figure out how to print the fuzz that the test failed on (store last fuzz?)
if[(o~"") and output[`anyFailures] f;
o,:raze "Failure: ",/:f[`failures],\: "\n";
o,:"Maximum accepted failure rate: ", (string f[`maxFailRate]), "\n";
o,:"Failure rate was ", (string f[`failRate]), " for ", (string f[`runs]), " runs\n";
o,:"Displaying ", (string displayFuzz:min (.tst.output.fuzzLimit;count f[`fuzzFailureMessages])), " of ", (string count f[`fuzzFailureMessages]), " fuzz failures messages\n";
o,:raze (raze displayFuzz # f[`fuzzFailureMessages]),\:"\n";
];
o,:output.code[f];
o,"\n"
}
output[`perf]:{[p];
}
output[`always]:0b
output[`interactive]:1b
================================================================================
FILE: qspec_lib_output_xml.q
SIZE: 949 characters
================================================================================
\d .tst
xml:enlist[`]!enlist[::]
xml.safeString:{
if[not count x;:()];
$[10h = type x;
x;
0h <= type x;
// To ensure type conversion isn't an issue, we prepend and drop a dummy string
1 _ @[x;where not 10h = type each x:enlist[" "],x;string];
string x
]
}
xml.attrib:{
k:xml.safeString key x;
v:"\"",'(xml.entitySub each xml.safeString value x),'"\"";
" " sv k,'"=",'v
}
xml.node:{[name;attrib;body];
startNode:"<",(name:xml.safeString[name]),{$[count x;" ",x;""]}[xml.attrib[attrib]],$[count body;">";"/>"];
$[count body;
` sv (startNode;xml.safeString body;"</",name,">");
startNode
]
}
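/ Example (illustrative): a self-closing node is produced when the body is
/ empty, otherwise the body is wrapped in opening and closing tags
/   q) .tst.xml.node["test-case";`name`label!("spec1";"should pass");""]
/   "<test-case name=\"spec1\" label=\"should pass\"/>"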
xml.entitySub:{ssr/[x;"<>&'\"";("&lt;";"&gt;";"&amp;";"&apos;";"&quot;")]}
xml.bodySub:{
splitCDATA:{(0,raze[flip 0 3 + x ss/: ("<![[]CDATA[[]";"]]>")]) cut x};
raze @[s;i where not (i:til[count s:splitCDATA x]) mod 2;xml.entitySub]
}
xml.cdata:{[data]"<![CDATA[",xml.safeString[data],"]]>"}
================================================================================
FILE: qspec_lib_output_xunit.q
SIZE: 2,683 characters
================================================================================
.utl.require .tst.PKGNAME,"/output/xml.q"
\d .tst
expecTypes:`test`fuzz`perf!("should";"it holds that";"performs")
output:()!()
output[`top]:{[specs]
xml.node["test-suite";()!()] raze output.spec each specs
}
output[`spec]:{[spec];
e:spec`expectations;
attrs:`name`reports`skips`tests`errors`failures!(spec`title;1;0;sum e`assertsRun;sum e[`result] like "*Error";sum e[`result]=`testFail);
attrs,:(`$("test-cases";"errors-detail";"failures-detail"))!(count e;sum e[`result] like "*Error";sum count each e`failures);
xml.node["test-suite";attrs;-1 _ ` sv output[`expectation] each e]
}
output[`expectation]:{[e];
label: expecTypes[e`type]," ",name:e[`desc];
sysout:output[e`type][e];
atr:`name`label`errors`failures`skip`tests!(name;label;e[`result] like "*Error";count e`failures;0;e`assertsRun);
xml.node["test-case";atr] $[(e[`result] like "*Error") or count e`failures;
xml.node["sysout";()!();xml.bodySub sysout];
""
]
}
output[`code]:{[e];
o:"";
if[not "{}" ~ last value e[`before];o,:"Before code: \n", (last value e[`before]),"\n"];
o,:"Test code: \n",(last value e[`code]),"\n";
if[not "{}" ~ last value e[`after];o,:"After code: \n", (last value e[`after]),"\n"];
o
}
output[`anyFailures]:{[t];(`failures in key t) and count t[`failures]}
output[`assertsRun]:{[t];
(string t[`assertsRun]), $[1 = t[`assertsRun];" assertion was";" assertions were"]," run.\n"
}
output[`error]:{[e];
o:$[count e[`errorText];"Error: ",(string e[`result]), " '", e[`errorText],"\n";""];
if[not output[`anyFailures] e;o,:output[`assertsRun] e];
o
}
output[`test]:{[t];
o:"";
o,:output.error[t];
if[output[`anyFailures] t;
o,:raze "Failure: ",/:t[`failures],\:"\n";
o,:output[`assertsRun] t;
];
o,:output.code[t];
o,"\n"
}
output[`fuzzLimit]:10;
output[`fuzz]:{[f];
o:"";
o,:output.error[f];
/ If the fuzz assertion errors out after tests have been run, but not all failure processing has completed, the output will not print correctly
/ Consider trying to figure out how to print the fuzz that the test failed on (store last fuzz?)
if[(o~"") and output[`anyFailures] f;
o,:raze "Failure: ",/:f[`failures],\: "\n";
o,:"Maximum accepted failure rate: ", (string f[`maxFailRate]), "\n";
o,:"Failure rate was ", (string f[`failRate]), " for ", (string f[`runs]), " runs\n";
o,:"Displaying ", (string displayFuzz:min (.tst.output.fuzzLimit;count f[`fuzzFailureMessages])), " of ", (string count f[`fuzzFailureMessages]), " fuzz failures messages\n";
o,:raze (raze displayFuzz # f[`fuzzFailureMessages]),\:"\n";
];
o,:output.code[f];
o,"\n"
}
output[`perf]:{[p];
}
output[`always]:1b
output[`interactive]:0b
================================================================================
FILE: qspec_lib_tests_assertions.q
SIZE: 1,982 characters
================================================================================
\d .tst
asserts:()!()
asserts[`must]:{[val;message];
.tst.assertState.assertsRun+:1;
if[not all val;.tst.assertState.failures,: enlist message];
}
asserts[`musteq]:{[l;r]; asserts.must[l=r;"Expected ", (-3!l), " to be equal to ", (-3!r)]}
asserts[`mustmatch]:{[l;r]; asserts.must[l~r;"Expected ", (-3!l), " to match ", (-3!r)]}
asserts[`mustnmatch]:{[l;r]; asserts.must[not l~r;"Expected ", (-3!l), " to not match ", (-3!r)]}
asserts[`mustne]:{[l;r]; asserts.must[l<>r;"Expected ", (-3!l), " to not be equal to ", (-3!r)]}
asserts[`mustlt]:{[l;r]; asserts.must[l<r;"Expected ", (-3!l), " to be less than ", (-3!r)]}
asserts[`mustgt]:{[l;r]; asserts.must[l>r;"Expected ", (-3!l), " to be greater than ", (-3!r)]}
asserts[`mustlike]:{[l;r]; asserts.must[l like r;"Expected ", (-3!l), " to be like ", (-3!r)]}
asserts[`mustin]:{[l;r]; asserts.must[l in r;"Expected ", (-3!l), " to be in ", (-3!r)]}
asserts[`mustnin]:{[l;r]; asserts.must[not l in r;"Expected ", (-3!l), " to not be in ", (-3!r)]}
asserts[`mustwithin]:{[l;r]; asserts.must[l within r;"Expected ", (-3!l), " to be within ", (-3!r)]}
asserts[`mustdelta]:{[tol;l;r]; asserts.must[l within (r - abs tol;r + abs tol);"Expected ", (-3!l), " to be within +/-", (-3!tol), " of ", (-3!r)]}
asserts[`mustthrow]:{[e;c];
r:@[{value $[1 = count x;((),x),(::);x];""};c;(::)];
m:"Expected '", (-3!c), "' to throw ",$[not count e;
"an error.";
10h = type e;
[e: enlist e;
"the error '",(first e),"'."];
"one of the errors ", ("," sv {"'",x,"'"} each e), "."];
p:1b;
if[(not count r);m,:" No error thrown";p:0b];
if[(count e) and not any r like/:e;m,: " Error thrown: '",r,"'";p:0b];
asserts.must[p;m]
}
asserts[`mustnotthrow]:{[e;c];
r:@[{value $[1 = count x;((),x),(::);x];""};c;(::)];
m:"Expected '", (-3!c), "' to not throw ";
if[10h = type e;e:enlist e];
p:1b;
if[(count r) and not count e;m,:"an error. Error thrown: '",r,"'";p:0b];
if[any r like/:e;m,: "the error '",r,"'";p:0b];
asserts.must[p;m]
}
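/ Example usage (illustrative): the assertion helpers are mocked into the .q
/ namespace when an expectation runs, so test code can call them without the
/ asserts prefix
/   musteq[2+2;4]                    / passes
/   mustdelta[0.001;sqrt 2;1.4142]   / passes: within +/-0.001 of 1.4142
/   mustthrow["type";{`a+1}]         / passes: evaluating `a+1 signals 'type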
================================================================================
FILE: qspec_lib_tests_expec.q
SIZE: 1,809 characters
================================================================================
\d .tst
runExpec:{[spec;expec];
time:.z.n;
startExpec:expec;
expec:setupExpec[spec;expec];
beforeBad:`before;
expec,: @[{x[];()};expec`before;expecError[expec;"before"]];
beforeBad:`test;
/ Only run the expectation code when the setup works
if[not count expec[`result];expec,: @[callExpec;expec;expecError[expec;string expec[`type]]]];
beforeBad:`after;
expec,: @[{x[];()};expec`after;expecError[expec;"after"]];
expec:teardownExpec[spec;expec];
if[.tst.halt;
stageBadExpec[spec;startExpec;beforeBad];
];
expec[`time]:.z.n - time;
expec
}
stageBadExpec:{[spec;expec;beforeBad]
expec:setupExpec[spec;expec];
if[beforeBad ~ `before;:(::)];
expec,: @[{x[];()};expec`before;expecError[expec;"before"]];
if[beforeBad ~ `test;:(::)];
@[callExpec;expec;expecError[expec;string expec[`type]]];
}
setupExpec:{[spec;expec];
expec[`result]:();
((` sv `.q,) each uiRuntimeNames,key asserts) .tst.mock' uiRuntimeCode,value asserts;
system "d ", string .tst.context;
expec
}
teardownExpec:{[spec;expec];
system "d .tst";
.tst.restore[];
/ Clear any state for assertions
.tst.assertState:.tst.defaultAssertState;
.tst.callbacks.expecRan[spec;expec];
expec
}
expecError:{[expec;errorType;errorText];
expec[`result]: `$errorType,"Error";
expec[`errorText],:errorText;
expec[`failures]:.tst.assertState.failures;
expec[`assertsRun]:.tst.assertState.assertsRun;
expec
}
callExpec:{[expec];
$[expec[`type] in key runners;
runners[expec`type] expec;
'badExpecType]
}
// @kind function
// @category utility
// @desc Retrieve all files/models which meet the criteria
// set out by the date/time information provided by the user
// @param dateInfo {date|string} user provided string (for wildcarding)
// or individual date
// @param allDates {symbol[]} list of all folders contained within the
// .automl.path,"/outputs/dateTimeModels" folder
// @return {date|symbol[]} all dates matching the user provided criteria
utils.getRelevantDates:{[dateInfo;allDates]
if[0=count allDates;'"No dated models available"];
relevantDates:$[-14h=type dateInfo;
$[(`$string dateInfo)in allDates;
dateInfo;
'"startDate provided was not present within the list of available dates"
];
10h=abs type dateInfo;
$["*"~dateInfo;
allDates;
allDates where allDates like dateInfo
];
'"startDate provided must be an individual date or regex string"
];
if[0=count relevantDates;
'"No dates requested matched a presently saved model folder"
];
relevantDates
}
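// Example (illustrative, hypothetical folder names): wildcard matching of
// dated model folders
//   q) .automl.utils.getRelevantDates["2021*";`$("2020.12.31";"2021.01.01";"2021.02.01")]
//   `2021.01.01`2021.02.01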
// @kind function
// @category utility
// @desc Retrieve all files/models which meet the criteria
// set out by the date/time information provided by the user
// @param timeInfo {time|string} user provided string (for wildcarding)
// or individual time
// @param fileList {string[]} list of all folders matching the requested
// dates supplied by the user
// @return {string[]} all files meeting both the date and time criteria
// provided by the user.
utils.getRelevantFiles:{[timeInfo;fileList]
relevantFiles:$[-19h=type timeInfo;
$[any timedString:fileList like ("*",ssr[string[timeInfo];":";"."]);
fileList where timedString;
'"startTime provided was not present within the list of available times"
];
10h=abs type timeInfo;
$["*"~timeInfo;
fileList;
fileList where fileList like ("*",ssr[timeInfo;":";"."])
];
'"startTime provided must be an individual time or regex string"
];
if[0=count relevantFiles;
'"No files matching the user provided date and time were found for deletion"
];
relevantFiles
}
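// Example (illustrative, hypothetical file names): select files saved at a
// given time using a wildcard
//   q) .automl.utils.getRelevantFiles["12:00*";("run_12.00.00.000";"run_13.30.00.000")]
//   ,"run_12.00.00.000"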
// @kind function
// @category utility
// @desc Delete models based on named input, this may be a direct match
// or a regex matching string
// @param config {dictionary} User provided config containing `savedModelName,
// the name of the saved model to delete as a string
// (direct match/wildcard)
// @param pathStem {string} the start of all paths to be constructed, this
// is in the general case .automl.path,"/outputs/"
// @return {::|err} Null on success, error if attempting to delete folders
// which do not have a match
utils.deleteNamedModel:{[config;pathStem]
nameInfo:config[`savedModelName];
namedPathStem:pathStem,"namedModels/";
relevantNames:utils.getRelevantNames[nameInfo;namedPathStem];
namedPaths:namedPathStem,/:string relevantNames;
utils.deleteFromNameMapping[relevantNames;pathStem];
utils.deleteRecursively each hsym `$namedPaths;
}
// @kind function
// @category utility
// @desc Retrieve all named models matching the user supplied
// string representation of the search
// @param nameInfo {string} string used to compare all named models to
// during a search
// @param namedPathStem {string} the start of all paths to be constructed,
// in this case .automl.path,"/outputs/namedModels"
// @return {symbol[]} the names of all named models which match the user
// provided string pattern
utils.getRelevantNames:{[nameInfo;namedPathStem]
allNamedModels:key hsym`$namedPathStem;
if[0=count allNamedModels;'"No named models available"];
relevantModels:$[10h=abs type nameInfo;
$["*"~nameInfo;
allNamedModels;
allNamedModels where allNamedModels like nameInfo
];
'"savedModelName must be a string"
];
if[0=count relevantModels;
'"No files matching the user provided savedModelName were found for",
" deletion"
];
relevantModels
}
// @kind function
// @category utility
// @desc In the case that a named model is to be deleted, in order to
// facilitate retrieval of the 'nearest' timed model, a text file mapping timestamp
// to model name is provided. If a model is to be deleted then this timestamp
// also needs to be removed from the mapping. This function is used to
// facilitate this by rewriting the timeNameMapping.txt file following
// model deletion.
// @param relevantNames {symbol[]} the names of all named models which match
// the user provided string pattern
// @param pathStem {string} the start of all paths to be constructed,
// this is in the general case .automl.path,"/outputs"
// @return {::} On successful execution will return null, otherwise raises
// an error indicating that the timeNameMapping.txt file contains
// no information.
utils.deleteFromNameMapping:{[relevantNames;pathStem]
timeMapping:hsym`$pathStem,"timeNameMapping.txt";
fileInfo:("P*";"|")0:timeMapping;
if[all 0=count each fileInfo;
'"timeNameMapping.txt contains no information"
];
originalElements:til count first fileInfo;
modelNames:{trim x except ("\"";"\\")}each last fileInfo;
relevantNames:string relevantNames;
locs:raze{where x like y}[modelNames]each relevantNames;
relevantLocs:originalElements except locs;
relevantData:(first fileInfo;modelNames)@\:relevantLocs;
writeData:$[count relevantData;(!). relevantData;""];
hdel timeMapping;
h:hopen timeMapping;
if[not writeData~"";{x each .Q.s[y]}[h;writeData]];
hclose h;
}
================================================================================
FILE: ml_automl_init.q
SIZE: 1,243 characters
================================================================================
// init.q - Load automl library
// Copyright (c) 2021 Kx Systems Inc
//
// The automated machine-learning framework is built
// largely on the tools available within the Machine Learning Toolkit.
// The purpose of this framework is to help automate the process of
// applying machine-learning techniques to real-world problems. In the
// absence of expert machine-learning engineers this handles the
// processes within a traditional workflow.
\l ml/ml.q
.ml.loadfile`:init.q
\d .automl
// Load all nodes required for graph based on init file within
// associated folder
nodelist:`configuration`featureData`targetData`dataCheck`modelGeneration,
`featureDescription`labelEncode`dataPreprocessing`featureCreation,
`featureSignificance`trainTestSplit`runModels`selectModels`optimizeModels,
`preprocParams`predictParams`pathConstruct`saveGraph`saveMeta`saveReport,
`saveModels
loadfile`:code/commandLine/utils.q
loadfile`:code/commandLine/cli.q
{loadfile hsym`$"code/nodes/",string[x],"/init.q"}each nodelist;
loadfile`:code/customization/init.q
loadfile`:code/graph.q
loadfile`:code/aml.q
loadfile`:code/utils.q
\d .nlp
.automl.utils.loadNLP[]
\d .automl
-1"\nDocumentation can be found at https://code.kx.com/q/ml/automl/";
================================================================================
FILE: ml_ml_clust_aprop.q
SIZE: 2,576 characters
================================================================================
// clust/aprop.q - Affinity propagation
// Copyright (c) 2021 Kx Systems Inc
//
// Clustering using affinity propagation.
// Affinity Propagation groups data based on the similarity
// between points and subsequently finds exemplars, which best
// represent the points in each cluster. The algorithm does
// not require the number of clusters be provided at run time,
// but determines the optimum solution by exchanging real-valued
// messages between points until a high-valued set of exemplars
// is produced.
\d .ml
// @kind function
// @category clust
// @desc Fit affinity propagation algorithm
// @param data {float[][]} Each column of the data is an individual datapoint
// @param df {symbol} Distance function name within '.ml.clust.df'
// @param damp {float} Damping coefficient
// @param diag {fn} Function applied to the similarity matrix diagonal
// @param iter {dictionary} Max number of overall iterations and iterations
// without a change in clusters. (::) can be passed in which case the
// defaults of (`total`noChange!200 15) will be used
// @return {dictionary} Data, input variables, clusters and exemplars
// (`data`inputs`clust`exemplars) required, along with a projection of the
// predict function
clust.ap.fit:{[data;df;damp;diag;iter]
data:clust.i.floatConversion[data];
defaultDict:`run`total`noChange!0 200 15;
if[iter~(::);iter:()!()];
if[99h<>type iter;'"iter must be (::) or a dictionary"];
// Update iteration dictionary with user changes
updDict:defaultDict,iter;
// Cluster data using AP algo
modelInfo:clust.i.runAp[data;df;damp;diag;til count data 0;updDict];
returnInfo:enlist[`modelInfo]!enlist modelInfo;
predictFunc:clust.ap.predict returnInfo;
returnInfo,enlist[`predict]!enlist predictFunc
}
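// Example usage (illustrative; parameter values are arbitrary): fit on a small
// random 2-D dataset where each column is a datapoint, then predict clusters
// for new points
//   q) data:2 20#40?10f
//   q) model:.ml.clust.ap.fit[data;`nege2dist;0.3;med;::]
//   q) model[`modelInfo;`clust]     / cluster assignment per datapoint
//   q) model[`predict]2 3#6?10f     / clusters for three new datapoints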
// @kind function
// @category clust
// @desc Predict clusters using AP config
// @param config {dictionary} `data`inputs`clust`exemplars returned by the
// modelInfo key from the return of clust.ap.fit
// @param data {float[][]} Each column of the data is an individual datapoint
// @return {long[]} Predicted clusters
clust.ap.predict:{[config;data]
config:config`modelInfo;
data:clust.i.floatConversion[data];
if[-1~first config`clust;
'"'.ml.clust.ap.fit' did not converge, all clusters returned -1.",
" Cannot predict new data."
];
// Retrieve cluster centres from training data
exemp:config[`data][;distinct config`exemplars];
// Predict testing data clusters
data:$[0h=type data;flip;enlist]data;
clust.i.apPredDist[exemp;config[`inputs]`df]each data
}
================================================================================
FILE: ml_ml_clust_dbscan.q
SIZE: 4,968 characters
================================================================================
// clust/dbscan.q - DBSCAN clustering
// Copyright (c) 2021 Kx Systems Inc
//
// DBSCAN clustering.
// The Density-Based Spatial Clustering of Applications with Noise
// (DBSCAN) algorithm groups points that are closely packed in areas
// of high density. Any points in low-density regions are seen as outliers
\d .ml
// Density-Based Spatial Clustering of Applications with Noise (DBSCAN)
// @kind function
// @category clust
// @desc Fit DBSCAN algorithm to data
// @param data {float[][]} Each column of the data is an individual datapoint
// @param df {symbol} Distance function name within '.ml.clust.df'
// @param minPts {long} Minimum number of points with the epsilon radius
// @param eps {float} Epsilon radius to search
// @return {dictionary} A dictionary containing:
// modelInfo - Encapsulates all relevant information needed to fit
// the model `data`inputs`clust`tab, where data is the original data,
// inputs are the user defined minPts and eps, clust are the cluster
// assignments and tab is the neighbourhood table defining items in the
// clusters.
// predict - A projection allowing for prediction on new input data
// update - A projection allowing new data to be used to update
// cluster centers such that the model can react to new data
clust.dbscan.fit:{[data;df;minPts;eps]
data:clust.i.floatConversion[data];
// Check distance function
if[not df in key clust.i.df;clust.i.err.df[]];
// Create neighbourhood table
tab:clust.i.nbhoodTab[data;df;minPts;eps;til count data 0];
// Apply the density based clustering algorithm over the neighbourhood table
tab:{[t]any t`corePoint}clust.i.dbAlgo/tab;
// Find cluster for remaining points and return list of clusters
clust:-1^exec cluster from tab;
// Return config dict
inputDict:`df`minPts`eps!(df;minPts;eps);
modelInfo:`data`inputs`clust`tab!(data;inputDict;clust;tab);
returnInfo:enlist[`modelInfo]!enlist modelInfo;
predictFunc:clust.dbscan.predict returnInfo;
updFunc:clust.dbscan.update returnInfo;
returnInfo,`predict`update!(predictFunc;updFunc)
}
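// Example usage (illustrative; minPts and eps values are arbitrary): fit on a
// small random 2-D dataset where each column is a datapoint
//   q) data:2 20#40?10f
//   q) model:.ml.clust.dbscan.fit[data;`e2dist;3;0.5]
//   q) model[`modelInfo;`clust]     / -1 marks points treated as noise
//   q) model[`predict]2 3#6?10f     / clusters for three new datapoints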
// @kind function
// @category clust
// @desc Predict clusters using DBSCAN config
// @param config {dictionary} A dictionary returned from '.ml.clust.dbscan.fit'
// containing:
// modelInfo - Encapsulates all relevant information needed to fit
// the model `data`inputs`clust`tab, where data is the original data,
// inputs are the user defined minPts and eps, clust are the cluster
// assignments and tab is the neighbourhood table defining items in the
// clusters.
// predict - A projection allowing for prediction on new input data
// update - A projection allowing new data to be used to update
// cluster centers such that the model can react to new data
// @param data {float[][]} Each column of the data is an individual datapoint
// @return {long[]} Predicted clusters
clust.dbscan.predict:{[config;data]
config:config[`modelInfo];
data:clust.i.floatConversion[data];
// Predict new clusters
-1^exec cluster from clust.i.dbscanPredict[data;config]
}