format.ts

import { equal } from 'assert';
import html from '../src/markup/format/html';
import haml from '../src/markup/format/haml';
import pug from '../src/markup/format/pug';
import slim from '../src/markup/format/slim';
import parse from '../src/markup';
import createConfig, { Options } from '../src/config';

describe('Format', () => {
    const defaultConfig = createConfig();
    const field = createConfig({
        options: {
            'output.field': (index, placeholder) => placeholder ? `\${${index}:${placeholder}}` : `\${${index}}`
        }
    });

    function createProfile(options: Partial<Options>) {
        const config = createConfig({ options });
        return config;
    }

    describe('HTML', () => {
        const format = (abbr: string, config = defaultConfig) => html(parse(abbr, config), config);

        it('basic', () => {
            equal(format('div>p'), '<div>\n\t<p></p>\n</div>');
            equal(format('div>p*3'), '<div>\n\t<p></p>\n\t<p></p>\n\t<p></p>\n</div>');
            equal(format('div#a>p.b*2>span'), '<div id="a">\n\t<p class="b"><span></span></p>\n\t<p class="b"><span></span></p>\n</div>');
            equal(format('div>div>div'), '<div>\n\t<div>\n\t\t<div></div>\n\t</div>\n</div>');

            equal(format('table>tr*2>td{item}*2'),
                '<table>\n\t<tr>\n\t\t<td>item</td>\n\t\t<td>item</td>\n\t</tr>\n\t<tr>\n\t\t<td>item</td>\n\t\t<td>item</td>\n\t</tr>\n</table>');
        });

        it('inline elements', () => {
            const profile = createProfile({ 'output.inlineBreak': 3 });
            const breakInline = createProfile({ 'output.inlineBreak': 1 });
            const keepInline = createProfile({ 'output.inlineBreak': 0 });
            const xhtml = createProfile({ 'output.selfClosingStyle': 'xhtml' });

            equal(format('div>a>b*3', xhtml), '<div><a href=""><b></b><b></b><b></b></a></div>');

            equal(format('p>i', profile), '<p><i></i></p>');
            equal(format('p>i*2', profile), '<p><i></i><i></i></p>');
            equal(format('p>i*2', breakInline), '<p>\n\t<i></i>\n\t<i></i>\n</p>');
            equal(format('p>i*3', profile), '<p>\n\t<i></i>\n\t<i></i>\n\t<i></i>\n</p>');
            equal(format('p>i*3', keepInline), '<p><i></i><i></i><i></i></p>');

            equal(format('i*2', profile), '<i></i><i></i>');
            equal(format('i*3', profile), '<i></i>\n<i></i>\n<i></i>');
            equal(format('i{a}+i{b}', profile), '<i>a</i><i>b</i>');

            equal(format('img[src]/+p', xhtml), '<img src="" alt="" />\n<p></p>');
            equal(format('div>img[src]/+p', xhtml), '<div>\n\t<img src="" alt="" />\n\t<p></p>\n</div>');
            equal(format('div>p+img[src]/', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n</div>');
            equal(format('div>p+img[src]/+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n\t<p></p>\n</div>');
            equal(format('div>p+img[src]/*2+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" /><img src="" alt="" />\n\t<p></p>\n</div>');
            equal(format('div>p+img[src]/*3+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n\t<img src="" alt="" />\n\t<img src="" alt="" />\n\t<p></p>\n</div>');
        });

        it('generate fields', () => {
            equal(format('a[href]', field), '<a href="${1}">${2}</a>');
            equal(format('a[href]*2', field), '<a href="${1}">${2}</a><a href="${3}">${4}</a>');
            equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}\n${4} ${5:foo} ${6:bar}');
            equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar\n foo bar');
            equal(format('ul>li*2', field), '<ul>\n\t<li>${1}</li>\n\t<li>${2}</li>\n</ul>');
            equal(format('div>img[src]/', field), '<div><img src="${1}" alt="${2}"></div>');
        });

        // it.only('debug', () => {
        //     equal(format('div>{foo}+{bar}+p'), '<div>\n\tfoobar\n\t<p></p>\n</div>');
        // });

        it('mixed content', () => {
            equal(format('div{foo}'), '<div>foo</div>');
            equal(format('div>{foo}'), '<div>foo</div>');
            equal(format('div>{foo}+{bar}'), '<div>\n\tfoo\n\tbar\n</div>');
            equal(format('div>{foo}+{bar}+p'), '<div>\n\tfoo\n\tbar\n\t<p></p>\n</div>');
            equal(format('div>{foo}+{bar}+p+{foo}+{bar}+p'), '<div>\n\tfoo\n\tbar\n\t<p></p>\n\tfoo\n\tbar\n\t<p></p>\n</div>');
            equal(format('div>{foo}+p+{bar}'), '<div>\n\tfoo\n\t<p></p>\n\tbar\n</div>');
            equal(format('div>{foo}>p'), '<div>\n\tfoo\n\t<p></p>\n</div>');
            equal(format('div>{<!-- ${0} -->}'), '<div><!--  --></div>');
            equal(format('div>{<!-- ${0} -->}+p'), '<div>\n\t<!--  -->\n\t<p></p>\n</div>');
            equal(format('div>p+{<!-- ${0} -->}'), '<div>\n\t<p></p>\n\t<!--  -->\n</div>');
            equal(format('div>{<!-- ${0} -->}>p'), '<div>\n\t<!-- <p></p> -->\n</div>');
            equal(format('div>{<!-- ${0} -->}*2>p'), '<div>\n\t<!-- <p></p> -->\n\t<!-- <p></p> -->\n</div>');

            equal(format('div>{<!-- ${0} -->}>p*2'), '<div>\n\t<!--\n\t<p></p>\n\t<p></p>\n\t-->\n</div>');
            equal(format('div>{<!-- ${0} -->}*2>p*2'), '<div>\n\t<!--\n\t<p></p>\n\t<p></p>\n\t-->\n\t<!--\n\t<p></p>\n\t<p></p>\n\t-->\n</div>');

            equal(format('div>{<!-- ${0} -->}>b'), '<div>\n\t<!-- <b></b> -->\n</div>');
            equal(format('div>{<!-- ${0} -->}>b*2'), '<div>\n\t<!-- <b></b><b></b> -->\n</div>');
            equal(format('div>{<!-- ${0} -->}>b*3'), '<div>\n\t<!--\n\t<b></b>\n\t<b></b>\n\t<b></b>\n\t-->\n</div>');

            equal(format('div>{<!-- ${0} -->}', field), '<div><!-- ${1} --></div>');
            equal(format('div>{<!-- ${0} -->}>b', field), '<div>\n\t<!-- <b></b>${1} -->\n</div>');
        });

        it('self-closing', () => {
            const xmlStyle = createProfile({ 'output.selfClosingStyle': 'xml' });
            const htmlStyle = createProfile({ 'output.selfClosingStyle': 'html' });
            const xhtmlStyle = createProfile({ 'output.selfClosingStyle': 'xhtml' });

            equal(format('img[src]/', htmlStyle), '<img src="" alt="">');
            equal(format('img[src]/', xhtmlStyle), '<img src="" alt="" />');
            equal(format('img[src]/', xmlStyle), '<img src="" alt=""/>');
            equal(format('div>img[src]/', xhtmlStyle), '<div><img src="" alt="" /></div>');
        });

        it('boolean attributes', () => {
            const compact = createProfile({ 'output.compactBoolean': true });
            const noCompact = createProfile({ 'output.compactBoolean': false });

            equal(format('p[b.]', noCompact), '<p b="b"></p>');
            equal(format('p[b.]', compact), '<p b></p>');
            equal(format('p[contenteditable]', compact), '<p contenteditable></p>');
            equal(format('p[contenteditable]', noCompact), '<p contenteditable="contenteditable"></p>');
            equal(format('p[contenteditable=foo]', compact), '<p contenteditable="foo"></p>');
        });

        it('no formatting', () => {
            const profile = createProfile({ 'output.format': false });
            equal(format('div>p', profile), '<div><p></p></div>');
            equal(format('div>{foo}+p+{bar}', profile), '<div>foo<p></p>bar</div>');
            equal(format('div>{foo}>p', profile), '<div>foo<p></p></div>');
            equal(format('div>{<!-- ${0} -->}>p', profile), '<div><!-- <p></p> --></div>');
        });

        it('format specific nodes', () => {
            equal(format('{<!DOCTYPE html>}+html>(head>meta[charset=${charset}]/+title{${1:Document}})+body', field),
                '<!DOCTYPE html>\n<html>\n<head>\n\t<meta charset="UTF-8">\n\t<title>${2:Document}</title>\n</head>\n<body>\n\t${3}\n</body>\n</html>');
        });

        it('comment', () => {
            const opt = createConfig({ options: { 'comment.enabled': true } });

            equal(format('ul>li.item', opt), '<ul>\n\t<li class="item"></li>\n\t<!-- /.item -->\n</ul>');
            equal(format('div>ul>li.item#foo', opt), '<div>\n\t<ul>\n\t\t<li id="foo" class="item"></li>\n\t\t<!-- /#foo.item -->\n\t</ul>\n</div>');

            opt.options['comment.after'] = ' { [%ID] }';
            equal(format('div>ul>li.item#foo', opt), '<div>\n\t<ul>\n\t\t<li id="foo" class="item"></li> { %foo }\n\t</ul>\n</div>');
        });
    });

    describe('HAML', () => {
        const format = (abbr: string, config = defaultConfig) => haml(parse(abbr, config), config);

        it.only('basic', () => {
            equal(format('div#header>ul.nav>li[title=test].nav-item*2'),
                '#header\n\t%ul.nav\n\t\t%li.nav-item(title="test")\n\t\t%li.nav-item(title="test")');

            equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'),
                '#foo.bar(data-n1="v1" title="test" data-n2="v2")');

            let profile = createProfile({ compactBoolean: true });
            equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled foo="" title="test")/');

            profile = createProfile({ compactBoolean: false });
            equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled=true foo="" title="test")/');
        });
    });

    describe('Pug', () => {
        const format = (abbr: string, config = defaultConfig) => pug(parse(abbr, config), config);

        it('basic', () => {
            equal(format('div#header>ul.nav>li[title=test].nav-item*2'),
                '#header\n\tul.nav\n\t\tli.nav-item(title="test") \n\t\tli.nav-item(title="test") ');

            equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'),
                '#foo.bar(data-n1="v1", title="test", data-n2="v2") ');

            equal(format('input[disabled. foo title=test]'), 'input(type="text", disabled, foo="", title="test")');
            // Use closing slash for XML output format
            equal(format('input[disabled. foo title=test]', createProfile({ 'output.selfClosingStyle': 'xml' })), 'input(type="text", disabled, foo="", title="test")/');
        });

        it('nodes with text', () => {
            equal(format('{Text 1}'), 'Text 1');
            equal(format('span{Text 1}'), 'span Text 1');
            equal(format('span{Text 1}>b{Text 2}'), 'span Text 1\n\tb Text 2');
            equal(format('span{Text 1\nText 2}>b{Text 3}'), 'span\n\t| Text 1\n\t| Text 2\n\tb Text 3');
            equal(format('div>span{Text 1\nText 2}>b{Text 3}'), 'div\n\tspan\n\t\t| Text 1\n\t\t| Text 2\n\t\tb Text 3');
        });

        it('generate fields', () => {
            equal(format('a[href]', field), 'a(href="${1}") ${2}');
            equal(format('a[href]*2', field), 'a(href="${1}") ${2}\na(href="${3}") ${4}');
            equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}');
            equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar');
            equal(format('ul>li*2', field), 'ul\n\tli ${1}\n\tli ${2}');
            equal(format('div>img[src]/', field), 'div\n\timg(src="${1}", alt="${2}")');
        });
    });

    describe('Slim', () => {
        const format = (abbr: string, config = defaultConfig) => slim(parse(abbr, config), config);

        it('basic', () => {
            equal(format('div#header>ul.nav>li[title=test].nav-item*2'),
                '#header\n\tul.nav\n\t\tli.nav-item title="test" \n\t\tli.nav-item title="test" ');

            equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'),
                '#foo.bar data-n1="v1" title="test" data-n2="v2" ');

            // const profile = createProfile({ inlineBreak: 0 });
            // equal(format('ul>li>span{Text}', profile), 'ul\n\tli: span Text');
            // equal(format('ul>li>span{Text}'), 'ul\n\tli\n\t\tspan Text');
            // equal(format('ul>li>span{Text}*2', profile), 'ul\n\tli\n\t\tspan Text\n\t\tspan Text');
        });

        // it.skip('attribute wrappers', () => {
        //     equal(format('input[disabled. foo title=test]'), 'input disabled=true foo="" title="test"');
        //     equal(format('input[disabled. foo title=test]', null, { attributeWrap: 'round' }),
        //         'input(disabled foo="" title="test")');
        // });

        it('nodes with text', () => {
            equal(format('{Text 1}'), 'Text 1');
            equal(format('span{Text 1}'), 'span Text 1');
            equal(format('span{Text 1}>b{Text 2}'), 'span Text 1\n\tb Text 2');
            equal(format('span{Text 1\nText 2}>b{Text 3}'), 'span\n\t| Text 1\n\t| Text 2\n\tb Text 3');
            equal(format('div>span{Text 1\nText 2}>b{Text 3}'), 'div\n\tspan\n\t\t| Text 1\n\t\t| Text 2\n\t\tb Text 3');
        });

        it('generate fields', () => {
            equal(format('a[href]', field), 'a href="${1}" ${2}');
            equal(format('a[href]*2', field), 'a href="${1}" ${2}\na href="${3}" ${4}');
            equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}');
            equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar');
            equal(format('ul>li*2', field), 'ul\n\tli ${1}\n\tli ${2}');
            equal(format('div>img[src]/', field), 'div\n\timg src="${1}" alt="${2}"/');
        });
    });
});

Support HAML output

@@ -146,12 +146,12 @@ describe('Format', () => {
     describe('HAML', () => {
         const format = (abbr: string, config = defaultConfig) => haml(parse(abbr, config), config);
 
-        it.only('basic', () => {
+        it('basic', () => {
             equal(format('div#header>ul.nav>li[title=test].nav-item*2'),
-                '#header\n\t%ul.nav\n\t\t%li.nav-item(title="test")\n\t\t%li.nav-item(title="test")');
+                '#header\n\t%ul.nav\n\t\t%li.nav-item(title="test") \n\t\t%li.nav-item(title="test") ');
 
             equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'),
-                '#foo.bar(data-n1="v1" title="test" data-n2="v2")');
+                '#foo.bar(data-n1="v1" title="test" data-n2="v2") ');
 
             let profile = createProfile({ compactBoolean: true });
             equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled foo="" title="test")/');
@@ -159,5 +159,22 @@ describe('Format', () => {
             profile = createProfile({ compactBoolean: false });
             equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled=true foo="" title="test")/');
         });
+
+        it('nodes with text', () => {
+            equal(format('{Text 1}'), 'Text 1');
+            equal(format('span{Text 1}'), '%span Text 1');
+            equal(format('span{Text 1}>b{Text 2}'), '%span Text 1\n\t%b Text 2');
+            equal(format('span{Text 1\nText 2}>b{Text 3}'), '%span\n\tText 1 |\n\tText 2 |\n\t%b Text 3');
+            equal(format('div>span{Text 1\nText 2\nText 123}>b{Text 3}'), '%div\n\t%span\n\t\tText 1 |\n\t\tText 2 |\n\t\tText 123 |\n\t\t%b Text 3');
+        });
+
+        it('generate fields', () => {
+            equal(format('a[href]', field), '%a(href="${1}") ${2}');
+            equal(format('a[href]*2', field), '%a(href="${1}") ${2}\n%a(href="${3}") ${4}');
+            equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}');
+            equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar');
+            equal(format('ul>li*2', field), '%ul\n\t%li ${1}\n\t%li ${2}');
+            equal(format('div>img[src]/', field), '%div\n\t%img(src="${1}" alt="${2}")/');
+        });
     });
 });
jquery.meow.js

(function ($, window) {
  'use strict';
  // Meow queue
  var default_meow_area,
    meows = {
      queue: {},
      add: function (meow) {
        this.queue[meow.timestamp] = meow;
      },
      get: function (timestamp) {
        return this.queue[timestamp];
      },
      remove: function (timestamp) {
        delete this.queue[timestamp];
      },
      size: function () {
        var timestamp,
          size = 0;
        for (timestamp in this.queue) {
          if (this.queue.hasOwnProperty(timestamp)) { size += 1; }
        }
        return size;
      }
    },
    // Meow constructor
    Meow = function (options) {
      var that = this;

      this.timestamp = new Date().getTime(); // used to identify this meow and timeout
      this.hovered = false; // whether mouse is over or not

      if (typeof default_meow_area === 'undefined'
          && typeof options.container === 'undefined') {
        default_meow_area = $(window.document.createElement('div'))
          .attr({'id': ((new Date()).getTime()), 'class': 'meows'});
        $('body').prepend(default_meow_area);
      }

      if (typeof options.container !== 'undefined') {
        this.container = $(options.container);
      } else {
        this.container = default_meow_area;
      }

      if (typeof options.title === 'string') {
        this.title = options.title;
      }

      if (typeof options.message === 'string') {
        this.message = options.message;
      } else if (options.message instanceof $) {
        if (options.message.is('input,textarea,select')) {
          this.message = options.message.val();
        } else {
          this.message = options.message.text();
        }

        if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') {
          this.title = options.message.attr('title');
        }
      }

      if (typeof options.icon === 'string') {
        this.icon = options.icon;
      }
      if (options.sticky) {
        this.duration = Infinity;
      } else {
        this.duration = options.duration || 5000;
      }

      // Call callback if it's defined (this = meow object)
      if (typeof options.beforeCreate === 'function') {
        options.beforeCreate.call(that);
      }

      // Add the meow to the meow area
      this.container.append($(window.document.createElement('div'))
        .attr('id', 'meow-' + this.timestamp.toString())
        .addClass('meow')
        .html($(window.document.createElement('div')).addClass('inner').html(this.message))
        .hide()
        .fadeIn(400));

      this.manifest = $('#meow-' + this.timestamp.toString());

      // Add title if it's defined
      if (typeof this.title === 'string') {
        this.manifest.find('.inner').prepend(
          $(window.document.createElement('h1')).text(this.title)
        );
      }

      // Add icon if it's defined
      if (typeof that.icon === 'string') {
        this.manifest.find('.inner').prepend(
          $(window.document.createElement('div')).addClass('icon').html(
            $(window.document.createElement('img')).attr('src', this.icon)
          )
        );
      }

      // Add close button if the meow isn't uncloseable
      // TODO: this close button needs to be much prettier
      if (options.closeable !== false) {
        this.manifest.find('.inner').prepend(
          $(window.document.createElement('a'))
            .addClass('close')
            .html('&times;')
            .attr('href', '#close-meow-' + that.timestamp)
            .click(function (e) {
              e.preventDefault();
              that.destroy();
            })
        );
      }

      this.manifest.bind('mouseenter mouseleave', function (event) {
        if (event.type === 'mouseleave') {
          that.hovered = false;
          that.manifest.removeClass('hover');
          // Destroy the meow on mouseleave if it's timed out
          if (that.timestamp + that.duration <= new Date().getTime()) {
            that.destroy();
          }
        } else {
          that.hovered = true;
          that.manifest.addClass('hover');
        }
      });

      // Add a timeout unless the meow is sticky
      if (that.duration !== Infinity) {
        window.setTimeout(function () {
          if (typeof meows.get(that.timestamp) !== 'undefined') {
            // Call callback if it's defined (this = meow DOM element)
            if (typeof options.onTimeout === 'function') {
              options.onTimeout.call(that.manifest);
            }
            // Don't destroy if user is hovering over meow
            if (that.hovered !== true && typeof that === 'object') {
              that.destroy();
            }
          }
        }, that.duration);
      }

      this.destroy = function () {
        if (that.destroyed !== true) {
          // Call callback if it's defined (this = meow DOM element)
          if (typeof options.beforeDestroy === 'function') {
            options.beforeDestroy.call(that.manifest);
          }
          that.manifest.find('.inner').fadeTo(400, 0, function () {
            that.manifest.slideUp(function () {
              that.manifest.remove();
              that.destroyed = true;
              meows.remove(that.timestamp);
              if (typeof options.afterDestroy === 'function') {
                options.afterDestroy.call(null);
              }
              if (meows.size() <= 0) {
                if (default_meow_area instanceof $) {
                  default_meow_area.remove();
                  default_meow_area = undefined;
                }
                if (typeof options.afterDestroyLast === 'function') {
                  options.afterDestroyLast.call(null);
                }
              }
            });
          });
        }
      };
    };

JSLinted

@@ -37,8 +37,9 @@
         delete this.queue[timestamp];
       },
       size: function () {
-        var size = 0;
-        for (var timestamp in this.queue) {
+        var timestamp,
+          size = 0;
+        for (timestamp in this.queue) {
           if (this.queue.hasOwnProperty(timestamp)) { size += 1; }
         }
         return size;
@@ -135,7 +136,7 @@
           $(document.createElement('a'))
             .addClass('close')
             .html('&times;')
-            .attr('href', 'javascript:;')
+            .attr('href', '#close-meow-' + that.timestamp)
             .click(function (e) {
               e.preventDefault();
               that.destroy();
@@ -176,22 +177,22 @@
 
       this.destroy = function () {
         // Call callback if it's defined (this = meow DOM element)
-			if (typeof options.beforeDestroy === 'function') {
-			  options.beforeDestroy.call(that.manifest);
-			}
+        if (typeof options.beforeDestroy === 'function') {
+          options.beforeDestroy.call(that.manifest);
+        }
         that.manifest.find('.inner').fadeTo(400, 0, function () {
           that.manifest.slideUp(function () {
             that.manifest.remove();
             meows.remove(that.timestamp);
             if (typeof options.afterDestroy === 'function') {
-			      options.afterDestroy.call(null);
-			    }
-			    if (meows.size() <= 0) {
-			      $('#' + meow_area).remove();
-			    if (typeof options.lastDestroyed === 'function') {
-			      options.lastDestroyed.call(null);
-			    }
-			    }
+              options.afterDestroy.call(null);
+            }
+            if (meows.size() <= 0) {
+              $('#' + meow_area).remove();
+              if (typeof options.lastDestroyed === 'function') {
+                options.lastDestroyed.call(null);
+              }
+            }
           });
         });
       };

version.rb

# frozen_string_literal: true
module Split
  MAJOR = 1
  MINOR = 7
  PATCH = 0
  VERSION = [MAJOR, MINOR, PATCH].join('.')
end

v2.0.0

@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 module Split
-  MAJOR = 1
-  MINOR = 7
+  MAJOR = 2
+  MINOR = 0
   PATCH = 0
   VERSION = [MAJOR, MINOR, PATCH].join('.')
 end
trial.rb

# frozen_string_literal: true

module Split
  class Trial
    attr_accessor :goals
    attr_accessor :experiment
    attr_writer :metadata

    def initialize(attrs = {})
      self.experiment = attrs.delete(:experiment)
      self.alternative = attrs.delete(:alternative)
      self.metadata = attrs.delete(:metadata)
      self.goals = attrs.delete(:goals) || []

      @user = attrs.delete(:user)
      @options = attrs

      @alternative_chosen = false
    end

    def metadata
      @metadata ||= experiment.metadata[alternative.name] if experiment.metadata
    end

    def alternative
      @alternative ||= if @experiment.has_winner?
        @experiment.winner
      end
    end

    def alternative=(alternative)
      @alternative = if alternative.kind_of?(Split::Alternative)
        alternative
      else
        @experiment.alternatives.find { |a| a.name == alternative }
      end
    end

    def complete!(context = nil)
      if alternative
        if Array(goals).empty?
          alternative.increment_completion
        else
          Array(goals).each { |g| alternative.increment_completion(g) }
        end

        run_callback context, Split.configuration.on_trial_complete
      end
    end

    # Choose an alternative, add a participant, and save the alternative choice on the user. This
    # method is guaranteed to only run once, and will skip the alternative choosing process if run
    # a second time.
    def choose!(context = nil)
      @user.cleanup_old_experiments!
      # Only run the process once
      return alternative if @alternative_chosen

      new_participant = @user[@experiment.key].nil?
      if override_is_alternative?
        self.alternative = @options[:override]
        if should_store_alternative? && !@user[@experiment.key]
          self.alternative.increment_participation
        end
      elsif @options[:disabled] || Split.configuration.disabled?
        self.alternative = @experiment.control
      elsif @experiment.has_winner?
        self.alternative = @experiment.winner
      else
        cleanup_old_versions

        if exclude_user?
          self.alternative = @experiment.control
        else
          self.alternative = @user[@experiment.key]
          if alternative.nil?
            if @experiment.cohorting_disabled?
              self.alternative = @experiment.control
            else
              self.alternative = @experiment.next_alternative

              # Increment the number of participants since we are actually choosing a new alternative
              self.alternative.increment_participation
            end
          end
        end
      end

      @user[@experiment.key] = alternative.name unless @experiment.has_winner? || !should_store_alternative? || (new_participant && @experiment.cohorting_disabled?)
      @alternative_choosen = true
      run_callback context, Split.configuration.on_trial unless @options[:disabled] || Split.configuration.disabled? || (new_participant && @experiment.cohorting_disabled?)

      alternative
    end

    private

    def run_callback(context, callback_name)
      context.send(callback_name, self) if callback_name && context.respond_to?(callback_name, true)
    end

    def override_is_alternative?
      @experiment.alternatives.map(&:name).include?(@options[:override])
    end

    def should_store_alternative?
      if @options[:override] || @options[:disabled]
        Split.configuration.store_override
      else
        !exclude_user?
      end
    end

    def cleanup_old_versions
      if @experiment.version > 0
        @user.cleanup_old_versions!(@experiment)
      end
    end

    def exclude_user?
      @options[:exclude] || @experiment.start_time.nil? || @user.max_experiments_reached?(@experiment.key)
    end
  end
end

disable cohorting revision

@@ -85,9 +85,11 @@ module Split
         end
       end
 
-      @user[@experiment.key] = alternative.name unless @experiment.has_winner? || !should_store_alternative? || (new_participant && @experiment.cohorting_disabled?)
+      new_participant_and_cohorting_disabled = new_participant && @experiment.cohorting_disabled?
+
+      @user[@experiment.key] = alternative.name unless @experiment.has_winner? || !should_store_alternative? || new_participant_and_cohorting_disabled
       @alternative_choosen = true
-      run_callback context, Split.configuration.on_trial unless @options[:disabled] || Split.configuration.disabled? || (new_participant && @experiment.cohorting_disabled?)
+      run_callback context, Split.configuration.on_trial unless @options[:disabled] || Split.configuration.disabled? || new_participant_and_cohorting_disabled
       alternative
     end
 
|| new_participant_and_cohorting_disabled\n alternative\n end\n \n"},"addition_count":{"kind":"number","value":4,"string":"4"},"commit_subject":{"kind":"string","value":"disable cohorting revision"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676118,"cells":{"id":{"kind":"string","value":"10071768"},"text":{"kind":"string","value":" helper.rb\n module Split\n module Helper\n def ab_test(experiment_name, *alternatives, &block)\n experiment = Split::Experiment.find_or_create(experiment_name, *alternatives)\n if experiment.winner\n ret = experiment.winner.name\n module_function\n\n def ab_test(metric_descriptor, control = nil, *alternatives)\n begin\n experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)\n alternative = if Split.configuration.enabled && !exclude_visitor?\n experiment.save\n raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?\n trial = Trial.new(user: ab_user, experiment: experiment,\n override: override_alternative(experiment.name), exclude: exclude_visitor?,\n disabled: split_generically_disabled?)\n alt = trial.choose!(self)\n alt ? alt.name : nil\n else\n end\n end\n\n ret = yield(ret) if block_given?\n ret\n end\n\n def finished(experiment_name)\n else\n alternative\n end\n end\n\n def reset!(experiment)\n ab_user.delete(experiment.key)\n end\n\n def finish_experiment(experiment, options = { reset: true })\n return false if active_experiments[experiment.name].nil?\n return true if experiment.has_winner?\n should_reset = experiment.resettable? && options[:reset]\n if ab_user[experiment.finished_key] && !should_reset\n true\n else\n alternative_name = ab_user[experiment.key]\n trial = Trial.new(\n user: ab_user,\n experiment: experiment,\n alternative: alternative_name,\n goals: options[:goals],\n )\n\n trial.complete!(self)\n\n if should_reset\n reset!(experiment)\n else\n ab_user[experiment.finished_key] = true\n end\n end\n end\n\n def ab_finished(metric_descriptor, options = { reset: true })\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n next if override_present?(experiment.key)\n finish_experiment(experiment, options.merge(goals: goals))\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? 
|| Split.configuration.disabled?\n metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n alternative_name = ab_user[experiment.key]\n\n if alternative_name\n alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }\n alternative.record_extra_info(key, value) if alternative\n end\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_active_experiments\n ab_user.active_experiments\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def override_present?(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative_by_params(experiment_name)\n defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]\n end\n\n def override_alternative_by_cookies(experiment_name)\n return unless defined?(request)\n\n if request.cookies && request.cookies.key?(\"split_override\")\n experiments = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n experiments[experiment_name]\n end\n end\n\n def split_generically_disabled?\n defined?(params) && params[\"SPLIT_DISABLE\"]\n end\n\n def ab_user\n @ab_user ||= User.new(self)\n end\n\n def exclude_visitor?\n defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?)\n end\n\n def is_robot?\n defined?(request) && request.user_agent =~ Split.configuration.robot_regex\n end\n\n def is_preview?\n defined?(request) && defined?(request.headers) && request.headers[\"x-purpose\"] == \"preview\"\n end\n\n def is_ignored_ip_address?\n return false if Split.configuration.ignore_ip_addresses.empty?\n\n Split.configuration.ignore_ip_addresses.each do |ip|\n return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))\n end\n false\n end\n\n def active_experiments\n ab_user.active_experiments\n end\n\n def normalize_metric(metric_descriptor)\n if Hash === metric_descriptor\n experiment_name = metric_descriptor.keys.first\n goals = Array(metric_descriptor.values.first)\n else\n experiment_name = metric_descriptor\n goals = []\n end\n return experiment_name, goals\n end\n\n def control_variable(control)\n Hash === control ? 
control.keys.first.to_s : control.to_s\n end\n end\nend\n\n ab_test can now be passed a block in a rails view as well, closes #4\n @@ -1,6 +1,6 @@\n module Split\n module Helper\n- def ab_test(experiment_name, *alternatives, &block)\n+ def ab_test(experiment_name, *alternatives)\n experiment = Split::Experiment.find_or_create(experiment_name, *alternatives)\n if experiment.winner\n ret = experiment.winner.name\n@@ -21,8 +21,17 @@ module Split\n end\n end\n \n- ret = yield(ret) if block_given?\n- ret\n+ if block_given?\n+ if defined?(capture) # a block in a rails view\n+ block = Proc.new { yield(ret) }\n+ concat(capture(ret, &block))\n+ false\n+ else\n+ yield(ret)\n+ end\n+ else\n+ ret\n+ end\n end\n \n def finished(experiment_name)\n"},"addition_count":{"kind":"number","value":12,"string":"12"},"commit_subject":{"kind":"string","value":"ab_test can now be passed a block in a rails view as well, closes #4"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676119,"cells":{"id":{"kind":"string","value":"10071769"},"text":{"kind":"string","value":" helper.rb\n module Split\n module Helper\n def ab_test(experiment_name, *alternatives, &block)\n experiment = Split::Experiment.find_or_create(experiment_name, *alternatives)\n if experiment.winner\n ret = experiment.winner.name\n module_function\n\n def ab_test(metric_descriptor, control = nil, *alternatives)\n begin\n experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)\n alternative = if Split.configuration.enabled && !exclude_visitor?\n experiment.save\n raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?\n trial = Trial.new(user: ab_user, experiment: experiment,\n override: override_alternative(experiment.name), exclude: exclude_visitor?,\n disabled: split_generically_disabled?)\n alt = trial.choose!(self)\n alt ? alt.name : nil\n else\n end\n end\n\n ret = yield(ret) if block_given?\n ret\n end\n\n def finished(experiment_name)\n else\n alternative\n end\n end\n\n def reset!(experiment)\n ab_user.delete(experiment.key)\n end\n\n def finish_experiment(experiment, options = { reset: true })\n return false if active_experiments[experiment.name].nil?\n return true if experiment.has_winner?\n should_reset = experiment.resettable? && options[:reset]\n if ab_user[experiment.finished_key] && !should_reset\n true\n else\n alternative_name = ab_user[experiment.key]\n trial = Trial.new(\n user: ab_user,\n experiment: experiment,\n alternative: alternative_name,\n goals: options[:goals],\n )\n\n trial.complete!(self)\n\n if should_reset\n reset!(experiment)\n else\n ab_user[experiment.finished_key] = true\n end\n end\n end\n\n def ab_finished(metric_descriptor, options = { reset: true })\n return if exclude_visitor? 
|| Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n next if override_present?(experiment.key)\n finish_experiment(experiment, options.merge(goals: goals))\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n alternative_name = ab_user[experiment.key]\n\n if alternative_name\n alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }\n alternative.record_extra_info(key, value) if alternative\n end\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_active_experiments\n ab_user.active_experiments\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def override_present?(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative_by_params(experiment_name)\n defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]\n end\n\n def override_alternative_by_cookies(experiment_name)\n return unless defined?(request)\n\n if request.cookies && request.cookies.key?(\"split_override\")\n experiments = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n experiments[experiment_name]\n end\n end\n\n def split_generically_disabled?\n defined?(params) && params[\"SPLIT_DISABLE\"]\n end\n\n def ab_user\n @ab_user ||= User.new(self)\n end\n\n def exclude_visitor?\n defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?)\n end\n\n def is_robot?\n defined?(request) && request.user_agent =~ Split.configuration.robot_regex\n end\n\n def is_preview?\n defined?(request) && defined?(request.headers) && request.headers[\"x-purpose\"] == \"preview\"\n end\n\n def is_ignored_ip_address?\n return false if Split.configuration.ignore_ip_addresses.empty?\n\n Split.configuration.ignore_ip_addresses.each do |ip|\n return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))\n end\n false\n end\n\n def active_experiments\n ab_user.active_experiments\n end\n\n def normalize_metric(metric_descriptor)\n if Hash === metric_descriptor\n experiment_name = metric_descriptor.keys.first\n goals = Array(metric_descriptor.values.first)\n else\n experiment_name = metric_descriptor\n goals = []\n end\n return experiment_name, goals\n end\n\n def control_variable(control)\n Hash === control ? 
control.keys.first.to_s : control.to_s\n end\n end\nend\n\n ab_test can now be passed a block in a rails view as well, closes #4\n @@ -1,6 +1,6 @@\n module Split\n module Helper\n- def ab_test(experiment_name, *alternatives, &block)\n+ def ab_test(experiment_name, *alternatives)\n experiment = Split::Experiment.find_or_create(experiment_name, *alternatives)\n if experiment.winner\n ret = experiment.winner.name\n@@ -21,8 +21,17 @@ module Split\n end\n end\n \n- ret = yield(ret) if block_given?\n- ret\n+ if block_given?\n+ if defined?(capture) # a block in a rails view\n+ block = Proc.new { yield(ret) }\n+ concat(capture(ret, &block))\n+ false\n+ else\n+ yield(ret)\n+ end\n+ else\n+ ret\n+ end\n end\n \n def finished(experiment_name)\n"},"addition_count":{"kind":"number","value":12,"string":"12"},"commit_subject":{"kind":"string","value":"ab_test can now be passed a block in a rails view as well, closes #4"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676120,"cells":{"id":{"kind":"string","value":"10071770"},"text":{"kind":"string","value":" helper.rb\n module Split\n module Helper\n def ab_test(experiment_name, *alternatives, &block)\n experiment = Split::Experiment.find_or_create(experiment_name, *alternatives)\n if experiment.winner\n ret = experiment.winner.name\n module_function\n\n def ab_test(metric_descriptor, control = nil, *alternatives)\n begin\n experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)\n alternative = if Split.configuration.enabled && !exclude_visitor?\n experiment.save\n raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?\n trial = Trial.new(user: ab_user, experiment: experiment,\n override: override_alternative(experiment.name), exclude: exclude_visitor?,\n disabled: split_generically_disabled?)\n alt = trial.choose!(self)\n alt ? alt.name : nil\n else\n end\n end\n\n ret = yield(ret) if block_given?\n ret\n end\n\n def finished(experiment_name)\n else\n alternative\n end\n end\n\n def reset!(experiment)\n ab_user.delete(experiment.key)\n end\n\n def finish_experiment(experiment, options = { reset: true })\n return false if active_experiments[experiment.name].nil?\n return true if experiment.has_winner?\n should_reset = experiment.resettable? && options[:reset]\n if ab_user[experiment.finished_key] && !should_reset\n true\n else\n alternative_name = ab_user[experiment.key]\n trial = Trial.new(\n user: ab_user,\n experiment: experiment,\n alternative: alternative_name,\n goals: options[:goals],\n )\n\n trial.complete!(self)\n\n if should_reset\n reset!(experiment)\n else\n ab_user[experiment.finished_key] = true\n end\n end\n end\n\n def ab_finished(metric_descriptor, options = { reset: true })\n return if exclude_visitor? 
|| Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n next if override_present?(experiment.key)\n finish_experiment(experiment, options.merge(goals: goals))\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n alternative_name = ab_user[experiment.key]\n\n if alternative_name\n alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }\n alternative.record_extra_info(key, value) if alternative\n end\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_active_experiments\n ab_user.active_experiments\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def override_present?(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative_by_params(experiment_name)\n defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]\n end\n\n def override_alternative_by_cookies(experiment_name)\n return unless defined?(request)\n\n if request.cookies && request.cookies.key?(\"split_override\")\n experiments = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n experiments[experiment_name]\n end\n end\n\n def split_generically_disabled?\n defined?(params) && params[\"SPLIT_DISABLE\"]\n end\n\n def ab_user\n @ab_user ||= User.new(self)\n end\n\n def exclude_visitor?\n defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?)\n end\n\n def is_robot?\n defined?(request) && request.user_agent =~ Split.configuration.robot_regex\n end\n\n def is_preview?\n defined?(request) && defined?(request.headers) && request.headers[\"x-purpose\"] == \"preview\"\n end\n\n def is_ignored_ip_address?\n return false if Split.configuration.ignore_ip_addresses.empty?\n\n Split.configuration.ignore_ip_addresses.each do |ip|\n return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))\n end\n false\n end\n\n def active_experiments\n ab_user.active_experiments\n end\n\n def normalize_metric(metric_descriptor)\n if Hash === metric_descriptor\n experiment_name = metric_descriptor.keys.first\n goals = Array(metric_descriptor.values.first)\n else\n experiment_name = metric_descriptor\n goals = []\n end\n return experiment_name, goals\n end\n\n def control_variable(control)\n Hash === control ? 
control.keys.first.to_s : control.to_s\n end\n end\nend\n\n ab_test can now be passed a block in a rails view as well, closes #4\n @@ -1,6 +1,6 @@\n module Split\n module Helper\n- def ab_test(experiment_name, *alternatives, &block)\n+ def ab_test(experiment_name, *alternatives)\n experiment = Split::Experiment.find_or_create(experiment_name, *alternatives)\n if experiment.winner\n ret = experiment.winner.name\n@@ -21,8 +21,17 @@ module Split\n end\n end\n \n- ret = yield(ret) if block_given?\n- ret\n+ if block_given?\n+ if defined?(capture) # a block in a rails view\n+ block = Proc.new { yield(ret) }\n+ concat(capture(ret, &block))\n+ false\n+ else\n+ yield(ret)\n+ end\n+ else\n+ ret\n+ end\n end\n \n def finished(experiment_name)\n"},"addition_count":{"kind":"number","value":12,"string":"12"},"commit_subject":{"kind":"string","value":"ab_test can now be passed a block in a rails view as well, closes #4"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676121,"cells":{"id":{"kind":"string","value":"10071771"},"text":{"kind":"string","value":" demo.js\n ADDFILE\n Rename elements to components\n\n @@ -0,0 +1,2 @@\n+angular\n+ .module('demo', ['semantic.ui.components.divider']);\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Rename elements to components"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"js"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676122,"cells":{"id":{"kind":"string","value":"10071772"},"text":{"kind":"string","value":" demo.js\n ADDFILE\n Rename elements to components\n\n @@ -0,0 +1,2 @@\n+angular\n+ .module('demo', ['semantic.ui.components.divider']);\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Rename elements to components"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"js"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676123,"cells":{"id":{"kind":"string","value":"10071773"},"text":{"kind":"string","value":" demo.js\n ADDFILE\n Rename elements to components\n\n @@ -0,0 +1,2 @@\n+angular\n+ .module('demo', ['semantic.ui.components.divider']);\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Rename elements to components"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"js"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676124,"cells":{"id":{"kind":"string","value":"10071774"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code 
Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)
[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)

Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.

## Install

### Requirements

Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.

If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).

Split uses Redis as a datastore.

Split only supports Redis 4.0 or greater.

If you're on OS X, Homebrew is the simplest way to install Redis:

```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```

You now have a Redis daemon running on port `6379`.

### Setup

```bash
gem install split
```

#### Rails

Adding `gem 'split'` to your Gemfile will autoload it when Rails starts up. As long as you've configured Redis, it will 'just work'.

#### Sinatra

To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:

```ruby
require 'split'

class MySinatraApp < Sinatra::Base
  enable :sessions
  helpers Split::Helper

  get '/' do
    ...
  end
end
```

## Usage

To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.

`ab_test` returns one of the alternatives; if a user has already seen that test, they will get the same alternative as before. You can use the returned value to branch your code.

It can be used to render different templates, show different text or any other case-based logic.

`ab_finished` is used to mark the completion of an experiment, i.e. a conversion.

Example: View

```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
  <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```

Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  ab_finished(:new_user_free_points)
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).

## Statistical Validity

Split has two options for you to use to determine which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not tell you which alternative is best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
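To make the first option concrete, here is a minimal sketch of the two-proportion z-statistic described above. The `z_score` helper is purely illustrative and is not part of Split's API; the dashboard performs this calculation for you.

```ruby
# Illustrative only -- not Split's API. Computes the (unpooled)
# two-proportion z-statistic for an alternative vs. the control.
def z_score(control_conversions, control_participants, alt_conversions, alt_participants)
  p_control = control_conversions.to_f / control_participants
  p_alt     = alt_conversions.to_f / alt_participants
  variance  = p_control * (1 - p_control) / control_participants +
              p_alt * (1 - p_alt) / alt_participants
  (p_alt - p_control) / Math.sqrt(variance)
end

z_score(25, 500, 40, 500)
# => ~1.93, which clears the 90% threshold (|z| >= 1.645) but not 95% (1.96)
```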
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).

[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.

To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the URL.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a URL such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons.
This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.

### Rspec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper

  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`.

After choosing this option tests won't start right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized.

### Reset after completion

When a user completes a test their session is reset so that they may start the test again in the future.

To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.

You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
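Both `start_manually` (above) and `reset_manually` are ordinary configuration options; a minimal initializer sketch, with illustrative values:

```ruby
# config/initializers/split.rb -- illustrative sketch
Split.configure do |config|
  config.start_manually = true # experiments wait for the dashboard's Start button
  config.reset_manually = true # keep collected data when an experiment's definition changes
end
```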
### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling `ab_test` will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example: `{ "experiment_name" => "variant_a" }`

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key (if a user is in multiple experiments, the most recent update will reset the TTL for all their assignments)
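For illustration, here is a sketch combining the options above; the namespace string and the 30-day TTL are arbitrary example values, not defaults:

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(
    lookup_by: :current_user_id,      # any method your controllers respond to
    namespace: "split_assignments",   # example value; defaults to "persistence"
    expire_seconds: 60 * 60 * 24 * 30 # example 30-day TTL for each user key
  )
end
```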
#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
    lookup_by: -> (context) { context.send(:current_user).try(:id) },
    expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
    logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
    logged_in_adapter: redis_adapter,
    logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```
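For illustration, a sketch of what such an adapter might look like, assuming the same interface the built-in adapters expose (`initialize(context)`, `[]`, `[]=`, `delete` and `keys`). The `current_user` method and the `split_assignments` hash column are assumptions about your application, not part of Split:

```ruby
# Hypothetical adapter persisting assignments on a user record.
# Assumes User has a serialized Hash column `split_assignments`.
class UserRecordAdapter
  def initialize(context)
    @user = context.send(:current_user) # assumption: a logged-in user is present
  end

  def [](key)
    @user.split_assignments[key]
  end

  def []=(key, value)
    @user.split_assignments[key] = value
    @user.save!
  end

  def delete(key)
    @user.split_assignments.delete(key)
    @user.save!
  end

  def keys
    @user.split_assignments.keys
  end
end
```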
### Trial Event Hooks

You can define methods that will be called at the same time as experiment
alternative participation and goal completion.

For example:

``` ruby
Split.configure do |config|
  config.on_trial = :log_trial # run on every trial
  config.on_trial_choose = :log_trial_choose # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the
`ab_test` method. These methods should accept one argument, a `Trial` instance.

``` ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event
hook callback as a
[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)
in the controller:

``` ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

``` ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset  = -> (experiment) { } # Do something on reset
  config.on_experiment_delete = -> (experiment) { } # Do something else on delete
  # before experiment reset or deleted
  config.on_before_experiment_reset  = -> (experiment) { } # Do something on reset
  config.on_before_experiment_delete = -> (experiment) { } # Do something else on delete
  # after experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) { } # Do something on winner choose
end
```

## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2, you can mount this inside your app using `Rack::URLMap` in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"      => Your::App.new,
  "/split" => Split::Dashboard.new
```

If you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then add this to `config/routes.rb`:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your Split initializer file):

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```

You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate! # authenticate if not already
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  #config.start_manually = false ## new test will have to be started manually from the admin panel. default false
  #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified by the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to
determine which env variable key to use when retrieving the host config. This
defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```

### Experiment configuration

Instead of providing the experiment options inline, you can store them
in a hash.
This hash can control your experiment's alternatives, weights,
algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    my_second_experiment: {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => {"text" => "Have a fantastic day"},
        "b" => {"text" => "Don't get hit by a bus"}
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"

trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <%= meta['text'] %>
<% end %>
```

The keys used in metadata should be Strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use
those to complete multiple different experiments without adding more to
your code. You can use the configuration hash to do this, thanks to
the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of
the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object.

```ruby
my_metric = Split::Metric.new(name: :my_metric)
my_metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, that should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal.

Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register.
(Assuming the test runs with `reset: false`.)

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:

```ruby
Split.configuration.experiments = {
  button_color_experiment: {
    alternatives: ["blue", "green"],
    combined_experiments: ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper

  ```ruby
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, you can switch on the
`db_failover` config option, so that `ab_test` and `ab_finished` will not
crash in case of a db failure. `ab_test` always delivers alternative A
(the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc),
for example to log these errors via `Rails.logger`.

### Redis

You may want to change the Redis host and port Split connects to, or
set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis
object. This means if you're already using Redis in your app, Split
can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our rails app we have a `config/initializers/split.rb` file where
we load `config/split.yml` by hand and set the Redis information
appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated
fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want
to namespace the keyspaces so they do not overlap. This is not unlike
the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)
library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

    ```ruby
    gem 'redis-namespace'
    ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an
   initializer):

    ```ruby
    redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
    Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
    ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to
conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(experiment: experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.
It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).
This algorithm will automatically weight the alternatives based on their relative performance,
choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal
participation across all alternatives. This algorithm will choose the alternative
with the fewest participants. In the event of multiple minimum participant alternatives
(i.e. starting a new "Block") the algorithm will choose a random alternative from
those minimum participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
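A custom algorithm only needs to follow the same contract the built-in algorithms appear to follow: a class-level `choose_alternative(experiment)` method that returns one of `experiment.alternatives`. A hypothetical sketch (the `UniformSample` name and strategy are illustrative, not part of Split):

```ruby
# Hypothetical custom algorithm -- illustrative only.
# Picks a completely random alternative, ignoring configured weights.
module Split
  module Algorithms
    module UniformSample
      def self.choose_alternative(experiment)
        experiment.alternatives.sample
      end
    end
  end
end

# Then wire it up globally, or per experiment via the experiments hash:
Split.configure do |config|
  config.algorithm = Split::Algorithms::UniformSample
end
```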
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

## Sponsors

Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec

### A Note on Patches and Pull Requests

* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a
  future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history.
  (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. 
A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be called when an experiment is reset or deleted. 
You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! 
# authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. 
(Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Fix travis badges\n @@ -7,10 +7,10 @@ Split is heavily inspired by the Abingo and Vanity rails ab testing plugins and\n Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n \n [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n-[![Build Status](https://secure.travis-ci.org/andrew/split.svg?branch=master)](http://travis-ci.org/andrew/split)\n-[![Dependency Status](https://gemnasium.com/andrew/split.svg)](https://gemnasium.com/andrew/split)\n-[![Code Climate](https://codeclimate.com/github/andrew/split.svg)](https://codeclimate.com/github/andrew/split)\n-[![Coverage Status](http://img.shields.io/coveralls/andrew/split.svg)](https://coveralls.io/r/andrew/split)\n+[![Build Status](https://secure.travis-ci.org/splitrb/split.svg?branch=master)](http://travis-ci.org/splitrb/split)\n+[![Dependency Status](https://gemnasium.com/splitrb/split.svg)](https://gemnasium.com/splitrb/split)\n+[![Code Climate](https://codeclimate.com/github/splitrb/split.svg)](https://codeclimate.com/github/splitrb/split)\n+[![Coverage Status](http://img.shields.io/coveralls/splitrb/split.svg)](https://coveralls.io/r/splitrb/split)\n \n ## Requirements\n \n"},"addition_count":{"kind":"number","value":4,"string":"4"},"commit_subject":{"kind":"string","value":"Fix travis badges"},"deletion_count":{"kind":"number","value":4,"string":"4"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676126,"cells":{"id":{"kind":"string","value":"10071776"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem 
Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n[![Build Status](https://secure.travis-ci.org/andrew/split.svg?branch=master)](http://travis-ci.org/andrew/split)\n[![Dependency Status](https://gemnasium.com/andrew/split.svg)](https://gemnasium.com/andrew/split)\n[![Code Climate](https://codeclimate.com/github/andrew/split.svg)](https://codeclimate.com/github/andrew/split)\n[![Coverage Status](http://img.shields.io/coveralls/andrew/split.svg)](https://coveralls.io/r/andrew/split)\n\n## Requirements\n\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! 
<% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. 
A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be called when an experiment is reset or deleted. 
You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! 
# authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. 
(Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (for example, in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# Create a new experiment.
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# Create a new trial.
trial = Split::Trial.new(experiment: experiment)
# Run the trial.
trial.choose!
# Get the result; returns either "red" or "blue".
trial.alternative.name

# If the goal has been achieved, increment the successful completions
# for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. It will choose the alternative with the fewest participants. In the event of multiple minimum-participant alternatives (i.e. starting a new "block"), the algorithm will choose a random alternative from those minimum-participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
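Combining this with the per-experiment `algorithm` key shown in the configuration section above, a sketch of overriding the global default for a single experiment (`my_bandit_experiment` is a hypothetical name):

```ruby
Split.configure do |config|
  # Global default used by every experiment...
  config.algorithm = Split::Algorithms::WeightedSample

  # ...except the ones that opt into another algorithm
  # through the experiments hash:
  config.experiments = {
    my_bandit_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::Whiplash'
    }
  }
end
```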
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in Mongoid (still uses Redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

## Sponsors

Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec

### A Note on Patches and Pull Requests

* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
\"curly\": true,\n+ \"eqeqeq\": true,\n+ \"immed\": true,\n+ \"indent\": 2,\n+ \"newcap\": true,\n+ \"noarg\": true,\n+ \"regexp\": true,\n+ \"undef\": true,\n+ \"unused\": true,\n+ \"maxlen\": 120,\n+ \"strict\": true,\n+ \"trailing\": true,\n+ \"smarttabs\": true,\n+ \"globals\": {\n+ \"angular\": false,\n+ \"describe\": false,\n+ \"it\": false,\n+ \"beforeEach\": false,\n+ \"afterEach\": false,\n+ \"inject\": false,\n+ \"module\": false,\n+ \"browser\": false,\n+ \"expect\": false,\n+ \"_\": false,\n+ \"$\": false\n+ }\n+}\n\\ No newline at end of file\n"},"addition_count":{"kind":"number","value":30,"string":"30"},"commit_subject":{"kind":"string","value":"chore(.jshintrc): Added .jshintrc"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"jshintrc"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676128,"cells":{"id":{"kind":"string","value":"10071778"},"text":{"kind":"string","value":" .jshintrc\n ADDFILE\n chore(.jshintrc): Added .jshintrc\n\n @@ -0,0 +1,30 @@\n+{\n+ \"browser\": true,\n+ \"bitwise\": true,\n+ \"curly\": true,\n+ \"eqeqeq\": true,\n+ \"immed\": true,\n+ \"indent\": 2,\n+ \"newcap\": true,\n+ \"noarg\": true,\n+ \"regexp\": true,\n+ \"undef\": true,\n+ \"unused\": true,\n+ \"maxlen\": 120,\n+ \"strict\": true,\n+ \"trailing\": true,\n+ \"smarttabs\": true,\n+ \"globals\": {\n+ \"angular\": false,\n+ \"describe\": false,\n+ \"it\": false,\n+ \"beforeEach\": false,\n+ \"afterEach\": false,\n+ \"inject\": false,\n+ \"module\": false,\n+ \"browser\": false,\n+ \"expect\": false,\n+ \"_\": false,\n+ \"$\": false\n+ }\n+}\n\\ No newline at end of file\n"},"addition_count":{"kind":"number","value":30,"string":"30"},"commit_subject":{"kind":"string","value":"chore(.jshintrc): Added .jshintrc"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"jshintrc"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676129,"cells":{"id":{"kind":"string","value":"10071779"},"text":{"kind":"string","value":" .jshintrc\n ADDFILE\n chore(.jshintrc): Added .jshintrc\n\n @@ -0,0 +1,30 @@\n+{\n+ \"browser\": true,\n+ \"bitwise\": true,\n+ \"curly\": true,\n+ \"eqeqeq\": true,\n+ \"immed\": true,\n+ \"indent\": 2,\n+ \"newcap\": true,\n+ \"noarg\": true,\n+ \"regexp\": true,\n+ \"undef\": true,\n+ \"unused\": true,\n+ \"maxlen\": 120,\n+ \"strict\": true,\n+ \"trailing\": true,\n+ \"smarttabs\": true,\n+ \"globals\": {\n+ \"angular\": false,\n+ \"describe\": false,\n+ \"it\": false,\n+ \"beforeEach\": false,\n+ \"afterEach\": false,\n+ \"inject\": false,\n+ \"module\": false,\n+ \"browser\": false,\n+ \"expect\": false,\n+ \"_\": false,\n+ \"$\": false\n+ }\n+}\n\\ No newline at end of file\n"},"addition_count":{"kind":"number","value":30,"string":"30"},"commit_subject":{"kind":"string","value":"chore(.jshintrc): Added .jshintrc"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"jshintrc"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676130,"cells":{"id":{"kind":"string","value":"10071780"},"text":{"kind":"string","value":" README.md\n Semantic-UI-Angular\n===================\n\n[![Join the chat at 
https://gitter.im/Semantic-Org/Semantic-UI-Angular](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Semantic-Org/Semantic-UI-Angular?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n[![Build Status](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular.svg)](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular)\n[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)\n[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)\n\nStatus\n------\nWorking to open the first release.\n\nCurrent progress:\n-----------------\nAt this moment we have following directives:\n\n - sm-button;\n - sm-checkbox;\n - sm-divider;\n - sm-radio-group and sm-radio-button;\n - sm-rating.\n\n## To do:\nAll tasks for the release of the first release are the issues. https://github.com/Semantic-Org/Semantic-UI-Angular/issues\n\n\n docs(README): Update README.md\n\n @@ -6,20 +6,42 @@ Semantic-UI-Angular\n [![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)\n [![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)\n \n+**Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components.\n+As soon as Angular 2 will get better animations support, we will consider creating (or merging some existing project if there will be one) Angular 2 components as well.\n+We've decided to use TypeScript as a step to Angular 2 friendly environment.\n+\n Status\n ------\n-Working to open the first release.\n+**Work in progress**\n+\n+We are working on setting up proper environment, contribution guidelines and everything else for comfortable community contributions.\n+Once we release first `alpha.0` we are happy to get community help.\n+\n+\n+Support\n+-------\n+We support AngularJS 1.4.8 version.\n+\n+\n+Building Semantic-UI-Angular\n+----------------------------\n+You have to have `nodejs` installed before running following commands.\n \n-Current progress:\n------------------\n-At this moment we have following directives:\n+```\n+npm install\n+npm run build\n+```\n \n- - sm-button;\n- - sm-checkbox;\n- - sm-divider;\n- - sm-radio-group and sm-radio-button;\n- - sm-rating.\n+The distribution packages will be stored in `dist` folder.\n \n-## To do:\n-All tasks for the release of the first release are the issues. 
https://github.com/Semantic-Org/Semantic-UI-Angular/issues\n+Running tests\n+-------------\n+Single run:\n+```\n+npm test\n+```\n \n+Dev mode:\n+```\n+npm run test-dev\n+```\n"},"addition_count":{"kind":"number","value":33,"string":"33"},"commit_subject":{"kind":"string","value":"docs(README): Update README.md"},"deletion_count":{"kind":"number","value":11,"string":"11"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676131,"cells":{"id":{"kind":"string","value":"10071781"},"text":{"kind":"string","value":" README.md\n Semantic-UI-Angular\n===================\n\n[![Join the chat at https://gitter.im/Semantic-Org/Semantic-UI-Angular](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Semantic-Org/Semantic-UI-Angular?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n[![Build Status](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular.svg)](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular)\n[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)\n[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)\n\nStatus\n------\nWorking to open the first release.\n\nCurrent progress:\n-----------------\nAt this moment we have following directives:\n\n - sm-button;\n - sm-checkbox;\n - sm-divider;\n - sm-radio-group and sm-radio-button;\n - sm-rating.\n\n## To do:\nAll tasks for the release of the first release are the issues. https://github.com/Semantic-Org/Semantic-UI-Angular/issues\n\n\n docs(README): Update README.md\n\n @@ -6,20 +6,42 @@ Semantic-UI-Angular\n [![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)\n [![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)\n \n+**Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components.\n+As soon as Angular 2 will get better animations support, we will consider creating (or merging some existing project if there will be one) Angular 2 components as well.\n+We've decided to use TypeScript as a step to Angular 2 friendly environment.\n+\n Status\n ------\n-Working to open the first release.\n+**Work in progress**\n+\n+We are working on setting up proper environment, contribution guidelines and everything else for comfortable community contributions.\n+Once we release first `alpha.0` we are happy to get community help.\n+\n+\n+Support\n+-------\n+We support AngularJS 1.4.8 version.\n+\n+\n+Building Semantic-UI-Angular\n+----------------------------\n+You have to have `nodejs` installed before running following commands.\n \n-Current progress:\n------------------\n-At this moment we have following directives:\n+```\n+npm install\n+npm run build\n+```\n \n- - sm-button;\n- - sm-checkbox;\n- - sm-divider;\n- - sm-radio-group and sm-radio-button;\n- - sm-rating.\n+The distribution packages will be stored in `dist` folder.\n \n-## To do:\n-All tasks for the release of the first release are the issues. 
https://github.com/Semantic-Org/Semantic-UI-Angular/issues\n+Running tests\n+-------------\n+Single run:\n+```\n+npm test\n+```\n \n+Dev mode:\n+```\n+npm run test-dev\n+```\n"},"addition_count":{"kind":"number","value":33,"string":"33"},"commit_subject":{"kind":"string","value":"docs(README): Update README.md"},"deletion_count":{"kind":"number","value":11,"string":"11"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676132,"cells":{"id":{"kind":"string","value":"10071782"},"text":{"kind":"string","value":" README.md\n Semantic-UI-Angular\n===================\n\n[![Join the chat at https://gitter.im/Semantic-Org/Semantic-UI-Angular](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Semantic-Org/Semantic-UI-Angular?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n[![Build Status](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular.svg)](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular)\n[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)\n[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)\n\nStatus\n------\nWorking to open the first release.\n\nCurrent progress:\n-----------------\nAt this moment we have following directives:\n\n - sm-button;\n - sm-checkbox;\n - sm-divider;\n - sm-radio-group and sm-radio-button;\n - sm-rating.\n\n## To do:\nAll tasks for the release of the first release are the issues. https://github.com/Semantic-Org/Semantic-UI-Angular/issues\n\n\n docs(README): Update README.md\n\n @@ -6,20 +6,42 @@ Semantic-UI-Angular\n [![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)\n [![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)\n \n+**Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components.\n+As soon as Angular 2 will get better animations support, we will consider creating (or merging some existing project if there will be one) Angular 2 components as well.\n+We've decided to use TypeScript as a step to Angular 2 friendly environment.\n+\n Status\n ------\n-Working to open the first release.\n+**Work in progress**\n+\n+We are working on setting up proper environment, contribution guidelines and everything else for comfortable community contributions.\n+Once we release first `alpha.0` we are happy to get community help.\n+\n+\n+Support\n+-------\n+We support AngularJS 1.4.8 version.\n+\n+\n+Building Semantic-UI-Angular\n+----------------------------\n+You have to have `nodejs` installed before running following commands.\n \n-Current progress:\n------------------\n-At this moment we have following directives:\n+```\n+npm install\n+npm run build\n+```\n \n- - sm-button;\n- - sm-checkbox;\n- - sm-divider;\n- - sm-radio-group and sm-radio-button;\n- - sm-rating.\n+The distribution packages will be stored in `dist` folder.\n \n-## To do:\n-All tasks for the release of the first release are the issues. 
https://github.com/Semantic-Org/Semantic-UI-Angular/issues\n+Running tests\n+-------------\n+Single run:\n+```\n+npm test\n+```\n \n+Dev mode:\n+```\n+npm run test-dev\n+```\n"},"addition_count":{"kind":"number","value":33,"string":"33"},"commit_subject":{"kind":"string","value":"docs(README): Update README.md"},"deletion_count":{"kind":"number","value":11,"string":"11"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"Semantic-Org/Semantic-UI-Angular"}}},{"rowIdx":10676133,"cells":{"id":{"kind":"string","value":"10071783"},"text":{"kind":"string","value":" index.ts\n import abbreviation, { CSSAbbreviation, CSSProperty, CSSValue, Literal, Value, Field, FunctionCall } from '@emmetio/css-abbreviation';\nimport { Config, SnippetsMap } from '../config';\nimport createSnippet, { CSSSnippet, nest, getKeywords, CSSSnippetType, CSSSnippetRaw, CSSSnippetProperty, CSSKeywordRef } from './snippets';\nimport calculateScore from './score';\n\ntype MatchInput = CSSSnippet | CSSKeywordRef;\n\n/**\n * Parses given Emmet abbreviation into a final abbreviation tree with all\n /** Include all possible snippets in match */\n Global = '@@global',\n /** Include raw snippets only (e.g. no properties) in abbreviation match */\n Section = '@@section',\n abbr = abbreviation(abbr);\n }\n\n // Run abbreviation resolve in two passes:\n // 1. Map each node to snippets, which are abbreviations as well. A single snippet\n // may produce multiple nodes\n // 2. Transform every resolved node\n // walk(abbr, snippets, config);\n // walk(abbr, transform, config);\n return abbr;\n}\n\n/**\n * Converts given raw snippets into internal snippets representation\n */\n\n if (config.cache) {\n config.cache.stylesheetSnippets = snippets;\n }\n\n if (typeof abbr === 'string') {\n abbr = abbreviation(abbr, { value: isValueScope(config) });\n }\n\n const filteredSnippets = getSnippetsForScope(snippets, config);\n\n * keyword aliases from node value\n */\nfunction resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty {\n // TODO implement\n // if (config.context) {\n // // Resolve as value of given CSS property\n // return resolveAsPropertyValue(node, snippets.find(s => s.property === config.context), config);\n // }\n\n const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']);\n\n if (!snippet) {\n // Edge case: `!important` snippet\n return node.important ? setNodeAsText(node, '!important') : node;\n }\n\n return snippet.type === CSSSnippetType.Property\n ? resolveAsProperty(node, snippet, config)\n : resolveAsSnippet(node, snippet);\n}\n\n/**\nfunction resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty {\n if (!resolveGradient(node, config)) {\n const score = config.options['stylesheet.fuzzySearchMinScore'];\n const abbr = node.name!;\n node.name = snippet.property;\n\n // Resolve keyword shortcuts\n const keywords = getKeywords(snippet);\n\n if (!node.value.length) {\n // No value defined, try to resolve unmatched part as a keyword alias\n const kw = findBestMatch(getUnmatchedPart(abbr, snippet.key), keywords);\n if (kw) {\n node.value = snippet.value[kw.index]!;\n } else if (snippet.value.length) {\n const defaultValue = snippet.value[0]!;\n node.value = defaultValue.some(hasField)\n ? 
defaultValue\n : defaultValue.map(n => wrapWithField(n));\n }\n } else {\n // replace keyword aliases in current node value\n for (let i = 0, token; i < node.value.value.length; i++) {\n token = node.value.value[i];\n\n if (token === '!') {\n token = `${!i ? '${1} ' : ''}!important`;\n } else if (isKeyword(token)) {\n token = findBestMatch(token.value, keywords)\n || findBestMatch(token.value, globalKeywords)\n || token;\n } else if (isNumericValue(token)) {\n token = resolveNumericValue(node.name, token, formatOptions);\n }\n\n node.value.value[i] = token;\n }\n }\n\n // Resolve numeric values for CSS properties only\n resolveNumericValue(node, config);\n }\n\n return node;\n}\n\n/**\n * Resolves CSS gradient shortcut from given property, if possible\n */\nfunction resolveGradient(node: CSSProperty, config: Config): boolean {\n * Resolves given parsed abbreviation node as property value of given `snippet`:\n * tries to find best matching keyword from CSS snippet\n */\nfunction resolveAsPropertyValue(node: CSSProperty, snippet: CSSSnippet, config: Config): CSSProperty {\n // Possible resolved result for CSS property:\n // * matched snippet keyword\n // * color (starts with #)\n // Everything else should result the same as input abbreviation\n let keywords = config.options['stylesheet.keywords'].slice();\n if (snippet) {\n keywords = keywords.concat(getKeywords(snippet));\n }\n\n const values = [node.name].concat(node.value.value)\n .filter(Boolean)\n .map(value => {\n if (typeof value === 'string' || value.type === 'keyword') {\n value = String(value);\n return findBestMatch(value, keywords, null, config.fuzzySearchMinScore) || value;\n }\n\n return value;\n });\n\n node.name = null;\n node.value.value = values;\n\n return node;\n}\n\n type: 'FunctionCall',\n name: 'linear-gradient',\n arguments: [cssValue(field(0, ''))]\n };\n } else {\n gradientFn = {\n ...gradientFn,\n name: 'linear-gradient'\n };\n }\n\n if (!config.context) {\n node.name = 'background-image';\n }\n node.value = [cssValue(gradientFn)];\n return true;\n }\n\n return false;\n}\n\n/**\n * Resolves given parsed abbreviation node as CSS property\n */\nfunction resolveAsProperty(node: CSSProperty, snippet: CSSSnippetProperty, config: Config): CSSProperty {\n const abbr = node.name!;\n\n // Check for unmatched part of abbreviation\n // For example, in `dib` abbreviation the matched part is `d` and `ib` should\n // be considered as inline value. If unmatched fragment exists, we should check\n // if it matches actual value of snippet. 
If either explicit value is specified\n // or unmatched fragment did not resolve to to a keyword, we should consider\n // matched snippet as invalid\n const inlineValue = getUnmatchedPart(abbr, snippet.key);\n}\n\nfunction getScoringPart(item: MatchInput): string {\n return (item as CSSKeywordRef).keyword || (item as CSSSnippet).key;\n}\n\n node.value.push(cssValue(kw));\n }\n\n node.name = snippet.property;\n\n if (node.value.length) {\n // Replace keyword alias from current abbreviation node with matched keyword\n resolveValueKeywords(node, config, snippet);\n } else if (snippet.value.length) {\n const defaultValue = snippet.value[0]!;\n\n // https://github.com/emmetio/emmet/issues/558\n // We should auto-select inserted value only if there’s multiple value\n // choice\n return '';\n}\n\n/**\n * Check if given CSS value token is a keyword\n * @param {*} token\n * @return {Boolean}\n */\nfunction isKeyword(token) {\n return tokenTypeOf(token, 'keyword');\n}\n\n/**\n * Check if given CSS value token is a numeric value\n * @param {*} token\n * @return {Boolean}\n */\nfunction isNumericValue(token) {\n return tokenTypeOf(token, 'numeric');\n}\n\nfunction tokenTypeOf(token, type) {\n return token && typeof token === 'object' && token.type === type;\n}\n\n/**\n * Resolves numeric value for given CSS property\n * @param {String} property CSS property name\n * @param {NumericValue} token CSS numeric value token\n * @param {Object} formatOptions Formatting options for units\n * @return {NumericValue}\n */\nfunction resolveNumericValue(property, token, formatOptions) {\n if (token.unit) {\n token.unit = formatOptions.unitAliases[token.unit] || token.unit;\n } else if (token.value !== 0 && unitlessProperties.indexOf(property) === -1) {\n // use `px` for integers, `em` for floats\n // NB: num|0 is a quick alternative to Math.round(0)\n token.unit = token.value === (token.value | 0) ? formatOptions.intUnit : formatOptions.floatUnit;\n }\n\n return token;\n}\n\n/**\n return node;\n}\n\n/**\n * Finds best matching item from `items` array\n * @param abbr Abbreviation to match\n * @param items List of items for match\n * @param minScore The minimum score the best matched item should have to be a valid match.\n */\nexport function findBestMatch(abbr: string, items: T[], minScore = 0, partialMatch = false): T | null {\n let matchedItem: T | null = null;\n let maxScore = 0;\n\n for (const item of items) {\n const score = calculateScore(abbr, getScoringPart(item), partialMatch);\n\n if (score === 1) {\n // direct hit, no need to look further\n return item;\n }\n\n if (score && score >= maxScore) {\n maxScore = score;\n matchedItem = item;\n }\n }\n\n return maxScore >= minScore ? matchedItem : null;\n}\n\nfunction getScoringPart(item: MatchInput): string {\n return typeof item === 'string' ? 
item : item.key;\n}\n\n/**\n * Returns a part of `abbr` that wasn’t directly matched against `str`.\n * For example, if abbreviation `poas` is matched against `position`,\n * the unmatched part will be `as` since `a` wasn’t found in string stream\n */\nfunction getUnmatchedPart(abbr: string, str: string): string {\n for (let i = 0, lastPos = 0; i < abbr.length; i++) {\n lastPos = str.indexOf(abbr[i], lastPos);\n if (lastPos === -1) {\n return abbr.slice(i);\n }\n lastPos++;\n }\n\n return '';\n}\n\n/**\n * Resolves given keyword shorthand into matched snippet keyword or global keyword,\n * if possible\n */\nfunction resolveKeyword(kw: string, config: Config, snippet?: CSSSnippetProperty, minScore?: number): Literal | FunctionCall | null {\n let ref: string | null;\n\n if (snippet) {\n if (ref = findBestMatch(kw, Object.keys(snippet.keywords), minScore)) {\n return snippet.keywords[ref];\n }\n\n for (const dep of snippet.dependencies) {\n if (ref = findBestMatch(kw, Object.keys(dep.keywords), minScore)) {\n return dep.keywords[ref];\n }\n }\n }\n\n if (ref = findBestMatch(kw, config.options['stylesheet.keywords'], minScore)) {\n return literal(ref);\n }\n\n return null;\n}\n\n/**\n * Resolves numeric values in given abbreviation node\n */\nfunction resolveNumericValue(node: CSSProperty, config: Config) {\n const aliases = config.options['stylesheet.unitAliases'];\n const unitless = config.options['stylesheet.unitless'];\n\n for (const v of node.value) {\n for (const t of v.value) {\n if (t.type === 'NumberValue') {\n if (t.unit) {\n t.unit = aliases[t.unit] || t.unit;\n } else if (t.value !== 0 && !unitless.includes(node.name!)) {\n t.unit = t.rawValue.includes('.')\n ? config.options['stylesheet.floatUnit']\n : config.options['stylesheet.intUnit'];\n }\n }\n }\n }\n}\n\n/**\n * Constructs CSS value token\n */\nfunction cssValue(...args: Value[]): CSSValue {\n return {\n type: 'CSSValue',\n value: args\n };\n}\n\n/**\n * Constructs literal token\n */\nfunction literal(value: string): Literal {\n return { type: 'Literal', value };\n}\n\n/**\n * Constructs field token\n */\nfunction field(index: number, name: string): Field {\n return { type: 'Field', index, name };\n}\n\n/**\n * Check if given value contains fields\n */\nfunction hasField(value: CSSValue): boolean {\n for (const v of value.value) {\n if (v.type === 'Field' || (v.type === 'FunctionCall' && v.arguments.some(hasField))) {\n return true;\n }\n }\n\n return false;\n}\n\ninterface WrapState {\n index: number;\n}\n\n/**\n * Wraps tokens of given abbreviation with fields\n */\nfunction wrapWithField(node: CSSValue, config: Config, state: WrapState = { index: 1 }): CSSValue {\n let value: Value[] = [];\n for (const v of node.value) {\n switch (v.type) {\n case 'ColorValue':\n value.push(field(state.index++, color(v, config.options['stylesheet.shortHex'])));\n break;\n case 'Literal':\n value.push(field(state.index++, v.value));\n break;\n case 'NumberValue':\n value.push(field(state.index++, `${v.value}${v.unit}`));\n break;\n case 'StringValue':\n const q = v.quote === 'single' ? 
'\\'' : '\"';\n value.push(field(state.index++, q + v.value + q));\n break;\n case 'FunctionCall':\n value.push(field(state.index++, v.name), literal('('));\n for (let i = 0, il = v.arguments.length; i < il; i++) {\n value = value.concat(wrapWithField(v.arguments[i], config, state).value);\n if (i !== il - 1) {\n value.push(literal(', '));\n }\n }\n value.push(literal(')'));\n break;\n default:\n value.push(v);\n }\n }\n\n return {...node, value };\n}\n\n/**\n * Check if abbreviation should be expanded in CSS value context\n */\nfunction isValueScope(config: Config): boolean {\n if (config.context) {\n return config.context.name === CSSAbbreviationScope.Value || !config.context.name.startsWith('@@');\n }\n\n return false;\n}\n\n/**\n * Returns snippets for given scope\n */\nfunction getSnippetsForScope(snippets: CSSSnippet[], config: Config): CSSSnippet[] {\n if (config.context) {\n if (config.context.name === CSSAbbreviationScope.Section) {\n return snippets.filter(s => s.type === CSSSnippetType.Raw);\n }\n\n if (config.context.name === CSSAbbreviationScope.Property) {\n return snippets.filter(s => s.type === CSSSnippetType.Property);\n }\n }\n\n return snippets;\n}\n\n Working on stylesheet resolving and output\n\n @@ -3,7 +3,7 @@ import { Config, SnippetsMap } from '../config';\n import createSnippet, { CSSSnippet, nest, getKeywords, CSSSnippetType, CSSSnippetRaw, CSSSnippetProperty, CSSKeywordRef } from './snippets';\n import calculateScore from './score';\n \n-type MatchInput = CSSSnippet | CSSKeywordRef;\n+type MatchInput = CSSSnippet | CSSKeywordRef | string;\n \n /**\n * Parses given Emmet abbreviation into a final abbreviation tree with all\n@@ -14,15 +14,15 @@ export default function parse(abbr: string | CSSAbbreviation, config: Config, sn\n abbr = abbreviation(abbr);\n }\n \n- // Run abbreviation resolve in two passes:\n- // 1. Map each node to snippets, which are abbreviations as well. A single snippet\n- // may produce multiple nodes\n- // 2. Transform every resolved node\n- // walk(abbr, snippets, config);\n- // walk(abbr, transform, config);\n+ for (const node of abbr) {\n+ resolveNode(node, snippets, config);\n+ }\n+\n return abbr;\n }\n \n+export { default as stringify } from './format';\n+\n /**\n * Converts given raw snippets into internal snippets representation\n */\n@@ -40,22 +40,25 @@ export function convertSnippets(snippets: SnippetsMap): CSSSnippet[] {\n * keyword aliases from node value\n */\n function resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty {\n- // TODO implement\n- // if (config.context) {\n- // // Resolve as value of given CSS property\n- // return resolveAsPropertyValue(node, snippets.find(s => s.property === config.context), config);\n- // }\n-\n- const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']);\n+ if (config.context) {\n+ // Resolve as value of given CSS property\n+ const snippet = snippets.find(s => s.type === CSSSnippetType.Property && s.property === config.context) as CSSSnippetProperty | undefined;\n+ resolveAsPropertyValue(node, config, snippet);\n+ } else {\n+ const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']);\n \n- if (!snippet) {\n- // Edge case: `!important` snippet\n- return node.important ? 
setNodeAsText(node, '!important') : node;\n+ if (snippet) {\n+ if (snippet.type === CSSSnippetType.Property) {\n+ resolveAsProperty(node, snippet, config);\n+ } else {\n+ resolveAsSnippet(node, snippet);\n+ }\n+ }\n }\n \n- return snippet.type === CSSSnippetType.Property\n- ? resolveAsProperty(node, snippet, config)\n- : resolveAsSnippet(node, snippet);\n+ resolveNumericValue(node, config);\n+\n+ return node;\n }\n \n /**\n@@ -65,36 +68,21 @@ function resolveAsProperty(node: CSSProperty, snippet: CSSSnippetProperty, confi\n const abbr = node.name!;\n node.name = snippet.property;\n \n- // Resolve keyword shortcuts\n- const keywords = getKeywords(snippet);\n-\n if (!node.value.length) {\n- // No value defined, try to resolve unmatched part as a keyword alias\n- const kw = findBestMatch(getUnmatchedPart(abbr, snippet.key), keywords);\n- if (kw) {\n- node.value = snippet.value[kw.index]!;\n- } else if (snippet.value.length) {\n+ // No value defined in abbreviation node, try to resolve unmatched part\n+ // as a keyword alias\n+ if (!resolveSnippetKeyword(node, getUnmatchedPart(abbr, snippet.key), snippet) && snippet.value.length) {\n const defaultValue = snippet.value[0]!;\n node.value = defaultValue.some(hasField)\n ? defaultValue\n : defaultValue.map(n => wrapWithField(n));\n }\n } else {\n- // replace keyword aliases in current node value\n- for (let i = 0, token; i < node.value.value.length; i++) {\n- token = node.value.value[i];\n-\n- if (token === '!') {\n- token = `${!i ? '${1} ' : ''}!important`;\n- } else if (isKeyword(token)) {\n- token = findBestMatch(token.value, keywords)\n- || findBestMatch(token.value, globalKeywords)\n- || token;\n- } else if (isNumericValue(token)) {\n- token = resolveNumericValue(node.name, token, formatOptions);\n- }\n-\n- node.value.value[i] = token;\n+ // Replace keyword alias from current abbreviation node with matched keyword\n+ const kw = getSingleKeyword(node);\n+ if (kw) {\n+ resolveSnippetKeyword(node, kw.value, snippet)\n+ || resolveGlobalKeyword(node, kw.value, config);\n }\n }\n \n@@ -112,30 +100,13 @@ function resolveAsSnippet(node: CSSProperty, snippet: CSSSnippetRaw): CSSPropert\n * Resolves given parsed abbreviation node as property value of given `snippet`:\n * tries to find best matching keyword from CSS snippet\n */\n-function resolveAsPropertyValue(node: CSSProperty, snippet: CSSSnippet, config: Config): CSSProperty {\n- // Possible resolved result for CSS property:\n- // * matched snippet keyword\n- // * color (starts with #)\n- // Everything else should result the same as input abbreviation\n- let keywords = config.options['stylesheet.keywords'].slice();\n- if (snippet) {\n- keywords = keywords.concat(getKeywords(snippet));\n+function resolveAsPropertyValue(node: CSSProperty, config: Config, snippet?: CSSSnippetProperty): CSSProperty {\n+ const kw = getSingleKeyword(node);\n+ if (kw) {\n+ const score = config.options['stylesheet.fuzzySearchMinScore'];\n+ snippet && resolveSnippetKeyword(node, kw.value, snippet, score)\n+ || resolveGlobalKeyword(node, kw.value, config, score);\n }\n-\n- const values = [node.name].concat(node.value.value)\n- .filter(Boolean)\n- .map(value => {\n- if (typeof value === 'string' || value.type === 'keyword') {\n- value = String(value);\n- return findBestMatch(value, keywords, null, config.fuzzySearchMinScore) || value;\n- }\n-\n- return value;\n- });\n-\n- node.name = null;\n- node.value.value = values;\n-\n return node;\n }\n \n@@ -176,6 +147,9 @@ export function findBestMatch(abbr: string, items: T[], mi\n 
}\n \n function getScoringPart(item: MatchInput): string {\n+ if (typeof item === 'string') {\n+ return item;\n+ }\n return (item as CSSKeywordRef).keyword || (item as CSSSnippet).key;\n }\n \n@@ -196,45 +170,66 @@ function getUnmatchedPart(abbr: string, str: string): string {\n return '';\n }\n \n+function resolveSnippetKeyword(node: CSSProperty, kw: string, snippet: CSSSnippetProperty, minScore?: number): boolean {\n+ const keywords = getKeywords(snippet);\n+ const ref = findBestMatch(kw, keywords, minScore);\n+\n+ if (ref) {\n+ node.value = snippet.value[ref.index]!;\n+ return true;\n+ }\n+\n+ return false;\n+}\n+\n /**\n- * Check if given CSS value token is a keyword\n- * @param {*} token\n- * @return {Boolean}\n+ * Tries to resolve node’s value with matched global keyword from given `kw` alias\n+ * @returns `true` if value was successfully resolved\n */\n-function isKeyword(token) {\n- return tokenTypeOf(token, 'keyword');\n+function resolveGlobalKeyword(node: CSSProperty, kw: string, config: Config, minScore?: number): boolean {\n+ const ref = findBestMatch(kw, config.options['stylesheet.keywords'], minScore);\n+ if (ref) {\n+ node.value = [literalValue(ref)];\n+ return true;\n+ }\n+\n+ return false;\n }\n \n /**\n- * Check if given CSS value token is a numeric value\n- * @param {*} token\n- * @return {Boolean}\n+ * Resolves numeric values in given abbreviation node\n */\n-function isNumericValue(token) {\n- return tokenTypeOf(token, 'numeric');\n-}\n+function resolveNumericValue(node: CSSProperty, config: Config) {\n+ const aliases = config.options['stylesheet.unitAliases'];\n+ const unitless = config.options['stylesheet.unitless'];\n \n-function tokenTypeOf(token, type) {\n- return token && typeof token === 'object' && token.type === type;\n+ for (const v of node.value) {\n+ for (const t of v.value) {\n+ if (t.type === 'NumberValue') {\n+ if (t.unit) {\n+ t.unit = aliases[t.unit] || t.unit;\n+ } else if (t.value !== 0 && !unitless.includes(node.name!)) {\n+ // use `px` for integers, `em` for floats\n+ // NB: num|0 is a quick alternative to Math.round(0)\n+ t.unit = t.value === (t.value | 0)\n+ ? config.options['stylesheet.intUnit']\n+ : config.options['stylesheet.floatUnit'];\n+ }\n+ }\n+ }\n+ }\n }\n \n /**\n- * Resolves numeric value for given CSS property\n- * @param {String} property CSS property name\n- * @param {NumericValue} token CSS numeric value token\n- * @param {Object} formatOptions Formatting options for units\n- * @return {NumericValue}\n+ * Returns literal token if it’s a single value of given abbreviation node\n */\n-function resolveNumericValue(property, token, formatOptions) {\n- if (token.unit) {\n- token.unit = formatOptions.unitAliases[token.unit] || token.unit;\n- } else if (token.value !== 0 && unitlessProperties.indexOf(property) === -1) {\n- // use `px` for integers, `em` for floats\n- // NB: num|0 is a quick alternative to Math.round(0)\n- token.unit = token.value === (token.value | 0) ? 
formatOptions.intUnit : formatOptions.floatUnit;\n+function getSingleKeyword(node: CSSProperty): Literal | void {\n+ if (node.value.length === 1) {\n+ const value = node.value[0]!;\n+ if (value.value.length === 1 && value.value[0].type === 'Literal') {\n+ return value.value[0] as Literal;\n+ }\n }\n-\n- return token;\n }\n \n /**\n"},"addition_count":{"kind":"number","value":88,"string":"88"},"commit_subject":{"kind":"string","value":"Working on stylesheet resolving and output"},"deletion_count":{"kind":"number","value":93,"string":"93"},"file_extension":{"kind":"string","value":".ts"},"lang":{"kind":"string","value":"ts"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"emmetio/emmet"}}},{"rowIdx":10676134,"cells":{"id":{"kind":"string","value":"10071784"},"text":{"kind":"string","value":" index.ts\n import abbreviation, { CSSAbbreviation, CSSProperty, CSSValue, Literal, Value, Field, FunctionCall } from '@emmetio/css-abbreviation';\nimport { Config, SnippetsMap } from '../config';\nimport createSnippet, { CSSSnippet, nest, getKeywords, CSSSnippetType, CSSSnippetRaw, CSSSnippetProperty, CSSKeywordRef } from './snippets';\nimport calculateScore from './score';\n\ntype MatchInput = CSSSnippet | CSSKeywordRef;\n\n/**\n * Parses given Emmet abbreviation into a final abbreviation tree with all\n /** Include all possible snippets in match */\n Global = '@@global',\n /** Include raw snippets only (e.g. no properties) in abbreviation match */\n Section = '@@section',\n abbr = abbreviation(abbr);\n }\n\n // Run abbreviation resolve in two passes:\n // 1. Map each node to snippets, which are abbreviations as well. A single snippet\n // may produce multiple nodes\n // 2. Transform every resolved node\n // walk(abbr, snippets, config);\n // walk(abbr, transform, config);\n return abbr;\n}\n\n/**\n * Converts given raw snippets into internal snippets representation\n */\n\n if (config.cache) {\n config.cache.stylesheetSnippets = snippets;\n }\n\n if (typeof abbr === 'string') {\n abbr = abbreviation(abbr, { value: isValueScope(config) });\n }\n\n const filteredSnippets = getSnippetsForScope(snippets, config);\n\n * keyword aliases from node value\n */\nfunction resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty {\n // TODO implement\n // if (config.context) {\n // // Resolve as value of given CSS property\n // return resolveAsPropertyValue(node, snippets.find(s => s.property === config.context), config);\n // }\n\n const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']);\n\n if (!snippet) {\n // Edge case: `!important` snippet\n return node.important ? setNodeAsText(node, '!important') : node;\n }\n\n return snippet.type === CSSSnippetType.Property\n ? 
resolveAsProperty(node, snippet, config)\n : resolveAsSnippet(node, snippet);\n}\n\n/**\nfunction resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty {\n if (!resolveGradient(node, config)) {\n const score = config.options['stylesheet.fuzzySearchMinScore'];\n const abbr = node.name!;\n node.name = snippet.property;\n\n // Resolve keyword shortcuts\n const keywords = getKeywords(snippet);\n\n if (!node.value.length) {\n // No value defined, try to resolve unmatched part as a keyword alias\n const kw = findBestMatch(getUnmatchedPart(abbr, snippet.key), keywords);\n if (kw) {\n node.value = snippet.value[kw.index]!;\n } else if (snippet.value.length) {\n const defaultValue = snippet.value[0]!;\n node.value = defaultValue.some(hasField)\n ? defaultValue\n : defaultValue.map(n => wrapWithField(n));\n }\n } else {\n // replace keyword aliases in current node value\n for (let i = 0, token; i < node.value.value.length; i++) {\n token = node.value.value[i];\n\n if (token === '!') {\n token = `${!i ? '${1} ' : ''}!important`;\n } else if (isKeyword(token)) {\n token = findBestMatch(token.value, keywords)\n || findBestMatch(token.value, globalKeywords)\n || token;\n } else if (isNumericValue(token)) {\n token = resolveNumericValue(node.name, token, formatOptions);\n }\n\n node.value.value[i] = token;\n }\n }\n\n // Resolve numeric values for CSS properties only\n resolveNumericValue(node, config);\n }\n\n return node;\n}\n\n/**\n * Resolves CSS gradient shortcut from given property, if possible\n */\nfunction resolveGradient(node: CSSProperty, config: Config): boolean {\n * Resolves given parsed abbreviation node as property value of given `snippet`:\n * tries to find best matching keyword from CSS snippet\n */\nfunction resolveAsPropertyValue(node: CSSProperty, snippet: CSSSnippet, config: Config): CSSProperty {\n // Possible resolved result for CSS property:\n // * matched snippet keyword\n // * color (starts with #)\n // Everything else should result the same as input abbreviation\n let keywords = config.options['stylesheet.keywords'].slice();\n if (snippet) {\n keywords = keywords.concat(getKeywords(snippet));\n }\n\n const values = [node.name].concat(node.value.value)\n .filter(Boolean)\n .map(value => {\n if (typeof value === 'string' || value.type === 'keyword') {\n value = String(value);\n return findBestMatch(value, keywords, null, config.fuzzySearchMinScore) || value;\n }\n\n return value;\n });\n\n node.name = null;\n node.value.value = values;\n\n return node;\n}\n\n type: 'FunctionCall',\n name: 'linear-gradient',\n arguments: [cssValue(field(0, ''))]\n };\n } else {\n gradientFn = {\n ...gradientFn,\n name: 'linear-gradient'\n };\n }\n\n if (!config.context) {\n node.name = 'background-image';\n }\n node.value = [cssValue(gradientFn)];\n return true;\n }\n\n return false;\n}\n\n/**\n * Resolves given parsed abbreviation node as CSS property\n */\nfunction resolveAsProperty(node: CSSProperty, snippet: CSSSnippetProperty, config: Config): CSSProperty {\n const abbr = node.name!;\n\n // Check for unmatched part of abbreviation\n // For example, in `dib` abbreviation the matched part is `d` and `ib` should\n // be considered as inline value. If unmatched fragment exists, we should check\n // if it matches actual value of snippet. 
If either explicit value is specified\n // or unmatched fragment did not resolve to to a keyword, we should consider\n // matched snippet as invalid\n const inlineValue = getUnmatchedPart(abbr, snippet.key);\n}\n\nfunction getScoringPart(item: MatchInput): string {\n return (item as CSSKeywordRef).keyword || (item as CSSSnippet).key;\n}\n\n node.value.push(cssValue(kw));\n }\n\n node.name = snippet.property;\n\n if (node.value.length) {\n // Replace keyword alias from current abbreviation node with matched keyword\n resolveValueKeywords(node, config, snippet);\n } else if (snippet.value.length) {\n const defaultValue = snippet.value[0]!;\n\n // https://github.com/emmetio/emmet/issues/558\n // We should auto-select inserted value only if there’s multiple value\n // choice\n return '';\n}\n\n/**\n * Check if given CSS value token is a keyword\n * @param {*} token\n * @return {Boolean}\n */\nfunction isKeyword(token) {\n return tokenTypeOf(token, 'keyword');\n}\n\n/**\n * Check if given CSS value token is a numeric value\n * @param {*} token\n * @return {Boolean}\n */\nfunction isNumericValue(token) {\n return tokenTypeOf(token, 'numeric');\n}\n\nfunction tokenTypeOf(token, type) {\n return token && typeof token === 'object' && token.type === type;\n}\n\n/**\n * Resolves numeric value for given CSS property\n * @param {String} property CSS property name\n * @param {NumericValue} token CSS numeric value token\n * @param {Object} formatOptions Formatting options for units\n * @return {NumericValue}\n */\nfunction resolveNumericValue(property, token, formatOptions) {\n if (token.unit) {\n token.unit = formatOptions.unitAliases[token.unit] || token.unit;\n } else if (token.value !== 0 && unitlessProperties.indexOf(property) === -1) {\n // use `px` for integers, `em` for floats\n // NB: num|0 is a quick alternative to Math.round(0)\n token.unit = token.value === (token.value | 0) ? formatOptions.intUnit : formatOptions.floatUnit;\n }\n\n return token;\n}\n\n/**\n return node;\n}\n\n/**\n * Finds best matching item from `items` array\n * @param abbr Abbreviation to match\n * @param items List of items for match\n * @param minScore The minimum score the best matched item should have to be a valid match.\n */\nexport function findBestMatch(abbr: string, items: T[], minScore = 0, partialMatch = false): T | null {\n let matchedItem: T | null = null;\n let maxScore = 0;\n\n for (const item of items) {\n const score = calculateScore(abbr, getScoringPart(item), partialMatch);\n\n if (score === 1) {\n // direct hit, no need to look further\n return item;\n }\n\n if (score && score >= maxScore) {\n maxScore = score;\n matchedItem = item;\n }\n }\n\n return maxScore >= minScore ? matchedItem : null;\n}\n\nfunction getScoringPart(item: MatchInput): string {\n return typeof item === 'string' ? 
item : item.key;\n}\n\n/**\n * Returns a part of `abbr` that wasn’t directly matched against `str`.\n * For example, if abbreviation `poas` is matched against `position`,\n * the unmatched part will be `as` since `a` wasn’t found in string stream\n */\nfunction getUnmatchedPart(abbr: string, str: string): string {\n for (let i = 0, lastPos = 0; i < abbr.length; i++) {\n lastPos = str.indexOf(abbr[i], lastPos);\n if (lastPos === -1) {\n return abbr.slice(i);\n }\n lastPos++;\n }\n\n return '';\n}\n\n/**\n * Resolves given keyword shorthand into matched snippet keyword or global keyword,\n * if possible\n */\nfunction resolveKeyword(kw: string, config: Config, snippet?: CSSSnippetProperty, minScore?: number): Literal | FunctionCall | null {\n let ref: string | null;\n\n if (snippet) {\n if (ref = findBestMatch(kw, Object.keys(snippet.keywords), minScore)) {\n return snippet.keywords[ref];\n }\n\n for (const dep of snippet.dependencies) {\n if (ref = findBestMatch(kw, Object.keys(dep.keywords), minScore)) {\n return dep.keywords[ref];\n }\n }\n }\n\n if (ref = findBestMatch(kw, config.options['stylesheet.keywords'], minScore)) {\n return literal(ref);\n }\n\n return null;\n}\n\n/**\n * Resolves numeric values in given abbreviation node\n */\nfunction resolveNumericValue(node: CSSProperty, config: Config) {\n const aliases = config.options['stylesheet.unitAliases'];\n const unitless = config.options['stylesheet.unitless'];\n\n for (const v of node.value) {\n for (const t of v.value) {\n if (t.type === 'NumberValue') {\n if (t.unit) {\n t.unit = aliases[t.unit] || t.unit;\n } else if (t.value !== 0 && !unitless.includes(node.name!)) {\n t.unit = t.rawValue.includes('.')\n ? config.options['stylesheet.floatUnit']\n : config.options['stylesheet.intUnit'];\n }\n }\n }\n }\n}\n\n/**\n * Constructs CSS value token\n */\nfunction cssValue(...args: Value[]): CSSValue {\n return {\n type: 'CSSValue',\n value: args\n };\n}\n\n/**\n * Constructs literal token\n */\nfunction literal(value: string): Literal {\n return { type: 'Literal', value };\n}\n\n/**\n * Constructs field token\n */\nfunction field(index: number, name: string): Field {\n return { type: 'Field', index, name };\n}\n\n/**\n * Check if given value contains fields\n */\nfunction hasField(value: CSSValue): boolean {\n for (const v of value.value) {\n if (v.type === 'Field' || (v.type === 'FunctionCall' && v.arguments.some(hasField))) {\n return true;\n }\n }\n\n return false;\n}\n\ninterface WrapState {\n index: number;\n}\n\n/**\n * Wraps tokens of given abbreviation with fields\n */\nfunction wrapWithField(node: CSSValue, config: Config, state: WrapState = { index: 1 }): CSSValue {\n let value: Value[] = [];\n for (const v of node.value) {\n switch (v.type) {\n case 'ColorValue':\n value.push(field(state.index++, color(v, config.options['stylesheet.shortHex'])));\n break;\n case 'Literal':\n value.push(field(state.index++, v.value));\n break;\n case 'NumberValue':\n value.push(field(state.index++, `${v.value}${v.unit}`));\n break;\n case 'StringValue':\n const q = v.quote === 'single' ? 
'\\'' : '\"';\n value.push(field(state.index++, q + v.value + q));\n break;\n case 'FunctionCall':\n value.push(field(state.index++, v.name), literal('('));\n for (let i = 0, il = v.arguments.length; i < il; i++) {\n value = value.concat(wrapWithField(v.arguments[i], config, state).value);\n if (i !== il - 1) {\n value.push(literal(', '));\n }\n }\n value.push(literal(')'));\n break;\n default:\n value.push(v);\n }\n }\n\n return {...node, value };\n}\n\n/**\n * Check if abbreviation should be expanded in CSS value context\n */\nfunction isValueScope(config: Config): boolean {\n if (config.context) {\n return config.context.name === CSSAbbreviationScope.Value || !config.context.name.startsWith('@@');\n }\n\n return false;\n}\n\n/**\n * Returns snippets for given scope\n */\nfunction getSnippetsForScope(snippets: CSSSnippet[], config: Config): CSSSnippet[] {\n if (config.context) {\n if (config.context.name === CSSAbbreviationScope.Section) {\n return snippets.filter(s => s.type === CSSSnippetType.Raw);\n }\n\n if (config.context.name === CSSAbbreviationScope.Property) {\n return snippets.filter(s => s.type === CSSSnippetType.Property);\n }\n }\n\n return snippets;\n}\n\n Working on stylesheet resolving and output\n\n @@ -3,7 +3,7 @@ import { Config, SnippetsMap } from '../config';\n import createSnippet, { CSSSnippet, nest, getKeywords, CSSSnippetType, CSSSnippetRaw, CSSSnippetProperty, CSSKeywordRef } from './snippets';\n import calculateScore from './score';\n \n-type MatchInput = CSSSnippet | CSSKeywordRef;\n+type MatchInput = CSSSnippet | CSSKeywordRef | string;\n \n /**\n * Parses given Emmet abbreviation into a final abbreviation tree with all\n@@ -14,15 +14,15 @@ export default function parse(abbr: string | CSSAbbreviation, config: Config, sn\n abbr = abbreviation(abbr);\n }\n \n- // Run abbreviation resolve in two passes:\n- // 1. Map each node to snippets, which are abbreviations as well. A single snippet\n- // may produce multiple nodes\n- // 2. Transform every resolved node\n- // walk(abbr, snippets, config);\n- // walk(abbr, transform, config);\n+ for (const node of abbr) {\n+ resolveNode(node, snippets, config);\n+ }\n+\n return abbr;\n }\n \n+export { default as stringify } from './format';\n+\n /**\n * Converts given raw snippets into internal snippets representation\n */\n@@ -40,22 +40,25 @@ export function convertSnippets(snippets: SnippetsMap): CSSSnippet[] {\n * keyword aliases from node value\n */\n function resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty {\n- // TODO implement\n- // if (config.context) {\n- // // Resolve as value of given CSS property\n- // return resolveAsPropertyValue(node, snippets.find(s => s.property === config.context), config);\n- // }\n-\n- const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']);\n+ if (config.context) {\n+ // Resolve as value of given CSS property\n+ const snippet = snippets.find(s => s.type === CSSSnippetType.Property && s.property === config.context) as CSSSnippetProperty | undefined;\n+ resolveAsPropertyValue(node, config, snippet);\n+ } else {\n+ const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']);\n \n- if (!snippet) {\n- // Edge case: `!important` snippet\n- return node.important ? 
setNodeAsText(node, '!important') : node;\n+ if (snippet) {\n+ if (snippet.type === CSSSnippetType.Property) {\n+ resolveAsProperty(node, snippet, config);\n+ } else {\n+ resolveAsSnippet(node, snippet);\n+ }\n+ }\n }\n \n- return snippet.type === CSSSnippetType.Property\n- ? resolveAsProperty(node, snippet, config)\n- : resolveAsSnippet(node, snippet);\n+ resolveNumericValue(node, config);\n+\n+ return node;\n }\n \n /**\n@@ -65,36 +68,21 @@ function resolveAsProperty(node: CSSProperty, snippet: CSSSnippetProperty, confi\n const abbr = node.name!;\n node.name = snippet.property;\n \n- // Resolve keyword shortcuts\n- const keywords = getKeywords(snippet);\n-\n if (!node.value.length) {\n- // No value defined, try to resolve unmatched part as a keyword alias\n- const kw = findBestMatch(getUnmatchedPart(abbr, snippet.key), keywords);\n- if (kw) {\n- node.value = snippet.value[kw.index]!;\n- } else if (snippet.value.length) {\n+ // No value defined in abbreviation node, try to resolve unmatched part\n+ // as a keyword alias\n+ if (!resolveSnippetKeyword(node, getUnmatchedPart(abbr, snippet.key), snippet) && snippet.value.length) {\n const defaultValue = snippet.value[0]!;\n node.value = defaultValue.some(hasField)\n ? defaultValue\n : defaultValue.map(n => wrapWithField(n));\n }\n } else {\n- // replace keyword aliases in current node value\n- for (let i = 0, token; i < node.value.value.length; i++) {\n- token = node.value.value[i];\n-\n- if (token === '!') {\n- token = `${!i ? '${1} ' : ''}!important`;\n- } else if (isKeyword(token)) {\n- token = findBestMatch(token.value, keywords)\n- || findBestMatch(token.value, globalKeywords)\n- || token;\n- } else if (isNumericValue(token)) {\n- token = resolveNumericValue(node.name, token, formatOptions);\n- }\n-\n- node.value.value[i] = token;\n+ // Replace keyword alias from current abbreviation node with matched keyword\n+ const kw = getSingleKeyword(node);\n+ if (kw) {\n+ resolveSnippetKeyword(node, kw.value, snippet)\n+ || resolveGlobalKeyword(node, kw.value, config);\n }\n }\n \n@@ -112,30 +100,13 @@ function resolveAsSnippet(node: CSSProperty, snippet: CSSSnippetRaw): CSSPropert\n * Resolves given parsed abbreviation node as property value of given `snippet`:\n * tries to find best matching keyword from CSS snippet\n */\n-function resolveAsPropertyValue(node: CSSProperty, snippet: CSSSnippet, config: Config): CSSProperty {\n- // Possible resolved result for CSS property:\n- // * matched snippet keyword\n- // * color (starts with #)\n- // Everything else should result the same as input abbreviation\n- let keywords = config.options['stylesheet.keywords'].slice();\n- if (snippet) {\n- keywords = keywords.concat(getKeywords(snippet));\n+function resolveAsPropertyValue(node: CSSProperty, config: Config, snippet?: CSSSnippetProperty): CSSProperty {\n+ const kw = getSingleKeyword(node);\n+ if (kw) {\n+ const score = config.options['stylesheet.fuzzySearchMinScore'];\n+ snippet && resolveSnippetKeyword(node, kw.value, snippet, score)\n+ || resolveGlobalKeyword(node, kw.value, config, score);\n }\n-\n- const values = [node.name].concat(node.value.value)\n- .filter(Boolean)\n- .map(value => {\n- if (typeof value === 'string' || value.type === 'keyword') {\n- value = String(value);\n- return findBestMatch(value, keywords, null, config.fuzzySearchMinScore) || value;\n- }\n-\n- return value;\n- });\n-\n- node.name = null;\n- node.value.value = values;\n-\n return node;\n }\n \n@@ -176,6 +147,9 @@ export function findBestMatch(abbr: string, items: T[], mi\n 
}\n \n function getScoringPart(item: MatchInput): string {\n+ if (typeof item === 'string') {\n+ return item;\n+ }\n return (item as CSSKeywordRef).keyword || (item as CSSSnippet).key;\n }\n \n@@ -196,45 +170,66 @@ function getUnmatchedPart(abbr: string, str: string): string {\n return '';\n }\n \n+function resolveSnippetKeyword(node: CSSProperty, kw: string, snippet: CSSSnippetProperty, minScore?: number): boolean {\n+ const keywords = getKeywords(snippet);\n+ const ref = findBestMatch(kw, keywords, minScore);\n+\n+ if (ref) {\n+ node.value = snippet.value[ref.index]!;\n+ return true;\n+ }\n+\n+ return false;\n+}\n+\n /**\n- * Check if given CSS value token is a keyword\n- * @param {*} token\n- * @return {Boolean}\n+ * Tries to resolve node’s value with matched global keyword from given `kw` alias\n+ * @returns `true` if value was successfully resolved\n */\n-function isKeyword(token) {\n- return tokenTypeOf(token, 'keyword');\n+function resolveGlobalKeyword(node: CSSProperty, kw: string, config: Config, minScore?: number): boolean {\n+ const ref = findBestMatch(kw, config.options['stylesheet.keywords'], minScore);\n+ if (ref) {\n+ node.value = [literalValue(ref)];\n+ return true;\n+ }\n+\n+ return false;\n }\n \n /**\n- * Check if given CSS value token is a numeric value\n- * @param {*} token\n- * @return {Boolean}\n+ * Resolves numeric values in given abbreviation node\n */\n-function isNumericValue(token) {\n- return tokenTypeOf(token, 'numeric');\n-}\n+function resolveNumericValue(node: CSSProperty, config: Config) {\n+ const aliases = config.options['stylesheet.unitAliases'];\n+ const unitless = config.options['stylesheet.unitless'];\n \n-function tokenTypeOf(token, type) {\n- return token && typeof token === 'object' && token.type === type;\n+ for (const v of node.value) {\n+ for (const t of v.value) {\n+ if (t.type === 'NumberValue') {\n+ if (t.unit) {\n+ t.unit = aliases[t.unit] || t.unit;\n+ } else if (t.value !== 0 && !unitless.includes(node.name!)) {\n+ // use `px` for integers, `em` for floats\n+ // NB: num|0 is a quick alternative to Math.round(0)\n+ t.unit = t.value === (t.value | 0)\n+ ? config.options['stylesheet.intUnit']\n+ : config.options['stylesheet.floatUnit'];\n+ }\n+ }\n+ }\n+ }\n }\n \n /**\n- * Resolves numeric value for given CSS property\n- * @param {String} property CSS property name\n- * @param {NumericValue} token CSS numeric value token\n- * @param {Object} formatOptions Formatting options for units\n- * @return {NumericValue}\n+ * Returns literal token if it’s a single value of given abbreviation node\n */\n-function resolveNumericValue(property, token, formatOptions) {\n- if (token.unit) {\n- token.unit = formatOptions.unitAliases[token.unit] || token.unit;\n- } else if (token.value !== 0 && unitlessProperties.indexOf(property) === -1) {\n- // use `px` for integers, `em` for floats\n- // NB: num|0 is a quick alternative to Math.round(0)\n- token.unit = token.value === (token.value | 0) ? 
formatOptions.intUnit : formatOptions.floatUnit;\n+function getSingleKeyword(node: CSSProperty): Literal | void {\n+ if (node.value.length === 1) {\n+ const value = node.value[0]!;\n+ if (value.value.length === 1 && value.value[0].type === 'Literal') {\n+ return value.value[0] as Literal;\n+ }\n }\n-\n- return token;\n }\n \n /**\n"},"addition_count":{"kind":"number","value":88,"string":"88"},"commit_subject":{"kind":"string","value":"Working on stylesheet resolving and output"},"deletion_count":{"kind":"number","value":93,"string":"93"},"file_extension":{"kind":"string","value":".ts"},"lang":{"kind":"string","value":"ts"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"emmetio/emmet"}}},{"rowIdx":10676135,"cells":{"id":{"kind":"string","value":"10071785"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n s.require_paths = [\"lib\"]\n\n s.add_dependency(%q, [\"~> 2.1\"])\n s.add_dependency(%q, [\"~> 0.10.0\"])\n s.add_dependency(%q, [\"~> 1.2.6\"])\n\n # Development Dependencies\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n redis-namespace version bumped\n @@ -19,7 +19,7 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency(%q, [\"~> 2.1\"])\n- s.add_dependency(%q, [\"~> 0.10.0\"])\n+ s.add_dependency(%q, [\"~> 1.0.3\"])\n s.add_dependency(%q, [\"~> 1.2.6\"])\n \n # Development Dependencies\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"redis-namespace version bumped"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676136,"cells":{"id":{"kind":"string","value":"10071786"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = 
\"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n s.require_paths = [\"lib\"]\n\n s.add_dependency(%q, [\"~> 2.1\"])\n s.add_dependency(%q, [\"~> 0.10.0\"])\n s.add_dependency(%q, [\"~> 1.2.6\"])\n\n # Development Dependencies\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n redis-namespace version bumped\n @@ -19,7 +19,7 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency(%q, [\"~> 2.1\"])\n- s.add_dependency(%q, [\"~> 0.10.0\"])\n+ s.add_dependency(%q, [\"~> 1.0.3\"])\n s.add_dependency(%q, [\"~> 1.2.6\"])\n \n # Development Dependencies\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"redis-namespace version bumped"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676137,"cells":{"id":{"kind":"string","value":"10071787"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n s.require_paths = [\"lib\"]\n\n s.add_dependency(%q, [\"~> 2.1\"])\n s.add_dependency(%q, [\"~> 0.10.0\"])\n s.add_dependency(%q, [\"~> 1.2.6\"])\n\n # Development Dependencies\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n redis-namespace version bumped\n @@ -19,7 +19,7 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency(%q, [\"~> 2.1\"])\n- s.add_dependency(%q, [\"~> 0.10.0\"])\n+ 
s.add_dependency(%q, [\"~> 1.0.3\"])\n s.add_dependency(%q, [\"~> 1.2.6\"])\n \n # Development Dependencies\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"redis-namespace version bumped"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676138,"cells":{"id":{"kind":"string","value":"10071788"},"text":{"kind":"string","value":" split.rb\n %w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|\n require \"split/#{f}\"\nend\n\nrequire \"split/experiment_catalog\"\nrequire \"split/extensions/string\"\nrequire \"split/goals_collection\"\nrequire \"split/helper\"\nrequire \"split/combined_experiments_helper\"\nrequire \"split/metric\"\nrequire \"split/persistence\"\nrequire \"split/redis_interface\"\nrequire \"split/trial\"\nrequire \"split/user\"\nrequire \"split/version\"\nrequire \"split/zscore\"\nrequire \"split/engine\" if defined?(Rails)\n\nmodule Split\n extend self\n attr_accessor :configuration\n\n # Accepts:\n # 1. A redis URL (valid for `Redis.new(url: url)`)\n # 2. an options hash compatible with `Redis.new`\n # 3. or a valid Redis instance (one that responds to `#smembers`). Likely,\n # this will be an instance of either `Redis`, `Redis::Client`,\n # `Redis::DistRedis`, or `Redis::Namespace`.\n def redis=(server)\n @redis = if server.is_a?(String)\n Redis.new(url: server)\n elsif server.is_a?(Hash)\n Redis.new(server)\n elsif server.respond_to?(:smembers)\n server\n else\n raise ArgumentError,\n \"You must supply a url, options hash or valid Redis connection instance\"\n end\n end\n\n # Returns the current Redis connection. If none has been created, will\n # create a new one.\n def redis\n return @redis if @redis\n self.redis = self.configuration.redis\n self.redis\n end\n\n # Call this method to modify defaults in your initializers.\n #\n # @example\n # Split.configure do |config|\n # config.ignore_ip_addresses = '192.168.2.1'\n # end\n def configure\n self.configuration ||= Configuration.new\n yield(configuration)\n end\n\n def cache(namespace, key, &block)\n Split::Cache.fetch(namespace, key, &block)\n end\nend\n\n# Check to see if being run in a Rails application. 
If so, wait until before_initialize to run configuration so Gems that create ENV variables have the chance to initialize first.\nif defined?(::Rails)\n class Split::Railtie < Rails::Railtie\n config.before_initialize { Split.configure { } }\n end\nelse\n Split.configure { }\nend\n\n made required files list more readable\n\n @@ -1,4 +1,14 @@\n-%w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|\n+%w[algorithms\n+ alternative\n+ configuration\n+ exceptions\n+ experiment\n+ extensions\n+ helper\n+ metric\n+ persistence\n+ trial\n+ version].each do |f|\n require \"split/#{f}\"\n end\n \n"},"addition_count":{"kind":"number","value":11,"string":"11"},"commit_subject":{"kind":"string","value":"made required files list more readable"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676139,"cells":{"id":{"kind":"string","value":"10071789"},"text":{"kind":"string","value":" split.rb\n %w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|\n require \"split/#{f}\"\nend\n\nrequire \"split/experiment_catalog\"\nrequire \"split/extensions/string\"\nrequire \"split/goals_collection\"\nrequire \"split/helper\"\nrequire \"split/combined_experiments_helper\"\nrequire \"split/metric\"\nrequire \"split/persistence\"\nrequire \"split/redis_interface\"\nrequire \"split/trial\"\nrequire \"split/user\"\nrequire \"split/version\"\nrequire \"split/zscore\"\nrequire \"split/engine\" if defined?(Rails)\n\nmodule Split\n extend self\n attr_accessor :configuration\n\n # Accepts:\n # 1. A redis URL (valid for `Redis.new(url: url)`)\n # 2. an options hash compatible with `Redis.new`\n # 3. or a valid Redis instance (one that responds to `#smembers`). Likely,\n # this will be an instance of either `Redis`, `Redis::Client`,\n # `Redis::DistRedis`, or `Redis::Namespace`.\n def redis=(server)\n @redis = if server.is_a?(String)\n Redis.new(url: server)\n elsif server.is_a?(Hash)\n Redis.new(server)\n elsif server.respond_to?(:smembers)\n server\n else\n raise ArgumentError,\n \"You must supply a url, options hash or valid Redis connection instance\"\n end\n end\n\n # Returns the current Redis connection. If none has been created, will\n # create a new one.\n def redis\n return @redis if @redis\n self.redis = self.configuration.redis\n self.redis\n end\n\n # Call this method to modify defaults in your initializers.\n #\n # @example\n # Split.configure do |config|\n # config.ignore_ip_addresses = '192.168.2.1'\n # end\n def configure\n self.configuration ||= Configuration.new\n yield(configuration)\n end\n\n def cache(namespace, key, &block)\n Split::Cache.fetch(namespace, key, &block)\n end\nend\n\n# Check to see if being run in a Rails application. 
If so, wait until before_initialize to run configuration so Gems that create ENV variables have the chance to initialize first.\nif defined?(::Rails)\n class Split::Railtie < Rails::Railtie\n config.before_initialize { Split.configure { } }\n end\nelse\n Split.configure { }\nend\n\n made required files list more readable\n\n @@ -1,4 +1,14 @@\n-%w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|\n+%w[algorithms\n+ alternative\n+ configuration\n+ exceptions\n+ experiment\n+ extensions\n+ helper\n+ metric\n+ persistence\n+ trial\n+ version].each do |f|\n require \"split/#{f}\"\n end\n \n"},"addition_count":{"kind":"number","value":11,"string":"11"},"commit_subject":{"kind":"string","value":"made required files list more readable"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676140,"cells":{"id":{"kind":"string","value":"10071790"},"text":{"kind":"string","value":" split.rb\n %w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|\n require \"split/#{f}\"\nend\n\nrequire \"split/experiment_catalog\"\nrequire \"split/extensions/string\"\nrequire \"split/goals_collection\"\nrequire \"split/helper\"\nrequire \"split/combined_experiments_helper\"\nrequire \"split/metric\"\nrequire \"split/persistence\"\nrequire \"split/redis_interface\"\nrequire \"split/trial\"\nrequire \"split/user\"\nrequire \"split/version\"\nrequire \"split/zscore\"\nrequire \"split/engine\" if defined?(Rails)\n\nmodule Split\n extend self\n attr_accessor :configuration\n\n # Accepts:\n # 1. A redis URL (valid for `Redis.new(url: url)`)\n # 2. an options hash compatible with `Redis.new`\n # 3. or a valid Redis instance (one that responds to `#smembers`). Likely,\n # this will be an instance of either `Redis`, `Redis::Client`,\n # `Redis::DistRedis`, or `Redis::Namespace`.\n def redis=(server)\n @redis = if server.is_a?(String)\n Redis.new(url: server)\n elsif server.is_a?(Hash)\n Redis.new(server)\n elsif server.respond_to?(:smembers)\n server\n else\n raise ArgumentError,\n \"You must supply a url, options hash or valid Redis connection instance\"\n end\n end\n\n # Returns the current Redis connection. If none has been created, will\n # create a new one.\n def redis\n return @redis if @redis\n self.redis = self.configuration.redis\n self.redis\n end\n\n # Call this method to modify defaults in your initializers.\n #\n # @example\n # Split.configure do |config|\n # config.ignore_ip_addresses = '192.168.2.1'\n # end\n def configure\n self.configuration ||= Configuration.new\n yield(configuration)\n end\n\n def cache(namespace, key, &block)\n Split::Cache.fetch(namespace, key, &block)\n end\nend\n\n# Check to see if being run in a Rails application. 
If so, wait until before_initialize to run configuration so Gems that create ENV variables have the chance to initialize first.\nif defined?(::Rails)\n class Split::Railtie < Rails::Railtie\n config.before_initialize { Split.configure { } }\n end\nelse\n Split.configure { }\nend\n\n made required files list more readable\n\n @@ -1,4 +1,14 @@\n-%w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|\n+%w[algorithms\n+ alternative\n+ configuration\n+ exceptions\n+ experiment\n+ extensions\n+ helper\n+ metric\n+ persistence\n+ trial\n+ version].each do |f|\n require \"split/#{f}\"\n end\n \n"},"addition_count":{"kind":"number","value":11,"string":"11"},"commit_subject":{"kind":"string","value":"made required files list more readable"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676141,"cells":{"id":{"kind":"string","value":"10071791"},"text":{"kind":"string","value":" alternative_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/alternative\"\n\ndescribe Split::Alternative do\n let(:alternative) {\n Split::Alternative.new(\"Basket\", \"basket_text\")\n }\n\n let(:alternative2) {\n Split::Alternative.new(\"Cart\", \"basket_text\")\n }\n\n let!(:experiment) {\n Split::ExperimentCatalog.find_or_create({ \"basket_text\" => [\"purchase\", \"refund\"] }, \"Basket\", \"Cart\")\n }\n\n let(:goal1) { \"purchase\" }\n let(:goal2) { \"refund\" }\n\n it \"should have goals\" do\n expect(alternative.goals).to eq([\"purchase\", \"refund\"])\n end\n\n it \"should have and only return the name\" do\n expect(alternative.name).to eq(\"Basket\")\n end\n\n describe \"weights\" do\n it \"should set the weights\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [{ \"Basket\" => 0.6 }, { \"Cart\" => 0.4 }])\n first = experiment.alternatives[0]\n expect(first.name).to eq(\"Basket\")\n expect(first.weight).to eq(0.6)\n\n second = experiment.alternatives[1]\n expect(second.name).to eq(\"Cart\")\n expect(second.weight).to eq(0.4)\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ]\n }\n }\n experiment = Split::Experiment.new(:my_experiment)\n first = experiment.alternatives[0]\n expect(first.name).to eq(\"control_opt\")\n expect(first.weight).to eq(0.67)\n\n second = experiment.alternatives[1]\n expect(second.name).to eq(\"second_opt\")\n expect(second.weight).to eq(0.1)\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n }\n experiment = Split::Experiment.new(:my_experiment)\n alts = experiment.alternatives\n [\n [\"control_opt\", 0.34],\n [\"second_opt\", 0.215],\n [\"third_opt\", 0.23],\n [\"fourth_opt\", 0.215]\n ].each do |h|\n name, weight = h\n alt = alts.shift\n expect(alt.name).to eq(name)\n expect(alt.weight).to eq(weight)\n end\n end\n #\n it \"allows name param without probability\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { 
name: \"control_opt\" },\n \"second_opt\",\n end\n end\n\n it \"should return an existing alternative\" do\n alternative = Split::Alternative.create('Basket', 'basket_text')\n Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')\n end\n\n describe 'z score' do\n it 'should be zero when the control has no conversions' do\n experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')\n [\n [\"control_opt\", 0.18],\n [\"second_opt\", 0.18],\n [\"third_opt\", 0.64],\n ].each do |h|\n name, weight = h\n alt = alts.shift\n expect(alt.name).to eq(name)\n expect(alt.weight).to eq(weight)\n end\n end\n end\n\n it \"should have a default participation count of 0\" do\n expect(alternative.participant_count).to eq(0)\n end\n\n it \"should have a default completed count of 0 for each goal\" do\n expect(alternative.completed_count).to eq(0)\n expect(alternative.completed_count(goal1)).to eq(0)\n expect(alternative.completed_count(goal2)).to eq(0)\n end\n\n it \"should belong to an experiment\" do\n expect(alternative.experiment.name).to eq(experiment.name)\n end\n\n it \"should save to redis\" do\n alternative.save\n expect(Split.redis.exists?(\"basket_text:Basket\")).to be true\n end\n\n it \"should increment participation count\" do\n old_participant_count = alternative.participant_count\n alternative.increment_participation\n expect(alternative.participant_count).to eq(old_participant_count+1)\n end\n\n it \"should increment completed count for each goal\" do\n old_default_completed_count = alternative.completed_count\n old_completed_count_for_goal1 = alternative.completed_count(goal1)\n old_completed_count_for_goal2 = alternative.completed_count(goal2)\n\n alternative.increment_completion\n alternative.increment_completion(goal1)\n alternative.increment_completion(goal2)\n\n expect(alternative.completed_count).to eq(old_default_completed_count+1)\n expect(alternative.completed_count(goal1)).to eq(old_completed_count_for_goal1+1)\n expect(alternative.completed_count(goal2)).to eq(old_completed_count_for_goal2+1)\n end\n\n it \"can be reset\" do\n alternative.participant_count = 10\n alternative.set_completed_count(4, goal1)\n alternative.set_completed_count(5, goal2)\n alternative.set_completed_count(6)\n alternative.reset\n expect(alternative.participant_count).to eq(0)\n expect(alternative.completed_count(goal1)).to eq(0)\n expect(alternative.completed_count(goal2)).to eq(0)\n expect(alternative.completed_count).to eq(0)\n end\n\n it \"should know if it is the control of an experiment\" do\n expect(alternative.control?).to be_truthy\n expect(alternative2.control?).to be_falsey\n end\n\n describe \"unfinished_count\" do\n it \"should be difference between participant and completed counts\" do\n alternative.increment_participation\n expect(alternative.unfinished_count).to eq(alternative.participant_count)\n end\n\n it \"should return the correct unfinished_count\" do\n alternative.participant_count = 10\n alternative.set_completed_count(4, goal1)\n alternative.set_completed_count(3, goal2)\n alternative.set_completed_count(2)\n\n expect(alternative.unfinished_count).to eq(1)\n end\n end\n\n describe \"conversion rate\" do\n it \"should be 0 if there are no conversions\" do\n expect(alternative.completed_count).to eq(0)\n expect(alternative.conversion_rate).to eq(0)\n end\n\n it \"calculate conversion rate\" do\n expect(alternative).to receive(:participant_count).exactly(6).times.and_return(10)\n expect(alternative).to receive(:completed_count).and_return(4)\n 
expect(alternative.conversion_rate).to eq(0.4)\n\n expect(alternative).to receive(:completed_count).with(goal1).and_return(5)\n expect(alternative.conversion_rate(goal1)).to eq(0.5)\n\n expect(alternative).to receive(:completed_count).with(goal2).and_return(6)\n expect(alternative.conversion_rate(goal2)).to eq(0.6)\n end\n end\n\n describe \"probability winner\" do\n before do\n experiment.calc_winning_alternatives\n end\n\n it \"should have a probability of being the winning alternative (p_winner)\" do\n expect(alternative.p_winner).not_to be_nil\n end\n\n it \"should have a probability of being the winner for each goal\" do\n expect(alternative.p_winner(goal1)).not_to be_nil\n end\n\n it \"should be possible to set the p_winner\" do\n alternative.set_p_winner(0.5)\n expect(alternative.p_winner).to eq(0.5)\n end\n\n it \"should be possible to set the p_winner for each goal\" do\n alternative.set_p_winner(0.5, goal1)\n expect(alternative.p_winner(goal1)).to eq(0.5)\n end\n end\n\n describe \"z score\" do\n it \"should return an error string when the control has 0 people\" do\n expect(alternative2.z_score).to eq(\"Needs 30+ participants.\")\n expect(alternative2.z_score(goal1)).to eq(\"Needs 30+ participants.\")\n expect(alternative2.z_score(goal2)).to eq(\"Needs 30+ participants.\")\n end\n\n it \"should return an error string when the data is skewed or incomplete as per the np > 5 test\" do\n control = experiment.control\n control.participant_count = 100\n control.set_completed_count(50)\n\n alternative2.participant_count = 50\n alternative2.set_completed_count(1)\n\n expect(alternative2.z_score).to eq(\"Needs 5+ conversions.\")\n end\n\n it \"should return a float for a z_score given proper data\" do\n control = experiment.control\n control.participant_count = 120\n control.set_completed_count(20)\n\n alternative2.participant_count = 100\n alternative2.set_completed_count(25)\n\n expect(alternative2.z_score).to be_kind_of(Float)\n expect(alternative2.z_score).to_not eq(0)\n end\n\n it \"should correctly calculate a z_score given proper data\" do\n control = experiment.control\n control.participant_count = 126\n control.set_completed_count(89)\n\n alternative2.participant_count = 142\n alternative2.set_completed_count(119)\n\n expect(alternative2.z_score.round(2)).to eq(2.58)\n end\n\n it \"should be N/A for the control\" do\n control = experiment.control\n expect(control.z_score).to eq(\"N/A\")\n expect(control.z_score(goal1)).to eq(\"N/A\")\n expect(control.z_score(goal2)).to eq(\"N/A\")\n end\n\n it \"should not blow up for Conversion Rates > 1\" do\n control = experiment.control\n control.participant_count = 3474\n control.set_completed_count(4244)\n\n alternative2.participant_count = 3434\n alternative2.set_completed_count(4358)\n\n expect { control.z_score }.not_to raise_error\n expect { alternative2.z_score }.not_to raise_error\n end\n end\n\n describe \"extra_info\" do\n it \"reads saved value of recorded_info in redis\" do\n saved_recorded_info = { \"key_1\" => 1, \"key_2\" => \"2\" }\n Split.redis.hset \"#{alternative.experiment_name}:#{alternative.name}\", \"recorded_info\", saved_recorded_info.to_json\n extra_info = alternative.extra_info\n\n expect(extra_info).to eql(saved_recorded_info)\n end\n end\n\n describe \"record_extra_info\" do\n it \"saves key\" do\n alternative.record_extra_info(\"signup\", 1)\n expect(alternative.extra_info[\"signup\"]).to eql(1)\n end\n\n it \"adds value to saved key's value second argument is number\" do\n 
alternative.record_extra_info(\"signup\", 1)\n alternative.record_extra_info(\"signup\", 2)\n expect(alternative.extra_info[\"signup\"]).to eql(3)\n end\n\n it \"sets saved's key value to the second argument if it's a string\" do\n alternative.record_extra_info(\"signup\", \"Value 1\")\n expect(alternative.extra_info[\"signup\"]).to eql(\"Value 1\")\n\n alternative.record_extra_info(\"signup\", \"Value 2\")\n expect(alternative.extra_info[\"signup\"]).to eql(\"Value 2\")\n end\n end\nend\n\n Alternative.create doesn't make much sense now\n @@ -94,11 +94,6 @@ describe Split::Alternative do\n end\n end\n \n- it \"should return an existing alternative\" do\n- alternative = Split::Alternative.create('Basket', 'basket_text')\n- Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')\n- end\n-\n describe 'z score' do\n it 'should be zero when the control has no conversions' do\n experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Alternative.create doesn't make much sense now"},"deletion_count":{"kind":"number","value":5,"string":"5"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676142,"cells":{"id":{"kind":"string","value":"10071792"},"text":{"kind":"string","value":" alternative_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/alternative\"\n\ndescribe Split::Alternative do\n let(:alternative) {\n Split::Alternative.new(\"Basket\", \"basket_text\")\n }\n\n let(:alternative2) {\n Split::Alternative.new(\"Cart\", \"basket_text\")\n }\n\n let!(:experiment) {\n Split::ExperimentCatalog.find_or_create({ \"basket_text\" => [\"purchase\", \"refund\"] }, \"Basket\", \"Cart\")\n }\n\n let(:goal1) { \"purchase\" }\n let(:goal2) { \"refund\" }\n\n it \"should have goals\" do\n expect(alternative.goals).to eq([\"purchase\", \"refund\"])\n end\n\n it \"should have and only return the name\" do\n expect(alternative.name).to eq(\"Basket\")\n end\n\n describe \"weights\" do\n it \"should set the weights\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [{ \"Basket\" => 0.6 }, { \"Cart\" => 0.4 }])\n first = experiment.alternatives[0]\n expect(first.name).to eq(\"Basket\")\n expect(first.weight).to eq(0.6)\n\n second = experiment.alternatives[1]\n expect(second.name).to eq(\"Cart\")\n expect(second.weight).to eq(0.4)\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ]\n }\n }\n experiment = Split::Experiment.new(:my_experiment)\n first = experiment.alternatives[0]\n expect(first.name).to eq(\"control_opt\")\n expect(first.weight).to eq(0.67)\n\n second = experiment.alternatives[1]\n expect(second.name).to eq(\"second_opt\")\n expect(second.weight).to eq(0.1)\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n }\n experiment = Split::Experiment.new(:my_experiment)\n alts = experiment.alternatives\n [\n [\"control_opt\", 0.34],\n [\"second_opt\", 
0.215],\n [\"third_opt\", 0.23],\n [\"fourth_opt\", 0.215]\n ].each do |h|\n name, weight = h\n alt = alts.shift\n expect(alt.name).to eq(name)\n expect(alt.weight).to eq(weight)\n end\n end\n #\n it \"allows name param without probability\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n end\n end\n\n it \"should return an existing alternative\" do\n alternative = Split::Alternative.create('Basket', 'basket_text')\n Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')\n end\n\n describe 'z score' do\n it 'should be zero when the control has no conversions' do\n experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')\n [\n [\"control_opt\", 0.18],\n [\"second_opt\", 0.18],\n [\"third_opt\", 0.64],\n ].each do |h|\n name, weight = h\n alt = alts.shift\n expect(alt.name).to eq(name)\n expect(alt.weight).to eq(weight)\n end\n end\n end\n\n it \"should have a default participation count of 0\" do\n expect(alternative.participant_count).to eq(0)\n end\n\n it \"should have a default completed count of 0 for each goal\" do\n expect(alternative.completed_count).to eq(0)\n expect(alternative.completed_count(goal1)).to eq(0)\n expect(alternative.completed_count(goal2)).to eq(0)\n end\n\n it \"should belong to an experiment\" do\n expect(alternative.experiment.name).to eq(experiment.name)\n end\n\n it \"should save to redis\" do\n alternative.save\n expect(Split.redis.exists?(\"basket_text:Basket\")).to be true\n end\n\n it \"should increment participation count\" do\n old_participant_count = alternative.participant_count\n alternative.increment_participation\n expect(alternative.participant_count).to eq(old_participant_count+1)\n end\n\n it \"should increment completed count for each goal\" do\n old_default_completed_count = alternative.completed_count\n old_completed_count_for_goal1 = alternative.completed_count(goal1)\n old_completed_count_for_goal2 = alternative.completed_count(goal2)\n\n alternative.increment_completion\n alternative.increment_completion(goal1)\n alternative.increment_completion(goal2)\n\n expect(alternative.completed_count).to eq(old_default_completed_count+1)\n expect(alternative.completed_count(goal1)).to eq(old_completed_count_for_goal1+1)\n expect(alternative.completed_count(goal2)).to eq(old_completed_count_for_goal2+1)\n end\n\n it \"can be reset\" do\n alternative.participant_count = 10\n alternative.set_completed_count(4, goal1)\n alternative.set_completed_count(5, goal2)\n alternative.set_completed_count(6)\n alternative.reset\n expect(alternative.participant_count).to eq(0)\n expect(alternative.completed_count(goal1)).to eq(0)\n expect(alternative.completed_count(goal2)).to eq(0)\n expect(alternative.completed_count).to eq(0)\n end\n\n it \"should know if it is the control of an experiment\" do\n expect(alternative.control?).to be_truthy\n expect(alternative2.control?).to be_falsey\n end\n\n describe \"unfinished_count\" do\n it \"should be difference between participant and completed counts\" do\n alternative.increment_participation\n expect(alternative.unfinished_count).to eq(alternative.participant_count)\n end\n\n it \"should return the correct unfinished_count\" do\n alternative.participant_count = 10\n alternative.set_completed_count(4, goal1)\n alternative.set_completed_count(3, goal2)\n alternative.set_completed_count(2)\n\n expect(alternative.unfinished_count).to eq(1)\n end\n end\n\n describe \"conversion rate\" do\n it \"should be 0 if 
there are no conversions\" do\n expect(alternative.completed_count).to eq(0)\n expect(alternative.conversion_rate).to eq(0)\n end\n\n it \"calculate conversion rate\" do\n expect(alternative).to receive(:participant_count).exactly(6).times.and_return(10)\n expect(alternative).to receive(:completed_count).and_return(4)\n expect(alternative.conversion_rate).to eq(0.4)\n\n expect(alternative).to receive(:completed_count).with(goal1).and_return(5)\n expect(alternative.conversion_rate(goal1)).to eq(0.5)\n\n expect(alternative).to receive(:completed_count).with(goal2).and_return(6)\n expect(alternative.conversion_rate(goal2)).to eq(0.6)\n end\n end\n\n describe \"probability winner\" do\n before do\n experiment.calc_winning_alternatives\n end\n\n it \"should have a probability of being the winning alternative (p_winner)\" do\n expect(alternative.p_winner).not_to be_nil\n end\n\n it \"should have a probability of being the winner for each goal\" do\n expect(alternative.p_winner(goal1)).not_to be_nil\n end\n\n it \"should be possible to set the p_winner\" do\n alternative.set_p_winner(0.5)\n expect(alternative.p_winner).to eq(0.5)\n end\n\n it \"should be possible to set the p_winner for each goal\" do\n alternative.set_p_winner(0.5, goal1)\n expect(alternative.p_winner(goal1)).to eq(0.5)\n end\n end\n\n describe \"z score\" do\n it \"should return an error string when the control has 0 people\" do\n expect(alternative2.z_score).to eq(\"Needs 30+ participants.\")\n expect(alternative2.z_score(goal1)).to eq(\"Needs 30+ participants.\")\n expect(alternative2.z_score(goal2)).to eq(\"Needs 30+ participants.\")\n end\n\n it \"should return an error string when the data is skewed or incomplete as per the np > 5 test\" do\n control = experiment.control\n control.participant_count = 100\n control.set_completed_count(50)\n\n alternative2.participant_count = 50\n alternative2.set_completed_count(1)\n\n expect(alternative2.z_score).to eq(\"Needs 5+ conversions.\")\n end\n\n it \"should return a float for a z_score given proper data\" do\n control = experiment.control\n control.participant_count = 120\n control.set_completed_count(20)\n\n alternative2.participant_count = 100\n alternative2.set_completed_count(25)\n\n expect(alternative2.z_score).to be_kind_of(Float)\n expect(alternative2.z_score).to_not eq(0)\n end\n\n it \"should correctly calculate a z_score given proper data\" do\n control = experiment.control\n control.participant_count = 126\n control.set_completed_count(89)\n\n alternative2.participant_count = 142\n alternative2.set_completed_count(119)\n\n expect(alternative2.z_score.round(2)).to eq(2.58)\n end\n\n it \"should be N/A for the control\" do\n control = experiment.control\n expect(control.z_score).to eq(\"N/A\")\n expect(control.z_score(goal1)).to eq(\"N/A\")\n expect(control.z_score(goal2)).to eq(\"N/A\")\n end\n\n it \"should not blow up for Conversion Rates > 1\" do\n control = experiment.control\n control.participant_count = 3474\n control.set_completed_count(4244)\n\n alternative2.participant_count = 3434\n alternative2.set_completed_count(4358)\n\n expect { control.z_score }.not_to raise_error\n expect { alternative2.z_score }.not_to raise_error\n end\n end\n\n describe \"extra_info\" do\n it \"reads saved value of recorded_info in redis\" do\n saved_recorded_info = { \"key_1\" => 1, \"key_2\" => \"2\" }\n Split.redis.hset \"#{alternative.experiment_name}:#{alternative.name}\", \"recorded_info\", saved_recorded_info.to_json\n extra_info = alternative.extra_info\n\n 
expect(extra_info).to eql(saved_recorded_info)\n end\n end\n\n describe \"record_extra_info\" do\n it \"saves key\" do\n alternative.record_extra_info(\"signup\", 1)\n expect(alternative.extra_info[\"signup\"]).to eql(1)\n end\n\n it \"adds value to saved key's value second argument is number\" do\n alternative.record_extra_info(\"signup\", 1)\n alternative.record_extra_info(\"signup\", 2)\n expect(alternative.extra_info[\"signup\"]).to eql(3)\n end\n\n it \"sets saved's key value to the second argument if it's a string\" do\n alternative.record_extra_info(\"signup\", \"Value 1\")\n expect(alternative.extra_info[\"signup\"]).to eql(\"Value 1\")\n\n alternative.record_extra_info(\"signup\", \"Value 2\")\n expect(alternative.extra_info[\"signup\"]).to eql(\"Value 2\")\n end\n end\nend\n\n Alternative.create doesn't make much sense now\n @@ -94,11 +94,6 @@ describe Split::Alternative do\n end\n end\n \n- it \"should return an existing alternative\" do\n- alternative = Split::Alternative.create('Basket', 'basket_text')\n- Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')\n- end\n-\n describe 'z score' do\n it 'should be zero when the control has no conversions' do\n experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Alternative.create doesn't make much sense now"},"deletion_count":{"kind":"number","value":5,"string":"5"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676143,"cells":{"id":{"kind":"string","value":"10071793"},"text":{"kind":"string","value":" alternative_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/alternative\"\n\ndescribe Split::Alternative do\n let(:alternative) {\n Split::Alternative.new(\"Basket\", \"basket_text\")\n }\n\n let(:alternative2) {\n Split::Alternative.new(\"Cart\", \"basket_text\")\n }\n\n let!(:experiment) {\n Split::ExperimentCatalog.find_or_create({ \"basket_text\" => [\"purchase\", \"refund\"] }, \"Basket\", \"Cart\")\n }\n\n let(:goal1) { \"purchase\" }\n let(:goal2) { \"refund\" }\n\n it \"should have goals\" do\n expect(alternative.goals).to eq([\"purchase\", \"refund\"])\n end\n\n it \"should have and only return the name\" do\n expect(alternative.name).to eq(\"Basket\")\n end\n\n describe \"weights\" do\n it \"should set the weights\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [{ \"Basket\" => 0.6 }, { \"Cart\" => 0.4 }])\n first = experiment.alternatives[0]\n expect(first.name).to eq(\"Basket\")\n expect(first.weight).to eq(0.6)\n\n second = experiment.alternatives[1]\n expect(second.name).to eq(\"Cart\")\n expect(second.weight).to eq(0.4)\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ]\n }\n }\n experiment = Split::Experiment.new(:my_experiment)\n first = experiment.alternatives[0]\n expect(first.name).to eq(\"control_opt\")\n expect(first.weight).to eq(0.67)\n\n second = experiment.alternatives[1]\n expect(second.name).to eq(\"second_opt\")\n expect(second.weight).to eq(0.1)\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments = {\n 
my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n }\n experiment = Split::Experiment.new(:my_experiment)\n alts = experiment.alternatives\n [\n [\"control_opt\", 0.34],\n [\"second_opt\", 0.215],\n [\"third_opt\", 0.23],\n [\"fourth_opt\", 0.215]\n ].each do |h|\n name, weight = h\n alt = alts.shift\n expect(alt.name).to eq(name)\n expect(alt.weight).to eq(weight)\n end\n end\n #\n it \"allows name param without probability\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n end\n end\n\n it \"should return an existing alternative\" do\n alternative = Split::Alternative.create('Basket', 'basket_text')\n Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')\n end\n\n describe 'z score' do\n it 'should be zero when the control has no conversions' do\n experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')\n [\n [\"control_opt\", 0.18],\n [\"second_opt\", 0.18],\n [\"third_opt\", 0.64],\n ].each do |h|\n name, weight = h\n alt = alts.shift\n expect(alt.name).to eq(name)\n expect(alt.weight).to eq(weight)\n end\n end\n end\n\n it \"should have a default participation count of 0\" do\n expect(alternative.participant_count).to eq(0)\n end\n\n it \"should have a default completed count of 0 for each goal\" do\n expect(alternative.completed_count).to eq(0)\n expect(alternative.completed_count(goal1)).to eq(0)\n expect(alternative.completed_count(goal2)).to eq(0)\n end\n\n it \"should belong to an experiment\" do\n expect(alternative.experiment.name).to eq(experiment.name)\n end\n\n it \"should save to redis\" do\n alternative.save\n expect(Split.redis.exists?(\"basket_text:Basket\")).to be true\n end\n\n it \"should increment participation count\" do\n old_participant_count = alternative.participant_count\n alternative.increment_participation\n expect(alternative.participant_count).to eq(old_participant_count+1)\n end\n\n it \"should increment completed count for each goal\" do\n old_default_completed_count = alternative.completed_count\n old_completed_count_for_goal1 = alternative.completed_count(goal1)\n old_completed_count_for_goal2 = alternative.completed_count(goal2)\n\n alternative.increment_completion\n alternative.increment_completion(goal1)\n alternative.increment_completion(goal2)\n\n expect(alternative.completed_count).to eq(old_default_completed_count+1)\n expect(alternative.completed_count(goal1)).to eq(old_completed_count_for_goal1+1)\n expect(alternative.completed_count(goal2)).to eq(old_completed_count_for_goal2+1)\n end\n\n it \"can be reset\" do\n alternative.participant_count = 10\n alternative.set_completed_count(4, goal1)\n alternative.set_completed_count(5, goal2)\n alternative.set_completed_count(6)\n alternative.reset\n expect(alternative.participant_count).to eq(0)\n expect(alternative.completed_count(goal1)).to eq(0)\n expect(alternative.completed_count(goal2)).to eq(0)\n expect(alternative.completed_count).to eq(0)\n end\n\n it \"should know if it is the control of an experiment\" do\n expect(alternative.control?).to be_truthy\n expect(alternative2.control?).to be_falsey\n end\n\n describe \"unfinished_count\" do\n it \"should be difference between participant and completed counts\" do\n alternative.increment_participation\n expect(alternative.unfinished_count).to eq(alternative.participant_count)\n end\n\n it \"should return the correct 
unfinished_count\" do\n alternative.participant_count = 10\n alternative.set_completed_count(4, goal1)\n alternative.set_completed_count(3, goal2)\n alternative.set_completed_count(2)\n\n expect(alternative.unfinished_count).to eq(1)\n end\n end\n\n describe \"conversion rate\" do\n it \"should be 0 if there are no conversions\" do\n expect(alternative.completed_count).to eq(0)\n expect(alternative.conversion_rate).to eq(0)\n end\n\n it \"calculate conversion rate\" do\n expect(alternative).to receive(:participant_count).exactly(6).times.and_return(10)\n expect(alternative).to receive(:completed_count).and_return(4)\n expect(alternative.conversion_rate).to eq(0.4)\n\n expect(alternative).to receive(:completed_count).with(goal1).and_return(5)\n expect(alternative.conversion_rate(goal1)).to eq(0.5)\n\n expect(alternative).to receive(:completed_count).with(goal2).and_return(6)\n expect(alternative.conversion_rate(goal2)).to eq(0.6)\n end\n end\n\n describe \"probability winner\" do\n before do\n experiment.calc_winning_alternatives\n end\n\n it \"should have a probability of being the winning alternative (p_winner)\" do\n expect(alternative.p_winner).not_to be_nil\n end\n\n it \"should have a probability of being the winner for each goal\" do\n expect(alternative.p_winner(goal1)).not_to be_nil\n end\n\n it \"should be possible to set the p_winner\" do\n alternative.set_p_winner(0.5)\n expect(alternative.p_winner).to eq(0.5)\n end\n\n it \"should be possible to set the p_winner for each goal\" do\n alternative.set_p_winner(0.5, goal1)\n expect(alternative.p_winner(goal1)).to eq(0.5)\n end\n end\n\n describe \"z score\" do\n it \"should return an error string when the control has 0 people\" do\n expect(alternative2.z_score).to eq(\"Needs 30+ participants.\")\n expect(alternative2.z_score(goal1)).to eq(\"Needs 30+ participants.\")\n expect(alternative2.z_score(goal2)).to eq(\"Needs 30+ participants.\")\n end\n\n it \"should return an error string when the data is skewed or incomplete as per the np > 5 test\" do\n control = experiment.control\n control.participant_count = 100\n control.set_completed_count(50)\n\n alternative2.participant_count = 50\n alternative2.set_completed_count(1)\n\n expect(alternative2.z_score).to eq(\"Needs 5+ conversions.\")\n end\n\n it \"should return a float for a z_score given proper data\" do\n control = experiment.control\n control.participant_count = 120\n control.set_completed_count(20)\n\n alternative2.participant_count = 100\n alternative2.set_completed_count(25)\n\n expect(alternative2.z_score).to be_kind_of(Float)\n expect(alternative2.z_score).to_not eq(0)\n end\n\n it \"should correctly calculate a z_score given proper data\" do\n control = experiment.control\n control.participant_count = 126\n control.set_completed_count(89)\n\n alternative2.participant_count = 142\n alternative2.set_completed_count(119)\n\n expect(alternative2.z_score.round(2)).to eq(2.58)\n end\n\n it \"should be N/A for the control\" do\n control = experiment.control\n expect(control.z_score).to eq(\"N/A\")\n expect(control.z_score(goal1)).to eq(\"N/A\")\n expect(control.z_score(goal2)).to eq(\"N/A\")\n end\n\n it \"should not blow up for Conversion Rates > 1\" do\n control = experiment.control\n control.participant_count = 3474\n control.set_completed_count(4244)\n\n alternative2.participant_count = 3434\n alternative2.set_completed_count(4358)\n\n expect { control.z_score }.not_to raise_error\n expect { alternative2.z_score }.not_to raise_error\n end\n end\n\n describe \"extra_info\" 
do\n it \"reads saved value of recorded_info in redis\" do\n saved_recorded_info = { \"key_1\" => 1, \"key_2\" => \"2\" }\n Split.redis.hset \"#{alternative.experiment_name}:#{alternative.name}\", \"recorded_info\", saved_recorded_info.to_json\n extra_info = alternative.extra_info\n\n expect(extra_info).to eql(saved_recorded_info)\n end\n end\n\n describe \"record_extra_info\" do\n it \"saves key\" do\n alternative.record_extra_info(\"signup\", 1)\n expect(alternative.extra_info[\"signup\"]).to eql(1)\n end\n\n it \"adds value to saved key's value second argument is number\" do\n alternative.record_extra_info(\"signup\", 1)\n alternative.record_extra_info(\"signup\", 2)\n expect(alternative.extra_info[\"signup\"]).to eql(3)\n end\n\n it \"sets saved's key value to the second argument if it's a string\" do\n alternative.record_extra_info(\"signup\", \"Value 1\")\n expect(alternative.extra_info[\"signup\"]).to eql(\"Value 1\")\n\n alternative.record_extra_info(\"signup\", \"Value 2\")\n expect(alternative.extra_info[\"signup\"]).to eql(\"Value 2\")\n end\n end\nend\n\n Alternative.create doesn't make much sense now\n @@ -94,11 +94,6 @@ describe Split::Alternative do\n end\n end\n \n- it \"should return an existing alternative\" do\n- alternative = Split::Alternative.create('Basket', 'basket_text')\n- Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')\n- end\n-\n describe 'z score' do\n it 'should be zero when the control has no conversions' do\n experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Alternative.create doesn't make much sense now"},"deletion_count":{"kind":"number","value":5,"string":"5"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676144,"cells":{"id":{"kind":"string","value":"10071794"},"text":{"kind":"string","value":" helper.rb\n # frozen_string_literal: true\n\nmodule Split\n module Helper\n OVERRIDE_PARAM_NAME = \"ab_test\"\n\n module_function\n\n def ab_test(metric_descriptor, control = nil, *alternatives)\n begin\n experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)\n alternative = if Split.configuration.enabled && !exclude_visitor?\n experiment.save\n raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?\n trial = Trial.new(user: ab_user, experiment: experiment,\n override: override_alternative(experiment.name), exclude: exclude_visitor?,\n disabled: split_generically_disabled?)\n alt = trial.choose!(self)\n alt ? 
alt.name : nil\n else\n control_variable(experiment.control)\n end\n rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e\n raise(e) unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n\n if Split.configuration.db_failover_allow_parameter_override\n alternative = override_alternative(experiment.name) if override_present?(experiment.name)\n alternative = control_variable(experiment.control) if split_generically_disabled?\n end\n ensure\n alternative ||= control_variable(experiment.control)\n end\n\n if block_given?\n metadata = experiment.metadata[alternative] if experiment.metadata\n yield(alternative, metadata || {})\n else\n alternative\n end\n end\n\n def reset!(experiment)\n ab_user.delete(experiment.key)\n end\n\n def finish_experiment(experiment, options = { reset: true })\n return false if active_experiments[experiment.name].nil?\n return true if experiment.has_winner?\n should_reset = experiment.resettable? && options[:reset]\n if ab_user[experiment.finished_key] && !should_reset\n true\n else\n alternative_name = ab_user[experiment.key]\n trial = Trial.new(\n user: ab_user,\n experiment: experiment,\n alternative: alternative_name,\n goals: options[:goals],\n )\n\n trial.complete!(self)\n\n if should_reset\n reset!(experiment)\n else\n ab_user[experiment.finished_key] = true\n end\n end\n end\n\n def ab_finished(metric_descriptor, options = { reset: true })\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n next if override_present?(experiment.key)\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? 
|| Split.configuration.disabled?\n metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n alternative_name = ab_user[experiment.key]\n\n if alternative_name\n alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }\n alternative.record_extra_info(key, value) if alternative\n end\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_active_experiments\n ab_user.active_experiments\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def override_present?(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative_by_params(experiment_name)\n defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]\n end\n\n def override_alternative_by_cookies(experiment_name)\n return unless defined?(request)\n\n if request.cookies && request.cookies.key?(\"split_override\")\n experiments = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n experiments[experiment_name]\n end\n end\n\n def split_generically_disabled?\n defined?(params) && params[\"SPLIT_DISABLE\"]\n end\n\n def ab_user\n @ab_user ||= User.new(self)\n end\n\n def exclude_visitor?\n defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?)\n end\n\n def is_robot?\n defined?(request) && request.user_agent =~ Split.configuration.robot_regex\n end\n\n def is_preview?\n defined?(request) && defined?(request.headers) && request.headers[\"x-purpose\"] == \"preview\"\n end\n\n def is_ignored_ip_address?\n return false if Split.configuration.ignore_ip_addresses.empty?\n\n Split.configuration.ignore_ip_addresses.each do |ip|\n return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))\n end\n false\n end\n\n def active_experiments\n ab_user.active_experiments\n end\n\n def normalize_metric(metric_descriptor)\n if Hash === metric_descriptor\n experiment_name = metric_descriptor.keys.first\n goals = Array(metric_descriptor.values.first)\n else\n experiment_name = metric_descriptor\n goals = []\n end\n return experiment_name, goals\n end\n\n def control_variable(control)\n Hash === control ? control.keys.first.to_s : control.to_s\n end\n end\nend\n\n Merge pull request #592 from splitrb/fix-unused-variable-warnings\n\nFix unused variable warnings\n @@ -80,7 +80,7 @@ module Split\n \n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? 
|| Split.configuration.disabled?\n- metric_descriptor, goals = normalize_metric(metric_descriptor)\n+ metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n \n if experiments.any?\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #592 from splitrb/fix-unused-variable-warnings"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},
{"rowIdx":10676147,"cells":{"id":{"kind":"string","value":"10071797"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency 'redis', '>= 4.2'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'rubystats', '>= 0.3.0'\n\n s.add_development_dependency 'bundler', '>= 1.17'\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Merge pull request #635 from splitrb/sinatra-has-left-the-building\n\nRemove Sinatra Dependency for Split Dashboard\n @@ -31,7 +31,6 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency 'redis', '>= 4.2'\n- s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'rubystats', '>= 0.3.0'\n \n s.add_development_dependency 'bundler', '>= 1.17'\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Merge pull request #635 from splitrb/sinatra-has-left-the-building"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},
\"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency 'redis', '>= 4.2'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'rubystats', '>= 0.3.0'\n\n s.add_development_dependency 'bundler', '>= 1.17'\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Merge pull request #635 from splitrb/sinatra-has-left-the-building\n\nRemove Sinatra Dependency for Split Dashboard\n @@ -31,7 +31,6 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency 'redis', '>= 4.2'\n- s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'rubystats', '>= 0.3.0'\n \n s.add_development_dependency 'bundler', '>= 1.17'\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Merge pull request #635 from splitrb/sinatra-has-left-the-building"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676149,"cells":{"id":{"kind":"string","value":"10071799"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files 
-- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency 'redis', '>= 4.2'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'rubystats', '>= 0.3.0'\n\n s.add_development_dependency 'bundler', '>= 1.17'\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Merge pull request #635 from splitrb/sinatra-has-left-the-building\n\nRemove Sinatra Dependency for Split Dashboard\n @@ -31,7 +31,6 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency 'redis', '>= 4.2'\n- s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'rubystats', '>= 0.3.0'\n \n s.add_development_dependency 'bundler', '>= 1.17'\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Merge pull request #635 from splitrb/sinatra-has-left-the-building"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676150,"cells":{"id":{"kind":"string","value":"10071800"},"text":{"kind":"string","value":" Rakefile\n require 'bundler/gem_tasks'\nrequire 'rspec/core/rake_task'\n\nrequire \"rspec/core/rake_task\"\n\nRSpec::Core::RakeTask.new(\"spec\")\n\ntask default: :spec\n\n Added rake shebang\n\n @@ -1,3 +1,4 @@\n+#!/usr/bin/env rake\n require 'bundler/gem_tasks'\n require 'rspec/core/rake_task'\n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Added rake shebang"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"Rakefile"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676151,"cells":{"id":{"kind":"string","value":"10071801"},"text":{"kind":"string","value":" Rakefile\n require 'bundler/gem_tasks'\nrequire 'rspec/core/rake_task'\n\nrequire \"rspec/core/rake_task\"\n\nRSpec::Core::RakeTask.new(\"spec\")\n\ntask default: :spec\n\n Added rake shebang\n\n @@ -1,3 +1,4 @@\n+#!/usr/bin/env rake\n require 'bundler/gem_tasks'\n require 'rspec/core/rake_task'\n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Added rake shebang"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"Rakefile"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676152,"cells":{"id":{"kind":"string","value":"10071802"},"text":{"kind":"string","value":" Rakefile\n require 'bundler/gem_tasks'\nrequire 'rspec/core/rake_task'\n\nrequire \"rspec/core/rake_task\"\n\nRSpec::Core::RakeTask.new(\"spec\")\n\ntask default: :spec\n\n Added rake shebang\n\n @@ -1,3 +1,4 @@\n+#!/usr/bin/env rake\n require 'bundler/gem_tasks'\n require 'rspec/core/rake_task'\n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Added rake 
shebang"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"Rakefile"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676153,"cells":{"id":{"kind":"string","value":"10071803"},"text":{"kind":"string","value":" TODO\n PyPI feature replication\n========================\n\n* Make it possible to register users via distutils.\n There should be a setting to turn this feature on/off for private PyPIs.\n [taken-by: sverrej]\n* Maybe add a permission \"can upload new release\", so more than one\n user can change the same project.\n* Should a project have co-owners?\n* Documentation upload\n* Ratings\n* Random Monty Python quotes :-)\n* Comments :-)\n\nPost-PyPI\n=========\n\n* PEP-381: Mirroring infrastructure for PyPI\n [taken-by: jezdez]\n\n* API to submit test reports for smoke test bots. Like CPAN Testers.\n Platform/version/matrix etc.\n\n* Different listings: Author listings, classifier listings, etc.\n\n* Search metadata\n\n* Automatic generation of Sphinx for modules (so you can view them directly\non pypi, like CPAN), Module listing etc.\n\n* Listing of special files: README, LICENSE, Changefile/Changes, TODO,\n MANIFEST.\n\n* Dependency graphs.\n\n* Package file browser (like CPAN)\n\n\n\n\nDocumentation\n=============\n\n* Write a tutorial on how to set up the server, registering projects, and\n how to upload releases.\n\n\n\n more TODO tasks\n\n @@ -7,3 +7,7 @@\n * Maybe add a permission \"can upload new release\", so more than one\n user can change the same project.\n * Should a project have co-owners?\n+ - One possible solution: \n+ http://github.com/initcrash/django-object-permissions/tree\n+* Script to populate classifiers from \n+ http://pypi.python.org/pypi?%3Aaction=list_classifiers \n"},"addition_count":{"kind":"number","value":4,"string":"4"},"commit_subject":{"kind":"string","value":"more TODO tasks"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"TODO"},"license":{"kind":"string","value":"bsd-3-clause"},"repo_name":{"kind":"string","value":"ask/chishop"}}},{"rowIdx":10676154,"cells":{"id":{"kind":"string","value":"10071804"},"text":{"kind":"string","value":" engine.rb\n ADDFILE\n extracting Rails specifics into engine subclass\n\n @@ -0,0 +1,8 @@\n+module Split\n+ class Engine < ::Rails::Engine\n+ initializer \"split\" do |app|\n+ ActionController::Base.send :include, Split::Helper\n+ ActionController::Base.helper Split::Helper\n+ end\n+ end\n+end\n\\ No newline at end of file\n"},"addition_count":{"kind":"number","value":8,"string":"8"},"commit_subject":{"kind":"string","value":"extracting Rails specifics into engine subclass"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676155,"cells":{"id":{"kind":"string","value":"10071805"},"text":{"kind":"string","value":" engine.rb\n ADDFILE\n extracting Rails specifics into engine subclass\n\n @@ -0,0 +1,8 @@\n+module Split\n+ class Engine < ::Rails::Engine\n+ initializer \"split\" do |app|\n+ ActionController::Base.send :include, Split::Helper\n+ ActionController::Base.helper Split::Helper\n+ end\n+ end\n+end\n\\ No newline at end of 
file\n"},"addition_count":{"kind":"number","value":8,"string":"8"},"commit_subject":{"kind":"string","value":"extracting Rails specifics into engine subclass"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676156,"cells":{"id":{"kind":"string","value":"10071806"},"text":{"kind":"string","value":" engine.rb\n ADDFILE\n extracting Rails specifics into engine subclass\n\n @@ -0,0 +1,8 @@\n+module Split\n+ class Engine < ::Rails::Engine\n+ initializer \"split\" do |app|\n+ ActionController::Base.send :include, Split::Helper\n+ ActionController::Base.helper Split::Helper\n+ end\n+ end\n+end\n\\ No newline at end of file\n"},"addition_count":{"kind":"number","value":8,"string":"8"},"commit_subject":{"kind":"string","value":"extracting Rails specifics into engine subclass"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676157,"cells":{"id":{"kind":"string","value":"10071807"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = 
Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically 
disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n 
Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n describe 'ab_finished' do\n before(:each) do\n @experiment_name = 'link_color'\n @alternatives = ['blue', 'red']\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it 'should increment the counter for the completed alternative' do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, {:reset => false})\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it 'should not increment the counter if reset is false and the experiment has been already finished' do\n 2.times { ab_finished(@experiment_name, {:reset => false}) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it 'should not increment the counter for an experiment that the user is not participating in' do\n ab_test('button_size', 'small', 'big')\n\n # So, user should be participating in the link_color 
experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect(lambda {\n ab_finished('button_size')\n }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }\n end\n\n it 'should not increment the counter for an ended experiment' do\n e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')\n e.winner = 'small'\n a = ab_test('button_size', 'small', 'big')\n expect(a).to eq('small')\n expect(lambda {\n ab_finished('button_size')\n }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, {:reset => false})\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should do nothing where the experiment was not started by this user\" do\n ab_user = nil\n expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n end\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, \"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. 
As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n 
expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to 
be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n 
  describe "when ip address is ignored" do
    context "individually" do
      before(:each) do
        @request = OpenStruct.new(ip: "81.19.48.130")
        Split.configure do |c|
          c.ignore_ip_addresses << "81.19.48.130"
        end
      end

      it_behaves_like "a disabled test"
    end

    context "for a range" do
      before(:each) do
        @request = OpenStruct.new(ip: "81.19.48.129")
        Split.configure do |c|
          c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
        end
      end

      it_behaves_like "a disabled test"
    end

    context "using both a range and a specific value" do
      before(:each) do
        @request = OpenStruct.new(ip: "81.19.48.128")
        Split.configure do |c|
          c.ignore_ip_addresses << "81.19.48.130"
          c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/
        end
      end

      it_behaves_like "a disabled test"
    end

    context "when ignored other address" do
      before do
        @request = OpenStruct.new(ip: "1.1.1.1")
        Split.configure do |c|
          c.ignore_ip_addresses << "81.19.48.130"
        end
      end

      it "works as usual" do
        alternative_name = ab_test("link_color", "red", "blue")
        expect {
          ab_finished("link_color")
        }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1)
      end
    end
  end

  describe "when user is previewing" do
    before(:each) do
      @request = OpenStruct.new(headers: { "x-purpose" => "preview" })
    end

    it_behaves_like "a disabled test"
  end

  describe "versioned experiments" do
    it "should use version zero if no version is present" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(experiment.version).to eq(0)
      expect(ab_user["link_color"]).to eq(alternative_name)
    end

    it "should save the version of the experiment to the session" do
      experiment.reset
      expect(experiment.version).to eq(1)
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(alternative_name)
    end

    it "should load the experiment even if the version is not 0" do
      experiment.reset
      expect(experiment.version).to eq(1)
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(alternative_name)
      return_alternative_name = ab_test("link_color", "blue", "red")
      expect(return_alternative_name).to eq(alternative_name)
    end

    it "should reset the session of a user on an older version of the experiment" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(1)

      experiment.reset
      expect(experiment.version).to eq(1)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(0)

      new_alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(new_alternative_name)
      new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
      expect(new_alternative.participant_count).to eq(1)
    end

    it "should clean up old versions of experiments from the session" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(1)

      experiment.reset
      expect(experiment.version).to eq(1)
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(0)

      new_alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(new_alternative_name)
    end

    it "should only count completion of users on the current version" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)
      Split::Alternative.new(alternative_name, "link_color")

      experiment.reset
      expect(experiment.version).to eq(1)

      ab_finished("link_color")
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.completed_count).to eq(0)
    end
  end
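  # The specs below pin down Redis failure handling: with db_failover off the
  # Errno::ECONNREFUSED propagates to the caller, while with it on ab_test
  # silently falls back to the first (control) alternative and invokes the
  # configured db_failover_on_db_error hook.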
\"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n 
  context "with preloaded config" do
    before { Split.configuration.experiments = {} }

    it "pulls options from config file" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }
      ab_test :my_experiment
      expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
    end

    it "can be called multiple times" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }
      5.times { ab_test :my_experiment }
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(experiment.goals).to eq([ "goal1", "goal2" ])
      expect(experiment.participant_count).to eq(1)
    end

    it "accepts multiple goals" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: [ "goal1", "goal2", "goal3" ]
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
    end

    it "allows goals to be optional" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ]
      }
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([])
    end

    it "accepts multiple alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "second_opt", "third_opt" ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
    end

    it "accepts probability on alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 67 },
          { name: "second_opt", percent: 10 },
          { name: "third_opt", percent: 23 },
        ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
    end

    it "accepts probability on some alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 34 },
          "second_opt",
          { name: "third_opt", percent: 23 },
          "fourth_opt",
        ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end
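    # Weight arithmetic for the mixed case above: the explicit percents cover
    # 0.34 + 0.23 = 0.57, so the remaining 1.0 - 0.57 = 0.43 is split evenly
    # across the two unweighted alternatives, 0.43 / 2 = 0.215 each. The same
    # rule yields (1.0 - 0.64) / 2 = 0.18 in the example below.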
    it "allows name param without probability" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt" },
          "second_opt",
          { name: "third_opt", percent: 64 },
        ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end

    it "fails gracefully if config is missing experiment" do
      Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
      expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
    end

    it "fails gracefully if config is missing" do
      expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
    end

    it "fails gracefully if config is missing alternatives" do
      Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
      expect { ab_test :my_experiment }.to raise_error(NoMethodError)
    end
  end

  it "should handle multiple experiments correctly" do
    experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
    ab_test("link_color", "blue", "red")
    ab_test("link_color2", "blue", "red")
    ab_finished("link_color2")

    experiment2.alternatives.each do |alt|
      expect(alt.unfinished_count).to eq(0)
    end
  end

  context "with goals" do
    before do
      @experiment = { "link_color" => ["purchase", "refund"] }
      @alternatives = ["blue", "red"]
      @experiment_name, @goals = normalize_metric(@experiment)
      @goal1 = @goals[0]
      @goal2 = @goals[1]
    end

    it "should normalize experiment" do
      expect(@experiment_name).to eq("link_color")
      expect(@goals).to eq(["purchase", "refund"])
    end

    describe "ab_test" do
      it "should allow experiment goals interface as a single hash" do
        ab_test(@experiment, *@alternatives)
        experiment = Split::ExperimentCatalog.find("link_color")
        expect(experiment.goals).to eq(["purchase", "refund"])
      end
    end

    describe "ab_finished" do
      before do
        @alternative_name = ab_test(@experiment, *@alternatives)
      end

      it "should increment the counter for the specified-goal completed alternative" do
        expect { ab_finished({ "link_color" => ["purchase"] }) }
          .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
          .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
      end
    end
  end
end

Check if the experiment about to finish is active for the user
@@ -296,98 +296,126 @@ describe Split::Helper do
   end
 
   describe 'ab_finished' do
-    before(:each) do
-      @experiment_name = 'link_color'
-      @alternatives = ['blue', 'red']
-      @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
-      @alternative_name = ab_test(@experiment_name, *@alternatives)
-      @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-    end
+    context 'for an experiment that the user participates in' do
+      before(:each) do
+        @experiment_name = 'link_color'
+        @alternatives = ['blue', 'red']
+        @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
+        @alternative_name = ab_test(@experiment_name, *@alternatives)
+        @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+      end
 
-    it 'should increment the counter for the completed alternative' do
-      ab_finished(@experiment_name)
-      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-      expect(new_completion_count).to eq(@previous_completion_count + 1)
-    end
+      it 'should increment the counter for the completed alternative' do
+        ab_finished(@experiment_name)
+        new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+        expect(new_completion_count).to eq(@previous_completion_count + 1)
+      end
 
-    it "should set experiment's finished key if reset is false" do
-      ab_finished(@experiment_name, {:reset => false})
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      expect(ab_user[@experiment.finished_key]).to eq(true)
-    end
+      it "should set experiment's finished key if reset is false" do
+        ab_finished(@experiment_name, {:reset => false})
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        expect(ab_user[@experiment.finished_key]).to eq(true)
+      end
 
-    it 'should not increment the counter if reset is false and the experiment has been already finished' do
-      2.times { ab_finished(@experiment_name, {:reset => false}) }
-      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-      expect(new_completion_count).to eq(@previous_completion_count + 1)
-    end
+      it 'should not increment the counter if reset is false and the experiment has been already finished' do
+        2.times { ab_finished(@experiment_name, {:reset => false}) }
+        new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+        expect(new_completion_count).to eq(@previous_completion_count + 1)
+      end
 
-    it 'should not increment the counter for an experiment that the user is not participating in' do
-      ab_test('button_size', 'small', 'big')
+      it 'should not increment the counter for an ended experiment' do
+        e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')
+        e.winner = 'small'
+        a = ab_test('button_size', 'small', 'big')
+        expect(a).to eq('small')
+        expect(lambda {
+          ab_finished('button_size')
+        }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }
+      end
 
-      # So, user should be participating in the link_color experiment and
-      # receive the control for button_size. As the user is not participating in
-      # the button size experiment, finishing it should not increase the
-      # completion count for that alternative.
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
-    end
+      it "should clear out the user's participation from their session" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it 'should not increment the counter for an ended experiment' do
-      e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')
-      e.winner = 'small'
-      a = ab_test('button_size', 'small', 'big')
-      expect(a).to eq('small')
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }
-    end
+      it "should not clear out the users session if reset is false" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name, {:reset => false})
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        expect(ab_user[@experiment.finished_key]).to eq(true)
+      end
 
-    it "should clear out the user's participation from their session" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+      it "should reset the users session when experiment is not versioned" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it "should not clear out the users session if reset is false" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name, {:reset => false})
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      expect(ab_user[@experiment.finished_key]).to eq(true)
-    end
+      it "should reset the users session when experiment is versioned" do
+        @experiment.increment_version
+        @alternative_name = ab_test(@experiment_name, *@alternatives)
 
-    it "should reset the users session when experiment is not versioned" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it "should reset the users session when experiment is versioned" do
-      @experiment.increment_version
-      @alternative_name = ab_test(@experiment_name, *@alternatives)
+      context "when on_trial_complete is set" do
+        before { Split.configuration.on_trial_complete = :some_method }
+        it "should call the method" do
+          expect(self).to receive(:some_method)
+          ab_finished(@experiment_name)
+        end
 
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
+        it "should not call the method without alternative" do
+          ab_user[@experiment.key] = nil
+          expect(self).not_to receive(:some_method)
+          ab_finished(@experiment_name)
+        end
+      end
     end
 
-    it "should do nothing where the experiment was not started by this user" do
-      ab_user = nil
-      expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception
+    context 'for an experiment that the user is excluded from' do
+      before do
+        alternative = ab_test('link_color', 'blue', 'red')
+        expect(Split::Alternative.new(alternative, 'link_color').participant_count).to eq(1)
+        alternative = ab_test('button_size', 'small', 'big')
+        expect(Split::Alternative.new(alternative, 'button_size').participant_count).to eq(0)
+      end
+
+      it 'should not increment the completed counter' do
+        # So, user should be participating in the link_color experiment and
+        # receive the control for button_size. As the user is not participating in
+        # the button size experiment, finishing it should not increase the
+        # completion count for that alternative.
+        expect(lambda {
+          ab_finished('button_size')
+        }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
+      end
     end
 
-    context "when on_trial_complete is set" do
-      before { Split.configuration.on_trial_complete = :some_method }
-      it "should call the method" do
-        expect(self).to receive(:some_method)
-        ab_finished(@experiment_name)
+    context 'for an experiment that the user does not participate in' do
+      before do
+        Split::ExperimentCatalog.find_or_create(:not_started_experiment, 'control', 'alt')
+      end
+      it 'should not raise an exception' do
+        expect { ab_finished(:not_started_experiment) }.not_to raise_exception
       end
 
-      it "should not call the method without alternative" do
-        ab_user[@experiment.key] = nil
-        expect(self).not_to receive(:some_method)
-        ab_finished(@experiment_name)
+      it 'should not change the user state when reset is false' do
+        expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
+      end
+
+      it 'should not change the user state when reset is true' do
+        expect(self).not_to receive(:reset!)
+        ab_finished(:not_started_experiment)
+      end
+
+      it 'should not increment the completed counter' do
+        ab_finished(:not_started_experiment)
+        expect(Split::Alternative.new('control', :not_started_experiment).completed_count).to eq(0)
+        expect(Split::Alternative.new('alt', :not_started_experiment).completed_count).to eq(0)
      end
     end
   end
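The commit message and diff above add a participation check to ab_finished. Below is a minimal sketch of that guard, not the gem's shipped implementation: normalize_metric, ab_user, Split::ExperimentCatalog.find, experiment.key / finished_key and Split::Alternative#increment_completion all appear in the specs above, while exclude_visitor?, has_winner? and ab_user.delete are illustrative names standing in for whatever the real helper uses.

# Sketch only: a hypothetical helper mirroring the behaviour the specs pin down.
def ab_finished_sketch(metric_descriptor, options = { reset: true })
  return if exclude_visitor?  # assumed bot/ignored-IP filter, per the disabled-test specs

  experiment_name, _goals = normalize_metric(metric_descriptor)
  experiment = Split::ExperimentCatalog.find(experiment_name)
  return if experiment.nil? || experiment.has_winner?  # has_winner? is an assumed predicate

  # The check this commit is about: a user who never entered the experiment
  # (or was excluded from it) has no stored alternative, so finishing must
  # neither bump completed_count nor touch their session state.
  alternative_name = ab_user[experiment.key]
  return if alternative_name.nil?

  # Guard against double-counting when a prior finish used reset: false.
  return if ab_user[experiment.finished_key]

  Split::Alternative.new(alternative_name, experiment_name).increment_completion

  if options[:reset]
    ab_user.delete(experiment.key)  # assumed session-reset call
  else
    ab_user[experiment.finished_key] = true
  end
end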
array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n 
expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to 
eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n describe 'ab_finished' do\n before(:each) do\n @experiment_name = 'link_color'\n @alternatives = ['blue', 'red']\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, 
*@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it 'should increment the counter for the completed alternative' do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, {:reset => false})\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it 'should not increment the counter if reset is false and the experiment has been already finished' do\n 2.times { ab_finished(@experiment_name, {:reset => false}) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it 'should not increment the counter for an experiment that the user is not participating in' do\n ab_test('button_size', 'small', 'big')\n\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect(lambda {\n ab_finished('button_size')\n }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }\n end\n\n it 'should not increment the counter for an ended experiment' do\n e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')\n e.winner = 'small'\n a = ab_test('button_size', 'small', 'big')\n expect(a).to eq('small')\n expect(lambda {\n ab_finished('button_size')\n }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, {:reset => false})\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should do nothing where the experiment was not started by this user\" do\n ab_user = nil\n expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n 
end\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, \"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n 
should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when 
user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n 
it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should 
only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before 
do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n 
  context "with preloaded config" do
    before { Split.configuration.experiments = {} }

    it "pulls options from config file" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }
      ab_test :my_experiment
      expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ])
    end

    it "can be called multiple times" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: ["goal1", "goal2"]
      }
      5.times { ab_test :my_experiment }
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ])
      expect(experiment.goals).to eq([ "goal1", "goal2" ])
      expect(experiment.participant_count).to eq(1)
    end

    it "accepts multiple goals" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        goals: [ "goal1", "goal2", "goal3" ]
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ])
    end

    it "allow specifying goals to be optional" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ]
      }
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.goals).to eq([])
    end

    it "accepts multiple alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "second_opt", "third_opt" ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ])
    end

    it "accepts probability on alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 67 },
          { name: "second_opt", percent: 10 },
          { name: "third_opt", percent: 23 },
        ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]])
    end

    it "accepts probability on some alternatives" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt", percent: 34 },
          "second_opt",
          { name: "third_opt", percent: 23 },
          "fourth_opt",
        ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end

    it "allows name param without probability" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [
          { name: "control_opt" },
          "second_opt",
          { name: "third_opt", percent: 64 },
        ],
      }
      ab_test :my_experiment
      experiment = Split::Experiment.new(:my_experiment)
      names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }
      expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]])
      expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)
    end

    it "fails gracefully if config is missing experiment" do
      Split.configuration.experiments = { other_experiment: { foo: "Bar" } }
      expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)
    end

    it "fails gracefully if config is missing" do
      expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)
    end

    it "fails gracefully if config is missing alternatives" do
      Split.configuration.experiments[:my_experiment] = { foo: "Bar" }
      expect { ab_test :my_experiment }.to raise_error(NoMethodError)
    end
  end

  it "should handle multiple experiments correctly" do
    experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red")
    ab_test("link_color", "blue", "red")
    ab_test("link_color2", "blue", "red")
    ab_finished("link_color2")

    experiment2.alternatives.each do |alt|
      expect(alt.unfinished_count).to eq(0)
    end
  end

  context "with goals" do
    before do
      @experiment = { "link_color" => ["purchase", "refund"] }
      @alternatives = ["blue", "red"]
      @experiment_name, @goals = normalize_metric(@experiment)
      @goal1 = @goals[0]
      @goal2 = @goals[1]
    end

    it "should normalize experiment" do
      expect(@experiment_name).to eq("link_color")
      expect(@goals).to eq(["purchase", "refund"])
    end

    describe "ab_test" do
      it "should allow experiment goals interface as a single hash" do
        ab_test(@experiment, *@alternatives)
        experiment = Split::ExperimentCatalog.find("link_color")
        expect(experiment.goals).to eq(["purchase", "refund"])
      end
    end

    describe "ab_finished" do
      before do
        @alternative_name = ab_test(@experiment, *@alternatives)
      end

      it "should increment the counter for the specified-goal completed alternative" do
        expect { ab_finished({ "link_color" => ["purchase"] }) }
          .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)
          .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)
      end
    end
  end
end

Check if the experiment about to finish is active for the user
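
The diff below reorganizes the ab_finished specs into three contexts (an experiment the user participates in, one the user is excluded from, and one the user never joined), asserting that finishing an experiment that is not active for the user neither raises nor touches completion counters or session state; a minimal sketch of that guard follows the diff.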

@@ -296,98 +296,126 @@ describe Split::Helper do
   end
 
   describe 'ab_finished' do
-    before(:each) do
-      @experiment_name = 'link_color'
-      @alternatives = ['blue', 'red']
-      @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
-      @alternative_name = ab_test(@experiment_name, *@alternatives)
-      @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-    end
+    context 'for an experiment that the user participates in' do
+      before(:each) do
+        @experiment_name = 'link_color'
+        @alternatives = ['blue', 'red']
+        @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
+        @alternative_name = ab_test(@experiment_name, *@alternatives)
+        @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+      end
 
-    it 'should increment the counter for the completed alternative' do
-      ab_finished(@experiment_name)
-      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-      expect(new_completion_count).to eq(@previous_completion_count + 1)
-    end
+      it 'should increment the counter for the completed alternative' do
+        ab_finished(@experiment_name)
+        new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+        expect(new_completion_count).to eq(@previous_completion_count + 1)
+      end
 
-    it "should set experiment's finished key if reset is false" do
-      ab_finished(@experiment_name, {:reset => false})
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      expect(ab_user[@experiment.finished_key]).to eq(true)
-    end
+      it "should set experiment's finished key if reset is false" do
+        ab_finished(@experiment_name, {:reset => false})
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        expect(ab_user[@experiment.finished_key]).to eq(true)
+      end
 
-    it 'should not increment the counter if reset is false and the experiment has been already finished' do
-      2.times { ab_finished(@experiment_name, {:reset => false}) }
-      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-      expect(new_completion_count).to eq(@previous_completion_count + 1)
-    end
+      it 'should not increment the counter if reset is false and the experiment has been already finished' do
+        2.times { ab_finished(@experiment_name, {:reset => false}) }
+        new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+        expect(new_completion_count).to eq(@previous_completion_count + 1)
+      end
 
-    it 'should not increment the counter for an experiment that the user is not participating in' do
-      ab_test('button_size', 'small', 'big')
+      it 'should not increment the counter for an ended experiment' do
+        e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')
+        e.winner = 'small'
+        a = ab_test('button_size', 'small', 'big')
+        expect(a).to eq('small')
+        expect(lambda {
+          ab_finished('button_size')
+        }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }
+      end
 
-      # So, user should be participating in the link_color experiment and
-      # receive the control for button_size. As the user is not participating in
-      # the button size experiment, finishing it should not increase the
-      # completion count for that alternative.
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
-    end
+      it "should clear out the user's participation from their session" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it 'should not increment the counter for an ended experiment' do
-      e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')
-      e.winner = 'small'
-      a = ab_test('button_size', 'small', 'big')
-      expect(a).to eq('small')
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }
-    end
+      it "should not clear out the users session if reset is false" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name, {:reset => false})
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        expect(ab_user[@experiment.finished_key]).to eq(true)
+      end
 
-    it "should clear out the user's participation from their session" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+      it "should reset the users session when experiment is not versioned" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it "should not clear out the users session if reset is false" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name, {:reset => false})
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      expect(ab_user[@experiment.finished_key]).to eq(true)
-    end
+      it "should reset the users session when experiment is versioned" do
+        @experiment.increment_version
+        @alternative_name = ab_test(@experiment_name, *@alternatives)
 
-    it "should reset the users session when experiment is not versioned" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it "should reset the users session when experiment is versioned" do
-      @experiment.increment_version
-      @alternative_name = ab_test(@experiment_name, *@alternatives)
+      context "when on_trial_complete is set" do
+        before { Split.configuration.on_trial_complete = :some_method }
+        it "should call the method" do
+          expect(self).to receive(:some_method)
+          ab_finished(@experiment_name)
+        end
 
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
+        it "should not call the method without alternative" do
+          ab_user[@experiment.key] = nil
+          expect(self).not_to receive(:some_method)
+          ab_finished(@experiment_name)
+        end
+      end
     end
 
-    it "should do nothing where the experiment was not started by this user" do
-      ab_user = nil
-      expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception
+    context 'for an experiment that the user is excluded from' do
+      before do
+        alternative = ab_test('link_color', 'blue', 'red')
+        expect(Split::Alternative.new(alternative, 'link_color').participant_count).to eq(1)
+        alternative = ab_test('button_size', 'small', 'big')
+        expect(Split::Alternative.new(alternative, 'button_size').participant_count).to eq(0)
+      end
+
+      it 'should not increment the completed counter' do
+        # So, user should be participating in the link_color experiment and
+        # receive the control for button_size. As the user is not participating in
+        # the button size experiment, finishing it should not increase the
+        # completion count for that alternative.
+        expect(lambda {
+          ab_finished('button_size')
+        }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
+      end
     end
 
-    context "when on_trial_complete is set" do
-      before { Split.configuration.on_trial_complete = :some_method }
-      it "should call the method" do
-        expect(self).to receive(:some_method)
-        ab_finished(@experiment_name)
+    context 'for an experiment that the user does not participate in' do
+      before do
+        Split::ExperimentCatalog.find_or_create(:not_started_experiment, 'control', 'alt')
+      end
+      it 'should not raise an exception' do
+        expect { ab_finished(:not_started_experiment) }.not_to raise_exception
       end
 
-      it "should not call the method without alternative" do
-        ab_user[@experiment.key] = nil
-        expect(self).not_to receive(:some_method)
-        ab_finished(@experiment_name)
+      it 'should not change the user state when reset is false' do
+        expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys}.from([])
+      end
+
+      it 'should not change the user state when reset is true' do
+        expect(self).not_to receive(:reset!)
+        ab_finished(:not_started_experiment)
+      end
+
+      it 'should not increment the completed counter' do
+        ab_finished(:not_started_experiment)
+        expect(Split::Alternative.new('control', :not_started_experiment).completed_count).to eq(0)
+        expect(Split::Alternative.new('alt', :not_started_experiment).completed_count).to eq(0)
       end
     end
   end
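
A minimal sketch of the guard the new specs describe, assuming nothing about Split's internals beyond what the diff asserts; the method name and the Hash standing in for the user's session are illustrative, not part of Split's API.

# Finishing an experiment the user never started must be a pure no-op:
# no exception, no completion counters touched, no session keys changed.
def finish_if_active(user_session, experiment_key, reset: true)
  alternative = user_session[experiment_key]
  return :noop unless alternative               # never started or excluded

  # The real helper would increment the chosen alternative's completed_count here.
  user_session.delete(experiment_key) if reset  # optionally clear participation
  [:completed, alternative]
end

p finish_if_active({ "link_color" => "blue" }, "link_color")  # => [:completed, "blue"]
p finish_if_active({}, "not_started_experiment")              # => :noop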
array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n 
expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to 
eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n describe 'ab_finished' do\n before(:each) do\n @experiment_name = 'link_color'\n @alternatives = ['blue', 'red']\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, 
*@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it 'should increment the counter for the completed alternative' do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, {:reset => false})\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it 'should not increment the counter if reset is false and the experiment has been already finished' do\n 2.times { ab_finished(@experiment_name, {:reset => false}) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it 'should not increment the counter for an experiment that the user is not participating in' do\n ab_test('button_size', 'small', 'big')\n\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect(lambda {\n ab_finished('button_size')\n }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }\n end\n\n it 'should not increment the counter for an ended experiment' do\n e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')\n e.winner = 'small'\n a = ab_test('button_size', 'small', 'big')\n expect(a).to eq('small')\n expect(lambda {\n ab_finished('button_size')\n }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, {:reset => false})\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should do nothing where the experiment was not started by this user\" do\n ab_user = nil\n expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n 
end\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, \"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n 
should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when 
user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n 
it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should 
only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before 
do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n 
expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Check if the experiment about to finish is active for the user\n\n @@ -296,98 +296,126 @@ describe Split::Helper do\n end\n \n describe 'ab_finished' do\n- before(:each) do\n- @experiment_name = 'link_color'\n- @alternatives = ['blue', 'red']\n- @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n- @alternative_name = ab_test(@experiment_name, *@alternatives)\n- @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n- end\n+ context 'for an experiment that the user participates in' do\n+ before(:each) do\n+ @experiment_name = 'link_color'\n+ 
```diff
       @alternatives = ['blue', 'red']
+      @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
+      @alternative_name = ab_test(@experiment_name, *@alternatives)
+      @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+    end

-    it 'should increment the counter for the completed alternative' do
-      ab_finished(@experiment_name)
-      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-      expect(new_completion_count).to eq(@previous_completion_count + 1)
-    end
+    it 'should increment the counter for the completed alternative' do
+      ab_finished(@experiment_name)
+      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+      expect(new_completion_count).to eq(@previous_completion_count + 1)
+    end

-    it "should set experiment's finished key if reset is false" do
-      ab_finished(@experiment_name, {:reset => false})
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      expect(ab_user[@experiment.finished_key]).to eq(true)
-    end
+    it "should set experiment's finished key if reset is false" do
+      ab_finished(@experiment_name, {:reset => false})
+      expect(ab_user[@experiment.key]).to eq(@alternative_name)
+      expect(ab_user[@experiment.finished_key]).to eq(true)
+    end

-    it 'should not increment the counter if reset is false and the experiment has been already finished' do
-      2.times { ab_finished(@experiment_name, {:reset => false}) }
-      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
-      expect(new_completion_count).to eq(@previous_completion_count + 1)
-    end
+    it 'should not increment the counter if reset is false and the experiment has been already finished' do
+      2.times { ab_finished(@experiment_name, {:reset => false}) }
+      new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
+      expect(new_completion_count).to eq(@previous_completion_count + 1)
+    end

-    it 'should not increment the counter for an experiment that the user is not participating in' do
-      ab_test('button_size', 'small', 'big')
+    it 'should not increment the counter for an ended experiment' do
+      e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')
+      e.winner = 'small'
+      a = ab_test('button_size', 'small', 'big')
+      expect(a).to eq('small')
+      expect(lambda {
+        ab_finished('button_size')
+      }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }
+    end

-      # So, user should be participating in the link_color experiment and
-      # receive the control for button_size. As the user is not participating in
-      # the button size experiment, finishing it should not increase the
-      # completion count for that alternative.
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
-    end
+    it "should clear out the user's participation from their session" do
+      expect(ab_user[@experiment.key]).to eq(@alternative_name)
+      ab_finished(@experiment_name)
+      expect(ab_user.keys).to be_empty
+    end

-    it 'should not increment the counter for an ended experiment' do
-      e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')
-      e.winner = 'small'
-      a = ab_test('button_size', 'small', 'big')
-      expect(a).to eq('small')
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }
-    end
+    it "should not clear out the users session if reset is false" do
+      expect(ab_user[@experiment.key]).to eq(@alternative_name)
+      ab_finished(@experiment_name, {:reset => false})
+      expect(ab_user[@experiment.key]).to eq(@alternative_name)
+      expect(ab_user[@experiment.finished_key]).to eq(true)
+    end

-    it "should clear out the user's participation from their session" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+    it "should reset the users session when experiment is not versioned" do
+      expect(ab_user[@experiment.key]).to eq(@alternative_name)
+      ab_finished(@experiment_name)
+      expect(ab_user.keys).to be_empty
+    end

-    it "should not clear out the users session if reset is false" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name, {:reset => false})
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      expect(ab_user[@experiment.finished_key]).to eq(true)
-    end
+    it "should reset the users session when experiment is versioned" do
+      @experiment.increment_version
+      @alternative_name = ab_test(@experiment_name, *@alternatives)

-    it "should reset the users session when experiment is not versioned" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+      expect(ab_user[@experiment.key]).to eq(@alternative_name)
+      ab_finished(@experiment_name)
+      expect(ab_user.keys).to be_empty
+    end

-    it "should reset the users session when experiment is versioned" do
-      @experiment.increment_version
-      @alternative_name = ab_test(@experiment_name, *@alternatives)
+    context "when on_trial_complete is set" do
+      before { Split.configuration.on_trial_complete = :some_method }
+      it "should call the method" do
+        expect(self).to receive(:some_method)
+        ab_finished(@experiment_name)
+      end

-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
+      it "should not call the method without alternative" do
+        ab_user[@experiment.key] = nil
+        expect(self).not_to receive(:some_method)
+        ab_finished(@experiment_name)
+      end
+    end
   end

-  it "should do nothing where the experiment was not started by this user" do
-    ab_user = nil
-    expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception
+  context 'for an experiment that the user is excluded from' do
+    before do
+      alternative = ab_test('link_color', 'blue', 'red')
+      expect(Split::Alternative.new(alternative, 'link_color').participant_count).to eq(1)
+      alternative = ab_test('button_size', 'small', 'big')
+      expect(Split::Alternative.new(alternative, 'button_size').participant_count).to eq(0)
+    end
+
+    it 'should not increment the completed counter' do
+      # So, user should be participating in the link_color experiment and
+      # receive the control for button_size. As the user is not participating in
+      # the button size experiment, finishing it should not increase the
+      # completion count for that alternative.
+      expect(lambda {
+        ab_finished('button_size')
+      }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
+    end
   end

-  context "when on_trial_complete is set" do
-    before { Split.configuration.on_trial_complete = :some_method }
-    it "should call the method" do
-      expect(self).to receive(:some_method)
-      ab_finished(@experiment_name)
+  context 'for an experiment that the user does not participate in' do
+    before do
+      Split::ExperimentCatalog.find_or_create(:not_started_experiment, 'control', 'alt')
+    end
+    it 'should not raise an exception' do
+      expect { ab_finished(:not_started_experiment) }.not_to raise_exception
     end

-    it "should not call the method without alternative" do
-      ab_user[@experiment.key] = nil
-      expect(self).not_to receive(:some_method)
-      ab_finished(@experiment_name)
+    it 'should not change the user state when reset is false' do
+      expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
+    end
+
+    it 'should not change the user state when reset is true' do
+      expect(self).not_to receive(:reset!)
+      ab_finished(:not_started_experiment)
+    end
+
+    it 'should not increment the completed counter' do
+      ab_finished(:not_started_experiment)
+      expect(Split::Alternative.new('control', :not_started_experiment).completed_count).to eq(0)
+      expect(Split::Alternative.new('alt', :not_started_experiment).completed_count).to eq(0)
     end
   end
 end
```

Commit: Check if the experiment about to finish is active for the user (splitrb/split, MIT; +103/-75)
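The new specs above pin down one behaviour in particular: finishing an experiment the current visitor was never cohorted into must be a silent no-op. As a rough usage sketch of what that means for application code (the controller and action below are invented for illustration; only the `ab_test` and `ab_finished` helpers are Split's real API):

```ruby
# Hypothetical Rails controller; only ab_test/ab_finished come from Split.
class OrdersController < ApplicationController
  def create
    # The visitor is cohorted into link_color only.
    ab_test('link_color', 'blue', 'red')

    # button_size was never started for this visitor, so per the specs
    # above this call neither raises nor bumps any completed_count.
    ab_finished('button_size')
  end
end
```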
**experiment_spec.rb** (splitrb/split, MIT)

```ruby
# frozen_string_literal: true

require "spec_helper"
require "time"

describe Split::Experiment do
  def new_experiment(goals = [])
    Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals)
  end

  def alternative(color)
    Split::Alternative.new(color, "link_color")
  end

  let(:experiment) { new_experiment }

  let(:blue) { alternative("blue") }
  let(:green) { alternative("green") }

  context "with an experiment" do
    let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) }

    it "should have a name" do
      expect(experiment.name).to eq("basket_text")
    end

    it "should have alternatives" do
      expect(experiment.alternatives.length).to be 2
    end

    it "should have alternatives with correct names" do
      expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"])
    end

    it "should be resettable by default" do
      expect(experiment.resettable).to be_truthy
    end

    it "should save to redis" do
      experiment.save
      expect(Split.redis.exists?("basket_text")).to be true
    end

    it "should save the start time to redis" do
      experiment_start_time = Time.at(1372167761)
      expect(Time).to receive(:now).and_return(experiment_start_time)
      experiment.save

      expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time)
    end

    it "should not save the start time to redis when start_manually is enabled" do
      expect(Split.configuration).to receive(:start_manually).and_return(true)
      experiment.save

      expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil
    end

    it "should save the selected algorithm to redis" do
      experiment_algorithm = Split::Algorithms::Whiplash
      experiment.algorithm = experiment_algorithm
      experiment.save

      expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm)
    end

    it "should handle having a start time stored as a string" do
      experiment_start_time = Time.parse("Sat Mar 03 14:01:03")
      expect(Time).to receive(:now).twice.and_return(experiment_start_time)
      experiment.save
      Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)

      expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time)
    end

    it "should handle not having a start time" do
      experiment_start_time = Time.parse("Sat Mar 03 14:01:03")
      expect(Time).to receive(:now).and_return(experiment_start_time)
      experiment.save

      Split.redis.hdel(:experiment_start_times, experiment.name)

      expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil
    end

    it "should not create duplicates when saving multiple times" do
      experiment.save
      experiment.save
      expect(Split.redis.exists?("basket_text")).to be true
      expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}'])
    end

    describe "new record?" do
      it "should know if it hasn't been saved yet" do
        expect(experiment.new_record?).to be_truthy
      end

      it "should know if it has been saved yet" do
        experiment.save
        expect(experiment.new_record?).to be_falsey
      end
    end

    describe "control" do
      it "should be the first alternative" do
        experiment.save
        expect(experiment.control.name).to eq("Basket")
      end
    end
  end

  describe "initialization" do
    it "should set the algorithm when passed as an option to the initializer" do
      experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash)
      expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)
    end

    it "should be possible to make an experiment not resettable" do
      experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false)
      expect(experiment.resettable).to be_falsey
    end

    context "from configuration" do
      let(:experiment_name) { :my_experiment }
      let(:experiments) do
        {
          experiment_name => {
            alternatives: ["Control Opt", "Alt one"]
          }
        }
      end

      before { Split.configuration.experiments = experiments }

      it "assigns default values to the experiment" do
        expect(Split::Experiment.new(experiment_name).resettable).to eq(true)
      end
    end
  end

  describe "persistent configuration" do
    it "should persist resettable in redis" do
      experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false)
      experiment.save

      e = Split::ExperimentCatalog.find("basket_text")
      expect(e).to eq(experiment)
      expect(e.resettable).to be_falsey
    end

    describe "#metadata" do
      let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }
      let(:meta) { { a: "b" } }

      before do
        experiment.save
      end

      it "should delete the key when metadata is removed" do
        experiment.metadata = nil
        experiment.save

        expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey
      end

      context "simple hash" do
        let(:meta) { { "basket" => "a", "cart" => "b" } }

        it "should persist metadata in redis" do
          e = Split::ExperimentCatalog.find("basket_text")
          expect(e).to eq(experiment)
          expect(e.metadata).to eq(meta)
        end
      end

      context "nested hash" do
        let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } }
        it "should persist metadata in redis" do
          e = Split::ExperimentCatalog.find("basket_text")
          expect(e).to eq(experiment)
          expect(e.metadata).to eq(meta)
        end
      end
    end

    it "should persist algorithm in redis" do
      experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash)
      experiment.save

      e = Split::ExperimentCatalog.find("basket_text")
      expect(e).to eq(experiment)
      expect(e.algorithm).to eq(Split::Algorithms::Whiplash)
    end

    it "should persist a new experiment in redis, that does not exist in the configuration file" do
      experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash)
      experiment.save

      e = Split::ExperimentCatalog.find("foobar")
      expect(e).to eq(experiment)
      expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"])
    end
  end

  describe "deleting" do
    it "should delete itself" do
      experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"])
      experiment.save

      experiment.delete
      expect(Split.redis.exists?("link_color")).to be false
      expect(Split::ExperimentCatalog.find("link_color")).to be_nil
    end

    it "should increment the version" do
      expect(experiment.version).to eq(0)
      experiment.delete
      expect(experiment.version).to eq(1)
    end

    it "should call the on_experiment_delete hook" do
      expect(Split.configuration.on_experiment_delete).to receive(:call)
      experiment.delete
    end

    it "should call the on_before_experiment_delete hook" do
      expect(Split.configuration.on_before_experiment_delete).to receive(:call)
      experiment.delete
    end

    it "should reset the start time if the experiment should be manually started" do
      Split.configuration.start_manually = true
      experiment.start
      experiment.delete
      expect(experiment.start_time).to be_nil
    end

    it "should default cohorting back to false" do
      experiment.disable_cohorting
      expect(experiment.cohorting_disabled?).to eq(true)
      experiment.delete
      expect(experiment.cohorting_disabled?).to eq(false)
    end
  end

  describe "winner" do
    it "should have no winner initially" do
      expect(experiment.winner).to be_nil
    end
  end

  describe "winner=" do
    it "should allow you to specify a winner" do
      experiment.save
      experiment.winner = "red"
      expect(experiment.winner.name).to eq("red")
    end

    it "should call the on_experiment_winner_choose hook" do
      expect(Split.configuration.on_experiment_winner_choose).to receive(:call)
      experiment.winner = "green"
    end

    context "when has_winner state is memoized" do
      before { expect(experiment).to_not have_winner }

      it "should keep has_winner state consistent" do
        experiment.winner = "red"
        expect(experiment).to have_winner
      end
    end
  end

  describe "reset_winner" do
    before { experiment.winner = "green" }

    it "should reset the winner" do
      experiment.reset_winner
      expect(experiment.winner).to be_nil
    end

    context "when has_winner state is memoized" do
      before { expect(experiment).to have_winner }

      it "should keep has_winner state consistent" do
        experiment.reset_winner
        expect(experiment).to_not have_winner
      end
    end
  end

  describe "has_winner?" do
    context "with winner" do
      before { experiment.winner = "red" }

      it "returns true" do
        expect(experiment).to have_winner
      end
    end

    context "without winner" do
      it "returns false" do
        expect(experiment).to_not have_winner
      end
    end

    it "memoizes has_winner state" do
      expect(experiment).to receive(:winner).once
      expect(experiment).to_not have_winner
      expect(experiment).to_not have_winner
    end
  end

  describe "reset" do
    let(:reset_manually) { false }

    before do
      allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)
      experiment.save
      green.increment_participation
      green.increment_participation
    end

    it "should reset all alternatives" do
      experiment.winner = "green"

      expect(experiment.next_alternative.name).to eq("green")
      green.increment_participation

      experiment.reset

      expect(green.participant_count).to eq(0)
      expect(green.completed_count).to eq(0)
    end

    it "should reset the winner" do
      experiment.winner = "green"

      expect(experiment.next_alternative.name).to eq("green")
      green.increment_participation

      experiment.reset

      expect(experiment.winner).to be_nil
    end

    it "should increment the version" do
      expect(experiment.version).to eq(0)
      experiment.reset
      expect(experiment.version).to eq(1)
    end

    it "should call the on_experiment_reset hook" do
      expect(Split.configuration.on_experiment_reset).to receive(:call)
      experiment.reset
    end

    it "should call the on_before_experiment_reset hook" do
      expect(Split.configuration.on_before_experiment_reset).to receive(:call)
      experiment.reset
    end
  end

  describe "algorithm" do
    let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") }

    it "should use the default algorithm if none is specified" do
      expect(experiment.algorithm).to eq(Split.configuration.algorithm)
    end

    it "should use the user specified algorithm for this experiment if specified" do
      experiment.algorithm = Split::Algorithms::Whiplash
      expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)
    end
  end

  describe "#next_alternative" do
    context "with multiple alternatives" do
      let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") }

      context "with winner" do
        it "should always return the winner" do
          green = Split::Alternative.new("green", "link_color")
          experiment.winner = "green"

          expect(experiment.next_alternative.name).to eq("green")
          green.increment_participation

          expect(experiment.next_alternative.name).to eq("green")
        end
      end

      context "without winner" do
        it "should use the specified algorithm" do
          experiment.algorithm = Split::Algorithms::Whiplash
          expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color"))
          expect(experiment.next_alternative.name).to eq("green")
        end
      end
    end

    context "with single alternative" do
      let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") }

      it "should always return the only alternative" do
        expect(experiment.next_alternative.name).to eq("blue")
        expect(experiment.next_alternative.name).to eq("blue")
      end
    end
  end

  describe "#cohorting_disabled?" do
    it "returns false when nothing has been configured" do
      expect(experiment.cohorting_disabled?).to eq false
    end

    it "returns false when enable_cohorting is performed" do
      experiment.enable_cohorting
      expect(experiment.cohorting_disabled?).to eq false
    end

    it "returns true when disable_cohorting is performed" do
      experiment.disable_cohorting
      expect(experiment.cohorting_disabled?).to eq true
    end
  end

  describe "changing an existing experiment" do
    def same_but_different_alternative
      Split::ExperimentCatalog.find_or_create("link_color", "blue", "yellow", "orange")
    end

    it "should reset an experiment if it is loaded with different alternatives" do
      experiment.save
      blue.participant_count = 5
      same_experiment = same_but_different_alternative
      expect(same_experiment.alternatives.map(&:name)).to eq(["blue", "yellow", "orange"])
      expect(blue.participant_count).to eq(0)
    end

    it "should only reset once" do
      experiment.save
      expect(experiment.version).to eq(0)
      same_experiment = same_but_different_alternative
      expect(same_experiment.version).to eq(1)
      same_experiment_again = same_but_different_alternative
      expect(same_experiment_again.version).to eq(1)
    end

    context "when metadata is changed" do
      it "should increase version" do
        experiment.save
        experiment.metadata = { "foo" => "bar" }

        expect { experiment.save }.to change { experiment.version }.by(1)
      end

      it "does not increase version" do
        experiment.metadata = nil
        experiment.save
        expect { experiment.save }.to change { experiment.version }.by(0)
      end
    end

    context "when experiment configuration is changed" do
      let(:reset_manually) { false }

      before do
        experiment.save
        allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)
        green.increment_participation
        green.increment_participation
        experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),
                                                goals: %w(purchase))
        experiment.save
      end

      it "resets all alternatives" do
        expect(green.participant_count).to eq(0)
        expect(green.completed_count).to eq(0)
      end

      context "when reset_manually is set" do
        let(:reset_manually) { true }

        it "does not reset alternatives" do
          expect(green.participant_count).to eq(2)
          expect(green.completed_count).to eq(0)
        end
      end
    end
  end

  describe "alternatives passed as non-strings" do
    it "should throw an exception if an alternative is passed that is not a string" do
      expect { Split::ExperimentCatalog.find_or_create("link_color", :blue, :red) }.to raise_error(ArgumentError)
      expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError)
    end
  end

  describe "specifying weights" do
    let(:experiment_with_weight) {
      Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 })
    }

    it "should work for a new experiment" do
      expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])
    end

    it "should work for an existing experiment" do
      experiment.save
      expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])
    end
  end

  describe "specifying goals" do
    let(:experiment) {
      new_experiment(["purchase"])
    }

    context "saving experiment" do
      let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") }

      before { experiment.save }

      it "can find existing experiment" do
        expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color")
      end

      it "should reset an experiment if it is loaded with different goals" do
        same_but_different_goals
        Split::ExperimentCatalog.clear_cache
        expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"])
      end

      it "should have goals" do
        expect(experiment.goals).to eq(["purchase"])
      end

      context "find or create experiment" do
        it "should have correct goals" do
          experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green")
          expect(experiment.goals).to eq(["purchase", "refund"])
          experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green")
          expect(experiment.goals).to eq([])
        end
      end
    end
  end

  describe "beta probability calculation" do
    it "should return a hash with the probability of each alternative being the best" do
      experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange")
      experiment.calc_winning_alternatives
      expect(experiment.alternative_probabilities).not_to be_nil
    end

    it "should return between 46% and 54% probability for an experiment with 2 alternatives and no data" do
      experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr")
      experiment.calc_winning_alternatives
      expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)
    end

    it "should calculate the probability of being the winning alternative separately for each goal", skip: true do
      experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green")
      goal1 = experiment.goals[0]
      goal2 = experiment.goals[1]
      experiment.alternatives.each do |alternative|
        alternative.participant_count = 50
        alternative.set_completed_count(10, goal1)
        alternative.set_completed_count(15 + rand(30), goal2)
      end
      experiment.calc_winning_alternatives
      alt = experiment.alternatives[0]
      p_goal1 = alt.p_winner(goal1)
      p_goal2 = alt.p_winner(goal2)
      expect(p_goal1).not_to be_within(0.04).of(p_goal2)
    end

    it "should return nil and not re-calculate probabilities if they have already been calculated today" do
      experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green")
      expect(experiment.calc_winning_alternatives).not_to be nil
      expect(experiment.calc_winning_alternatives).to be nil
    end
  end
end
```

Commit: Added `cache_catalog` config (+0/-1)

```diff
@@ -530,7 +530,6 @@ describe Split::Experiment do

       it "should reset an experiment if it is loaded with different goals" do
         same_but_different_goals
-        Split::ExperimentCatalog.clear_cache
         expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"])
       end
```
eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: [\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should delete the key when metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { \"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should 
delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should call the on_before_experiment_delete hook\" do\n expect(Split.configuration.on_before_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should reset the start time if the experiment should be manually started\" do\n Split.configuration.start_manually = true\n experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n 
it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n describe \"changing an existing experiment\" do\n def same_but_different_alternative\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"yellow\", \"orange\")\n end\n\n it \"should reset an experiment if it is loaded with different alternatives\" do\n experiment.save\n blue.participant_count = 5\n same_experiment = same_but_different_alternative\n expect(same_experiment.alternatives.map(&:name)).to eq([\"blue\", \"yellow\", \"orange\"])\n expect(blue.participant_count).to eq(0)\n end\n\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => 
\"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) }.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 })\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n Split::ExperimentCatalog.clear_cache\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = 
Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Added `cache_catalog` config\n\n @@ -530,7 +530,6 @@ describe Split::Experiment do\n \n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n- Split::ExperimentCatalog.clear_cache\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n \n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Added `cache_catalog` config"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676162,"cells":{"id":{"kind":"string","value":"10071812"},"text":{"kind":"string","value":" experiment_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"time\"\n\ndescribe Split::Experiment do\n def new_experiment(goals = [])\n Split::Experiment.new(\"link_color\", alternatives: [\"blue\", \"red\", \"green\"], goals: goals)\n end\n\n def alternative(color)\n Split::Alternative.new(color, \"link_color\")\n end\n\n let(:experiment) { new_experiment }\n\n let(:blue) { alternative(\"blue\") }\n let(:green) { alternative(\"green\") }\n\n context \"with an experiment\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"]) }\n\n it \"should have a name\" do\n expect(experiment.name).to eq(\"basket_text\")\n end\n\n it \"should have alternatives\" do\n expect(experiment.alternatives.length).to be 2\n end\n\n it \"should have alternatives with correct names\" do\n expect(experiment.alternatives.collect { |a| a.name }).to eq([\"Basket\", \"Cart\"])\n end\n\n it \"should be resettable by default\" do\n expect(experiment.resettable).to be_truthy\n end\n\n it \"should save to redis\" do\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n end\n\n it \"should save the start time to redis\" do\n experiment_start_time = Time.at(1372167761)\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it 
\"should not save the start time to redis when start_manually is enabled\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should save the selected algorithm to redis\" do\n experiment_algorithm = Split::Algorithms::Whiplash\n experiment.algorithm = experiment_algorithm\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").algorithm).to eq(experiment_algorithm)\n end\n\n it \"should handle having a start time stored as a string\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).twice.and_return(experiment_start_time)\n experiment.save\n Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should handle not having a start time\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n Split.redis.hdel(:experiment_start_times, experiment.name)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should not create duplicates when saving multiple times\" do\n experiment.save\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n expect(Split.redis.lrange(\"basket_text\", 0, -1)).to eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: [\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should 
delete the key when metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { \"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should call the on_before_experiment_delete hook\" do\n expect(Split.configuration.on_before_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should reset the start time if the experiment should be manually started\" do\n Split.configuration.start_manually = true\n experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n 
experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n 
expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n describe \"changing an existing experiment\" do\n def same_but_different_alternative\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"yellow\", \"orange\")\n end\n\n it \"should reset an experiment if it is loaded with different alternatives\" do\n experiment.save\n blue.participant_count = 5\n same_experiment = same_but_different_alternative\n expect(same_experiment.alternatives.map(&:name)).to eq([\"blue\", \"yellow\", \"orange\"])\n expect(blue.participant_count).to eq(0)\n end\n\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => \"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) }.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 })\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { 
Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n Split::ExperimentCatalog.clear_cache\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Added `cache_catalog` config\n\n @@ -530,7 +530,6 @@ describe Split::Experiment do\n \n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n- Split::ExperimentCatalog.clear_cache\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n \n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Added `cache_catalog` 
config"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676163,"cells":{"id":{"kind":"string","value":"10071813"},"text":{"kind":"string","value":" AUTHORS\n Ask Solem \nRune Halvorsen \nRussell Sim \nBrian Rosner \nHugo Lopes Tavares \nSverre Johansen \nBo Shi \nCarl Meyer \nVinícius das Chagas Silva \nStefan Foulis \nMichael Richardson \nHalldór Rúnarsson \nBrent Tubbs \nDavid Cramer \n\n Added Stefan Foulis to AUTHORS\n\n @@ -7,3 +7,4 @@ Sverre Johansen \n Bo Shi \n Carl Meyer \n Vinícius das Chagas Silva \n+Stefan Foulis \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Added Stefan Foulis to AUTHORS"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"AUTHORS"},"license":{"kind":"string","value":"bsd-3-clause"},"repo_name":{"kind":"string","value":"ask/chishop"}}},{"rowIdx":10676164,"cells":{"id":{"kind":"string","value":"10071814"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\n end\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives; if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or drive any other case-based logic.\n\n`ab_finished` is used to mark a completion of an experiment, i.e. a conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n
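For intuition, here is a rough sketch of the standard two-proportion z statistic that the first option described above is based on. This is illustrative only and is not Split's internal implementation (which may differ in details such as how variances are pooled):\n\n```ruby\n# Illustrative only: the standard pooled two-proportion z statistic.\n# conversions_a/n_a describe the control, conversions_b/n_b the alternative.\ndef two_proportion_z(conversions_a, n_a, conversions_b, n_b)\n p_a = conversions_a.to_f / n_a\n p_b = conversions_b.to_f / n_b\n p_pool = (conversions_a + conversions_b).to_f / (n_a + n_b)\n standard_error = Math.sqrt(p_pool * (1 - p_pool) * (1.0 / n_a + 1.0 / n_b))\n (p_b - p_a) / standard_error\nend\n\n# 50/1000 conversions on control vs 70/1000 on the alternative:\ntwo_proportion_z(50, 1000, 70, 1000) # => ~1.88, just short of 95% significance (|z| >= 1.96)\n```\n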
## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n### RSpec Helper\n\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. In case you would like to start a new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option, tests won't be started right after deploy, but after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized.\n
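A minimal sketch of that configuration (the same option also appears, commented out, in the Configuration section below):\n\n```ruby\nSplit.configure do |config|\n # New experiments stay inactive until started from the dashboard.\n config.start_manually = true\nend\n```\n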
### Reset after completion\n\nWhen a user completes a test, their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed or is over and a winner has been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, set the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. 
Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYou can also point `config.persistence` at any custom adapter of your choosing; it needs to implement the same API as the built-in adapters:\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n
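As a rough illustration, a minimal custom adapter might look like the sketch below. This is only a sketch: `UserPreferenceStore` is a hypothetical backing store, and the interface shown (`[]`, `[]=`, `delete`, `keys`) mirrors what the built-in adapters expose, so check `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for the authoritative contract:\n\n```ruby\n# A sketch only. Split instantiates the adapter with the current\n# controller/view context, so it can reach request-specific data.\nclass YourCustomAdapterClass\n def initialize(context)\n # Hypothetical storage keyed by the current user.\n @store = UserPreferenceStore.for(context.current_user)\n end\n\n # Read the stored alternative for an experiment key.\n def [](key)\n @store.get(key)\n end\n\n # Persist the alternative chosen for an experiment key.\n def []=(key, value)\n @store.set(key, value)\n end\n\n # Forget a single experiment assignment.\n def delete(key)\n @store.remove(key)\n end\n\n # List all experiment keys stored for this user.\n def keys\n @store.keys\n end\nend\n```\n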
### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (experiment) { } # do something after a reset\n config.on_experiment_delete = -> (experiment) { } # do something else after a delete\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (experiment) { } # do something before a reset\n config.on_before_experiment_delete = -> (experiment) { } # do something else before a delete\n # after experiment winner has been set\n config.on_experiment_winner_choose = -> (experiment) { } # do something when a winner is chosen\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen add this to config/routes.rb:\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in metadata should be Strings.\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nmetric = Split::Metric.new(:my_metric)\nmetric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n
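To make the flow concrete, here is a sketch of how the two goals above might be wired into a hypothetical checkout controller (the controller and action names are illustrative, not part of Split):\n\n```ruby\nclass CheckoutsController < ApplicationController\n def show\n # Enrolls the user in the experiment and returns \"red\" or \"blue\".\n @link_color = ab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n end\n\n def purchase\n # Completes the \"purchase\" goal for this user.\n ab_finished({ link_color: \"purchase\" }, reset: false)\n end\n\n def refund\n # With reset: false, this no longer registers once the user\n # has already completed the \"purchase\" goal.\n ab_finished({ link_color: \"refund\" }, reset: false)\n end\nend\n```\n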
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.\n\n#### Combined Experiments\n\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments:\n```ruby\n ab_combined_test(:button_color_experiment)\n```\n\nFinish each combined test as normal:\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```ruby\n helpers Split::CombinedExperimentsHelper\n ```\n\n### DB failover solution\n\nBecause Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc),\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated\nfetches of fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional A/B test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)]\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Merge pull request #321 from rdunlop/master\n\nUpdate Warden Authentication Example to include :delete.\n @@ -317,7 +317,7 @@ end\n \n You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n ```ruby\n-match \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do\n+match \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! 
# authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #321 from rdunlop/master"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}}
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. 
If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. 
Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. 
These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
### Filtering

In most scenarios you don't want A/B testing enabled for web spiders, robots, or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, a list of IP addresses, or custom exclusion logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality; the default is proc { |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```

### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm, and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    my_second_experiment: {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => { "text" => "Have a fantastic day" },
        "b" => { "text" => "Don't get hit by a bus" }
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"

trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <%= meta['text'] %>
<% end %>
```

The keys used in metadata should be Strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object:

```ruby
metric = Split::Metric.new(name: "my_metric")
metric.save
```
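Because several experiments can share the same metric, one `ab_finished` call can convert all of them at once. A small sketch (the experiment and metric names here are made up):

```ruby
Split.configure do |config|
  config.experiments = {
    checkout_button_color: {
      alternatives: ["red", "green"],
      metric: :checkout
    },
    checkout_page_layout: {
      alternatives: ["one_column", "two_column"],
      metric: :checkout
    }
  }
end

# Later, in the action that handles a successful checkout, this completes
# both experiments for the current user:
ab_finished(:checkout)
```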
#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  button_color_experiment: {
    alternatives: ["blue", "green"],
    combined_experiments: ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra, include the CombinedExperimentsHelper:
  ```ruby
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, you can switch on the `db_failover` config option so that `ab_test` and `ab_finished` will not crash in case of a Redis failure; `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (a proc), for example to log these errors via `Rails.logger`.
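A minimal sketch combining both options (the log message wording is just an illustration):

```ruby
Split.configure do |config|
  config.db_failover = true # fall back to the first alternative instead of raising
  config.db_failover_on_db_error = -> (error) do
    Rails.logger.error("Split Redis failure: #{error.message}")
  end
end
```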
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means that if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume scenarios, repeated fetches of fairly static data can put load on Redis. Enabling caching will reduce this load:

```ruby
Split.configuration.cache = true
```

This currently caches:
 - `Split::ExperimentCatalog.find`
 - `Split::Experiment.start_time`
 - `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(experiment: experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum-participant alternatives (i.e. starting a new "block") the algorithm will choose a random alternative from those minimum-participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
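A custom algorithm only needs a `choose_alternative` class method that takes the experiment and returns one of its alternatives, mirroring the built-in algorithms. A minimal sketch under that assumption (check the classes under `lib/split/algorithms` before relying on the exact interface):

```ruby
module Split
  module Algorithms
    # Always picks the first alternative; useful only as a demonstration
    # of the shape a custom algorithm takes.
    class FirstAlternative
      def self.choose_alternative(experiment)
        experiment.alternatives.first
      end
    end
  end
end

Split.configure do |config|
  config.algorithm = Split::Algorithms::FirstAlternative
end
```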
## Extensions

 - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
 - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
 - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in Mongoid (still uses Redis).
 - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
 - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
 - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

## Sponsors

Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all at https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec

### A Note on Patches and Pull Requests

 * Fork the project.
 * Make your feature addition or bug fix.
 * Add tests for it. This is important so I don't break it in a future version unintentionally.
 * Add documentation if necessary.
 * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
 * Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
# authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #321 from rdunlop/master"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676167,"cells":{"id":{"kind":"string","value":"10071817"},"text":{"kind":"string","value":" dashboard_helpers_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/dashboard/helpers\"\n\ninclude Split::DashboardHelpers\n\ndescribe Split::DashboardHelpers do\n confidence_level(Complex(2e-18, -0.03)).should eql('No Change')\n end\n\n it \"should consider a z-score of 1.96 < z < 2.57 as 95% confident\" do\n confidence_level(2.12).should eql('95% confidence')\n end\n\n it \"should consider a z-score of -1.96 > z > -2.57 as 95% confident\" do\n confidence_level(-2.12).should eql('95% confidence')\n end\n end\nend\n\n it \"should consider a z-score of 1.96 <= z < 2.58 as 95% confident\" do\n expect(confidence_level(1.96)).to eq(\"95% confidence\")\n expect(confidence_level(2.00)).to eq(\"95% confidence\")\n end\n\n it \"should consider a z-score of z >= 2.58 as 99% confident\" do\n expect(confidence_level(2.58)).to eq(\"99% confidence\")\n expect(confidence_level(3.00)).to eq(\"99% confidence\")\n end\n\n describe \"#round\" do\n it \"can round number strings\" do\n expect(round(\"3.1415\")).to eq BigDecimal(\"3.14\")\n end\n\n it \"can round number strings for precsion\" do\n expect(round(\"3.1415\", 1)).to eq BigDecimal(\"3.1\")\n end\n\n it \"can handle invalid number strings\" do\n expect(round(\"N/A\")).to be_zero\n end\n end\n end\nend\n\n Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve.\n\nWe are not interested in negative z numbers. The null hypothesis is\nthat the control is not performing worse. 
We only have to be confident\nit's better to disprove the null hypothese, which means we only care\nabout the positive tail of the normal distribution.\n\n @@ -9,12 +9,9 @@ describe Split::DashboardHelpers do\n confidence_level(Complex(2e-18, -0.03)).should eql('No Change')\n end\n \n- it \"should consider a z-score of 1.96 < z < 2.57 as 95% confident\" do\n- confidence_level(2.12).should eql('95% confidence')\n+ it \"should consider a z-score of 1.645 < z < 1.96 as 95% confident\" do\n+ confidence_level(1.80).should eql('95% confidence')\n end\n \n- it \"should consider a z-score of -1.96 > z > -2.57 as 95% confident\" do\n- confidence_level(-2.12).should eql('95% confidence')\n- end\n end\n end\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve."},"deletion_count":{"kind":"number","value":5,"string":"5"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676168,"cells":{"id":{"kind":"string","value":"10071818"},"text":{"kind":"string","value":" dashboard_helpers_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/dashboard/helpers\"\n\ninclude Split::DashboardHelpers\n\ndescribe Split::DashboardHelpers do\n confidence_level(Complex(2e-18, -0.03)).should eql('No Change')\n end\n\n it \"should consider a z-score of 1.96 < z < 2.57 as 95% confident\" do\n confidence_level(2.12).should eql('95% confidence')\n end\n\n it \"should consider a z-score of -1.96 > z > -2.57 as 95% confident\" do\n confidence_level(-2.12).should eql('95% confidence')\n end\n end\nend\n\n it \"should consider a z-score of 1.96 <= z < 2.58 as 95% confident\" do\n expect(confidence_level(1.96)).to eq(\"95% confidence\")\n expect(confidence_level(2.00)).to eq(\"95% confidence\")\n end\n\n it \"should consider a z-score of z >= 2.58 as 99% confident\" do\n expect(confidence_level(2.58)).to eq(\"99% confidence\")\n expect(confidence_level(3.00)).to eq(\"99% confidence\")\n end\n\n describe \"#round\" do\n it \"can round number strings\" do\n expect(round(\"3.1415\")).to eq BigDecimal(\"3.14\")\n end\n\n it \"can round number strings for precsion\" do\n expect(round(\"3.1415\", 1)).to eq BigDecimal(\"3.1\")\n end\n\n it \"can handle invalid number strings\" do\n expect(round(\"N/A\")).to be_zero\n end\n end\n end\nend\n\n Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve.\n\nWe are not interested in negative z numbers. The null hypothesis is\nthat the control is not performing worse. 
We only have to be confident\nit's better to disprove the null hypothese, which means we only care\nabout the positive tail of the normal distribution.\n\n @@ -9,12 +9,9 @@ describe Split::DashboardHelpers do\n confidence_level(Complex(2e-18, -0.03)).should eql('No Change')\n end\n \n- it \"should consider a z-score of 1.96 < z < 2.57 as 95% confident\" do\n- confidence_level(2.12).should eql('95% confidence')\n+ it \"should consider a z-score of 1.645 < z < 1.96 as 95% confident\" do\n+ confidence_level(1.80).should eql('95% confidence')\n end\n \n- it \"should consider a z-score of -1.96 > z > -2.57 as 95% confident\" do\n- confidence_level(-2.12).should eql('95% confidence')\n- end\n end\n end\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve."},"deletion_count":{"kind":"number","value":5,"string":"5"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676169,"cells":{"id":{"kind":"string","value":"10071819"},"text":{"kind":"string","value":" dashboard_helpers_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/dashboard/helpers\"\n\ninclude Split::DashboardHelpers\n\ndescribe Split::DashboardHelpers do\n confidence_level(Complex(2e-18, -0.03)).should eql('No Change')\n end\n\n it \"should consider a z-score of 1.96 < z < 2.57 as 95% confident\" do\n confidence_level(2.12).should eql('95% confidence')\n end\n\n it \"should consider a z-score of -1.96 > z > -2.57 as 95% confident\" do\n confidence_level(-2.12).should eql('95% confidence')\n end\n end\nend\n\n it \"should consider a z-score of 1.96 <= z < 2.58 as 95% confident\" do\n expect(confidence_level(1.96)).to eq(\"95% confidence\")\n expect(confidence_level(2.00)).to eq(\"95% confidence\")\n end\n\n it \"should consider a z-score of z >= 2.58 as 99% confident\" do\n expect(confidence_level(2.58)).to eq(\"99% confidence\")\n expect(confidence_level(3.00)).to eq(\"99% confidence\")\n end\n\n describe \"#round\" do\n it \"can round number strings\" do\n expect(round(\"3.1415\")).to eq BigDecimal(\"3.14\")\n end\n\n it \"can round number strings for precsion\" do\n expect(round(\"3.1415\", 1)).to eq BigDecimal(\"3.1\")\n end\n\n it \"can handle invalid number strings\" do\n expect(round(\"N/A\")).to be_zero\n end\n end\n end\nend\n\n Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve.\n\nWe are not interested in negative z numbers. The null hypothesis is\nthat the control is not performing worse. 
{"rowIdx":10676170,"cells":{"id":{"kind":"string","value":"10071820"},"text":{"kind":"string","value":" goals_collection_spec.rb\n ADDFILE\n Introduce GoalsCollection and refactor\n\nTowards a more object-oriented approach, I introduced the GoalsCollection\nclass and refactored the code so that the overall complexity of\nExperiment#save is reduced.\n\n @@ -0,0 +1,80 @@\n+require 'spec_helper'\n+require 'split/goals_collection'\n+require 'time'\n+\n+describe Split::GoalsCollection do\n+ let(:experiment_name) { 'experiment_name' }\n+\n+ describe 'initialization' do\n+ let(:goals_collection) {\n+ Split::GoalsCollection.new('experiment_name', ['goal1', 'goal2'])\n+ }\n+\n+ it \"should have an experiment_name\" do\n+ expect(goals_collection.instance_variable_get(:@experiment_name)).\n+ to eq('experiment_name')\n+ end\n+\n+ it \"should have a list of goals\" do\n+ expect(goals_collection.instance_variable_get(:@goals)).\n+ to eq(['goal1', 'goal2'])\n+ end\n+ end\n+\n+ describe \"#validate!\" do\n+ it \"should't raise ArgumentError if @goals is nil?\" do\n+ goals_collection = Split::GoalsCollection.new('experiment_name')\n+ expect { goals_collection.validate! }.not_to raise_error(ArgumentError)\n+ end\n+\n+ it \"should raise ArgumentError if @goals is not an Array\" do\n+ goals_collection = Split::GoalsCollection.\n+ new('experiment_name', 'not an array')\n+ expect { goals_collection.validate! }.to raise_error(ArgumentError)\n+ end\n+\n+ it \"should't raise ArgumentError if @goals is an array\" do\n+ goals_collection = Split::GoalsCollection.\n+ new('experiment_name', ['an array'])\n+ expect { goals_collection.validate! }.not_to raise_error(ArgumentError)\n+ end\n+ end\n+\n+ describe \"#delete\" do\n+ let(:goals_key) { \"#{experiment_name}:goals\" }\n+\n+ it \"should delete goals from redis\" do\n+ goals_collection = Split::GoalsCollection.new(experiment_name, ['goal1'])\n+ goals_collection.save\n+\n+ goals_collection.delete\n+ expect(Split.redis.exists(goals_key)).to be false\n+ end\n+ end\n+\n+ describe \"#save\" do\n+ let(:goals_key) { \"#{experiment_name}:goals\" }\n+\n+ it \"should return false if @goals is nil\" do\n+ goals_collection = Split::GoalsCollection.\n+ new(experiment_name, nil)\n+\n+ expect(goals_collection.save).to be false\n+ end\n+\n+ it \"should save goals to redis if @goals is valid\" do\n+ goals = ['valid goal 1', 'valid goal 2']\n+ collection = Split::GoalsCollection.new(experiment_name, goals)\n+ collection.save\n+\n+ expect(Split.redis.lrange(goals_key, 0, -1)).to eq goals\n+ end\n+\n+ it \"should return @goals if @goals is valid\" do\n+ goals_collection = Split::GoalsCollection.\n+ new(experiment_name, ['valid goal'])\n+\n+ expect(goals_collection.save).to eq(['valid goal'])\n+ end\n+ end\n+end\n"},"addition_count":{"kind":"number","value":80,"string":"80"},"commit_subject":{"kind":"string","value":"Introduce GoalsCollection and refactor"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},
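The spec in this record pins down the collaborator's whole surface: the constructor stores the experiment name and goals, validate! rejects anything that is neither nil nor an Array, save persists goals to a Redis list under "<experiment_name>:goals" and returns them (or false when goals are nil), and delete removes that key. A minimal class satisfying those examples could look like the sketch below; it is inferred from the spec, not the actual lib/split/goals_collection.rb.

```ruby
# Sketch of the class under test, inferred from the spec above; the real
# Split::GoalsCollection may differ in detail.
class GoalsCollection
  def initialize(experiment_name, goals = nil)
    @experiment_name = experiment_name
    @goals = goals
  end

  def validate!
    unless @goals.nil? || @goals.is_a?(Array)
      raise ArgumentError, 'Goals must be an array'
    end
  end

  def save
    return false if @goals.nil?
    validate!
    Split.redis.del(goals_key) # assumed: clear any stale list before writing
    @goals.each { |goal| Split.redis.rpush(goals_key, goal) } # preserves order for lrange
    @goals
  end

  def delete
    Split.redis.del(goals_key)
  end

  private

  def goals_key
    "#{@experiment_name}:goals"
  end
end
```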
{"rowIdx":10676174,"cells":{"id":{"kind":"string","value":"10071824"},"text":{"kind":"string","value":" jquery.meow.js\n (function ($, window) {\n 'use strict';\n // Meow queue\n var default_meow_area,\n meows = {\n queue: {},\n add: function (meow) {\n this.queue[meow.timestamp] = meow;\n },\n get: function (timestamp) {\n return this.queue[timestamp];\n },\n remove: function (timestamp) {\n delete this.queue[timestamp];\n },\n size: function () {\n var timestamp,\n size = 0;\n for (timestamp in this.queue) {\n if (this.queue.hasOwnProperty(timestamp)) { size += 1; }\n }\n return size;\n }\n },\n // Meow constructor\n Meow = function (options) {\n var that = this;\n\n this.timestamp = new Date().getTime(); // used to identify this meow and timeout\n this.hovered = false; // whether mouse is over or not\n\n if (typeof default_meow_area === 'undefined'\n && typeof options.container === 'undefined') {\n default_meow_area = $(window.document.createElement('div'))\n .attr({'id': ((new Date()).getTime()), 'class': 'meows'});\n $('body').prepend(default_meow_area);\n }\n\n if (meows.size() <= 0) {\n if (typeof options.beforeCreateFirst === 'function') {\n options.beforeCreateFirst.call(that);\n }\n }\n\n if (typeof options.container === 'string') {\n this.container = $(options.container);\n } else {\n this.container = default_meow_area;\n }\n\n\n if (typeof options.title === 'string') {\n this.title = options.title;\n }\n\n if (typeof options.message === 'string') {\n this.message = options.message;\n } else if (options.message instanceof $) {\n if (options.message.is('input,textarea,select')) {\n this.message = options.message.val();\n } else {\n this.message = options.message.text();\n }\n\n if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') {\n this.title = options.message.attr('title');\n }\n }\n\n if (typeof options.icon === 'string') {\n this.icon = options.icon;\n }\n if (options.sticky) {\n this.duration = Infinity;\n } else {\n this.duration = options.duration || 5000;\n }\n\n // Call callback if it's defined (this = meow object)\n if (typeof options.beforeCreate === 'function') {\n options.beforeCreate.call(that);\n }\n\n // Add the meow to the meow area\n this.container.append($(window.document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp.toString())\n .addClass('meow')\n .html($(window.document.createElement('div')).addClass('inner').html(this.message))\n .hide()\n .fadeIn(400));\n\n this.manifest = $('#meow-' + this.timestamp.toString());\n\n // Add title if it's defined\n if (typeof this.title === 'string') {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('h1')).text(this.title)\n );\n }\n\n // Add icon if it's defined\n if (typeof that.icon === 'string') {\n
this.manifest.find('.inner').prepend(\n $(window.document.createElement('div')).addClass('icon').html(\n $(window.document.createElement('img')).attr('src', this.icon)\n )\n );\n }\n\n // Add close button if the meow isn't uncloseable\n // TODO: this close button needs to be much prettier\n if (options.closeable !== false) {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('a'))\n .addClass('close')\n .html('&times;')\n .attr('href', '#close-meow-' + that.timestamp)\n .click(function (e) {\n e.preventDefault();\n that.destroy();\n })\n );\n }\n\n this.manifest.bind('mouseenter mouseleave', function (event) {\n if (event.type === 'mouseleave') {\n that.hovered = false;\n that.manifest.removeClass('hover');\n // Destroy the mow on mouseleave if it's timed out\n if (that.timestamp + that.duration <= new Date().getTime()) {\n that.destroy();\n }\n } else {\n that.hovered = true;\n that.manifest.addClass('hover');\n }\n });\n\n // Add a timeout if the duration isn't Infinity\n if (this.duration !== Infinity) {\n this.timeout = window.setTimeout(function () {\n // Make sure this meow hasn't already been destroyed\n if (typeof meows.get(that.timestamp) !== 'undefined') {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.onTimeout === 'function') {\n options.onTimeout.call(that.manifest);\n }\n // Don't destroy if user is hovering over meow\n that.hovered = false;\n that.manifest.removeClass('hover');\n // Destroy the mow on mouseleave if it's timed out\n if (that.timestamp + that.duration <= new Date.getTime()) {\n that.destroy();\n }\n } else {\n this.destroy = function () {\n if (that.destroyed !== true) {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.beforeDestroy === 'function') {\n options.beforeDestroy.call(that.manifest);\n }\n that.manifest.find('.inner').fadeTo(400, 0, function () {\n that.manifest.slideUp(function () {\n that.manifest.remove();\n that.destroyed = true;\n meows.remove(that.timestamp);\n if (typeof options.afterDestroy === 'function') {\n options.afterDestroy.call(null);\n }\n if (meows.size() <= 0) {\n if (default_meow_area instanceof $) {\n default_meow_area.remove();\n default_meow_area = undefined;\n }\n if (typeof options.afterDestroyLast === 'function') {\n options.afterDestroyLast.call(null);\n }\n }\n });\n });\n }\n };\n };\n\n $.fn.meow = function (args) {\n var meow = new Meow(args);\n meows.add(meow);\n return meow;\n };\n $.meow = $.fn.meow;\n}(jQuery, window));\n\n fixed line 152. You were right Chris. Forgot some parens\n\n @@ -149,7 +149,7 @@\n that.hovered = false;\n that.manifest.removeClass('hover');\n // Destroy the mow on mouseleave if it's timed out\n- if (that.timestamp + that.duration <= new Date.getTime()) {\n+ if (that.timestamp + that.duration <= new Date().getTime()) {\n that.destroy();\n }\n } else {\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"fixed line 152. You were right Chris. 
Forgot some parens"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"meow"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"zacstewart/Meow"}}},{"rowIdx":10676175,"cells":{"id":{"kind":"string","value":"10071825"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n\n s.add_development_dependency 'bundler', '~> 1.14'\n s.add_development_dependency 'simplecov', '~> 0.15'\n s.add_development_dependency 'rack-test', '~> 0.6'\n s.add_development_dependency 'rake', '~> 12'\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Merge pull request #545 from splitrb/fix-travis-build\n\nUpdate travis config and add Ruby 2.6.0\n @@ -34,7 +34,7 @@ Gem::Specification.new do |s|\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n \n- s.add_development_dependency 'bundler', '~> 1.14'\n+ s.add_development_dependency 'bundler', '>= 1.17'\n s.add_development_dependency 'simplecov', '~> 0.15'\n s.add_development_dependency 'rack-test', '~> 0.6'\n s.add_development_dependency 'rake', '~> 12'\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #545 from splitrb/fix-travis-build"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676176,"cells":{"id":{"kind":"string","value":"10071826"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => 
\"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n\n s.add_development_dependency 'bundler', '~> 1.14'\n s.add_development_dependency 'simplecov', '~> 0.15'\n s.add_development_dependency 'rack-test', '~> 0.6'\n s.add_development_dependency 'rake', '~> 12'\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Merge pull request #545 from splitrb/fix-travis-build\n\nUpdate travis config and add Ruby 2.6.0\n @@ -34,7 +34,7 @@ Gem::Specification.new do |s|\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n \n- s.add_development_dependency 'bundler', '~> 1.14'\n+ s.add_development_dependency 'bundler', '>= 1.17'\n s.add_development_dependency 'simplecov', '~> 0.15'\n s.add_development_dependency 'rack-test', '~> 0.6'\n s.add_development_dependency 'rake', '~> 12'\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #545 from splitrb/fix-travis-build"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676177,"cells":{"id":{"kind":"string","value":"10071827"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n\n s.add_development_dependency 'bundler', '~> 1.14'\n s.add_development_dependency 'simplecov', '~> 0.15'\n 
{"rowIdx":10676178,"cells":{"id":{"kind":"string","value":"10071828"},"text":{"kind":"string","value":" parser.ts
import { strictEqual as equal, throws } from 'assert';
import parser from '../src/parser';
import tokenizer from '../src/tokenizer';
import stringify from './assets/stringify';
import { ParserOptions } from '../src';

const parse = (abbr: string, options?: ParserOptions) => parser(tokenizer(abbr), options);
const str = (abbr: string, options?: ParserOptions) => stringify(parse(abbr, options));

// NOTE: the expected values in this suite are raw HTML strings; they did not
// survive extraction and are elided below as '…'.
describe('Parser', () => {
    it('basic abbreviations', () => {
        equal(str('p'), '…');
        equal(str('p{text}'), '…');
        equal(str('div.width1\\/2'), '…');
        equal(str('#sample*3'), '…');

        equal(str('a>b'), '…');
        equal(str('a+b'), '…');
        equal(str('a+b>c+d'), '…');
        equal(str('a>b>c+e'), '…');
        equal(str('a>b>c^d'), '…');
        equal(str('a>b>c^^^^d'), '…');
        equal(str('a:b>c'), '…');

        equal(str('ul.nav[title="foo"]'), '…');
    });

    it('groups', () => {
        equal(str('a>(b>c)+d'), '…');
        equal(str('(a>b)+(c>d)'), '…');
        equal(str('a>((b>c)(d>e))f'), '…');
        equal(str('a>((((b>c))))+d'), '…');
        equal(str('a>(((b>c))*4)+d'), '…');
        equal(str('(div>dl>(dt+dd)*2)'), '…');
        equal(str('a>()'), '…');
    });

    it('attributes', () => {
        equal(str('[].foo'), '…');
        equal(str('[a]'), '…');
        equal(str('[a b c [d]]'), '…');
        equal(str('[a=b]'), '…');
        equal(str('[a=b c= d=e]'), '…');
        equal(str('[a=b.c d=тест]'), '…');
        equal(str('[[a]=b (c)=d]'), '…');

        // Quoted attribute values
        equal(str('[a="b"]'), '…');
        equal(str('[a="b" c=\'d\' e=""]'), '…');
        equal(str('[[a]="b" (c)=\'d\']'), '…');

        // Mixed quoted
        equal(str('[a="foo\'bar" b=\'foo"bar\' c="foo\\"bar"]'), '…');

        // Boolean & implied attributes
        equal(str('[a. b.]'), '…');
        equal(str('[!a !b.]'), '…');

        // Default values
        equal(str('["a.b"]'), '…');
        equal(str('[\'a.b\' "c=d" foo=bar "./test.html"]'), '…');

        // Expressions as values
        equal(str('[foo={1 + 2} bar={fn(1, "foo")}]'), '…');

        // Tabstops as unquoted values
        equal(str('[name=${1} value=${2:test}]'), '…');
    });

    it('malformed attributes', () => {
        equal(str('[a'), '…');
        equal(str('[a={foo]'), '…');
        throws(() => str('[a="foo]'), /Unclosed quote/);
        throws(() => str('[a=b=c]'), /Unexpected "Operator" token/);
    });

    it('elements', () => {
        equal(str('div'), '…');
        equal(str('div.foo'), '…');
        equal(str('div#foo'), '…');
        equal(str('div#foo.bar'), '…');
        equal(str('div.foo#bar'), '…');
        equal(str('div.foo.bar.baz'), '…');
        equal(str('.foo'), '…');
        equal(str('#foo'), '…');
        equal(str('.foo_bar'), '…');
        equal(str('#foo.bar'), '…');

        // Attribute shorthands
        equal(str('.'), '…');
        equal(str('#'), '…');
        equal(str('#.'), '…');
        equal(str('.#.'), '…');
        equal(str('.a..'), '…');

        // Elements with attributes
        equal(str('div[foo=bar]'), '…');
        equal(str('div.a[b=c]'), '…');
        equal(str('div[b=c].a'), '…');
        equal(str('div[a=b][c="d"]'), '…');
        equal(str('[b=c]'), '…');
        equal(str('.a[b=c]'), '…');
        equal(str('[b=c].a#d'), '…');
        equal(str('[b=c]a'), '…', 'Do not consume node name after attribute set');

        // Element with text
        equal(str('div{foo}'), '…');
        equal(str('{foo}'), 'foo');

        // Mixed
        equal(str('div.foo{bar}'), '…');
        equal(str('.foo{bar}#baz'), '…');
        equal(str('.foo[b=c]{bar}'), '…');

        // Repeated element
        equal(str('div.foo*3'), '…');
        equal(str('.foo*'), '…');
        equal(str('.a[b=c]*10'), '…');
        equal(str('.a*10[b=c]'), '…');
        equal(str('.a*10{text}'), '…');

        // Self-closing element
        equal(str('div/'), '…');
        equal(str('.foo/'), '…');
        equal(str('.foo[bar]/'), '…');
        equal(str('.foo/*3'), '…');
        equal(str('.foo*3/'), '…');

        throws(() => parse('/'), /Unexpected character/);
    });

    it('JSX', () => {
        const opt = { jsx: true };
        equal(str('foo.bar', opt), '…');
        equal(str('Foo.bar', opt), '…');
        equal(str('Foo.Bar', opt), '…');
        equal(str('Foo.', opt), '…');
        equal(str('Foo.Bar.baz', opt), '…');
        equal(str('Foo.Bar.Baz', opt), '…');

        equal(str('.{theme.class}', opt), '…');
        equal(str('#{id}', opt), '…');
        equal(str('Foo.{theme.class}', opt), '…');
    });

    it('errors', () => {
        throws(() => parse('str?'), /Unexpected character at 4/);
        throws(() => parse('foo,bar'), /Unexpected character at 4/);
        equal(str('foo\\,bar'), '…');
        equal(str('foo\\'), '…');
    });

    it('missing braces', () => {
        // Do not throw errors on missing closing braces
        equal(str('div[title="test"'), '…');
        equal(str('div(foo'), '…');
        equal(str('div{foo'), '…');
    });
});

 Ensure dots in attributes are supported

Fixes #562

 @@ -14,6 +14,9 @@ describe('Parser', () => {
     equal(str('div.width1\\/2'), '…');
     equal(str('#sample*3'), '…');
 
+    // https://github.com/emmetio/emmet/issues/562
+    equal(str('li[repeat.for="todo of todoList"]'), '…', 'Dots in attribute names');
+
     equal(str('a>b'), '…');
     equal(str('a+b'), '…');
     equal(str('a+b>c+d'), '…');
"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Ensure dots in attributes are supported"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".ts"},"lang":{"kind":"string","value":"ts"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"emmetio/emmet"}}},
{"rowIdx":10676180,"cells":{"id":{"kind":"string","value":"10071830"},"text":{"kind":"string","value":" transducers.js\n var transducers =\n/******/ (function(modules) { // webpackBootstrap\n/******/ \t// The module cache\n/******/ \tvar installedModules = {};\n/******/\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(installedModules[moduleId])\n/******/ \t\t\treturn installedModules[moduleId].exports;\n/******/\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = installedModules[moduleId] = {\n/******/ \t\t\texports: {},\n/******/ \t\t\tid: moduleId,\n/******/ \t\t\tloaded: false\n/******/ \t\t};\n/******/\n/******/ \t\t// Execute the module function\n/******/ \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n/******/\n/******/ \t\t// Flag the module as loaded\n/******/ \t\tmodule.loaded = true;\n/******/\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/\n/******/\n/******/ \t// expose the modules object (__webpack_modules__)\n/******/ \t__webpack_require__.m = modules;\n/******/\n/******/ \t// expose the module cache\n/******/ \t__webpack_require__.c = installedModules;\n/******/\n/******/ \t// __webpack_public_path__\n/******/ \t__webpack_require__.p = \"\";\n/******/\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(0);\n/******/ })\n/************************************************************************/\n/******/ ([\n/* 0 */\n/***/ function(module, exports, __webpack_require__) {\n\n\t\n\t// basic protocol helpers\n\n\tvar symbolExists = typeof Symbol !== 'undefined';\n\n\tvar protocols = {\n\t iterator: symbolExists ? Symbol.iterator : '@@iterator',\n\t transformer: symbolExists ? 
Symbol('transformer') : '@@transformer'\n\t};\n\n\tfunction throwProtocolError(name, coll) {\n\t throw new Error(\"don't know how to \" + name + \" collection: \" +\n\t coll);\n\t}\n\n\tfunction fulfillsProtocol(obj, name) {\n\t if(name === 'iterator') {\n\t // Accept ill-formed iterators that don't conform to the\n\t // protocol by accepting just next()\n\t return obj[protocols.iterator] || obj.next;\n\t }\n\n\t return obj[protocols[name]];\n\t}\n\n\tfunction getProtocolProperty(obj, name) {\n\t return obj[protocols[name]];\n\t}\n\n\tfunction iterator(coll) {\n\t var iter = getProtocolProperty(coll, 'iterator');\n\t if(iter) {\n\t return iter.call(coll);\n\t }\n\t else if(coll.next) {\n\t // Basic duck typing to accept an ill-formed iterator that doesn't\n\t // conform to the iterator protocol (all iterators should have the\n\t // @@iterator method and return themselves, but some engines don't\n\t // have that on generators like older v8)\n\t return coll;\n\t }\n\t else if(isArray(coll)) {\n\t return new ArrayIterator(coll);\n\t }\n\t else if(isObject(coll)) {\n\t return new ObjectIterator(coll);\n\t }\n\t}\n\n\tfunction ArrayIterator(arr) {\n\t this.arr = arr;\n\t this.index = 0;\n\t}\n\n\tArrayIterator.prototype.next = function() {\n\t if(this.index < this.arr.length) {\n\t return {\n\t value: this.arr[this.index++],\n\t done: false\n\t };\n\t }\n\t return {\n\t done: true\n\t }\n\t};\n\n\tfunction ObjectIterator(obj) {\n\t this.obj = obj;\n\t this.keys = Object.keys(obj);\n\t this.index = 0;\n\t}\n\n\tObjectIterator.prototype.next = function() {\n\t if(this.index < this.keys.length) {\n\t var k = this.keys[this.index++];\n\t return {\n\t value: [k, this.obj[k]],\n\t done: false\n\t };\n\t }\n\t return {\n\t done: true\n\t }\n\t};\n\n\t// helpers\n\n\tvar toString = Object.prototype.toString;\n\tvar isArray = typeof Array.isArray === 'function' ? 
Array.isArray : function(obj) {\n\t return toString.call(obj) == '[object Array]';\n\t};\n\n\tfunction isFunction(x) {\n\t return typeof x === 'function';\n\t}\n\n\tfunction isObject(x) {\n\t return x instanceof Object &&\n\t Object.getPrototypeOf(x) === Object.getPrototypeOf({});\n\t}\n\n\tfunction isNumber(x) {\n\t return typeof x === 'number';\n\t}\n\n\tfunction Reduced(val) {\n\t this.val = val;\n\t}\n\n\tfunction reduce(coll, xform, init) {\n\t if(isArray(coll)) {\n\t var result = init;\n\t var index = -1;\n\t var len = coll.length;\n\t while(++index < len) {\n\t result = xform.step(result, coll[index]);\n\t if(result instanceof Reduced) {\n\t return result.val;\n\t }\n\t }\n\t return xform.result(result);\n\t }\n\t else if(isObject(coll) || fulfillsProtocol(coll, 'iterator')) {\n\t var result = init;\n\t var iter = iterator(coll);\n\t var val = iter.next();\n\t while(!val.done) {\n\t result = xform.step(result, val.value);\n\t if(result instanceof Reduced) {\n\t return result.val;\n\t }\n\t val = iter.next();\n\t }\n\t return xform.result(result);\n\t }\n\t throwProtocolError('iterate', coll);\n\t}\n\n\tfunction transduce(coll, xform, reducer, init) {\n\t xform = xform(reducer);\n\t if(init === undefined) {\n\t init = xform.init();\n\t }\n\t return reduce(coll, xform, init);\n\t}\n\n\tfunction compose() {\n\t var funcs = Array.prototype.slice.call(arguments);\n\t return function(r) {\n\t var value = r;\n\t for(var i=funcs.length-1; i>=0; i--) {\n\t value = funcs[i](value);\n\t }\n\t return value;\n\t }\n\t}\n\n\t// transformations\n\n\tfunction transformer(f) {\n\t return {\n\t init: function() {\n\t throw new Error('init value unavailable');\n\t },\n\t result: function(v) {\n\t return v;\n\t },\n\t step: f\n\t };\n\t}\n\n\tfunction bound(f, ctx, count) {\n\t count = count != null ? count : 1;\n\n\t if(!ctx) {\n\t return f;\n\t }\n\t else {\n\t switch(count) {\n\t case 1:\n\t return function(x) {\n\t return f.call(ctx, x);\n\t }\n\t case 2:\n\t return function(x, y) {\n\t return f.call(ctx, x, y);\n\t }\n\t default:\n\t return f.bind(ctx);\n\t }\n\t }\n\t}\n\n\tfunction arrayMap(arr, f, ctx) {\n\t var index = -1;\n\t var length = arr.length;\n\t var result = Array(length);\n\t f = bound(f, ctx, 2);\n\n\t while (++index < length) {\n\t result[index] = f(arr[index], index);\n\t }\n\t return result;\n\t}\n\n\tfunction arrayFilter(arr, f, ctx) {\n\t var len = arr.length;\n\t var result = [];\n\t f = bound(f, ctx, 2);\n\n\t for(var i=0; i update browser file\n\n @@ -150,8 +150,42 @@ var transducers =\n \t return typeof x === 'number';\n \t}\n \n-\tfunction Reduced(val) {\n-\t this.val = val;\n+\tfunction Reduced(value) {\n+\t this.__transducers_reduced__ = true;\n+\t this.value = value;\n+\t}\n+\n+\tfunction isReduced(x) {\n+\t return (x instanceof Reduced) || (x && x.__transducers_reduced__);\n+\t}\n+\n+\tfunction deref(x) {\n+\t return x.value;\n+\t}\n+\n+\t/**\n+\t * This is for transforms that may call their nested transforms before\n+\t * Reduced-wrapping the result (e.g. 
\"take\"), to avoid nested Reduced.\n+\t */\n+\tfunction ensureReduced(val) {\n+\t if(isReduced(val)) {\n+\t return val;\n+\t } else {\n+\t return new Reduced(val);\n+\t }\n+\t}\n+\n+\t/**\n+\t * This is for tranforms that call their nested transforms when\n+\t * performing completion (like \"partition\"), to avoid signaling\n+\t * termination after already completing.\n+\t */\n+\tfunction ensureUnreduced(v) {\n+\t if(isReduced(v)) {\n+\t return deref(v);\n+\t } else {\n+\t return v;\n+\t }\n \t}\n \n \tfunction reduce(coll, xform, init) {\n@@ -161,8 +195,9 @@ var transducers =\n \t var len = coll.length;\n \t while(++index < len) {\n \t result = xform.step(result, coll[index]);\n-\t if(result instanceof Reduced) {\n-\t return result.val;\n+\t if(isReduced(result)) {\n+\t result = deref(result);\n+\t break;\n \t }\n \t }\n \t return xform.result(result);\n@@ -173,8 +208,9 @@ var transducers =\n \t var val = iter.next();\n \t while(!val.done) {\n \t result = xform.step(result, val.value);\n-\t if(result instanceof Reduced) {\n-\t return result.val;\n+\t if(isReduced(result)) {\n+\t result = deref(result);\n+\t break;\n \t }\n \t val = iter.next();\n \t }\n@@ -421,10 +457,16 @@ var transducers =\n \t};\n \n \tTake.prototype.step = function(result, input) {\n-\t if(this.i++ < this.n) {\n-\t return this.xform.step(result, input);\n+\t if (this.i < this.n) {\n+\t result = this.xform.step(result, input);\n+\t if(this.i + 1 >= this.n) {\n+\t // Finish reducing on the same step as the final value. TODO:\n+\t // double-check that this doesn't break any semantics\n+\t result = ensureReduced(result);\n+\t }\n \t }\n-\t return new Reduced(result);\n+\t this.i++;\n+\t return result;\n \t};\n \n \tfunction take(coll, n) {\n@@ -511,6 +553,98 @@ var transducers =\n \t }\n \t}\n \n+\tfunction Partition(n, xform) {\n+\t this.n = n;\n+\t this.i = 0;\n+\t this.xform = xform;\n+\t this.part = new Array(n);\n+\t}\n+\n+\tPartition.prototype.init = function() {\n+\t return this.xform.init();\n+\t};\n+\n+\tPartition.prototype.result = function(v) {\n+\t if (this.i > 0) {\n+\t return ensureUnreduced(this.xform.step(v, this.part.slice(0, this.i)));\n+\t }\n+\t return this.xform.result(v);\n+\t};\n+\n+\tPartition.prototype.step = function(result, input) {\n+\t this.part[this.i] = input;\n+\t this.i += 1;\n+\t if (this.i === this.n) {\n+\t var out = this.part.slice(0, this.n);\n+\t this.part = new Array(this.n);\n+\t this.i = 0;\n+\t return this.xform.step(result, out);\n+\t }\n+\t return result;\n+\t};\n+\n+\tfunction partition(coll, n) {\n+\t if (isNumber(coll)) {\n+\t n = coll; coll = null;\n+\t }\n+\n+\t if (coll) {\n+\t return seq(coll, partition(n));\n+\t }\n+\n+\t return function(xform) {\n+\t return new Partition(n, xform);\n+\t };\n+\t}\n+\n+\tvar NOTHING = {};\n+\n+\tfunction PartitionBy(f, xform) {\n+\t // TODO: take an \"opts\" object that allows the user to specify\n+\t // equality\n+\t this.f = f;\n+\t this.xform = xform;\n+\t this.part = [];\n+\t this.last = NOTHING;\n+\t}\n+\n+\tPartitionBy.prototype.init = function() {\n+\t return this.xform.init();\n+\t};\n+\n+\tPartitionBy.prototype.result = function(v) {\n+\t var l = this.part.length;\n+\t if (l > 0) {\n+\t return ensureUnreduced(this.xform.step(v, this.part.slice(0, l)));\n+\t }\n+\t return this.xform.result(v);\n+\t};\n+\n+\tPartitionBy.prototype.step = function(result, input) {\n+\t var current = this.f(input);\n+\t if (current === this.last || this.last === NOTHING) {\n+\t this.part.push(input);\n+\t } else {\n+\t result = 
this.xform.step(result, this.part);\n+\t this.part = [input];\n+\t }\n+\t this.last = current;\n+\t return result;\n+\t};\n+\n+\tfunction partitionBy(coll, f, ctx) {\n+\t if (isFunction(coll)) { ctx = f; f = coll; coll = null; }\n+\t f = bound(f, ctx);\n+\n+\t if (coll) {\n+\t return seq(coll, partitionBy(f));\n+\t }\n+\n+\t return function(xform) {\n+\t return new PartitionBy(f, xform);\n+\t };\n+\t}\n+\n \t// pure transducers (cannot take collections)\n \n \tfunction Cat(xform) {\n@@ -536,7 +670,7 @@ var transducers =\n \t },\n \t step: function(result, input) {\n \t var val = xform.step(result, input);\n-\t return (val instanceof Reduced) ? new Reduced(val) : val;\n+\t return isReduced(val) ? deref(val) : val;\n \t }\n \t }\n \n@@ -666,7 +800,7 @@ var transducers =\n \n \tvar stepper = {\n \t result: function(v) {\n-\t return (v instanceof Reduced) ? v.val : v;\n+\t return isReduced(v) ? deref(v) : v;\n \t },\n \t step: function(lt, x) {\n \t lt.items.push(x);\n@@ -683,7 +817,7 @@ var transducers =\n \t var len = lt.items.length;\n \t while(lt.items.length === len) {\n \t var n = this.iter.next();\n-\t if(n.done || n.value instanceof Reduced) {\n+\t if(n.done || isReduced(n.value)) {\n \t // finalize\n \t this.xform.result(this);\n \t break;\n@@ -760,6 +894,8 @@ var transducers =\n \t takeWhile: takeWhile,\n \t drop: drop,\n \t dropWhile: dropWhile,\n+\t partition: partition,\n+\t partitionBy: partitionBy,\n \t range: range,\n \n \t protocols: protocols,\n"},"addition_count":{"kind":"number","value":148,"string":"148"},"commit_subject":{"kind":"string","value":"update browser file"},"deletion_count":{"kind":"number","value":12,"string":"12"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"js"},"license":{"kind":"string","value":"bsd-2-clause"},"repo_name":{"kind":"string","value":"jlongster/transducers.js"}}},
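The heart of this diff is the Reduced wrapper protocol: a step function wraps its accumulator in Reduced to signal early termination, isReduced and deref detect and unwrap it, ensureReduced guards against double-wrapping (as in "take"), and ensureUnreduced avoids signalling termination while a transform is completing (as in "partition"). The same idea, translated to Ruby purely for illustration, since transducers.js itself is JavaScript and this is not its API:

```ruby
# Ruby rendering of the Reduced early-termination protocol from the diff
# above; illustrative only.
class Reduced
  attr_reader :value
  def initialize(value)
    @value = value
  end
end

def reduced?(x)
  x.is_a?(Reduced)
end

def ensure_reduced(x)
  reduced?(x) ? x : Reduced.new(x) # avoid wrapping a Reduced in another Reduced
end

def reduce_with_early_exit(coll, init)
  coll.reduce(init) do |acc, item|
    acc = yield(acc, item)
    break acc.value if reduced?(acc) # unwrap and stop the fold, like the patched loop
    acc
  end
end

# `take`-style usage: mark the accumulator Reduced on the step that
# produces the final value.
taken = reduce_with_early_exit(1..Float::INFINITY, []) do |acc, x|
  acc << x
  acc.size >= 3 ? ensure_reduced(acc) : acc
end
taken # => [1, 2, 3]; the infinite range is never fully traversed
```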
{"rowIdx":10676181,"cells":{"id":{"kind":"string","value":"10071831"},"text":{"kind":"string","value":" dashboard.rb\n # frozen_string_literal: true\n\nrequire \"sinatra/base\"\nrequire \"split\"\nrequire \"bigdecimal\"\nrequire \"split/dashboard/helpers\"\nrequire \"split/dashboard/pagination_helpers\"\n\nmodule Split\n class Dashboard < Sinatra::Base\n dir = File.dirname(File.expand_path(__FILE__))\n\n set :views, \"#{dir}/dashboard/views\"\n set :public_folder, \"#{dir}/dashboard/public\"\n set :static, true\n set :method_override, true\n\n helpers Split::DashboardHelpers\n helpers Split::DashboardPaginationHelpers\n\n get \"/\" do\n # Display experiments without a winner at the top of the dashboard\n @experiments = Split::ExperimentCatalog.all_active_first\n @unintialized_experiments = Split.configuration.experiments.keys - @experiments.map(&:name)\n\n @metrics = Split::Metric.all\n\n # Display Rails Environment mode (or Rack version if not using Rails)\n if Object.const_defined?(\"Rails\")\n @current_env = Rails.env.titlecase\n else\n @current_env = \"Rack: #{Rack.version}\"\n end\n erb :index\n end\n\n post \"/initialize_experiment\" do\n Split::ExperimentCatalog.find_or_create(params[:experiment]) unless params[:experiment].nil? || params[:experiment].empty?\n redirect url(\"/\")\n end\n\n post \"/force_alternative\" do\n experiment = Split::ExperimentCatalog.find(params[:experiment])\n alternative = Split::Alternative.new(params[:alternative], experiment.name)\n\n cookies = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n cookies[experiment.name] = alternative.name\n response.set_cookie(\"split_override\", { value: cookies.to_json, path: \"/\" })\n\n redirect url(\"/\")\n end\n\n post \"/experiment\" do\n @experiment = Split::ExperimentCatalog.find(params[:experiment])\n @alternative = Split::Alternative.new(params[:alternative], params[:experiment])\n @experiment.winner = @alternative.name\n redirect url(\"/\")\n end\n\n post \"/start\" do\n @experiment = Split::ExperimentCatalog.find(params[:experiment])\n @experiment.start\n redirect url(\"/\")\n end\n\n post \"/reset\" do\n @experiment = Split::ExperimentCatalog.find(params[:experiment])\n @experiment.reset\n redirect url(\"/\")\n end\n\n post \"/reopen\" do\n @experiment = Split::ExperimentCatalog.find(params[:experiment])\n @experiment.reset_winner\n redirect url(\"/\")\n end\n\n post \"/update_cohorting\" do\n @experiment = Split::ExperimentCatalog.find(params[:experiment])\n case params[:cohorting_action].downcase\n when \"enable\"\n @experiment.enable_cohorting\n when \"disable\"\n @experiment.disable_cohorting\n end\n redirect url(\"/\")\n end\n\n delete \"/experiment\" do\n @experiment = Split::ExperimentCatalog.find(params[:experiment])\n @experiment.delete\n redirect url(\"/\")\n end\n end\nend\n\n Handle when Rails is partially loaded as a Gem\n\n...and not an application\n\n```\nNoMethodError: undefined method `env' for Rails:Module\n /bundle/ruby/3.0.0/gems/split-3.4.1/lib/split/dashboard.rb:28:in `block in <class:Dashboard>'\n```\n\n @@ -26,7 +26,7 @@ module Split\n @metrics = Split::Metric.all\n \n # Display Rails Environment mode (or Rack version if not using Rails)\n- if Object.const_defined?(\"Rails\")\n+ if Object.const_defined?(\"Rails\") && Rails.respond_to?(:env)\n @current_env = Rails.env.titlecase\n else\n @current_env = \"Rack: #{Rack.version}\"\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Handle when Rails is partially loaded as a Gem"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},
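The guard added here covers a subtle loading state: the Rails constant can be defined because the gem was pulled in transitively, while the framework itself, including Rails.env, was never set up. A duck-typed respond_to?(:env) check distinguishes the two. A minimal, self-contained reproduction of the failure mode:

```ruby
# Reproduces the NoMethodError this commit guards against.
module Rails; end # the constant exists, but no Rails application is loaded

Object.const_defined?("Rails") # => true,  so the old check alone passes
Rails.respond_to?(:env)        # => false, so the new check catches it

# Without the guard, the dashboard would raise at this point:
#   NoMethodError: undefined method `env' for Rails:Module
# Rails.env.titlecase
```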
{"rowIdx":10676184,"cells":{"id":{"kind":"string","value":"10071834"},"text":{"kind":"string","value":" snippets.ts\n import { ok } from 'assert';\nimport markup from '@emmetio/abbreviation';\nimport html from '../snippets/html.json';\nimport xsl from '../snippets/xsl.json';\n\ndescribe('Snippets', () => {\n it('HTML', () => {\n Object.keys(html).forEach(k => ok(markup(html[k]), k));\n });\n\n it('XSL', () => {\n Object.keys(xsl).forEach(k => ok(markup(xsl[k]), k));\n });\n});\n\n Fixed issue with invalid nested snippets resolve\n\n @@ 
-1,11 +1,13 @@\n-import { ok } from 'assert';\n+import { ok, strictEqual as equal } from 'assert';\n import markup from '@emmetio/abbreviation';\n+import expand from '../src';\n import html from '../snippets/html.json';\n import xsl from '../snippets/xsl.json';\n \n describe('Snippets', () => {\n it('HTML', () => {\n Object.keys(html).forEach(k => ok(markup(html[k]), k));\n+ equal(expand('fset>input:c'), '
    ');\n });\n \n it('XSL', () => {\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"FIxed issue with invalid nested snippets resolve"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".ts"},"lang":{"kind":"string","value":"ts"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"emmetio/emmet"}}},{"rowIdx":10676185,"cells":{"id":{"kind":"string","value":"10071835"},"text":{"kind":"string","value":" snippets.ts\n import { ok } from 'assert';\nimport markup from '@emmetio/abbreviation';\nimport html from '../snippets/html.json';\nimport xsl from '../snippets/xsl.json';\n\ndescribe('Snippets', () => {\n it('HTML', () => {\n Object.keys(html).forEach(k => ok(markup(html[k]), k));\n });\n\n it('XSL', () => {\n Object.keys(xsl).forEach(k => ok(markup(xsl[k]), k));\n });\n});\n\n FIxed issue with invalid nested snippets resolve\n\n @@ -1,11 +1,13 @@\n-import { ok } from 'assert';\n+import { ok, strictEqual as equal } from 'assert';\n import markup from '@emmetio/abbreviation';\n+import expand from '../src';\n import html from '../snippets/html.json';\n import xsl from '../snippets/xsl.json';\n \n describe('Snippets', () => {\n it('HTML', () => {\n Object.keys(html).forEach(k => ok(markup(html[k]), k));\n+ equal(expand('fset>input:c'), '
    ');\n });\n \n it('XSL', () => {\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"FIxed issue with invalid nested snippets resolve"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".ts"},"lang":{"kind":"string","value":"ts"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"emmetio/emmet"}}},{"rowIdx":10676186,"cells":{"id":{"kind":"string","value":"10071836"},"text":{"kind":"string","value":" experiment.rb\n # frozen_string_literal: true\n\nmodule Split\n class Experiment\n attr_accessor :name\n attr_accessor :goals\n attr_accessor :alternative_probabilities\n attr_accessor :metadata\n\n attr_reader :alternatives\n attr_reader :resettable\n\n DEFAULT_OPTIONS = {\n resettable: true\n }\n\n def self.find(name)\n Split.cache(:experiments, name) do\n return unless Split.redis.exists?(name)\n Experiment.new(name).tap { |exp| exp.load_from_redis }\n end\n end\n\n def initialize(name, options = {})\n options = DEFAULT_OPTIONS.merge(options)\n\n @name = name.to_s\n\n extract_alternatives_from_options(options)\n end\n\n def self.finished_key(key)\n \"#{key}:finished\"\n end\n\n def set_alternatives_and_options(options)\n options_with_defaults = DEFAULT_OPTIONS.merge(\n options.reject { |k, v| v.nil? }\n )\n\n self.alternatives = options_with_defaults[:alternatives]\n self.goals = options_with_defaults[:goals]\n self.resettable = options_with_defaults[:resettable]\n self.algorithm = options_with_defaults[:algorithm]\n self.metadata = options_with_defaults[:metadata]\n end\n\n def extract_alternatives_from_options(options)\n alts = options[:alternatives] || []\n\n if alts.length == 1\n if alts[0].is_a? Hash\n alts = alts[0].map { |k, v| { k => v } }\n end\n end\n\n if alts.empty?\n exp_config = Split.configuration.experiment_for(name)\n if exp_config\n alts = load_alternatives_from_configuration\n options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration\n options[:metadata] = load_metadata_from_configuration\n options[:resettable] = exp_config[:resettable]\n options[:algorithm] = exp_config[:algorithm]\n end\n end\n\n options[:alternatives] = alts\n\n set_alternatives_and_options(options)\n\n # calculate probability that each alternative is the winner\n @alternative_probabilities = {}\n alts\n end\n\n def save\n validate!\n\n if new_record?\n start unless Split.configuration.start_manually\n persist_experiment_configuration\n elsif experiment_configuration_has_changed?\n reset unless Split.configuration.reset_manually\n persist_experiment_configuration\n end\n\n redis.hmset(experiment_config_key, :resettable, resettable.to_s,\n :algorithm, algorithm.to_s)\n self\n end\n\n def validate!\n end\n\n def new_record?\n !redis.exists?(name)\n end\n\n def ==(obj)\n def new_record?\n ExperimentCatalog.find(name).nil?\n end\n\n def ==(obj)\n self.name == obj.name\n end\n\n def [](name)\n alternatives.find { |a| a.name == name }\n end\n\n def algorithm\n @algorithm ||= Split.configuration.algorithm\n end\n\n def algorithm=(algorithm)\n @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm\n end\n\n def resettable=(resettable)\n @resettable = resettable.is_a?(String) ? 
resettable == \"true\" : resettable\n end\n\n def alternatives=(alts)\n @alternatives = alts.map do |alternative|\n if alternative.kind_of?(Split::Alternative)\n alternative\n else\n Split::Alternative.new(alternative, @name)\n end\n end\n end\n\n def winner\n Split.cache(:experiment_winner, name) do\n experiment_winner = redis.hget(:experiment_winner, name)\n if experiment_winner\n Split::Alternative.new(experiment_winner, name)\n else\n nil\n end\n end\n end\n\n def has_winner?\n return @has_winner if defined? @has_winner\n @has_winner = !winner.nil?\n end\n\n def winner=(winner_name)\n redis.hset(:experiment_winner, name, winner_name.to_s)\n @has_winner = true\n Split.configuration.on_experiment_winner_choose.call(self)\n end\n\n def participant_count\n alternatives.inject(0) { |sum, a| sum + a.participant_count }\n end\n\n def control\n alternatives.first\n end\n\n def reset_winner\n redis.hdel(:experiment_winner, name)\n @has_winner = false\n Split::Cache.clear_key(@name)\n end\n\n def start\n redis.hset(:experiment_start_times, @name, Time.now.to_i)\n end\n\n def start_time\n Split.cache(:experiment_start_times, @name) do\n t = redis.hget(:experiment_start_times, @name)\n if t\n # Check if stored time is an integer\n if t =~ /^[-+]?[0-9]+$/\n Time.at(t.to_i)\n else\n Time.parse(t)\n end\n end\n end\n end\n\n def next_alternative\n winner || random_alternative\n end\n\n def random_alternative\n if alternatives.length > 1\n algorithm.choose_alternative(self)\n else\n alternatives.first\n end\n end\n\n def version\n @version ||= (redis.get(\"#{name}:version\").to_i || 0)\n end\n\n def increment_version\n @version = redis.incr(\"#{name}:version\")\n end\n\n def key\n if version.to_i > 0\n \"#{name}:#{version}\"\n else\n name\n end\n end\n\n def goals_key\n \"#{name}:goals\"\n end\n\n def finished_key\n self.class.finished_key(key)\n end\n\n def metadata_key\n \"#{name}:metadata\"\n end\n\n def resettable?\n resettable\n end\n\n def reset\n Split.configuration.on_before_experiment_reset.call(self)\n Split::Cache.clear_key(@name)\n alternatives.each(&:reset)\n reset_winner\n Split.configuration.on_experiment_reset.call(self)\n increment_version\n end\n\n def delete\n Split.configuration.on_before_experiment_delete.call(self)\n if Split.configuration.start_manually\n redis.hdel(:experiment_start_times, @name)\n end\n reset_winner\n redis.srem(:experiments, name)\n remove_experiment_cohorting\n remove_experiment_configuration\n Split.configuration.on_experiment_delete.call(self)\n increment_version\n end\n\n def delete_metadata\n redis.del(metadata_key)\n end\n\n def load_from_redis\n exp_config = redis.hgetall(experiment_config_key)\n\n options = {\n resettable: exp_config[\"resettable\"],\n algorithm: exp_config[\"algorithm\"],\n alternatives: load_alternatives_from_redis,\n goals: Split::GoalsCollection.new(@name).load_from_redis,\n metadata: load_metadata_from_redis\n }\n\n set_alternatives_and_options(options)\n end\n\n def calc_winning_alternatives\n # Cache the winning alternatives so we recalculate them once per the specified interval.\n intervals_since_epoch =\n Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval\n\n if self.calc_time != intervals_since_epoch\n if goals.empty?\n self.estimate_winning_alternative\n else\n goals.each do |goal|\n self.estimate_winning_alternative(goal)\n end\n end\n\n self.calc_time = intervals_since_epoch\n\n self.save\n end\n end\n\n def estimate_winning_alternative(goal = nil)\n # initialize a hash of beta 
distributions based on the alternatives' conversion rates\n beta_params = calc_beta_params(goal)\n\n winning_alternatives = []\n\n Split.configuration.beta_probability_simulations.times do\n # calculate simulated conversion rates from the beta distributions\n simulated_cr_hash = calc_simulated_conversion_rates(beta_params)\n\n winning_alternative = find_simulated_winner(simulated_cr_hash)\n\n # push the winning pair to the winning_alternatives array\n winning_alternatives.push(winning_alternative)\n end\n\n winning_counts = count_simulated_wins(winning_alternatives)\n\n @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)\n\n write_to_alternatives(goal)\n\n self.save\n end\n\n def write_to_alternatives(goal = nil)\n alternatives.each do |alternative|\n alternative.set_p_winner(@alternative_probabilities[alternative], goal)\n end\n end\n\n def calc_alternative_probabilities(winning_counts, number_of_simulations)\n alternative_probabilities = {}\n winning_counts.each do |alternative, wins|\n alternative_probabilities[alternative] = wins / number_of_simulations.to_f\n end\n alternative_probabilities\n end\n\n def count_simulated_wins(winning_alternatives)\n # initialize a hash to keep track of winning alternative in simulations\n winning_counts = {}\n alternatives.each do |alternative|\n winning_counts[alternative] = 0\n end\n # count number of times each alternative won, calculate probabilities, place in hash\n winning_alternatives.each do |alternative|\n winning_counts[alternative] += 1\n end\n winning_counts\n end\n\n def find_simulated_winner(simulated_cr_hash)\n # figure out which alternative had the highest simulated conversion rate\n winning_pair = [\"\", 0.0]\n simulated_cr_hash.each do |alternative, rate|\n if rate > winning_pair[1]\n winning_pair = [alternative, rate]\n end\n end\n winner = winning_pair[0]\n winner\n end\n\n def calc_simulated_conversion_rates(beta_params)\n simulated_cr_hash = {}\n\n # create a hash which has the conversion rate pulled from each alternative's beta distribution\n beta_params.each do |alternative, params|\n alpha = params[0]\n beta = params[1]\n simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)\n simulated_cr_hash[alternative] = simulated_conversion_rate\n end\n\n simulated_cr_hash\n end\n\n def calc_beta_params(goal = nil)\n beta_params = {}\n alternatives.each do |alternative|\n conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)\n alpha = 1 + conversions\n beta = 1 + alternative.participant_count - conversions\n\n params = [alpha, beta]\n\n beta_params[alternative] = params\n end\n beta_params\n end\n\n def calc_time=(time)\n redis.hset(experiment_config_key, :calc_time, time)\n end\n\n def calc_time\n redis.hget(experiment_config_key, :calc_time).to_i\n end\n\n def jstring(goal = nil)\n js_id = if goal.nil?\n name\n else\n name + \"-\" + goal\n end\n js_id.gsub(\"/\", \"--\")\n end\n\n def cohorting_disabled?\n @cohorting_disabled ||= begin\n value = redis.hget(experiment_config_key, :cohorting)\n value.nil? ? 
false : value.downcase == \"true\"\n end\n end\n\n def disable_cohorting\n @cohorting_disabled = true\n redis.hset(experiment_config_key, :cohorting, true.to_s)\n end\n\n def enable_cohorting\n @cohorting_disabled = false\n redis.hset(experiment_config_key, :cohorting, false.to_s)\n end\n\n protected\n def experiment_config_key\n \"experiment_configurations/#{@name}\"\n end\n\n def load_metadata_from_configuration\n Split.configuration.experiment_for(@name)[:metadata]\n end\n\n def load_metadata_from_redis\n meta = redis.get(metadata_key)\n JSON.parse(meta) unless meta.nil?\n end\n\n def load_alternatives_from_configuration\n alts = Split.configuration.experiment_for(@name)[:alternatives]\n raise ArgumentError, \"Experiment configuration is missing :alternatives array\" unless alts\n if alts.is_a?(Hash)\n alts.keys\n else\n alts.flatten\n end\n end\n\n def load_alternatives_from_redis\n alternatives = redis.lrange(@name, 0, -1)\n alternatives.map do |alt|\n alt = begin\n JSON.parse(alt)\n rescue\n alt\n end\n Split::Alternative.new(alt, @name)\n end\n end\n\n private\n def redis\n Split.redis\n end\n\n def redis_interface\n RedisInterface.new\n end\n\n def persist_experiment_configuration\n redis_interface.add_to_set(:experiments, name)\n redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })\n goals_collection.save\n\n if @metadata\n redis.set(metadata_key, @metadata.to_json)\n else\n delete_metadata\n end\n end\n\n def remove_experiment_configuration\n @alternatives.each(&:delete)\n goals_collection.delete\n delete_metadata\n redis.del(@name)\n end\n\n def experiment_configuration_has_changed?\n existing_experiment = Experiment.find(@name)\n\n existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||\n existing_experiment.goals != @goals ||\n existing_experiment.metadata != @metadata\n end\n\n def goals_collection\n Split::GoalsCollection.new(@name, @goals)\n end\n\n def remove_experiment_cohorting\n @cohorting_disabled = false\n redis.hdel(experiment_config_key, :cohorting)\n end\n end\nend\n\n Centralize checks for experiment existance in experiment catalog\n\n @@ -94,7 +94,7 @@ module Split\n end\n \n def new_record?\n- !redis.exists?(name)\n+ ExperimentCatalog.find(name).nil?\n end\n \n def ==(obj)\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Centralize checks for experiment existance in experiment catalog"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676187,"cells":{"id":{"kind":"string","value":"10071837"},"text":{"kind":"string","value":" experiment.rb\n # frozen_string_literal: true\n\nmodule Split\n class Experiment\n attr_accessor :name\n attr_accessor :goals\n attr_accessor :alternative_probabilities\n attr_accessor :metadata\n\n attr_reader :alternatives\n attr_reader :resettable\n\n DEFAULT_OPTIONS = {\n resettable: true\n }\n\n def self.find(name)\n Split.cache(:experiments, name) do\n return unless Split.redis.exists?(name)\n Experiment.new(name).tap { |exp| exp.load_from_redis }\n end\n end\n\n def initialize(name, options = {})\n options = DEFAULT_OPTIONS.merge(options)\n\n @name = name.to_s\n\n extract_alternatives_from_options(options)\n end\n\n def self.finished_key(key)\n \"#{key}:finished\"\n end\n\n def 
set_alternatives_and_options(options)\n options_with_defaults = DEFAULT_OPTIONS.merge(\n options.reject { |k, v| v.nil? }\n )\n\n self.alternatives = options_with_defaults[:alternatives]\n self.goals = options_with_defaults[:goals]\n self.resettable = options_with_defaults[:resettable]\n self.algorithm = options_with_defaults[:algorithm]\n self.metadata = options_with_defaults[:metadata]\n end\n\n def extract_alternatives_from_options(options)\n alts = options[:alternatives] || []\n\n if alts.length == 1\n if alts[0].is_a? Hash\n alts = alts[0].map { |k, v| { k => v } }\n end\n end\n\n if alts.empty?\n exp_config = Split.configuration.experiment_for(name)\n if exp_config\n alts = load_alternatives_from_configuration\n options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration\n options[:metadata] = load_metadata_from_configuration\n options[:resettable] = exp_config[:resettable]\n options[:algorithm] = exp_config[:algorithm]\n end\n end\n\n options[:alternatives] = alts\n\n set_alternatives_and_options(options)\n\n # calculate probability that each alternative is the winner\n @alternative_probabilities = {}\n alts\n end\n\n def save\n validate!\n\n if new_record?\n start unless Split.configuration.start_manually\n persist_experiment_configuration\n elsif experiment_configuration_has_changed?\n reset unless Split.configuration.reset_manually\n persist_experiment_configuration\n end\n\n redis.hmset(experiment_config_key, :resettable, resettable.to_s,\n :algorithm, algorithm.to_s)\n self\n end\n\n def validate!\n end\n\n def new_record?\n !redis.exists?(name)\n end\n\n def ==(obj)\n def new_record?\n ExperimentCatalog.find(name).nil?\n end\n\n def ==(obj)\n self.name == obj.name\n end\n\n def [](name)\n alternatives.find { |a| a.name == name }\n end\n\n def algorithm\n @algorithm ||= Split.configuration.algorithm\n end\n\n def algorithm=(algorithm)\n @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm\n end\n\n def resettable=(resettable)\n @resettable = resettable.is_a?(String) ? resettable == \"true\" : resettable\n end\n\n def alternatives=(alts)\n @alternatives = alts.map do |alternative|\n if alternative.kind_of?(Split::Alternative)\n alternative\n else\n Split::Alternative.new(alternative, @name)\n end\n end\n end\n\n def winner\n Split.cache(:experiment_winner, name) do\n experiment_winner = redis.hget(:experiment_winner, name)\n if experiment_winner\n Split::Alternative.new(experiment_winner, name)\n else\n nil\n end\n end\n end\n\n def has_winner?\n return @has_winner if defined? 
@has_winner\n @has_winner = !winner.nil?\n end\n\n def winner=(winner_name)\n redis.hset(:experiment_winner, name, winner_name.to_s)\n @has_winner = true\n Split.configuration.on_experiment_winner_choose.call(self)\n end\n\n def participant_count\n alternatives.inject(0) { |sum, a| sum + a.participant_count }\n end\n\n def control\n alternatives.first\n end\n\n def reset_winner\n redis.hdel(:experiment_winner, name)\n @has_winner = false\n Split::Cache.clear_key(@name)\n end\n\n def start\n redis.hset(:experiment_start_times, @name, Time.now.to_i)\n end\n\n def start_time\n Split.cache(:experiment_start_times, @name) do\n t = redis.hget(:experiment_start_times, @name)\n if t\n # Check if stored time is an integer\n if t =~ /^[-+]?[0-9]+$/\n Time.at(t.to_i)\n else\n Time.parse(t)\n end\n end\n end\n end\n\n def next_alternative\n winner || random_alternative\n end\n\n def random_alternative\n if alternatives.length > 1\n algorithm.choose_alternative(self)\n else\n alternatives.first\n end\n end\n\n def version\n @version ||= (redis.get(\"#{name}:version\").to_i || 0)\n end\n\n def increment_version\n @version = redis.incr(\"#{name}:version\")\n end\n\n def key\n if version.to_i > 0\n \"#{name}:#{version}\"\n else\n name\n end\n end\n\n def goals_key\n \"#{name}:goals\"\n end\n\n def finished_key\n self.class.finished_key(key)\n end\n\n def metadata_key\n \"#{name}:metadata\"\n end\n\n def resettable?\n resettable\n end\n\n def reset\n Split.configuration.on_before_experiment_reset.call(self)\n Split::Cache.clear_key(@name)\n alternatives.each(&:reset)\n reset_winner\n Split.configuration.on_experiment_reset.call(self)\n increment_version\n end\n\n def delete\n Split.configuration.on_before_experiment_delete.call(self)\n if Split.configuration.start_manually\n redis.hdel(:experiment_start_times, @name)\n end\n reset_winner\n redis.srem(:experiments, name)\n remove_experiment_cohorting\n remove_experiment_configuration\n Split.configuration.on_experiment_delete.call(self)\n increment_version\n end\n\n def delete_metadata\n redis.del(metadata_key)\n end\n\n def load_from_redis\n exp_config = redis.hgetall(experiment_config_key)\n\n options = {\n resettable: exp_config[\"resettable\"],\n algorithm: exp_config[\"algorithm\"],\n alternatives: load_alternatives_from_redis,\n goals: Split::GoalsCollection.new(@name).load_from_redis,\n metadata: load_metadata_from_redis\n }\n\n set_alternatives_and_options(options)\n end\n\n def calc_winning_alternatives\n # Cache the winning alternatives so we recalculate them once per the specified interval.\n intervals_since_epoch =\n Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval\n\n if self.calc_time != intervals_since_epoch\n if goals.empty?\n self.estimate_winning_alternative\n else\n goals.each do |goal|\n self.estimate_winning_alternative(goal)\n end\n end\n\n self.calc_time = intervals_since_epoch\n\n self.save\n end\n end\n\n def estimate_winning_alternative(goal = nil)\n # initialize a hash of beta distributions based on the alternatives' conversion rates\n beta_params = calc_beta_params(goal)\n\n winning_alternatives = []\n\n Split.configuration.beta_probability_simulations.times do\n # calculate simulated conversion rates from the beta distributions\n simulated_cr_hash = calc_simulated_conversion_rates(beta_params)\n\n winning_alternative = find_simulated_winner(simulated_cr_hash)\n\n # push the winning pair to the winning_alternatives array\n winning_alternatives.push(winning_alternative)\n end\n\n winning_counts = 
count_simulated_wins(winning_alternatives)\n\n @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)\n\n write_to_alternatives(goal)\n\n self.save\n end\n\n def write_to_alternatives(goal = nil)\n alternatives.each do |alternative|\n alternative.set_p_winner(@alternative_probabilities[alternative], goal)\n end\n end\n\n def calc_alternative_probabilities(winning_counts, number_of_simulations)\n alternative_probabilities = {}\n winning_counts.each do |alternative, wins|\n alternative_probabilities[alternative] = wins / number_of_simulations.to_f\n end\n alternative_probabilities\n end\n\n def count_simulated_wins(winning_alternatives)\n # initialize a hash to keep track of winning alternative in simulations\n winning_counts = {}\n alternatives.each do |alternative|\n winning_counts[alternative] = 0\n end\n # count number of times each alternative won, calculate probabilities, place in hash\n winning_alternatives.each do |alternative|\n winning_counts[alternative] += 1\n end\n winning_counts\n end\n\n def find_simulated_winner(simulated_cr_hash)\n # figure out which alternative had the highest simulated conversion rate\n winning_pair = [\"\", 0.0]\n simulated_cr_hash.each do |alternative, rate|\n if rate > winning_pair[1]\n winning_pair = [alternative, rate]\n end\n end\n winner = winning_pair[0]\n winner\n end\n\n def calc_simulated_conversion_rates(beta_params)\n simulated_cr_hash = {}\n\n # create a hash which has the conversion rate pulled from each alternative's beta distribution\n beta_params.each do |alternative, params|\n alpha = params[0]\n beta = params[1]\n simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)\n simulated_cr_hash[alternative] = simulated_conversion_rate\n end\n\n simulated_cr_hash\n end\n\n def calc_beta_params(goal = nil)\n beta_params = {}\n alternatives.each do |alternative|\n conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)\n alpha = 1 + conversions\n beta = 1 + alternative.participant_count - conversions\n\n params = [alpha, beta]\n\n beta_params[alternative] = params\n end\n beta_params\n end\n\n def calc_time=(time)\n redis.hset(experiment_config_key, :calc_time, time)\n end\n\n def calc_time\n redis.hget(experiment_config_key, :calc_time).to_i\n end\n\n def jstring(goal = nil)\n js_id = if goal.nil?\n name\n else\n name + \"-\" + goal\n end\n js_id.gsub(\"/\", \"--\")\n end\n\n def cohorting_disabled?\n @cohorting_disabled ||= begin\n value = redis.hget(experiment_config_key, :cohorting)\n value.nil? ? 
false : value.downcase == \"true\"\n end\n end\n\n def disable_cohorting\n @cohorting_disabled = true\n redis.hset(experiment_config_key, :cohorting, true.to_s)\n end\n\n def enable_cohorting\n @cohorting_disabled = false\n redis.hset(experiment_config_key, :cohorting, false.to_s)\n end\n\n protected\n def experiment_config_key\n \"experiment_configurations/#{@name}\"\n end\n\n def load_metadata_from_configuration\n Split.configuration.experiment_for(@name)[:metadata]\n end\n\n def load_metadata_from_redis\n meta = redis.get(metadata_key)\n JSON.parse(meta) unless meta.nil?\n end\n\n def load_alternatives_from_configuration\n alts = Split.configuration.experiment_for(@name)[:alternatives]\n raise ArgumentError, \"Experiment configuration is missing :alternatives array\" unless alts\n if alts.is_a?(Hash)\n alts.keys\n else\n alts.flatten\n end\n end\n\n def load_alternatives_from_redis\n alternatives = redis.lrange(@name, 0, -1)\n alternatives.map do |alt|\n alt = begin\n JSON.parse(alt)\n rescue\n alt\n end\n Split::Alternative.new(alt, @name)\n end\n end\n\n private\n def redis\n Split.redis\n end\n\n def redis_interface\n RedisInterface.new\n end\n\n def persist_experiment_configuration\n redis_interface.add_to_set(:experiments, name)\n redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })\n goals_collection.save\n\n if @metadata\n redis.set(metadata_key, @metadata.to_json)\n else\n delete_metadata\n end\n end\n\n def remove_experiment_configuration\n @alternatives.each(&:delete)\n goals_collection.delete\n delete_metadata\n redis.del(@name)\n end\n\n def experiment_configuration_has_changed?\n existing_experiment = Experiment.find(@name)\n\n existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||\n existing_experiment.goals != @goals ||\n existing_experiment.metadata != @metadata\n end\n\n def goals_collection\n Split::GoalsCollection.new(@name, @goals)\n end\n\n def remove_experiment_cohorting\n @cohorting_disabled = false\n redis.hdel(experiment_config_key, :cohorting)\n end\n end\nend\n\n Centralize checks for experiment existance in experiment catalog\n\n @@ -94,7 +94,7 @@ module Split\n end\n \n def new_record?\n- !redis.exists?(name)\n+ ExperimentCatalog.find(name).nil?\n end\n \n def ==(obj)\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Centralize checks for experiment existance in experiment catalog"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676188,"cells":{"id":{"kind":"string","value":"10071838"},"text":{"kind":"string","value":" experiment.rb\n # frozen_string_literal: true\n\nmodule Split\n class Experiment\n attr_accessor :name\n attr_accessor :goals\n attr_accessor :alternative_probabilities\n attr_accessor :metadata\n\n attr_reader :alternatives\n attr_reader :resettable\n\n DEFAULT_OPTIONS = {\n resettable: true\n }\n\n def self.find(name)\n Split.cache(:experiments, name) do\n return unless Split.redis.exists?(name)\n Experiment.new(name).tap { |exp| exp.load_from_redis }\n end\n end\n\n def initialize(name, options = {})\n options = DEFAULT_OPTIONS.merge(options)\n\n @name = name.to_s\n\n extract_alternatives_from_options(options)\n end\n\n def self.finished_key(key)\n \"#{key}:finished\"\n end\n\n def 
set_alternatives_and_options(options)\n options_with_defaults = DEFAULT_OPTIONS.merge(\n options.reject { |k, v| v.nil? }\n )\n\n self.alternatives = options_with_defaults[:alternatives]\n self.goals = options_with_defaults[:goals]\n self.resettable = options_with_defaults[:resettable]\n self.algorithm = options_with_defaults[:algorithm]\n self.metadata = options_with_defaults[:metadata]\n end\n\n def extract_alternatives_from_options(options)\n alts = options[:alternatives] || []\n\n if alts.length == 1\n if alts[0].is_a? Hash\n alts = alts[0].map { |k, v| { k => v } }\n end\n end\n\n if alts.empty?\n exp_config = Split.configuration.experiment_for(name)\n if exp_config\n alts = load_alternatives_from_configuration\n options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration\n options[:metadata] = load_metadata_from_configuration\n options[:resettable] = exp_config[:resettable]\n options[:algorithm] = exp_config[:algorithm]\n end\n end\n\n options[:alternatives] = alts\n\n set_alternatives_and_options(options)\n\n # calculate probability that each alternative is the winner\n @alternative_probabilities = {}\n alts\n end\n\n def save\n validate!\n\n if new_record?\n start unless Split.configuration.start_manually\n persist_experiment_configuration\n elsif experiment_configuration_has_changed?\n reset unless Split.configuration.reset_manually\n persist_experiment_configuration\n end\n\n redis.hmset(experiment_config_key, :resettable, resettable.to_s,\n :algorithm, algorithm.to_s)\n self\n end\n\n def validate!\n end\n\n def new_record?\n !redis.exists?(name)\n end\n\n def ==(obj)\n def new_record?\n ExperimentCatalog.find(name).nil?\n end\n\n def ==(obj)\n self.name == obj.name\n end\n\n def [](name)\n alternatives.find { |a| a.name == name }\n end\n\n def algorithm\n @algorithm ||= Split.configuration.algorithm\n end\n\n def algorithm=(algorithm)\n @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm\n end\n\n def resettable=(resettable)\n @resettable = resettable.is_a?(String) ? resettable == \"true\" : resettable\n end\n\n def alternatives=(alts)\n @alternatives = alts.map do |alternative|\n if alternative.kind_of?(Split::Alternative)\n alternative\n else\n Split::Alternative.new(alternative, @name)\n end\n end\n end\n\n def winner\n Split.cache(:experiment_winner, name) do\n experiment_winner = redis.hget(:experiment_winner, name)\n if experiment_winner\n Split::Alternative.new(experiment_winner, name)\n else\n nil\n end\n end\n end\n\n def has_winner?\n return @has_winner if defined? 
@has_winner\n @has_winner = !winner.nil?\n end\n\n def winner=(winner_name)\n redis.hset(:experiment_winner, name, winner_name.to_s)\n @has_winner = true\n Split.configuration.on_experiment_winner_choose.call(self)\n end\n\n def participant_count\n alternatives.inject(0) { |sum, a| sum + a.participant_count }\n end\n\n def control\n alternatives.first\n end\n\n def reset_winner\n redis.hdel(:experiment_winner, name)\n @has_winner = false\n Split::Cache.clear_key(@name)\n end\n\n def start\n redis.hset(:experiment_start_times, @name, Time.now.to_i)\n end\n\n def start_time\n Split.cache(:experiment_start_times, @name) do\n t = redis.hget(:experiment_start_times, @name)\n if t\n # Check if stored time is an integer\n if t =~ /^[-+]?[0-9]+$/\n Time.at(t.to_i)\n else\n Time.parse(t)\n end\n end\n end\n end\n\n def next_alternative\n winner || random_alternative\n end\n\n def random_alternative\n if alternatives.length > 1\n algorithm.choose_alternative(self)\n else\n alternatives.first\n end\n end\n\n def version\n @version ||= (redis.get(\"#{name}:version\").to_i || 0)\n end\n\n def increment_version\n @version = redis.incr(\"#{name}:version\")\n end\n\n def key\n if version.to_i > 0\n \"#{name}:#{version}\"\n else\n name\n end\n end\n\n def goals_key\n \"#{name}:goals\"\n end\n\n def finished_key\n self.class.finished_key(key)\n end\n\n def metadata_key\n \"#{name}:metadata\"\n end\n\n def resettable?\n resettable\n end\n\n def reset\n Split.configuration.on_before_experiment_reset.call(self)\n Split::Cache.clear_key(@name)\n alternatives.each(&:reset)\n reset_winner\n Split.configuration.on_experiment_reset.call(self)\n increment_version\n end\n\n def delete\n Split.configuration.on_before_experiment_delete.call(self)\n if Split.configuration.start_manually\n redis.hdel(:experiment_start_times, @name)\n end\n reset_winner\n redis.srem(:experiments, name)\n remove_experiment_cohorting\n remove_experiment_configuration\n Split.configuration.on_experiment_delete.call(self)\n increment_version\n end\n\n def delete_metadata\n redis.del(metadata_key)\n end\n\n def load_from_redis\n exp_config = redis.hgetall(experiment_config_key)\n\n options = {\n resettable: exp_config[\"resettable\"],\n algorithm: exp_config[\"algorithm\"],\n alternatives: load_alternatives_from_redis,\n goals: Split::GoalsCollection.new(@name).load_from_redis,\n metadata: load_metadata_from_redis\n }\n\n set_alternatives_and_options(options)\n end\n\n def calc_winning_alternatives\n # Cache the winning alternatives so we recalculate them once per the specified interval.\n intervals_since_epoch =\n Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval\n\n if self.calc_time != intervals_since_epoch\n if goals.empty?\n self.estimate_winning_alternative\n else\n goals.each do |goal|\n self.estimate_winning_alternative(goal)\n end\n end\n\n self.calc_time = intervals_since_epoch\n\n self.save\n end\n end\n\n def estimate_winning_alternative(goal = nil)\n # initialize a hash of beta distributions based on the alternatives' conversion rates\n beta_params = calc_beta_params(goal)\n\n winning_alternatives = []\n\n Split.configuration.beta_probability_simulations.times do\n # calculate simulated conversion rates from the beta distributions\n simulated_cr_hash = calc_simulated_conversion_rates(beta_params)\n\n winning_alternative = find_simulated_winner(simulated_cr_hash)\n\n # push the winning pair to the winning_alternatives array\n winning_alternatives.push(winning_alternative)\n end\n\n winning_counts = 
count_simulated_wins(winning_alternatives)\n\n @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)\n\n write_to_alternatives(goal)\n\n self.save\n end\n\n def write_to_alternatives(goal = nil)\n alternatives.each do |alternative|\n alternative.set_p_winner(@alternative_probabilities[alternative], goal)\n end\n end\n\n def calc_alternative_probabilities(winning_counts, number_of_simulations)\n alternative_probabilities = {}\n winning_counts.each do |alternative, wins|\n alternative_probabilities[alternative] = wins / number_of_simulations.to_f\n end\n alternative_probabilities\n end\n\n def count_simulated_wins(winning_alternatives)\n # initialize a hash to keep track of winning alternative in simulations\n winning_counts = {}\n alternatives.each do |alternative|\n winning_counts[alternative] = 0\n end\n # count number of times each alternative won, calculate probabilities, place in hash\n winning_alternatives.each do |alternative|\n winning_counts[alternative] += 1\n end\n winning_counts\n end\n\n def find_simulated_winner(simulated_cr_hash)\n # figure out which alternative had the highest simulated conversion rate\n winning_pair = [\"\", 0.0]\n simulated_cr_hash.each do |alternative, rate|\n if rate > winning_pair[1]\n winning_pair = [alternative, rate]\n end\n end\n winner = winning_pair[0]\n winner\n end\n\n def calc_simulated_conversion_rates(beta_params)\n simulated_cr_hash = {}\n\n # create a hash which has the conversion rate pulled from each alternative's beta distribution\n beta_params.each do |alternative, params|\n alpha = params[0]\n beta = params[1]\n simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)\n simulated_cr_hash[alternative] = simulated_conversion_rate\n end\n\n simulated_cr_hash\n end\n\n def calc_beta_params(goal = nil)\n beta_params = {}\n alternatives.each do |alternative|\n conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)\n alpha = 1 + conversions\n beta = 1 + alternative.participant_count - conversions\n\n params = [alpha, beta]\n\n beta_params[alternative] = params\n end\n beta_params\n end\n\n def calc_time=(time)\n redis.hset(experiment_config_key, :calc_time, time)\n end\n\n def calc_time\n redis.hget(experiment_config_key, :calc_time).to_i\n end\n\n def jstring(goal = nil)\n js_id = if goal.nil?\n name\n else\n name + \"-\" + goal\n end\n js_id.gsub(\"/\", \"--\")\n end\n\n def cohorting_disabled?\n @cohorting_disabled ||= begin\n value = redis.hget(experiment_config_key, :cohorting)\n value.nil? ? 
false : value.downcase == \"true\"\n end\n end\n\n def disable_cohorting\n @cohorting_disabled = true\n redis.hset(experiment_config_key, :cohorting, true.to_s)\n end\n\n def enable_cohorting\n @cohorting_disabled = false\n redis.hset(experiment_config_key, :cohorting, false.to_s)\n end\n\n protected\n def experiment_config_key\n \"experiment_configurations/#{@name}\"\n end\n\n def load_metadata_from_configuration\n Split.configuration.experiment_for(@name)[:metadata]\n end\n\n def load_metadata_from_redis\n meta = redis.get(metadata_key)\n JSON.parse(meta) unless meta.nil?\n end\n\n def load_alternatives_from_configuration\n alts = Split.configuration.experiment_for(@name)[:alternatives]\n raise ArgumentError, \"Experiment configuration is missing :alternatives array\" unless alts\n if alts.is_a?(Hash)\n alts.keys\n else\n alts.flatten\n end\n end\n\n def load_alternatives_from_redis\n alternatives = redis.lrange(@name, 0, -1)\n alternatives.map do |alt|\n alt = begin\n JSON.parse(alt)\n rescue\n alt\n end\n Split::Alternative.new(alt, @name)\n end\n end\n\n private\n def redis\n Split.redis\n end\n\n def redis_interface\n RedisInterface.new\n end\n\n def persist_experiment_configuration\n redis_interface.add_to_set(:experiments, name)\n redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })\n goals_collection.save\n\n if @metadata\n redis.set(metadata_key, @metadata.to_json)\n else\n delete_metadata\n end\n end\n\n def remove_experiment_configuration\n @alternatives.each(&:delete)\n goals_collection.delete\n delete_metadata\n redis.del(@name)\n end\n\n def experiment_configuration_has_changed?\n existing_experiment = Experiment.find(@name)\n\n existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||\n existing_experiment.goals != @goals ||\n existing_experiment.metadata != @metadata\n end\n\n def goals_collection\n Split::GoalsCollection.new(@name, @goals)\n end\n\n def remove_experiment_cohorting\n @cohorting_disabled = false\n redis.hdel(experiment_config_key, :cohorting)\n end\n end\nend\n\n Centralize checks for experiment existance in experiment catalog\n\n @@ -94,7 +94,7 @@ module Split\n end\n \n def new_record?\n- !redis.exists?(name)\n+ ExperimentCatalog.find(name).nil?\n end\n \n def ==(obj)\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Centralize checks for experiment existance in experiment catalog"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676189,"cells":{"id":{"kind":"string","value":"10071839"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source 
Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)

> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split

Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.

Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.

Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.

## Install

### Requirements

Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.

If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).

Split uses Redis as a datastore.

Split only supports Redis 4.0 or greater.

If you're on OS X, Homebrew is the simplest way to install Redis:

```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```

You now have a Redis daemon running on port `6379`.

### Setup

```bash
gem install split
```

#### Rails

Adding `gem 'split'` to your Gemfile will autoload it when Rails starts up; as long as you've configured Redis it will 'just work'.

#### Sinatra

To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:

```ruby
require 'split'

class MySinatraApp < Sinatra::Base
  enable :sessions
  helpers Split::Helper

  get '/' do
    ...
  end
end
```

## Usage

To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.

`ab_test` returns one of the alternatives; if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.

It can be used to render different templates, show different text or any other case based logic.

`ab_finished` is used to mark the completion of an experiment, or conversion.

Example: View

```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
  <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```

Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  ab_finished(:new_user_free_points)
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).

## Statistical Validity

Split gives you two options for determining which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives.
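For intuition, the statistic behind these significance levels is a standard two-proportion z-test. Here is a minimal standalone sketch of it (illustrative only; this is not Split's actual implementation or part of its API):

```ruby
# Two-proportion z-statistic, the kind of test used for the dashboard's
# significance levels. Illustrative sketch only, not part of Split's API.
def z_score(control_conversions, control_participants, alt_conversions, alt_participants)
  p1 = control_conversions.to_f / control_participants
  p2 = alt_conversions.to_f / alt_participants
  # pooled conversion rate across both branches
  pooled = (control_conversions + alt_conversions).to_f /
           (control_participants + alt_participants)
  standard_error = Math.sqrt(pooled * (1 - pooled) *
                             (1.0 / control_participants + 1.0 / alt_participants))
  (p2 - p1) / standard_error
end

z_score(100, 1000, 130, 1000) # => ~2.10, significant at 95% since |z| > 1.96
```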
Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).

[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.

To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the URL.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a URL such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons.
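Because the override is read as a hash of experiment names to alternatives, forcing several experiments in one URL by repeating the parameter should also work; this combined form is an assumption based on standard Rack nested-parameter parsing (and `pricing`/`high` are hypothetical names), so verify it in your app:

    http://myawesomesite.com?ab_test[button_color]=red&ab_test[pricing]=high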
Overrides won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.


### Rspec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper

  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```


### Starting experiments manually

By default new A/B tests will be active right after deployment. If you would like to start a new test a while after the deploy, you can do so by setting the `start_manually` configuration option to `true`.

After choosing this option, tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started by pressing the `Start` button again once it has been re-initialized.

### Reset after completion

When a user completes a test their session is reset so that they may start the test again in the future.

To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll in any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default Split automatically resets the experiment whenever it detects that the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.

You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc., without resetting everything.

### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once.
This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments. Calling `ab_test()` will then always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using cookie persistence, Split stores data in an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example: `{ "experiment_name" => "variant_a" }`

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent:
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets a TTL for the user key (if a user is in multiple experiments, the most recent update resets the TTL for all of their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users.
A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be called when an experiment is reset or deleted. 
### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.

For example:

```ruby
Split.configure do |config|
  config.on_trial          = :log_trial          # run on every trial
  config.on_trial_choose   = :log_trial_choose   # run only on trials with new users
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

```ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

```ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

```ruby
Split.configure do |config|
  # after an experiment is reset or deleted
  config.on_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # before an experiment is reset or deleted
  config.on_before_experiment_reset = -> (experiment) {
    # Do something before reset
  }
  config.on_before_experiment_delete = -> (experiment) {
    # Do something else before delete
  }
  # after an experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) {
    # Do something when a winner is chosen
  }
end
```
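As a concrete (hypothetical) example, you could expire an application-level cache whenever an experiment is reset, so stale per-experiment data does not outlive the reset:

```ruby
Split.configure do |config|
  config.on_experiment_reset = -> (experiment) {
    # "experiment_stats/..." is a made-up cache key for this example.
    Rails.cache.delete("experiment_stats/#{experiment.name}")
  }
end
```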
## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2, you can mount this inside your app using Rack::URLMap in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"      => Your::App.new,
  "/split" => Split::Dashboard.new
```

However, if you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then add this to `config/routes.rb`:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```

You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate!  # authenticate if not already
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  # config.start_manually = false ## new tests will have to be started manually from the admin panel. default false
  # config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality; the default is proc { |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
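For example, a custom `ignore_filter` could combine the bot regex with an internal IP range so that staff traffic never enters experiments (the `10.0.` prefix is a made-up example). Note that supplying your own `ignore_filter` replaces the default checks shown above:

```ruby
Split.configure do |config|
  config.ignore_filter = -> (request) do
    # Re-apply bot detection, then exclude a hypothetical internal IP range.
    request.user_agent.to_s =~ Split.configuration.robot_regex ||
      request.ip.to_s.start_with?("10.0.")
  end
end
```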
### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    my_second_experiment: {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => { "text" => "Have a fantastic day" },
        "b" => { "text" => "Don't get hit by a bus" }
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"

trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <%= meta['text'] %>
<% end %>
```

The keys used in metadata should be Strings.
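For example, a (hypothetical) controller action can capture the metadata by passing a block to `ab_test`, just as the view example above does:

```ruby
def show
  ab_test(:my_first_experiment) do |alternative, meta|
    # e.g. "Have a fantastic day" when the user is in alternative "a"
    @greeting = meta["text"]
  end
end
```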
#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object:

```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal.

Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  button_color_experiment: {
    alternatives: ["blue", "green"],
    combined_experiments: ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`
* In Sinatra, include the CombinedExperimentsHelper:
  ```ruby
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.
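For example, both failover options can be set together; these mirror the settings already shown in the Configuration section:

```ruby
Split.configure do |config|
  config.db_failover = true # ab_test/ab_finished won't raise on Redis errors
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```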
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(experiment: experiment)
# run the trial
trial.choose!
# get the result; returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "block") the algorithm will choose a random alternative from those minimum participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
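To set an algorithm for a single experiment instead, use the experiments hash, as in the `my_second_experiment` example earlier. This sketch swaps in `BlockRandomization`, assuming the same string form works for any of the bundled algorithms:

```ruby
Split.configure do |config|
  config.experiments = {
    my_experiment: {
      alternatives: ["a", "b", "c"],
      # assumed to accept the class name as a string, like the Whiplash example above
      algorithm: 'Split::Algorithms::BlockRandomization'
    }
  }
end
```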
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]
## Sponsors

Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec

### A Note on Patches and Pull Requests

* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. 
Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. 
A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be called when an experiment is reset or deleted. 
You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! 
# authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, and use\nthose to complete multiple different experiments without adding more to\nyour code. 
You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis)\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative\n\n## Screencast\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. 
`ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Add a link to split-cli\n @@ -661,6 +661,7 @@ end\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis)\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative\n+ - [Split::Cli](https://github.com/craigmcnamara/split-cli) - a CLI to trigger Split A/B tests\n \n ## Screencast\n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Add a link to split-cli"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676191,"cells":{"id":{"kind":"string","value":"10071841"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based 
A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. 
## Usage

To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.

`ab_test` returns one of the alternatives; if a user has already seen that test they will get the same alternative as before, and you can branch your code on the returned value.

It can be used to render different templates, show different text or any other case based logic.

`ab_finished` is used to record the completion of an experiment, i.e. a conversion.

Example: View

```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
  <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```

Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  ab_finished(:new_user_free_points)
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).

## Statistical Validity

Split has two options for you to use to determine which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).

[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.

To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

The default weight for an alternative is 1, and an alternative's share of traffic is its weight divided by the sum of all weights, so each variant above shows the new alternative to 1 visitor in 10.
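If you would rather keep weights out of your call sites, the experiment configuration hash described under "Experiment configuration" below accepts a `percent` value per alternative. A sketch of the same 90/10 split expressed that way (see that section for the full set of options):

```ruby
Split.configure do |config|
  config.experiments = {
    homepage_design: {
      alternatives: [
        { name: "Old", percent: 90 },
        { name: "New", percent: 10 }
      ]
    }
  }
end
```

With this in place, `ab_test(:homepage_design)` picks alternatives using the configured percentages.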
### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.


### Rspec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```


### Starting experiments manually

By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`.

After choosing this option tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started by pressing the `Start` button again whenever it is re-initialized.

### Reset after completion

When a user completes a test their session is reset so that they may start the test again in the future.

To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default Split automatically resets the experiment whenever it detects that the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.

You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
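Both options sit in the same configure block as any other setting. A minimal sketch, with illustrative values:

```ruby
Split.configure do |config|
  config.start_manually = true # new experiments wait for the dashboard's Start button
  config.reset_manually = true # configuration changes never wipe collected data
end
```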
### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments, and calling `ab_test()` will always return the first alternative without adding the user to that experiment.
### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example: `{ "experiment_name" => "variant_a" }`

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets the TTL for the user key (if a user is in multiple experiments, the most recent update will reset the TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
  lookup_by: -> (context) { context.send(:current_user).try(:id) },
  expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
    logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
    logged_in_adapter: redis_adapter,
    logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as the existing adapters.
See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```
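As a rough sketch of the shape such a class takes, the method list below mirrors the built-in session adapter; treat the adapters referenced above as the authoritative interface. The class name and in-memory storage here are purely illustrative:

```ruby
# Illustrative adapter that keeps assignments in a plain Hash.
# Real adapters receive the controller/rack context in #initialize
# and persist the data somewhere durable (session, cookie, Redis, ...).
class InMemoryAdapter
  def initialize(context)
    @store = self.class.store
  end

  def [](key)
    @store[key]
  end

  def []=(key, value)
    @store[key] = value
  end

  def delete(key)
    @store.delete(key)
  end

  def keys
    @store.keys
  end

  def self.store
    @store ||= {}
  end
end
```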
### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.

For example:

```ruby
Split.configure do |config|
  config.on_trial = :log_trial # run on every trial
  config.on_trial_choose = :log_trial_choose # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

```ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

```ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

```ruby
Split.configure do |config|
  # after experiment reset or delete
  config.on_experiment_reset  = -> (experiment) { } # do something on reset
  config.on_experiment_delete = -> (experiment) { } # do something else on delete
  # before experiment reset or delete
  config.on_before_experiment_reset  = -> (experiment) { } # do something before reset
  config.on_before_experiment_delete = -> (experiment) { } # do something else before delete
  # after the experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) { } # do something on winner choose
end
```
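For instance, a concrete (and purely illustrative) reset hook that records the event in the Rails log could look like this:

```ruby
Split.configure do |config|
  # Each hook receives the experiment that triggered it.
  config.on_experiment_reset = -> (experiment) do
    Rails.logger.info("Split experiment #{experiment.name} was reset")
  end
end
```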
## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2, you can mount this inside your app using Rack::URLMap in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"      => Your::App.new,
  "/split" => Split::Dashboard.new
```

However, if you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then add this to config/routes.rb:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```

You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate!  # authenticate if not already
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  # config.start_manually = false ## new tests will have to be started manually from the admin panel. default false
  # config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified by the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users.
Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality; the default is proc { |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm, and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    my_second_experiment: {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```
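If you need environment-specific values inside that YAML file, one common approach is to run it through ERB before parsing. This is plain Ruby rather than a Split feature, and the ERB tags in your YAML are up to you; a sketch:

```ruby
require "erb"
require "yaml"

Split.configure do |config|
  # Evaluate ERB tags (e.g. <%= ENV["ROLLOUT_PERCENT"] %>) before YAML parsing.
  raw = ERB.new(File.read("config/experiments.yml")).result
  config.experiments = YAML.load(raw)
end
```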
You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => {"text" => "Have a fantastic day"},
        "b" => {"text" => "Don't get hit by a bus"}
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"

trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <%= meta['text'] %>
<% end %>
```

The keys used in metadata should be Strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object:

```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals.
The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, that should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal.

Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.
Configure like so:

```ruby
Split.configuration.experiments = {
  button_color_experiment: {
    alternatives: ["blue", "green"],
    combined_experiments: ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:
* Be sure to enable `allow_multiple_experiments`.
* In Sinatra include the CombinedExperimentsHelper:

  ```ruby
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. In that case `ab_test` always delivers alternative A (the first one).

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.
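Both options also appear in the general Configuration section above; together they look like this, where the logging callback is just one possibility:

```ruby
Split.configure do |config|
  config.db_failover = true # serve the first alternative instead of raising on Redis errors
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```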
`ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
## Extensions

  - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
  - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
  - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
  - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
  - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
  - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10 minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities.
[[Become a backer](https://opencollective.com/split#backer)]

## Sponsors

Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

```bash
# Start a Redis server in another tab.
redis-server

bundle
rake spec
```

### A Note on Patches and Pull Requests

 * Fork the project.
 * Make your feature addition or bug fix.
 * Add tests for it. This is important so I don't break it in a future version unintentionally.
 * Add documentation if necessary.
 * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
 * Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).

Add a link to split-cli

```diff
@@ -661,6 +661,7 @@ end
 - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis)
 - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test
 - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative
+ - [Split::Cli](https://github.com/craigmcnamara/split-cli) - a CLI to trigger Split A/B tests
 
 ## Screencast
```

spec_helper.rb

```ruby
# frozen_string_literal: true

ENV["RACK_ENV"] = "test"

# The dump had two overlaid generations of these requires (single- and
# double-quoted); one deduplicated set is enough.
require "bundler/setup"
require "complex" if RUBY_VERSION.match(/1\.8/)
require "split"
require "ostruct"
require "yaml"

Dir["./spec/support/*.rb"].each { |f| require f }

module GlobalSharedContext
  extend RSpec::SharedContext
  let(:mock_user) { Split::User.new(double(session: {})) }

  before(:each) do
    Split.configuration = Split::Configuration.new
    Split.redis = Redis.new
    Split.redis.select(10)
    Split.redis.flushdb
    Split::Cache.clear
    @ab_user = mock_user
    @params = nil
  end
end

RSpec.configure do |config|
  config.order = "random"
  config.include GlobalSharedContext
  config.raise_errors_for_deprecations!
end

def session
  @session ||= {}
end

def params
  @params ||= {}
end

def request(ua = "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27")
  @request ||= begin
    r = OpenStruct.new
    r.user_agent = ua
    r.ip = "192.168.1.1"
    r
  end
end
```

Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2

require yaml as it is not loaded in 2.0.0-p0 by default

```diff
@@ -4,6 +4,7 @@ require 'rubygems'
 require 'bundler/setup'
 require 'split'
 require 'ostruct'
+require 'yaml'
 require 'complex' if RUBY_VERSION.match(/1\.8/)
 
 Dir['./spec/support/*.rb'].each { |f| require f }
```
\"ostruct\"\nrequire \"yaml\"\n\nDir[\"./spec/support/*.rb\"].each { |f| require f }\n\nmodule GlobalSharedContext\n extend RSpec::SharedContext\n let(:mock_user) { Split::User.new(double(session: {})) }\n\n before(:each) do\n Split.configuration = Split::Configuration.new\n Split.redis = Redis.new\n Split.redis.select(10)\n Split.redis.flushdb\n Split::Cache.clear\n @ab_user = mock_user\n @params = nil\n end\nend\n\nRSpec.configure do |config|\n config.order = \"random\"\n config.include GlobalSharedContext\n config.raise_errors_for_deprecations!\nend\n\ndef session\n @session ||= {}\nend\n\ndef params\n @params ||= {}\nend\n\ndef request(ua = \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27\")\n @request ||= begin\n r = OpenStruct.new\n r.user_agent = ua\n r.ip = \"192.168.1.1\"\n r\n end\nend\n\n Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2\n\nrequire yaml as it is not loaded in 2.0.0-p0 by default\n @@ -4,6 +4,7 @@ require 'rubygems'\n require 'bundler/setup'\n require 'split'\n require 'ostruct'\n+require 'yaml'\n require 'complex' if RUBY_VERSION.match(/1\\.8/)\n \n Dir['./spec/support/*.rb'].each { |f| require f }\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10676195,"cells":{"id":{"kind":"string","value":"10071845"},"text":{"kind":"string","value":" README.md\n Semantic-UI-Angular\n===================\n\nStatus: Currently migrating from https://github.com/caitp/angular-semantic\n[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)\n[![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/)\n\n**Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components.\nWe are considering Angular 2 support in the future. 
We've decided to use TypeScript as a step towards an Angular 2 friendly environment.

Status
------
**Work in progress**

We are working on setting up a proper environment, contribution guidelines and everything else needed for comfortable community contributions.
Once we release the first `alpha.0` we are happy to get community help.


Support
-------
We support AngularJS 1.4.8.


Building Semantic-UI-Angular
----------------------------
You have to have `nodejs` installed before running the following commands.

```
npm install
npm run build
```

The distribution packages will be stored in the `dist` folder.

Running tests
-------------
Single run:
```
npm test
```

Dev mode:
```
npm run test-dev
```

chore(README): Update README with dependencies badges

```diff
@@ -1,4 +1,6 @@
 Semantic-UI-Angular
 ===================
+[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)
+[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)
 
-Status: Currently migrating from https://github.com/caitp/angular-semantic
+Status: **Work in Progress** (migration from https://github.com/caitp/angular-semantic)
```
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\nAnd our initializer:\n\n```ruby\nrails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'\nrails_env = ENV['RAILS_ENV'] || 'development'\n\nsplit_config = YAML.load_file(rails_root + '/config/split.yml')\nSplit.redis = split_config[rails_env]\n```\n\n## Namespaces\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, and use\nthose to complete multiple different experiments without adding more to\nyour code. 
You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
Record 10071849 (README.md)

# [Split](https://libraries.io/rubygems/split)

[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)
![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)
[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)
[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)
[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)

> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split

Split is a Rack based A/B testing framework designed to work with Rails, Sinatra or any other Rack based app.

Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.

Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.

## Install

### Requirements

Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.

If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0, or v0.8.0 for Ruby 1.9.3.

Split uses Redis as a datastore.

Split only supports Redis 4.0 or greater.

If you're on OS X, Homebrew is the simplest way to install Redis:

```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```

You now have a Redis daemon running on port `6379`.

### Setup

```bash
gem install split
```

#### Rails

Adding `gem 'split'` to your Gemfile will autoload it when Rails starts up; as long as you've configured Redis, it will 'just work'.

#### Sinatra

To configure Sinatra with Split you need to enable sessions and mix in the helper methods.
Add the following lines at the top of your Sinatra app:

```ruby
require 'split'

class MySinatraApp < Sinatra::Base
  enable :sessions
  helpers Split::Helper

  get '/' do
    ...
  end
end
```

## Usage

To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.

`ab_test` returns one of the alternatives; if a user has already seen that test, they will get the same alternative as before, which you can use to split your code on.

It can be used to render different templates, show different text or any other case based logic.

`ab_finished` is used to mark the completion of an experiment, i.e. a conversion.

Example: View

```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
  <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```

Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  ab_finished(:new_user_free_points)
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).

## Statistical Validity

Split has two options for determining which alternative is the best.

The first option (the default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).

[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.
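To make the first option concrete, the statistic in question is the textbook pooled two-proportion z-test. The helper below is only a sketch of that arithmetic: the method name `z_score` and all the figures are made up, it is not part of Split's API, and Split's own implementation may differ in small details.

```ruby
# Pooled two-proportion z-test: the kind of statistic behind the dashboard's
# 90% / 95% / 99% significance labels. Plain arithmetic, not Split API.
def z_score(control_conversions, control_participants, alt_conversions, alt_participants)
  p_control = control_conversions.fdiv(control_participants)
  p_alt     = alt_conversions.fdiv(alt_participants)
  pooled    = (control_conversions + alt_conversions).fdiv(control_participants + alt_participants)
  std_error = Math.sqrt(pooled * (1 - pooled) *
                        (1.0 / control_participants + 1.0 / alt_participants))
  (p_alt - p_control) / std_error
end

z_score(120, 1000, 150, 1000) # => ~1.96, right at the 95% threshold
```

Absolute values above roughly 1.64, 1.96 and 2.58 correspond to the 90%, 95% and 99% levels reported on the dashboard.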
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.

To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

Each alternative is served in proportion to its weight, and the default weight is 1, so all three calls above show the new alternative to only 1 visitor in 10 (for the first call, 2 / (18 + 2) = 1/10).

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative.
You can do this by passing it as a parameter in the url.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.

### RSpec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper

  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default, new A/B tests are active right after deployment. If you would rather start a new test some time after the deploy, set the `start_manually` configuration option to `true`.

With this option chosen, tests won't start right after deploy, but only after you press the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can likewise only be started by pressing the `Start` button again whenever it is re-initialized.
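For completeness (the option only appears commented out in the Configuration section further down), a minimal sketch:

```ruby
Split.configure do |config|
  config.start_manually = true # new tests wait for the dashboard's Start button
end
```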
### Reset after completion

When a user completes a test, their session is reset so that they may start the test again in the future.

To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default, Split automatically resets an experiment whenever it detects that the configuration for that experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.

You may want to do this when you want to change something, like the variants' names or the metadata about an experiment, without resetting everything.

### Multiple experiments at once

By default, Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments; calling `ab_test()` will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default, Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using cookie persistence, Split stores data in an anonymous tracking cookie named 'split', which expires in 1 year.
Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be called when an experiment is reset or deleted. 
You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! 
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  # config.start_manually = false ## new tests will have to be started manually from the admin panel. default false
  # config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified by the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality; the default is proc { |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
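`CustomExcludeLogic` in the snippet above is only a placeholder name. One possible shape for it, assuming nothing beyond the standard `Rack::Request` methods `#ip` and `#user_agent`, might be:

```ruby
# Hypothetical exclude logic: keep staff traffic and uptime checkers
# out of all experiments. The IPs and user-agent patterns are examples.
class CustomExcludeLogic
  STAFF_IPS = %w[203.0.113.7 203.0.113.8].freeze

  def self.excludes?(request)
    STAFF_IPS.include?(request.ip) ||
      request.user_agent.to_s.match?(/pingdom|uptimerobot/i)
  end
end
```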
### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    my_second_experiment: {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
```

With the experiments defined in configuration, the calls in your code shrink to `ab_test(:my_first_experiment)` and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => { "text" => "Have a fantastic day" },
        "b" => { "text" => "Don't get hit by a bus" }
      }
    }
  }
end
```

or, in YAML:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"

trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <%= meta['text'] %>
<% end %>
```

The keys used in meta data should be Strings.
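Since the metadata rides along on the trial, it can also be read from inside the trial event hooks described earlier. A sketch reusing the `log_trial` hook from that section, on the assumption (suggested by the `trial.metadata['text']` example above) that `trial.metadata` returns the chosen alternative's metadata hash:

```ruby
def log_trial(trial)
  greeting = trial.metadata && trial.metadata['text']
  logger.info "experiment=#{trial.experiment.name} " \
              "alternative=#{trial.alternative.name} greeting=#{greeting}"
end
```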
#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object:

```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register (assuming the test runs with `reset: false`).

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  button_color_experiment: {
    alternatives: ["blue", "green"],
    combined_experiments: ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional configuration**:

* Be sure to enable `allow_multiple_experiments`.
* In Sinatra, include the CombinedExperimentsHelper:

  ```ruby
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, you can switch on the `db_failover` config option so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.
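Putting the two options together (the same settings also appear in the Configuration section above):

```ruby
Split.configure do |config|
  config.db_failover = true # serve the first alternative instead of raising
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```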
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load:

```ruby
Split.configuration.cache = true
```

This currently caches:

- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. It will choose the alternative with the fewest participants; in the event of multiple minimum-participant alternatives (i.e. starting a new "block"), it will choose randomly among those alternatives.

Users may also write their own algorithms.
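As a sketch of what that can look like: the built-in algorithms suggest that an algorithm is simply a class responding to `choose_alternative(experiment)` and returning one of `experiment.alternatives`. Treat that interface, and the `participant_count` accessor, as assumptions to verify against the bundled algorithms; the class below is hypothetical and roughly mimics `BlockRandomization`:

```ruby
module Split
  module Algorithms
    # Hypothetical example: always serve the least-served alternative.
    class LeastServed
      def self.choose_alternative(experiment)
        experiment.alternatives.min_by(&:participant_count)
      end
    end
  end
end
```

Point `config.algorithm` (or an experiment's `algorithm:` option) at such a class to use it.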
The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```

## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

## Sponsors

Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec

### A Note on Patches and Pull Requests

* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md).
By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).

Commit message for this record:

    Merge pull request #289 from byterussian/update-readme-rails-initializer

    Update initializer code with Rails.root and Rails.env

Diff:

````diff
@@ -551,11 +551,8 @@ production: redis1.example.com:6379
 And our initializer:
 
 ```ruby
-rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
-rails_env = ENV['RAILS_ENV'] || 'development'
-
-split_config = YAML.load_file(rails_root + '/config/split.yml')
-Split.redis = split_config[rails_env]
+split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
+Split.redis = split_config[Rails.env]
 ```
 
 ## Namespaces
````

Row metadata:

- addition_count: 2
- commit_subject: Merge pull request #289 from byterussian/update-readme-rails-initializer
- deletion_count: 5
- file_extension: .md
- lang: md
- license: mit
- repo_name: splitrb/split
Record 10071750
<NME> README.md
<BEF> jQuery Meow
===========

A plugin to provide Growl-like notifications. Will support bindings for various jQuery events and ability to 'meow' the content of a bound element (e.g. a Rails flash on load) or a message passed as an argument (e.g. button clicks).

[![endorse](http://api.coderwall.com/zacstewart/endorsecount.png)](http://coderwall.com/zacstewart)
<MSG> Add documentation to README
<DFF>

````diff
@@ -3,4 +3,108 @@ jQuery Meow
 
 A plugin to provide Growl-like notifications. Will support bindings for various jQuery events and ability to 'meow' the content of a bound element (e.g. a Rails flash on load) or a message passed as an argument (e.g. button clicks).
 
-[![endorse](http://api.coderwall.com/zacstewart/endorsecount.png)](http://coderwall.com/zacstewart)
\ No newline at end of file
+[![endorse](http://api.coderwall.com/zacstewart/endorsecount.png)](http://coderwall.com/zacstewart)
+
+## Usage example
+
+```javascript
+var options = {
+  title: 'Meow Example',
+  message: 'Hello, World!',
+};
+
+$.meow(options);
+```
+## Options
+<table>
+  <tr>
+    <th>Key</th>
+    <th>Type</th>
+    <th>Default</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>message</td>
+    <td>String, Object</td>
+    <td>null</td>
+    <td>Either a string or a jQuery selected element. If it's an element, Meow will use its value, innerHTML or innerText depending on its type.</td>
+  </tr>
+  <tr>
+    <td>title</td>
+    <td>String</td>
+    <td>null</td>
+    <td>If a string is given, the meow's title will reflect it. However, if you do not set this and use a selector element in <code>message</code>, it will default to the <code>title</code> attribute of that element if available.</td>
+  </tr>
+  <tr>
+    <td>icon</td>
+    <td>String</td>
+    <td>null</td>
+    <td>Sets the image URL for the icon.</td>
+  </tr>
+  <tr>
+    <td>duration</td>
+    <td>Number</td>
+    <td>5000</td>
+    <td>Sets the duration of the meow in milliseconds. Any positive, numeric value (including <code>Infinity</code>) is acceptable.</td>
+  </tr>
+  <tr>
+    <td>sticky</td>
+    <td>Boolean</td>
+    <td>false</td>
+    <td>Sets the meow to never time out. Has the same effect as setting duration to <code>Infinity</code>.</td>
+  </tr>
+  <tr>
+    <td>closeable</td>
+    <td>Boolean</td>
+    <td>true</td>
+    <td>Determines whether the meow will have a close (&times;) button. If <code>false</code>, you must rely on the duration timeout to remove the meow.</td>
+  </tr>
+  <tr>
+    <td>container</td>
+    <td>String</td>
+    <td>null</td>
+    <td>Sets the root element the meow should be contained within. By default, meows will be put in an auto-generated container.</td>
+  </tr>
+  <tr>
+    <td>beforeCreateFirst</td>
+    <td>Function</td>
+    <td>null</td>
+    <td>Gets called just before the first meow on the screen is created.</td>
+  </tr>
+  <tr>
+    <td>beforeCreate</td>
+    <td>Function</td>
+    <td>null</td>
+    <td>Gets called just before any meow is created.</td>
+  </tr>
+  <tr>
+    <td>afterCreate</td>
+    <td>Function</td>
+    <td>null</td>
+    <td>Gets called right after a meow is created.</td>
+  </tr>
+  <tr>
+    <td>onTimeout</td>
+    <td>Function</td>
+    <td>null</td>
+    <td>Gets called whenever a meow times out.</td>
+  </tr>
+  <tr>
+    <td>beforeDestroy</td>
+    <td>Function</td>
+    <td>null</td>
+    <td>Gets called just before a meow gets destroyed.</td>
+  </tr>
+  <tr>
+    <td>afterDestroy</td>
+    <td>Function</td>
+    <td>null</td>
+    <td>Gets called right after a meow gets destroyed.</td>
+  </tr>
+  <tr>
+    <td>afterDestroyLast</td>
+    <td>Function</td>
+    <td>null</td>
+    <td>Gets called after the last meow on the screen is destroyed.</td>
+  </tr>
+</table>
````
addition_count: 105
commit_subject: Add documentation to README
deletion_count: 1
file_extension: .md
lang: md
license: mit
repo_name: zacstewart/Meow

id: 10071752
    <NME> user_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/experiment_catalog" require "split/experiment" require "split/user" describe Split::User do let(:user_keys) { { "link_color" => "blue" } } let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new("link_color") } before(:each) do @subject = described_class.new(context) end end context '#cleanup_old_versions!' do let(:user_keys) { { 'link_color:1' => 'blue' } } it 'removes key if old experiment is found' do @subject.cleanup_old_versions!(experiment) expect(@subject.keys).to be_empty end end end context "#cleanup_old_experiments!" do it "removes key if experiment is not found" do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Fix cleanup_old_versions! misbehaviour <DFF> @@ -17,11 +17,25 @@ describe Split::User do end context '#cleanup_old_versions!' 
do - let(:user_keys) { { 'link_color:1' => 'blue' } } + let(:experiment_version) { "#{experiment.name}:1" } + let(:second_experiment_version) { "#{experiment.name}_another:1" } + let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } + let(:user_keys) do + { + experiment_version => 'blue', + second_experiment_version => 'red', + third_experiment_version => 'yellow' + } + end + + before(:each) { @subject.cleanup_old_versions!(experiment) } it 'removes key if old experiment is found' do - @subject.cleanup_old_versions!(experiment) - expect(@subject.keys).to be_empty + expect(@subject.keys).not_to include(experiment_version) + end + + it 'does not remove other keys' do + expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end
addition_count: 17
commit_subject: Fix cleanup_old_versions! misbehaviour
deletion_count: 3
file_extension: .rb
lang: rb
license: mit
repo_name: splitrb/split
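The spec change in this record pins down what `cleanup_old_versions!` is supposed to do: remove only the versioned key of the experiment itself (`link_color:1`) while leaving keys of similarly named experiments (`link_color_another:1`, `variation_of_link_color:1`) untouched. Below is a minimal Ruby sketch of a store that satisfies those expectations; `UserSketch` and its helpers are illustrative assumptions, not code from the split gem.

```ruby
# Minimal sketch (assumed names, not the gem's implementation):
# an anchored regexp removes "link_color:1" while keeping
# "link_color_another:1" and "variation_of_link_color:1".
class UserSketch
  def initialize(store)
    @store = store
  end

  def keys
    @store.keys
  end

  def cleanup_old_versions!(experiment)
    pattern = /\A#{Regexp.escape(experiment.name)}:\d+\z/
    @store.delete_if { |key, _value| key.match?(pattern) }
  end
end

experiment = Struct.new(:name).new("link_color")
user = UserSketch.new({
  "link_color:1"              => "blue",
  "link_color_another:1"      => "red",
  "variation_of_link_color:1" => "yellow"
})
user.cleanup_old_versions!(experiment)
user.keys # => ["link_color_another:1", "variation_of_link_color:1"]
```

The anchoring is the design point the new specs force: an unanchored match such as `/link_color:\d+/` would also sweep up `variation_of_link_color:1`.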

id: 10071755
<NME> version.rb <BEF> module Split VERSION = "0.2.1" end <MSG> Version 0.2.2 <DFF> @@ -1,3 +1,3 @@ module Split - VERSION = "0.2.1" + VERSION = "0.2.2" end
addition_count: 1
commit_subject: Version 0.2.2
deletion_count: 1
file_extension: .rb
lang: rb
license: mit
repo_name: splitrb/split

id: 10071758
    <NME> format.ts <BEF> import { equal } from 'assert'; import html from '../src/markup/format/html'; import haml from '../src/markup/format/haml'; import pug from '../src/markup/format/pug'; import slim from '../src/markup/format/slim'; import parse from '../src/markup'; import createConfig, { Options } from '../src/config'; describe('Format', () => { const defaultConfig = createConfig(); const field = createConfig({ options: { 'output.field': (index, placeholder) => placeholder ? `\${${index}:${placeholder}}` : `\${${index}}` } }); function createProfile(options: Partial<Options>) { const config = createConfig({ options }); return config; } describe('HTML', () => { const format = (abbr: string, config = defaultConfig) => html(parse(abbr, config), config); it('basic', () => { equal(format('div>p'), '<div>\n\t<p></p>\n</div>'); equal(format('div>p*3'), '<div>\n\t<p></p>\n\t<p></p>\n\t<p></p>\n</div>'); equal(format('div#a>p.b*2>span'), '<div id="a">\n\t<p class="b"><span></span></p>\n\t<p class="b"><span></span></p>\n</div>'); equal(format('div>div>div'), '<div>\n\t<div>\n\t\t<div></div>\n\t</div>\n</div>'); equal(format('table>tr*2>td{item}*2'), '<table>\n\t<tr>\n\t\t<td>item</td>\n\t\t<td>item</td>\n\t</tr>\n\t<tr>\n\t\t<td>item</td>\n\t\t<td>item</td>\n\t</tr>\n</table>'); }); it('inline elements', () => { const profile = createProfile({ 'output.inlineBreak': 3 }); const breakInline = createProfile({ 'output.inlineBreak': 1 }); const keepInline = createProfile({ 'output.inlineBreak': 0 }); const xhtml = createProfile({ 'output.selfClosingStyle': 'xhtml' }); equal(format('div>a>b*3', xhtml), '<div>\n\t<a href="">\n\t\t<b></b>\n\t\t<b></b>\n\t\t<b></b>\n\t</a>\n</div>'); equal(format('p>i', profile), '<p><i></i></p>'); equal(format('p>i*2', profile), '<p><i></i><i></i></p>'); equal(format('p>i*2', breakInline), '<p>\n\t<i></i>\n\t<i></i>\n</p>'); equal(format('p>i*3', profile), '<p>\n\t<i></i>\n\t<i></i>\n\t<i></i>\n</p>'); equal(format('p>i*3', keepInline), '<p><i></i><i></i><i></i></p>'); equal(format('i*2', profile), '<i></i><i></i>'); equal(format('i*3', profile), '<i></i>\n<i></i>\n<i></i>'); equal(format('i{a}+i{b}', profile), '<i>a</i><i>b</i>'); equal(format('img[src]/+p', xhtml), '<img src="" alt="" />\n<p></p>'); equal(format('div>img[src]/+p', xhtml), '<div>\n\t<img src="" alt="" />\n\t<p></p>\n</div>'); equal(format('div>p+img[src]/', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n</div>'); equal(format('div>p+img[src]/+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n\t<p></p>\n</div>'); equal(format('div>p+img[src]/*2+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" /><img src="" alt="" />\n\t<p></p>\n</div>'); equal(format('div>p+img[src]/*3+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n\t<img src="" alt="" />\n\t<img src="" alt="" />\n\t<p></p>\n</div>'); }); it('generate fields', () => { equal(format('a[href]', field), '<a href="${1}">${2}</a>'); equal(format('a[href]*2', field), '<a href="${1}">${2}</a><a href="${3}">${4}</a>'); equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}\n${4} ${5:foo} ${6:bar}'); equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar\n foo bar'); equal(format('ul>li*2', field), '<ul>\n\t<li>${1}</li>\n\t<li>${2}</li>\n</ul>'); equal(format('div>img[src]/', field), '<div><img src="${1}" alt="${2}"></div>'); }); // it.only('debug', () => { // equal(format('div>{foo}+{bar}+p'), '<div>\n\tfoobar\n\t<p></p>\n</div>'); // }); it('mixed content', () => { equal(format('div{foo}'), '<div>foo</div>'); 
equal(format('div>{foo}'), '<div>foo</div>'); equal(format('div>{foo}+{bar}'), '<div>\n\tfoo\n\tbar\n</div>'); equal(format('div>{foo}+{bar}+p'), '<div>\n\tfoo\n\tbar\n\t<p></p>\n</div>'); equal(format('div>{foo}+{bar}+p+{foo}+{bar}+p'), '<div>\n\tfoo\n\tbar\n\t<p></p>\n\tfoo\n\tbar\n\t<p></p>\n</div>'); equal(format('div>{foo}+p+{bar}'), '<div>\n\tfoo\n\t<p></p>\n\tbar\n</div>'); equal(format('div>{foo}>p'), '<div>\n\tfoo\n\t<p></p>\n</div>'); equal(format('div>{<!-- ${0} -->}'), '<div><!-- --></div>'); equal(format('div>{<!-- ${0} -->}+p'), '<div>\n\t<!-- -->\n\t<p></p>\n</div>'); equal(format('div>p+{<!-- ${0} -->}'), '<div>\n\t<p></p>\n\t<!-- -->\n</div>'); equal(format('div>{<!-- ${0} -->}>p'), '<div>\n\t<!-- <p></p> -->\n</div>'); equal(format('div>{<!-- ${0} -->}*2>p'), '<div>\n\t<!-- <p></p> -->\n\t<!-- <p></p> -->\n</div>'); equal(format('div>{<!-- ${0} -->}>p*2'), '<div>\n\t<!-- \n\t<p></p>\n\t<p></p>\n\t-->\n</div>'); equal(format('div>{<!-- ${0} -->}*2>p*2'), '<div>\n\t<!-- \n\t<p></p>\n\t<p></p>\n\t-->\n\t<!-- \n\t<p></p>\n\t<p></p>\n\t-->\n</div>'); equal(format('div>{<!-- ${0} -->}>b'), '<div>\n\t<!-- <b></b> -->\n</div>'); equal(format('div>{<!-- ${0} -->}>b*2'), '<div>\n\t<!-- <b></b><b></b> -->\n</div>'); equal(format('div>{<!-- ${0} -->}>b*3'), '<div>\n\t<!-- \n\t<b></b>\n\t<b></b>\n\t<b></b>\n\t-->\n</div>'); equal(format('div>{<!-- ${0} -->}', field), '<div><!-- ${1} --></div>'); equal(format('div>{<!-- ${0} -->}>b', field), '<div>\n\t<!-- <b>${1}</b> -->\n</div>'); }); it('self-closing', () => { const xmlStyle = createProfile({ 'output.selfClosingStyle': 'xml' }); const htmlStyle = createProfile({ 'output.selfClosingStyle': 'html' }); const xhtmlStyle = createProfile({ 'output.selfClosingStyle': 'xhtml' }); equal(format('img[src]/', htmlStyle), '<img src="" alt="">'); equal(format('img[src]/', xhtmlStyle), '<img src="" alt="" />'); equal(format('img[src]/', xmlStyle), '<img src="" alt=""/>'); equal(format('div>img[src]/', xhtmlStyle), '<div><img src="" alt="" /></div>'); }); it('boolean attributes', () => { const compact = createProfile({ 'output.compactBoolean': true }); const noCompact = createProfile({ 'output.compactBoolean': false }); equal(format('p[b.]', noCompact), '<p b="b"></p>'); equal(format('p[b.]', compact), '<p b></p>'); equal(format('p[contenteditable]', compact), '<p contenteditable></p>'); equal(format('p[contenteditable]', noCompact), '<p contenteditable="contenteditable"></p>'); equal(format('p[contenteditable=foo]', compact), '<p contenteditable="foo"></p>'); }); it('no formatting', () => { const profile = createProfile({ 'output.format': false }); equal(format('div>p', profile), '<div><p></p></div>'); equal(format('div>{foo}+p+{bar}', profile), '<div>foo<p></p>bar</div>'); equal(format('div>{foo}>p', profile), '<div>foo<p></p></div>'); equal(format('div>{<!-- ${0} -->}>p', profile), '<div><!-- <p></p> --></div>'); }); it('format specific nodes', () => { equal(format('{<!DOCTYPE html>}+html>(head>meta[charset=${charset}]/+title{${1:Document}})+body', field), '<!DOCTYPE html>\n<html>\n<head>\n\t<meta charset="UTF-8">\n\t<title>${2:Document}</title>\n</head>\n<body>\n\t${3}\n</body>\n</html>'); }); it('comment', () => { const opt = createConfig({ options: { 'comment.enabled': true } }); equal(format('ul>li.item', opt), '<ul>\n\t<li class="item"></li>\n\t<!-- /.item -->\n</ul>'); equal(format('div>ul>li.item#foo', opt), '<div>\n\t<ul>\n\t\t<li class="item" id="foo"></li>\n\t\t<!-- /#foo.item -->\n\t</ul>\n</div>'); opt.options['comment.after'] = ' { 
[%ID] }'; equal(format('div>ul>li.item#foo', opt), '<div>\n\t<ul>\n\t\t<li class="item" id="foo"></li> { %foo }\n\t</ul>\n</div>'); }); }); describe('HAML', () => { describe('HAML', () => { const format = (abbr: string, config = defaultConfig) => haml(parse(abbr, config), config); it.only('basic', () => { equal(format('div#header>ul.nav>li[title=test].nav-item*2'), '#header\n\t%ul.nav\n\t\t%li.nav-item(title="test")\n\t\t%li.nav-item(title="test")'); equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'), '#foo.bar(data-n1="v1" title="test" data-n2="v2")'); let profile = createProfile({ compactBoolean: true }); equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled foo="" title="test")/'); let profile = createProfile({ 'output.compactBoolean': true }); profile = createProfile({ compactBoolean: false }); equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled=true foo="" title="test")/'); }); }); }); }); describe('Pug', () => { const format = (abbr: string, config = defaultConfig) => pug(parse(abbr, config), config); it('basic', () => { equal(format('div#header>ul.nav>li[title=test].nav-item*2'), '#header\n\tul.nav\n\t\tli.nav-item(title="test") \n\t\tli.nav-item(title="test") '); equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'), '#foo.bar(data-n1="v1", title="test", data-n2="v2") '); equal(format('input[disabled. foo title=test]'), 'input(type="text", disabled, foo="", title="test")'); // Use closing slash for XML output format equal(format('input[disabled. foo title=test]', createProfile({ 'output.selfClosingStyle': 'xml' })), 'input(type="text", disabled, foo="", title="test")/'); }); it('nodes with text', () => { equal(format('{Text 1}'), 'Text 1'); equal(format('span{Text 1}'), 'span Text 1'); equal(format('span{Text 1}>b{Text 2}'), 'span Text 1\n\tb Text 2'); equal(format('span{Text 1\nText 2}>b{Text 3}'), 'span\n\t| Text 1\n\t| Text 2\n\tb Text 3'); equal(format('div>span{Text 1\nText 2}>b{Text 3}'), 'div\n\tspan\n\t\t| Text 1\n\t\t| Text 2\n\t\tb Text 3'); }); it('generate fields', () => { equal(format('a[href]', field), 'a(href="${1}") ${2}'); equal(format('a[href]*2', field), 'a(href="${1}") ${2}\na(href="${3}") ${4}'); equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}'); equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar'); equal(format('ul>li*2', field), 'ul\n\tli ${1}\n\tli ${2}'); equal(format('div>img[src]/', field), 'div\n\timg(src="${1}", alt="${2}")'); }); }); describe('Slim', () => { const format = (abbr: string, config = defaultConfig) => slim(parse(abbr, config), config); it('basic', () => { equal(format('div#header>ul.nav>li[title=test].nav-item*2'), '#header\n\tul.nav\n\t\tli.nav-item title="test" \n\t\tli.nav-item title="test" '); equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'), '#foo.bar data-n1="v1" title="test" data-n2="v2" '); // const profile = createProfile({ inlineBreak: 0 }); // equal(format('ul>li>span{Text}', profile), 'ul\n\tli: span Text'); // equal(format('ul>li>span{Text}'), 'ul\n\tli\n\t\tspan Text'); // equal(format('ul>li>span{Text}*2', profile), 'ul\n\tli\n\t\tspan Text\n\t\tspan Text'); }); // it.skip('attribute wrappers', () => { // equal(format('input[disabled. foo title=test]'), 'input disabled=true foo="" title="test"'); // equal(format('input[disabled. 
foo title=test]', null, { attributeWrap: 'round' }), // 'input(disabled foo="" title="test")'); // }); it('nodes with text', () => { equal(format('{Text 1}'), 'Text 1'); equal(format('span{Text 1}'), 'span Text 1'); equal(format('span{Text 1}>b{Text 2}'), 'span Text 1\n\tb Text 2'); equal(format('span{Text 1\nText 2}>b{Text 3}'), 'span\n\t| Text 1\n\t| Text 2\n\tb Text 3'); equal(format('div>span{Text 1\nText 2}>b{Text 3}'), 'div\n\tspan\n\t\t| Text 1\n\t\t| Text 2\n\t\tb Text 3'); }); it('generate fields', () => { equal(format('a[href]', field), 'a href="${1}" ${2}'); equal(format('a[href]*2', field), 'a href="${1}" ${2}\na href="${3}" ${4}'); equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}'); equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar'); equal(format('ul>li*2', field), 'ul\n\tli ${1}\n\tli ${2}'); equal(format('div>img[src]/', field), 'div\n\timg src="${1}" alt="${2}"/'); }); }); }); <MSG> Support HAML output <DFF> @@ -146,12 +146,12 @@ describe('Format', () => { describe('HAML', () => { const format = (abbr: string, config = defaultConfig) => haml(parse(abbr, config), config); - it.only('basic', () => { + it('basic', () => { equal(format('div#header>ul.nav>li[title=test].nav-item*2'), - '#header\n\t%ul.nav\n\t\t%li.nav-item(title="test")\n\t\t%li.nav-item(title="test")'); + '#header\n\t%ul.nav\n\t\t%li.nav-item(title="test") \n\t\t%li.nav-item(title="test") '); equal(format('div#foo[data-n1=v1 title=test data-n2=v2].bar'), - '#foo.bar(data-n1="v1" title="test" data-n2="v2")'); + '#foo.bar(data-n1="v1" title="test" data-n2="v2") '); let profile = createProfile({ compactBoolean: true }); equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled foo="" title="test")/'); @@ -159,5 +159,22 @@ describe('Format', () => { profile = createProfile({ compactBoolean: false }); equal(format('input[disabled. foo title=test]/', profile), '%input(type="text" disabled=true foo="" title="test")/'); }); + + it('nodes with text', () => { + equal(format('{Text 1}'), 'Text 1'); + equal(format('span{Text 1}'), '%span Text 1'); + equal(format('span{Text 1}>b{Text 2}'), '%span Text 1\n\t%b Text 2'); + equal(format('span{Text 1\nText 2}>b{Text 3}'), '%span\n\tText 1 |\n\tText 2 |\n\t%b Text 3'); + equal(format('div>span{Text 1\nText 2\nText 123}>b{Text 3}'), '%div\n\t%span\n\t\tText 1 |\n\t\tText 2 |\n\t\tText 123 |\n\t\t%b Text 3'); + }); + + it('generate fields', () => { + equal(format('a[href]', field), '%a(href="${1}") ${2}'); + equal(format('a[href]*2', field), '%a(href="${1}") ${2}\n%a(href="${3}") ${4}'); + equal(format('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}'); + equal(format('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar'); + equal(format('ul>li*2', field), '%ul\n\t%li ${1}\n\t%li ${2}'); + equal(format('div>img[src]/', field), '%div\n\t%img(src="${1}" alt="${2}")/'); + }); }); });
addition_count: 20
commit_subject: Support HAML output
deletion_count: 3
file_extension: .ts
lang: ts
license: mit
repo_name: emmetio/emmet

id: 10071760
    <NME> jquery.meow.js <BEF> (function ($, window) { 'use strict'; // Meow queue var default_meow_area, meows = { queue: {}, add: function (meow) { this.queue[meow.timestamp] = meow; }, get: function (timestamp) { return this.queue[timestamp]; }, remove: function (timestamp) { delete this.queue[timestamp]; }, size: function () { var timestamp, size = 0; for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; } }, // Meow constructor Meow = function (options) { var that = this; this.timestamp = new Date().getTime(); // used to identify this meow and timeout this.hovered = false; // whether mouse is over or not if (typeof default_meow_area === 'undefined' && typeof options.container === 'undefined') { default_meow_area = $(window.document.createElement('div')) .attr({'id': ((new Date()).getTime()), 'class': 'meows'}); $('body').prepend(default_meow_area); delete this.queue[timestamp]; }, size: function () { var size = 0; for (var timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; this.container = $(options.container); } else { this.container = default_meow_area; } if (typeof options.title === 'string') { this.title = options.title; } if (typeof options.message === 'string') { this.message = options.message; } else if (options.message instanceof $) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else { this.message = options.message.text(); } if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') { this.title = options.message.attr('title'); } } if (typeof options.icon === 'string') { this.icon = options.icon; } if (options.sticky) { this.duration = Infinity; } else { this.duration = options.duration || 5000; } // Call callback if it's defined (this = meow object) if (typeof options.beforeCreate === 'function') { options.beforeCreate.call(that); } // Add the meow to the meow area this.container.append($(window.document.createElement('div')) .attr('id', 'meow-' + this.timestamp.toString()) .addClass('meow') .html($(window.document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); this.manifest = $('#meow-' + this.timestamp.toString()); // Add title if it's defined if (typeof this.title === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('h1')).text(this.title) ); } // Add icon if it's defined if (typeof that.icon === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('div')).addClass('icon').html( $(window.document.createElement('img')).attr('src', this.icon) ) ); } // Add close button if the meow isn't uncloseable // TODO: this close button needs to be much prettier if (options.closeable !== false) { this.manifest.find('.inner').prepend( $(window.document.createElement('a')) .addClass('close') .html('&times;') .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); }) ); } this.manifest.bind('mouseenter mouseleave', function (event) { if (event.type === 'mouseleave') { that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out if (that.timestamp + that.duration <= new Date().getTime()) { that.destroy(); } } else { that.hovered = true; that.manifest.addClass('hover'); $(document.createElement('a')) .addClass('close') .html('&times;') .attr('href', 'javascript:;') .click(function (e) { e.preventDefault(); that.destroy(); if 
(typeof meows.get(that.timestamp) !== 'undefined') { // Call callback if it's defined (this = meow DOM element) if (typeof options.onTimeout === 'function') { options.onTimeout.call(that.manifest); } // Don't destroy if user is hovering over meow if (that.hovered !== true && typeof that === 'object') { that.destroy(); } } }, that.duration); } this.destroy = function () { if (that.destroyed !== true) { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); that.destroyed = true; meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { if (default_meow_area instanceof $) { default_meow_area.remove(); default_meow_area = undefined; } if (typeof options.afterDestroyLast === 'function') { options.afterDestroyLast.call(null); this.destroy = function () { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { $('#' + meow_area).remove(); if (typeof options.lastDestroyed === 'function') { options.lastDestroyed.call(null); } } }); }); }; <MSG> JSLinted <DFF> @@ -37,8 +37,9 @@ delete this.queue[timestamp]; }, size: function () { - var size = 0; - for (var timestamp in this.queue) { + var timestamp, + size = 0; + for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; @@ -135,7 +136,7 @@ $(document.createElement('a')) .addClass('close') .html('&times;') - .attr('href', 'javascript:;') + .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); @@ -176,22 +177,22 @@ this.destroy = function () { // Call callback if it's defined (this = meow DOM element) - if (typeof options.beforeDestroy === 'function') { - options.beforeDestroy.call(that.manifest); - } + if (typeof options.beforeDestroy === 'function') { + options.beforeDestroy.call(that.manifest); + } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { - options.afterDestroy.call(null); - } - if (meows.size() <= 0) { - $('#' + meow_area).remove(); - if (typeof options.lastDestroyed === 'function') { - options.lastDestroyed.call(null); - } - } + options.afterDestroy.call(null); + } + if (meows.size() <= 0) { + $('#' + meow_area).remove(); + if (typeof options.lastDestroyed === 'function') { + options.lastDestroyed.call(null); + } + } }); }); };
addition_count: 15
commit_subject: JSLinted
deletion_count: 14
file_extension: .js
lang: meow
license: mit
repo_name: zacstewart/Meow
    10071762
    <NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 1 MINOR = 7 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v2.0.0 <DFF> @@ -1,7 +1,7 @@ # frozen_string_literal: true module Split - MAJOR = 1 - MINOR = 7 + MAJOR = 2 + MINOR = 0 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end
    2
    v2.0.0
    2
    .rb
    rb
    mit
    splitrb/split
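As a quick, illustrative check of what the bumped constants evaluate to (an assumed irb session; it simply requires the gem's version file shown above):

```ruby
require "split/version"

# MAJOR, MINOR and PATCH are joined with dots to form the version string:
[Split::MAJOR, Split::MINOR, Split::PATCH].join(".") # => "2.0.0"
Split::VERSION                                       # => "2.0.0"
```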
    10071765
    <NME> trial.rb <BEF> # frozen_string_literal: true module Split class Trial attr_accessor :goals attr_accessor :experiment attr_writer :metadata def initialize(attrs = {}) self.experiment = attrs.delete(:experiment) self.alternative = attrs.delete(:alternative) self.metadata = attrs.delete(:metadata) self.goals = attrs.delete(:goals) || [] @user = attrs.delete(:user) @options = attrs @alternative_choosen = false end def metadata @metadata ||= experiment.metadata[alternative.name] if experiment.metadata end def alternative @alternative ||= if @experiment.has_winner? @experiment.winner end end def alternative=(alternative) @alternative = if alternative.kind_of?(Split::Alternative) alternative else @experiment.alternatives.find { |a| a.name == alternative } end end def complete!(context = nil) if alternative if Array(goals).empty? alternative.increment_completion else Array(goals).each { |g| alternative.increment_completion(g) } end run_callback context, Split.configuration.on_trial_complete end end # Choose an alternative, add a participant, and save the alternative choice on the user. This # method is guaranteed to only run once, and will skip the alternative choosing process if run # a second time. def choose!(context = nil) @user.cleanup_old_experiments! # Only run the process once return alternative if @alternative_choosen new_participant = @user[@experiment.key].nil? if override_is_alternative? self.alternative = @options[:override] if should_store_alternative? && !@user[@experiment.key] self.alternative.increment_participation end elsif @options[:disabled] || Split.configuration.disabled? self.alternative = @experiment.control elsif @experiment.has_winner? self.alternative = @experiment.winner else cleanup_old_versions if exclude_user? self.alternative = @experiment.control else self.alternative = @user[@experiment.key] if alternative.nil? if @experiment.cohorting_disabled? self.alternative = @experiment.control else self.alternative = @experiment.next_alternative # Increment the number of participants since we are actually choosing a new alternative self.alternative.increment_participation end end end end @user[@experiment.key] = alternative.name unless @experiment.has_winner? || !should_store_alternative? || (new_participant && @experiment.cohorting_disabled?) @alternative_choosen = true run_callback context, Split.configuration.on_trial unless @options[:disabled] || Split.configuration.disabled? || (new_participant && @experiment.cohorting_disabled?) alternative end private def run_callback(context, callback_name) context.send(callback_name, self) if callback_name && context.respond_to?(callback_name, true) end def override_is_alternative? @experiment.alternatives.map(&:name).include?(@options[:override]) end def should_store_alternative? if @options[:override] || @options[:disabled] Split.configuration.store_override else !exclude_user? end end def cleanup_old_versions if @experiment.version > 0 @user.cleanup_old_versions!(@experiment) end end def exclude_user? @options[:exclude] || @experiment.start_time.nil? || @user.max_experiments_reached?(@experiment.key) end end end <MSG> disable cohorting revision <DFF> @@ -85,9 +85,11 @@ module Split end end - @user[@experiment.key] = alternative.name unless @experiment.has_winner? || !should_store_alternative? || (new_participant && @experiment.cohorting_disabled?) + new_participant_and_cohorting_disabled = new_participant && @experiment.cohorting_disabled?
+ + @user[@experiment.key] = alternative.name unless @experiment.has_winner? || !should_store_alternative? || new_participant_and_cohorting_disabled @alternative_choosen = true - run_callback context, Split.configuration.on_trial unless @options[:disabled] || Split.configuration.disabled? || (new_participant && @experiment.cohorting_disabled?) + run_callback context, Split.configuration.on_trial unless @options[:disabled] || Split.configuration.disabled? || new_participant_and_cohorting_disabled alternative end
    4
    disable cohorting revision
    2
    .rb
    rb
    mit
    splitrb/split
    10071768
    <NME> helper.rb <BEF> module Split module Helper def ab_test(experiment_name, *alternatives, &block) experiment = Split::Experiment.find_or_create(experiment_name, *alternatives) if experiment.winner ret = experiment.winner.name module_function def ab_test(metric_descriptor, control = nil, *alternatives) begin experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives) alternative = if Split.configuration.enabled && !exclude_visitor? experiment.save raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil? trial = Trial.new(user: ab_user, experiment: experiment, override: override_alternative(experiment.name), exclude: exclude_visitor?, disabled: split_generically_disabled?) alt = trial.choose!(self) alt ? alt.name : nil else end end ret = yield(ret) if block_given? ret end def finished(experiment_name) else alternative end end def reset!(experiment) ab_user.delete(experiment.key) end def finish_experiment(experiment, options = { reset: true }) return false if active_experiments[experiment.name].nil? return true if experiment.has_winner? should_reset = experiment.resettable? && options[:reset] if ab_user[experiment.finished_key] && !should_reset true else alternative_name = ab_user[experiment.key] trial = Trial.new( user: ab_user, experiment: experiment, alternative: alternative_name, goals: options[:goals], ) trial.complete!(self) if should_reset reset!(experiment) else ab_user[experiment.finished_key] = true end end end def ab_finished(metric_descriptor, options = { reset: true }) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, goals = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? experiments.each do |experiment| next if override_present?(experiment.key) finish_experiment(experiment, options.merge(goals: goals)) end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_record_extra_info(metric_descriptor, key, value = 1) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, _ = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? 
experiments.each do |experiment| alternative_name = ab_user[experiment.key] if alternative_name alternative = experiment.alternatives.find { |alt| alt.name == alternative_name } alternative.record_extra_info(key, value) if alternative end end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_active_experiments ab_user.active_experiments rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def override_present?(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative_by_params(experiment_name) defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name] end def override_alternative_by_cookies(experiment_name) return unless defined?(request) if request.cookies && request.cookies.key?("split_override") experiments = JSON.parse(request.cookies["split_override"]) rescue {} experiments[experiment_name] end end def split_generically_disabled? defined?(params) && params["SPLIT_DISABLE"] end def ab_user @ab_user ||= User.new(self) end def exclude_visitor? defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?) end def is_robot? defined?(request) && request.user_agent =~ Split.configuration.robot_regex end def is_preview? defined?(request) && defined?(request.headers) && request.headers["x-purpose"] == "preview" end def is_ignored_ip_address? return false if Split.configuration.ignore_ip_addresses.empty? Split.configuration.ignore_ip_addresses.each do |ip| return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip)) end false end def active_experiments ab_user.active_experiments end def normalize_metric(metric_descriptor) if Hash === metric_descriptor experiment_name = metric_descriptor.keys.first goals = Array(metric_descriptor.values.first) else experiment_name = metric_descriptor goals = [] end return experiment_name, goals end def control_variable(control) Hash === control ? control.keys.first.to_s : control.to_s end end end <MSG> ab_test can now be passed a block in a rails view as well, closes #4 <DFF> @@ -1,6 +1,6 @@ module Split module Helper - def ab_test(experiment_name, *alternatives, &block) + def ab_test(experiment_name, *alternatives) experiment = Split::Experiment.find_or_create(experiment_name, *alternatives) if experiment.winner ret = experiment.winner.name @@ -21,8 +21,17 @@ module Split end end - ret = yield(ret) if block_given? - ret + if block_given? + if defined?(capture) # a block in a rails view + block = Proc.new { yield(ret) } + concat(capture(ret, &block)) + false + else + yield(ret) + end + else + ret + end end def finished(experiment_name)
    12
    ab_test can now be passed a block in a rails view as well, closes #4
    3
    .rb
    rb
    mit
    splitrb/split
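To make the change above concrete, here is a hypothetical view-side usage it enables; the `signup_button` experiment and its alternatives are invented for this sketch. Inside a Rails view `capture` is defined, so the new branch wraps the block with `capture`/`concat` and the block's markup is written into the template:

```erb
<%# Hypothetical experiment: the block runs once with the chosen alternative %>
<% ab_test(:signup_button, "red", "green") do |color| %>
  <button class="signup-<%= color %>">Sign up</button>
<% end %>
```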
    10071771
    <NME> demo.js <BEF> ADDFILE <MSG> Rename elements to components <DFF> @@ -0,0 +1,2 @@ +angular + .module('demo', ['semantic.ui.components.divider']);
    2
    Rename elements to components
    0
    .js
    js
    mit
    Semantic-Org/Semantic-UI-Angular
    10071774
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3). Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when Rails starts up; as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives; if a user has already seen that test, they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or handle any other case-based logic. `ab_finished` is used to mark the completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best.
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. (A worked sketch of this calculation appears below, after the Overriding alternatives section.) As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the URL. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a URL such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split.
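Returning to the z test described under Statistical Validity above, here is a minimal worked sketch of the calculation. It is illustrative only: `z_score` is a hypothetical helper, not part of Split's API, and Split's dashboard may compute the statistic slightly differently.

```ruby
# Two-proportion z test (illustrative sketch, not Split's internal code).
# Each branch's conversion rate is conversions / participants; the z score
# is the difference in rates divided by its standard error.
def z_score(conversions_a, participants_a, conversions_b, participants_b)
  p_a = conversions_a.to_f / participants_a
  p_b = conversions_b.to_f / participants_b
  standard_error = Math.sqrt(p_a * (1 - p_a) / participants_a +
                             p_b * (1 - p_b) / participants_b)
  (p_a - p_b) / standard_error
end

# |z| of roughly 1.65, 1.96 and 2.58 correspond to the 90%, 95% and 99%
# significance levels Split reports.
z_score(120, 1000, 150, 1000) # => about -1.97
```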
### RSpec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. If you would like to start a new test a while after the deploy, you can do so by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized. ### Reset after completion When a user completes a test, their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed or is over and a winner has been chosen. This allows a user to enroll in any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc., without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.
To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API. #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion.
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner has been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then add this to config/routes.rb: ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file): ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit.
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this is available [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/). ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if not specified by the configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash.
This hash can control your experiment's alternatives, weights, algorithm, and whether the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings. #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby metric = Split::Metric.new(:my_metric) metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel.
THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments: ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal: ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (a proc), for example to log these errors via `Rails.logger`. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (for example, in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:

- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(experiment: experiment)

# run trial
trial.choose!

# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file (see the sketch after the next snippet).

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
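To set an algorithm for a single experiment instead, reuse the `algorithm` key of the experiments hash shown under "Experiment configuration"; the experiment name below is hypothetical:

```ruby
Split.configure do |config|
  config.experiments = {
    homepage_cta: { # hypothetical experiment name
      alternatives: ["green", "orange"],
      algorithm: 'Split::Algorithms::Whiplash'
    }
  }
end
```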
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>

## Sponsors

Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

```bash
# Start a Redis server in another tab.
redis-server

bundle
rake spec
```

### A Note on Patches and Pull Requests

* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) [![Build Status](https://secure.travis-ci.org/andrew/split.svg?branch=master)](http://travis-ci.org/andrew/split) [![Dependency Status](https://gemnasium.com/andrew/split.svg)](https://gemnasium.com/andrew/split) [![Code Climate](https://codeclimate.com/github/andrew/split.svg)](https://codeclimate.com/github/andrew/split) [![Coverage Status](http://img.shields.io/coveralls/andrew/split.svg)](https://coveralls.io/r/andrew/split) ## Requirements ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. 
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. 
THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. 
```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Fix travis badges <DFF> @@ -7,10 +7,10 @@ Split is heavily inspired by the Abingo and Vanity rails ab testing plugins and Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. 
[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) -[![Build Status](https://secure.travis-ci.org/andrew/split.svg?branch=master)](http://travis-ci.org/andrew/split) -[![Dependency Status](https://gemnasium.com/andrew/split.svg)](https://gemnasium.com/andrew/split) -[![Code Climate](https://codeclimate.com/github/andrew/split.svg)](https://codeclimate.com/github/andrew/split) -[![Coverage Status](http://img.shields.io/coveralls/andrew/split.svg)](https://coveralls.io/r/andrew/split) +[![Build Status](https://secure.travis-ci.org/splitrb/split.svg?branch=master)](http://travis-ci.org/splitrb/split) +[![Dependency Status](https://gemnasium.com/splitrb/split.svg)](https://gemnasium.com/splitrb/split) +[![Code Climate](https://codeclimate.com/github/splitrb/split.svg)](https://codeclimate.com/github/splitrb/split) +[![Coverage Status](http://img.shields.io/coveralls/splitrb/split.svg)](https://coveralls.io/r/splitrb/split) ## Requirements
    4
    Fix travis badges
    4
    .md
    md
    mit
    splitrb/split
    10071776
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) [![Build Status](https://secure.travis-ci.org/andrew/split.svg?branch=master)](http://travis-ci.org/andrew/split) [![Dependency Status](https://gemnasium.com/andrew/split.svg)](https://gemnasium.com/andrew/split) [![Code Climate](https://codeclimate.com/github/andrew/split.svg)](https://codeclimate.com/github/andrew/split) [![Coverage Status](http://img.shields.io/coveralls/andrew/split.svg)](https://coveralls.io/r/andrew/split) ## Requirements ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. 
## Statistical Validity

Split has two options for you to use to determine which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```
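For intuition, the first option above corresponds to the standard pooled two-proportion z statistic. The following is an illustrative sketch only, not Split's internal implementation:

```ruby
# Illustrative only: pooled two-proportion z statistic.
# p1/p2 are conversion rates, n1/n2 are participant counts per branch.
def z_score(p1, n1, p2, n2)
  p_pool = (p1 * n1 + p2 * n2) / (n1 + n2).to_f
  se = Math.sqrt(p_pool * (1 - p_pool) * (1.0 / n1 + 1.0 / n2))
  (p2 - p1) / se
end

z_score(0.10, 1000, 0.12, 1000) # => ~1.43, short of the ~1.645 needed for 90% (two-sided)
```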
## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will show the new alternative to only 1 in 10 visitors; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.
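If you do want overridden alternatives to be recorded, a minimal sketch of enabling the `store_override` option mentioned above:

```ruby
Split.configure do |config|
  # Persist the alternative forced via ?ab_test[...]=... and count it
  # towards the experiment's results.
  config.store_override = true
end
```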
### Rspec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper

  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default new A/B tests will be active right after deployment. If you would like to start a new test some time after the deploy, you can do so by setting the `start_manually` configuration option to `true`. With this option, tests won't start right after deploy; they start only after you press the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started by pressing the `Start` button again once it has been re-initialized.
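A minimal sketch of turning this on; the same option appears (commented out) in the Configuration section below:

```ruby
Split.configure do |config|
  # New experiments stay inactive until started from the dashboard.
  config.start_manually = true
end
```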
### Reset after completion

When a user completes a test their session is reset so that they may start the test again in the future.

To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default Split automatically resets the experiment whenever it detects that the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names or the metadata about an experiment, without resetting everything.
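A minimal sketch, mirroring the option shown in the Configuration section below:

```ruby
Split.configure do |config|
  # Keep collected data even when an experiment's configuration changes.
  config.reset_manually = true
end
```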
### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments. Calling `ab_test(<other experiments>)` will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" }

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any compatible API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for the user key (if a user is in multiple experiments, the most recent update will reset the TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
    lookup_by: -> (context) { context.send(:current_user).try(:id) },
    expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
      logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
      logged_in_adapter: redis_adapter,
      logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as the existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```
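To make "the same API" concrete, here is a hypothetical skeleton. It assumes the `[]`/`[]=`/`delete`/`keys` interface used by the built-in adapters; check `Split::Persistence::SessionAdapter` for the authoritative shape:

```ruby
# Hypothetical in-memory adapter, for illustration only. Data lives in the
# process and is lost on restart, so don't use this in production.
class InMemoryAdapter
  STORE = Hash.new { |hash, key| hash[key] = {} }

  # Adapters are instantiated per request with the controller context.
  # `current_user_id` is an assumed method on your controllers.
  def initialize(context)
    @store = STORE[context.current_user_id.to_s]
  end

  def [](key)
    @store[key]
  end

  def []=(key, value)
    @store[key] = value
  end

  def delete(key)
    @store.delete(key)
  end

  def keys
    @store.keys
  end
end

Split.configure do |config|
  config.persistence = InMemoryAdapter
end
```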
### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.

For example:

``` ruby
Split.configure do |config|
  config.on_trial = :log_trial # run on every trial
  config.on_trial_choose = :log_trial_choose # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

``` ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

``` ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

``` ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # before experiment reset or deleted
  config.on_before_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_before_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # after experiment winner had been set
  config.on_experiment_winner_choose = -> (experiment) {
    # Do something on winner choose
  }
end
```

## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"      => Your::App.new,
  "/split" => Split::Dashboard.new
```

However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then adding this to config/routes.rb

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file)

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```

You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate! # authenticate if not already
  # or even check any other condition such as
  request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  #config.start_manually = false ## new test will have to be started manually from the admin panel. default false
  #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```

### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    :my_second_experiment => {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => {"text" => "Have a fantastic day"},
        "b" => {"text" => "Don't get hit by a bus"}
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <small><%= meta['text'] %></small>
<% end %>
```

The keys used in metadata should be Strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object.

```ruby
Split::Metric.new(:my_metric)
Split::Metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  :button_color_experiment => {
    :alternatives => ["blue", "green"],
    :combined_experiments => ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper

  ```
  helpers Split::CombinedExperimentsHelper
  ```
### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.
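A minimal sketch, reusing the exact options shown in the Configuration section above:

```ruby
Split.configure do |config|
  # Serve the first alternative instead of raising when Redis is unavailable.
  config.db_failover = true
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```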
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
  - `Split::ExperimentCatalog.find`
  - `Split::Experiment.start_time`
  - `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "block") the algorithm will choose a random alternative from those minimum participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```

## Extensions

  - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
  - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
  - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
  - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
  - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
  - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities.
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Fix travis badges <DFF> @@ -7,10 +7,10 @@ Split is heavily inspired by the Abingo and Vanity rails ab testing plugins and Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. 
 [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)
-[![Build Status](https://secure.travis-ci.org/andrew/split.svg?branch=master)](http://travis-ci.org/andrew/split)
-[![Dependency Status](https://gemnasium.com/andrew/split.svg)](https://gemnasium.com/andrew/split)
-[![Code Climate](https://codeclimate.com/github/andrew/split.svg)](https://codeclimate.com/github/andrew/split)
-[![Coverage Status](http://img.shields.io/coveralls/andrew/split.svg)](https://coveralls.io/r/andrew/split)
+[![Build Status](https://secure.travis-ci.org/splitrb/split.svg?branch=master)](http://travis-ci.org/splitrb/split)
+[![Dependency Status](https://gemnasium.com/splitrb/split.svg)](https://gemnasium.com/splitrb/split)
+[![Code Climate](https://codeclimate.com/github/splitrb/split.svg)](https://codeclimate.com/github/splitrb/split)
+[![Coverage Status](http://img.shields.io/coveralls/splitrb/split.svg)](https://coveralls.io/r/splitrb/split)
 
 ## Requirements
    4
    Fix travis badges
    4
    .md
    md
    mit
    splitrb/split
    10071777
<NME> .jshintrc
<BEF> ADDFILE
<MSG> chore(.jshintrc): Added .jshintrc
<DFF> @@ -0,0 +1,30 @@
+{
+  "browser": true,
+  "bitwise": true,
+  "curly": true,
+  "eqeqeq": true,
+  "immed": true,
+  "indent": 2,
+  "newcap": true,
+  "noarg": true,
+  "regexp": true,
+  "undef": true,
+  "unused": true,
+  "maxlen": 120,
+  "strict": true,
+  "trailing": true,
+  "smarttabs": true,
+  "globals": {
+    "angular": false,
+    "describe": false,
+    "it": false,
+    "beforeEach": false,
+    "afterEach": false,
+    "inject": false,
+    "module": false,
+    "browser": false,
+    "expect": false,
+    "_": false,
+    "$": false
+  }
+}
\ No newline at end of file
    30
    chore(.jshintrc): Added .jshintrc
    0
    jshintrc
    mit
    Semantic-Org/Semantic-UI-Angular
    10071780
<NME> README.md
<BEF> Semantic-UI-Angular
===================

[![Join the chat at https://gitter.im/Semantic-Org/Semantic-UI-Angular](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Semantic-Org/Semantic-UI-Angular?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular.svg)](https://travis-ci.org/Semantic-Org/Semantic-UI-Angular)
[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)
[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)

Status
------
Working to open the first release.

Current progress:
-----------------
At this moment we have following directives:

 - sm-button;
 - sm-checkbox;
 - sm-divider;
 - sm-radio-group and sm-radio-button;
 - sm-rating.

## To do:
All tasks for the release of the first release are the issues. https://github.com/Semantic-Org/Semantic-UI-Angular/issues

<MSG> docs(README): Update README.md
<DFF> @@ -6,20 +6,42 @@ Semantic-UI-Angular
 [![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular)
 [![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies)
 
+**Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components.
+As soon as Angular 2 will get better animations support, we will consider creating (or merging some existing project if there will be one) Angular 2 components as well.
+We've decided to use TypeScript as a step to Angular 2 friendly environment.
+
 Status
 ------
-Working to open the first release.
+**Work in progress**
+
+We are working on setting up proper environment, contribution guidelines and everything else for comfortable community contributions.
+Once we release first `alpha.0` we are happy to get community help.
+
+
+Support
+-------
+We support AngularJS 1.4.8 version.
+
+
+Building Semantic-UI-Angular
+----------------------------
+You have to have `nodejs` installed before running following commands.
 
-Current progress:
------------------
-At this moment we have following directives:
+```
+npm install
+npm run build
+```
 
- - sm-button;
- - sm-checkbox;
- - sm-divider;
- - sm-radio-group and sm-radio-button;
- - sm-rating.
+The distribution packages will be stored in `dist` folder.
 
-## To do:
-All tasks for the release of the first release are the issues. https://github.com/Semantic-Org/Semantic-UI-Angular/issues
+Running tests
+-------------
+Single run:
+```
+npm test
+```
+Dev mode:
+```
+npm run test-dev
+```
    33
    docs(README): Update README.md
    11
    .md
    md
    mit
    Semantic-Org/Semantic-UI-Angular
    10071783
    <NME> index.ts <BEF> import abbreviation, { CSSAbbreviation, CSSProperty, CSSValue, Literal, Value, Field, FunctionCall } from '@emmetio/css-abbreviation'; import { Config, SnippetsMap } from '../config'; import createSnippet, { CSSSnippet, nest, getKeywords, CSSSnippetType, CSSSnippetRaw, CSSSnippetProperty, CSSKeywordRef } from './snippets'; import calculateScore from './score'; type MatchInput = CSSSnippet | CSSKeywordRef; /** * Parses given Emmet abbreviation into a final abbreviation tree with all /** Include all possible snippets in match */ Global = '@@global', /** Include raw snippets only (e.g. no properties) in abbreviation match */ Section = '@@section', abbr = abbreviation(abbr); } // Run abbreviation resolve in two passes: // 1. Map each node to snippets, which are abbreviations as well. A single snippet // may produce multiple nodes // 2. Transform every resolved node // walk(abbr, snippets, config); // walk(abbr, transform, config); return abbr; } /** * Converts given raw snippets into internal snippets representation */ if (config.cache) { config.cache.stylesheetSnippets = snippets; } if (typeof abbr === 'string') { abbr = abbreviation(abbr, { value: isValueScope(config) }); } const filteredSnippets = getSnippetsForScope(snippets, config); * keyword aliases from node value */ function resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty { // TODO implement // if (config.context) { // // Resolve as value of given CSS property // return resolveAsPropertyValue(node, snippets.find(s => s.property === config.context), config); // } const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']); if (!snippet) { // Edge case: `!important` snippet return node.important ? setNodeAsText(node, '!important') : node; } return snippet.type === CSSSnippetType.Property ? resolveAsProperty(node, snippet, config) : resolveAsSnippet(node, snippet); } /** function resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty { if (!resolveGradient(node, config)) { const score = config.options['stylesheet.fuzzySearchMinScore']; const abbr = node.name!; node.name = snippet.property; // Resolve keyword shortcuts const keywords = getKeywords(snippet); if (!node.value.length) { // No value defined, try to resolve unmatched part as a keyword alias const kw = findBestMatch(getUnmatchedPart(abbr, snippet.key), keywords); if (kw) { node.value = snippet.value[kw.index]!; } else if (snippet.value.length) { const defaultValue = snippet.value[0]!; node.value = defaultValue.some(hasField) ? defaultValue : defaultValue.map(n => wrapWithField(n)); } } else { // replace keyword aliases in current node value for (let i = 0, token; i < node.value.value.length; i++) { token = node.value.value[i]; if (token === '!') { token = `${!i ? 
'${1} ' : ''}!important`; } else if (isKeyword(token)) { token = findBestMatch(token.value, keywords) || findBestMatch(token.value, globalKeywords) || token; } else if (isNumericValue(token)) { token = resolveNumericValue(node.name, token, formatOptions); } node.value.value[i] = token; } } // Resolve numeric values for CSS properties only resolveNumericValue(node, config); } return node; } /** * Resolves CSS gradient shortcut from given property, if possible */ function resolveGradient(node: CSSProperty, config: Config): boolean { * Resolves given parsed abbreviation node as property value of given `snippet`: * tries to find best matching keyword from CSS snippet */ function resolveAsPropertyValue(node: CSSProperty, snippet: CSSSnippet, config: Config): CSSProperty { // Possible resolved result for CSS property: // * matched snippet keyword // * color (starts with #) // Everything else should result the same as input abbreviation let keywords = config.options['stylesheet.keywords'].slice(); if (snippet) { keywords = keywords.concat(getKeywords(snippet)); } const values = [node.name].concat(node.value.value) .filter(Boolean) .map(value => { if (typeof value === 'string' || value.type === 'keyword') { value = String(value); return findBestMatch(value, keywords, null, config.fuzzySearchMinScore) || value; } return value; }); node.name = null; node.value.value = values; return node; } type: 'FunctionCall', name: 'linear-gradient', arguments: [cssValue(field(0, ''))] }; } else { gradientFn = { ...gradientFn, name: 'linear-gradient' }; } if (!config.context) { node.name = 'background-image'; } node.value = [cssValue(gradientFn)]; return true; } return false; } /** * Resolves given parsed abbreviation node as CSS property */ function resolveAsProperty(node: CSSProperty, snippet: CSSSnippetProperty, config: Config): CSSProperty { const abbr = node.name!; // Check for unmatched part of abbreviation // For example, in `dib` abbreviation the matched part is `d` and `ib` should // be considered as inline value. If unmatched fragment exists, we should check // if it matches actual value of snippet. 
If either explicit value is specified // or unmatched fragment did not resolve to a keyword, we should consider // matched snippet as invalid const inlineValue = getUnmatchedPart(abbr, snippet.key); } function getScoringPart(item: MatchInput): string { return (item as CSSKeywordRef).keyword || (item as CSSSnippet).key; } node.value.push(cssValue(kw)); } node.name = snippet.property; if (node.value.length) { // Replace keyword alias from current abbreviation node with matched keyword resolveValueKeywords(node, config, snippet); } else if (snippet.value.length) { const defaultValue = snippet.value[0]!; // https://github.com/emmetio/emmet/issues/558 // We should auto-select inserted value only if there’s multiple value // choice return ''; } /** * Check if given CSS value token is a keyword * @param {*} token * @return {Boolean} */ function isKeyword(token) { return tokenTypeOf(token, 'keyword'); } /** * Check if given CSS value token is a numeric value * @param {*} token * @return {Boolean} */ function isNumericValue(token) { return tokenTypeOf(token, 'numeric'); } function tokenTypeOf(token, type) { return token && typeof token === 'object' && token.type === type; } /** * Resolves numeric value for given CSS property * @param {String} property CSS property name * @param {NumericValue} token CSS numeric value token * @param {Object} formatOptions Formatting options for units * @return {NumericValue} */ function resolveNumericValue(property, token, formatOptions) { if (token.unit) { token.unit = formatOptions.unitAliases[token.unit] || token.unit; } else if (token.value !== 0 && unitlessProperties.indexOf(property) === -1) { // use `px` for integers, `em` for floats // NB: num|0 is a quick alternative to Math.round(0) token.unit = token.value === (token.value | 0) ? formatOptions.intUnit : formatOptions.floatUnit; } return token; } /** return node; } /** * Finds best matching item from `items` array * @param abbr Abbreviation to match * @param items List of items for match * @param minScore The minimum score the best matched item should have to be a valid match. */ export function findBestMatch<T extends MatchInput>(abbr: string, items: T[], minScore = 0, partialMatch = false): T | null { let matchedItem: T | null = null; let maxScore = 0; for (const item of items) { const score = calculateScore(abbr, getScoringPart(item), partialMatch); if (score === 1) { // direct hit, no need to look further return item; } if (score && score >= maxScore) { maxScore = score; matchedItem = item; } } return maxScore >= minScore ? matchedItem : null; } function getScoringPart(item: MatchInput): string { return typeof item === 'string' ? item : item.key; } /** * Returns a part of `abbr` that wasn’t directly matched against `str`. 
* For example, if abbreviation `poas` is matched against `position`, * the unmatched part will be `as` since `a` wasn’t found in string stream */ function getUnmatchedPart(abbr: string, str: string): string { for (let i = 0, lastPos = 0; i < abbr.length; i++) { lastPos = str.indexOf(abbr[i], lastPos); if (lastPos === -1) { return abbr.slice(i); } lastPos++; } return ''; } /** * Resolves given keyword shorthand into matched snippet keyword or global keyword, * if possible */ function resolveKeyword(kw: string, config: Config, snippet?: CSSSnippetProperty, minScore?: number): Literal | FunctionCall | null { let ref: string | null; if (snippet) { if (ref = findBestMatch(kw, Object.keys(snippet.keywords), minScore)) { return snippet.keywords[ref]; } for (const dep of snippet.dependencies) { if (ref = findBestMatch(kw, Object.keys(dep.keywords), minScore)) { return dep.keywords[ref]; } } } if (ref = findBestMatch(kw, config.options['stylesheet.keywords'], minScore)) { return literal(ref); } return null; } /** * Resolves numeric values in given abbreviation node */ function resolveNumericValue(node: CSSProperty, config: Config) { const aliases = config.options['stylesheet.unitAliases']; const unitless = config.options['stylesheet.unitless']; for (const v of node.value) { for (const t of v.value) { if (t.type === 'NumberValue') { if (t.unit) { t.unit = aliases[t.unit] || t.unit; } else if (t.value !== 0 && !unitless.includes(node.name!)) { t.unit = t.rawValue.includes('.') ? config.options['stylesheet.floatUnit'] : config.options['stylesheet.intUnit']; } } } } } /** * Constructs CSS value token */ function cssValue(...args: Value[]): CSSValue { return { type: 'CSSValue', value: args }; } /** * Constructs literal token */ function literal(value: string): Literal { return { type: 'Literal', value }; } /** * Constructs field token */ function field(index: number, name: string): Field { return { type: 'Field', index, name }; } /** * Check if given value contains fields */ function hasField(value: CSSValue): boolean { for (const v of value.value) { if (v.type === 'Field' || (v.type === 'FunctionCall' && v.arguments.some(hasField))) { return true; } } return false; } interface WrapState { index: number; } /** * Wraps tokens of given abbreviation with fields */ function wrapWithField(node: CSSValue, config: Config, state: WrapState = { index: 1 }): CSSValue { let value: Value[] = []; for (const v of node.value) { switch (v.type) { case 'ColorValue': value.push(field(state.index++, color(v, config.options['stylesheet.shortHex']))); break; case 'Literal': value.push(field(state.index++, v.value)); break; case 'NumberValue': value.push(field(state.index++, `${v.value}${v.unit}`)); break; case 'StringValue': const q = v.quote === 'single' ? 
'\'' : '"'; value.push(field(state.index++, q + v.value + q)); break; case 'FunctionCall': value.push(field(state.index++, v.name), literal('(')); for (let i = 0, il = v.arguments.length; i < il; i++) { value = value.concat(wrapWithField(v.arguments[i], config, state).value); if (i !== il - 1) { value.push(literal(', ')); } } value.push(literal(')')); break; default: value.push(v); } } return {...node, value }; } /** * Check if abbreviation should be expanded in CSS value context */ function isValueScope(config: Config): boolean { if (config.context) { return config.context.name === CSSAbbreviationScope.Value || !config.context.name.startsWith('@@'); } return false; } /** * Returns snippets for given scope */ function getSnippetsForScope(snippets: CSSSnippet[], config: Config): CSSSnippet[] { if (config.context) { if (config.context.name === CSSAbbreviationScope.Section) { return snippets.filter(s => s.type === CSSSnippetType.Raw); } if (config.context.name === CSSAbbreviationScope.Property) { return snippets.filter(s => s.type === CSSSnippetType.Property); } } return snippets; } <MSG> Working on stylesheet resolving and output <DFF> @@ -3,7 +3,7 @@ import { Config, SnippetsMap } from '../config'; import createSnippet, { CSSSnippet, nest, getKeywords, CSSSnippetType, CSSSnippetRaw, CSSSnippetProperty, CSSKeywordRef } from './snippets'; import calculateScore from './score'; -type MatchInput = CSSSnippet | CSSKeywordRef; +type MatchInput = CSSSnippet | CSSKeywordRef | string; /** * Parses given Emmet abbreviation into a final abbreviation tree with all @@ -14,15 +14,15 @@ export default function parse(abbr: string | CSSAbbreviation, config: Config, sn abbr = abbreviation(abbr); } - // Run abbreviation resolve in two passes: - // 1. Map each node to snippets, which are abbreviations as well. A single snippet - // may produce multiple nodes - // 2. Transform every resolved node - // walk(abbr, snippets, config); - // walk(abbr, transform, config); + for (const node of abbr) { + resolveNode(node, snippets, config); + } + return abbr; } +export { default as stringify } from './format'; + /** * Converts given raw snippets into internal snippets representation */ @@ -40,22 +40,25 @@ export function convertSnippets(snippets: SnippetsMap): CSSSnippet[] { * keyword aliases from node value */ function resolveNode(node: CSSProperty, snippets: CSSSnippet[], config: Config): CSSProperty { - // TODO implement - // if (config.context) { - // // Resolve as value of given CSS property - // return resolveAsPropertyValue(node, snippets.find(s => s.property === config.context), config); - // } - - const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']); + if (config.context) { + // Resolve as value of given CSS property + const snippet = snippets.find(s => s.type === CSSSnippetType.Property && s.property === config.context) as CSSSnippetProperty | undefined; + resolveAsPropertyValue(node, config, snippet); + } else { + const snippet = findBestMatch(node.name!, snippets, config.options['stylesheet.fuzzySearchMinScore']); - if (!snippet) { - // Edge case: `!important` snippet - return node.important ? setNodeAsText(node, '!important') : node; + if (snippet) { + if (snippet.type === CSSSnippetType.Property) { + resolveAsProperty(node, snippet, config); + } else { + resolveAsSnippet(node, snippet); + } + } } - return snippet.type === CSSSnippetType.Property - ? 
resolveAsProperty(node, snippet, config) - : resolveAsSnippet(node, snippet); + resolveNumericValue(node, config); + + return node; } /** @@ -65,36 +68,21 @@ function resolveAsProperty(node: CSSProperty, snippet: CSSSnippetProperty, confi const abbr = node.name!; node.name = snippet.property; - // Resolve keyword shortcuts - const keywords = getKeywords(snippet); - if (!node.value.length) { - // No value defined, try to resolve unmatched part as a keyword alias - const kw = findBestMatch(getUnmatchedPart(abbr, snippet.key), keywords); - if (kw) { - node.value = snippet.value[kw.index]!; - } else if (snippet.value.length) { + // No value defined in abbreviation node, try to resolve unmatched part + // as a keyword alias + if (!resolveSnippetKeyword(node, getUnmatchedPart(abbr, snippet.key), snippet) && snippet.value.length) { const defaultValue = snippet.value[0]!; node.value = defaultValue.some(hasField) ? defaultValue : defaultValue.map(n => wrapWithField(n)); } } else { - // replace keyword aliases in current node value - for (let i = 0, token; i < node.value.value.length; i++) { - token = node.value.value[i]; - - if (token === '!') { - token = `${!i ? '${1} ' : ''}!important`; - } else if (isKeyword(token)) { - token = findBestMatch(token.value, keywords) - || findBestMatch(token.value, globalKeywords) - || token; - } else if (isNumericValue(token)) { - token = resolveNumericValue(node.name, token, formatOptions); - } - - node.value.value[i] = token; + // Replace keyword alias from current abbreviation node with matched keyword + const kw = getSingleKeyword(node); + if (kw) { + resolveSnippetKeyword(node, kw.value, snippet) + || resolveGlobalKeyword(node, kw.value, config); } } @@ -112,30 +100,13 @@ function resolveAsSnippet(node: CSSProperty, snippet: CSSSnippetRaw): CSSPropert * Resolves given parsed abbreviation node as property value of given `snippet`: * tries to find best matching keyword from CSS snippet */ -function resolveAsPropertyValue(node: CSSProperty, snippet: CSSSnippet, config: Config): CSSProperty { - // Possible resolved result for CSS property: - // * matched snippet keyword - // * color (starts with #) - // Everything else should result the same as input abbreviation - let keywords = config.options['stylesheet.keywords'].slice(); - if (snippet) { - keywords = keywords.concat(getKeywords(snippet)); +function resolveAsPropertyValue(node: CSSProperty, config: Config, snippet?: CSSSnippetProperty): CSSProperty { + const kw = getSingleKeyword(node); + if (kw) { + const score = config.options['stylesheet.fuzzySearchMinScore']; + snippet && resolveSnippetKeyword(node, kw.value, snippet, score) + || resolveGlobalKeyword(node, kw.value, config, score); } - - const values = [node.name].concat(node.value.value) - .filter(Boolean) - .map(value => { - if (typeof value === 'string' || value.type === 'keyword') { - value = String(value); - return findBestMatch(value, keywords, null, config.fuzzySearchMinScore) || value; - } - - return value; - }); - - node.name = null; - node.value.value = values; - return node; } @@ -176,6 +147,9 @@ export function findBestMatch<T extends MatchInput>(abbr: string, items: T[], mi } function getScoringPart(item: MatchInput): string { + if (typeof item === 'string') { + return item; + } return (item as CSSKeywordRef).keyword || (item as CSSSnippet).key; } @@ -196,45 +170,66 @@ function getUnmatchedPart(abbr: string, str: string): string { return ''; } +function resolveSnippetKeyword(node: CSSProperty, kw: string, snippet: CSSSnippetProperty, 
minScore?: number): boolean { + const keywords = getKeywords(snippet); + const ref = findBestMatch(kw, keywords, minScore); + + if (ref) { + node.value = snippet.value[ref.index]!; + return true; + } + + return false; +} + /** - * Check if given CSS value token is a keyword - * @param {*} token - * @return {Boolean} + * Tries to resolve node’s value with matched global keyword from given `kw` alias + * @returns `true` if value was successfully resolved */ -function isKeyword(token) { - return tokenTypeOf(token, 'keyword'); +function resolveGlobalKeyword(node: CSSProperty, kw: string, config: Config, minScore?: number): boolean { + const ref = findBestMatch(kw, config.options['stylesheet.keywords'], minScore); + if (ref) { + node.value = [literalValue(ref)]; + return true; + } + + return false; } /** - * Check if given CSS value token is a numeric value - * @param {*} token - * @return {Boolean} + * Resolves numeric values in given abbreviation node */ -function isNumericValue(token) { - return tokenTypeOf(token, 'numeric'); -} +function resolveNumericValue(node: CSSProperty, config: Config) { + const aliases = config.options['stylesheet.unitAliases']; + const unitless = config.options['stylesheet.unitless']; -function tokenTypeOf(token, type) { - return token && typeof token === 'object' && token.type === type; + for (const v of node.value) { + for (const t of v.value) { + if (t.type === 'NumberValue') { + if (t.unit) { + t.unit = aliases[t.unit] || t.unit; + } else if (t.value !== 0 && !unitless.includes(node.name!)) { + // use `px` for integers, `em` for floats + // NB: num|0 is a quick alternative to Math.round(0) + t.unit = t.value === (t.value | 0) + ? config.options['stylesheet.intUnit'] + : config.options['stylesheet.floatUnit']; + } + } + } + } } /** - * Resolves numeric value for given CSS property - * @param {String} property CSS property name - * @param {NumericValue} token CSS numeric value token - * @param {Object} formatOptions Formatting options for units - * @return {NumericValue} + * Returns literal token if it’s a single value of given abbreviation node */ -function resolveNumericValue(property, token, formatOptions) { - if (token.unit) { - token.unit = formatOptions.unitAliases[token.unit] || token.unit; - } else if (token.value !== 0 && unitlessProperties.indexOf(property) === -1) { - // use `px` for integers, `em` for floats - // NB: num|0 is a quick alternative to Math.round(0) - token.unit = token.value === (token.value | 0) ? formatOptions.intUnit : formatOptions.floatUnit; +function getSingleKeyword(node: CSSProperty): Literal | void { + if (node.value.length === 1) { + const value = node.value[0]!; + if (value.value.length === 1 && value.value[0].type === 'Literal') { + return value.value[0] as Literal; + } } - - return token; } /**
88 | Working on stylesheet resolving and output | 93 | .ts | ts | mit | emmetio/emmet | 10071784
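The heart of this commit's matching logic is `getUnmatchedPart` plus the score-based `findBestMatch`: the snippet key is scanned for the abbreviation's characters in order, and whatever suffix cannot be matched is re-tried as a value keyword (`dib` resolves to property `d` with inline value `ib`). A minimal sketch of that scan, written in Ruby to match the remaining records below; the function name and examples come from the source comments above, and the Ruby port itself is illustrative only:

# Return the suffix of `abbr` whose characters can no longer be found,
# in order, inside `str` -- mirroring getUnmatchedPart from the diff above.
def unmatched_part(abbr, str)
  last_pos = 0
  abbr.each_char.with_index do |ch, i|
    last_pos = str.index(ch, last_pos)
    return abbr[i..] if last_pos.nil? # rest of abbr becomes the inline value
    last_pos += 1
  end
  ""
end

unmatched_part("poas", "position") # => "as" (example from the source comment)
unmatched_part("dib", "d")         # => "ib" (later resolved as a keyword alias)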
<NME> split.gemspec
<BEF>
# -*- encoding: utf-8 -*-
# frozen_string_literal: true

$:.push File.expand_path("../lib", __FILE__)
require "split/version"

Gem::Specification.new do |s|
  s.name = "split"
  s.version = Split::VERSION
  s.platform = Gem::Platform::RUBY
  s.authors = ["Andrew Nesbitt"]
  s.licenses = ["MIT"]
  s.email = ["[email protected]"]
  s.homepage = "https://github.com/splitrb/split"
  s.summary = "Rack based split testing framework"

  s.metadata = {
    "homepage_uri" => "https://github.com/splitrb/split",

  s.require_paths = ["lib"]

  s.add_dependency(%q<redis>, ["~> 2.1"])
  s.add_dependency(%q<redis-namespace>, ["~> 0.10.0"])
  s.add_dependency(%q<sinatra>, ["~> 1.2.6"])

  # Development Dependencies
  s.required_ruby_version = ">= 2.5.0"
  s.required_rubygems_version = ">= 2.0.0"

  s.files = `git ls-files`.split("\n")
  s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
  s.require_paths = ["lib"]

  s.add_dependency "redis", ">= 4.2"
  s.add_dependency "sinatra", ">= 1.2.6"
  s.add_dependency "rubystats", ">= 0.3.0"

  s.add_development_dependency "bundler", ">= 1.17"
  s.add_development_dependency "simplecov", "~> 0.15"
  s.add_development_dependency "rack-test", "~> 2.0"
  s.add_development_dependency "rake", "~> 13"
  s.add_development_dependency "rspec", "~> 3.7"
  s.add_development_dependency "pry", "~> 0.10"
  s.add_development_dependency "rails", ">= 5.0"
end
<MSG> redis-namespace version bumped
<DFF>
@@ -19,7 +19,7 @@ Gem::Specification.new do |s|
   s.require_paths = ["lib"]
 
   s.add_dependency(%q<redis>, ["~> 2.1"])
-  s.add_dependency(%q<redis-namespace>, ["~> 0.10.0"])
+  s.add_dependency(%q<redis-namespace>, ["~> 1.0.3"])
   s.add_dependency(%q<sinatra>, ["~> 1.2.6"])
 
   # Development Dependencies
1 | redis-namespace version bumped | 1 | .gemspec | gemspec | mit | splitrb/split | 10071786
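For context on the one-line change above: `~> 1.0.3` is RubyGems' pessimistic constraint, equivalent to `>= 1.0.3, < 1.1`, so the bump moves the gem from the 0.10.x series to any 1.0.x patch release at or above 1.0.3. A quick illustrative check using RubyGems' own `Gem::Dependency` (not part of the gemspec itself):

require "rubygems"

dep = Gem::Dependency.new("redis-namespace", "~> 1.0.3")
dep.match?("redis-namespace", "1.0.3")  # => true  (lower bound is inclusive)
dep.match?("redis-namespace", "1.0.9")  # => true  (later patch releases allowed)
dep.match?("redis-namespace", "1.1.0")  # => false (next minor series excluded)
dep.match?("redis-namespace", "0.10.1") # => false (old series no longer satisfies)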
<NME> split.rb
<BEF>
%w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|
  require "split/#{f}"
end

require "split/experiment_catalog"
require "split/extensions/string"
require "split/goals_collection"
require "split/helper"
require "split/combined_experiments_helper"
require "split/metric"
require "split/persistence"
require "split/redis_interface"
require "split/trial"
require "split/user"
require "split/version"
require "split/zscore"
require "split/engine" if defined?(Rails)

module Split
  extend self
  attr_accessor :configuration

  # Accepts:
  #   1. A redis URL (valid for `Redis.new(url: url)`)
  #   2. an options hash compatible with `Redis.new`
  #   3. or a valid Redis instance (one that responds to `#smembers`). Likely,
  #      this will be an instance of either `Redis`, `Redis::Client`,
  #      `Redis::DistRedis`, or `Redis::Namespace`.
  def redis=(server)
    @redis = if server.is_a?(String)
      Redis.new(url: server)
    elsif server.is_a?(Hash)
      Redis.new(server)
    elsif server.respond_to?(:smembers)
      server
    else
      raise ArgumentError, "You must supply a url, options hash or valid Redis connection instance"
    end
  end

  # Returns the current Redis connection. If none has been created, will
  # create a new one.
  def redis
    return @redis if @redis
    self.redis = self.configuration.redis
    self.redis
  end

  # Call this method to modify defaults in your initializers.
  #
  # @example
  #   Split.configure do |config|
  #     config.ignore_ip_addresses = '192.168.2.1'
  #   end
  def configure
    self.configuration ||= Configuration.new
    yield(configuration)
  end

  def cache(namespace, key, &block)
    Split::Cache.fetch(namespace, key, &block)
  end
end

# Check to see if being run in a Rails application. If so, wait until
# before_initialize to run configuration so Gems that create ENV variables
# have the chance to initialize first.
if defined?(::Rails)
  class Split::Railtie < Rails::Railtie
    config.before_initialize { Split.configure { } }
  end
else
  Split.configure { }
end
<MSG> made required files list more readable
<DFF>
@@ -1,4 +1,14 @@
-%w[algorithms extensions metric trial experiment alternative helper version configuration persistence exceptions].each do |f|
+%w[algorithms
+   alternative
+   configuration
+   exceptions
+   experiment
+   extensions
+   helper
+   metric
+   persistence
+   trial
+   version].each do |f|
   require "split/#{f}"
 end
11 | made required files list more readable | 1 | .rb | rb | mit | splitrb/split | 10071789
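The diff above is purely cosmetic: a `%w[...]` literal splits on any whitespace, newlines included, so the vertical, alphabetized list requires exactly the same files as the original one-liner. A small sketch of the equivalence (illustrative, trimmed to a few entries):

# %w splits on whitespace, so line breaks inside the literal are harmless:
%w[algorithms
   alternative
   configuration] == %w[algorithms alternative configuration] # => true

# and each entry expands to a require of the matching file under split/:
%w[algorithms alternative].each { |f| puts %(require "split/#{f}") }
# require "split/algorithms"
# require "split/alternative"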
<NME> alternative_spec.rb
<BEF>
# frozen_string_literal: true

require "spec_helper"
require "split/alternative"

describe Split::Alternative do
  let(:alternative) { Split::Alternative.new("Basket", "basket_text") }
  let(:alternative2) { Split::Alternative.new("Cart", "basket_text") }
  let!(:experiment) {
    Split::ExperimentCatalog.find_or_create({ "basket_text" => ["purchase", "refund"] }, "Basket", "Cart")
  }
  let(:goal1) { "purchase" }
  let(:goal2) { "refund" }

  it "should have goals" do
    expect(alternative.goals).to eq(["purchase", "refund"])
  end

  it "should have and only return the name" do
    expect(alternative.name).to eq("Basket")
  end

  describe "weights" do
    it "should set the weights" do
      experiment = Split::Experiment.new("basket_text", alternatives: [{ "Basket" => 0.6 }, { "Cart" => 0.4 }])
      first = experiment.alternatives[0]
      expect(first.name).to eq("Basket")
      expect(first.weight).to eq(0.6)

      second = experiment.alternatives[1]
      expect(second.name).to eq("Cart")
      expect(second.weight).to eq(0.4)
    end

    it "accepts probability on alternatives" do
      Split.configuration.experiments = {
        my_experiment: {
          alternatives: [
            { name: "control_opt", percent: 67 },
            { name: "second_opt", percent: 10 },
            { name: "third_opt", percent: 23 },
          ]
        }
      }
      experiment = Split::Experiment.new(:my_experiment)
      first = experiment.alternatives[0]
      expect(first.name).to eq("control_opt")
      expect(first.weight).to eq(0.67)

      second = experiment.alternatives[1]
      expect(second.name).to eq("second_opt")
      expect(second.weight).to eq(0.1)
    end

    it "accepts probability on some alternatives" do
      Split.configuration.experiments = {
        my_experiment: {
          alternatives: [
            { name: "control_opt", percent: 34 },
            "second_opt",
            { name: "third_opt", percent: 23 },
            "fourth_opt",
          ],
        }
      }
      experiment = Split::Experiment.new(:my_experiment)
      alts = experiment.alternatives
      [
        ["control_opt", 0.34],
        ["second_opt", 0.215],
        ["third_opt", 0.23],
        ["fourth_opt", 0.215]
      ].each do |h|
        name, weight = h
        alt = alts.shift
        expect(alt.name).to eq(name)
        expect(alt.weight).to eq(weight)
      end
    end

    # it "allows name param without probability" do
    Split.configuration.experiments = {
      my_experiment: {
        alternatives: [
          { name: "control_opt" },
          "second_opt",
    end
  end

  it "should return an existing alternative" do
    alternative = Split::Alternative.create('Basket', 'basket_text')
    Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')
  end

  describe 'z score' do
    it 'should be zero when the control has no conversions' do
      experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
      [
        ["control_opt", 0.18],
        ["second_opt", 0.18],
        ["third_opt", 0.64],
      ].each do |h|
        name, weight = h
        alt = alts.shift
        expect(alt.name).to eq(name)
        expect(alt.weight).to eq(weight)
      end
    end
  end

  it "should have a default participation count of 0" do
    expect(alternative.participant_count).to eq(0)
  end

  it "should have a default completed count of 0 for each goal" do
    expect(alternative.completed_count).to eq(0)
    expect(alternative.completed_count(goal1)).to eq(0)
    expect(alternative.completed_count(goal2)).to eq(0)
  end

  it "should belong to an experiment" do
    expect(alternative.experiment.name).to eq(experiment.name)
  end

  it "should save to redis" do
    alternative.save
    expect(Split.redis.exists?("basket_text:Basket")).to be true
  end

  it "should increment participation count" do
    old_participant_count = alternative.participant_count
    alternative.increment_participation
    expect(alternative.participant_count).to eq(old_participant_count+1)
  end

  it "should increment completed count for each goal" do
    old_default_completed_count = alternative.completed_count
    old_completed_count_for_goal1 = alternative.completed_count(goal1)
    old_completed_count_for_goal2 = alternative.completed_count(goal2)

    alternative.increment_completion
    alternative.increment_completion(goal1)
    alternative.increment_completion(goal2)

    expect(alternative.completed_count).to eq(old_default_completed_count+1)
    expect(alternative.completed_count(goal1)).to eq(old_completed_count_for_goal1+1)
    expect(alternative.completed_count(goal2)).to eq(old_completed_count_for_goal2+1)
  end

  it "can be reset" do
    alternative.participant_count = 10
    alternative.set_completed_count(4, goal1)
    alternative.set_completed_count(5, goal2)
    alternative.set_completed_count(6)
    alternative.reset
    expect(alternative.participant_count).to eq(0)
    expect(alternative.completed_count(goal1)).to eq(0)
    expect(alternative.completed_count(goal2)).to eq(0)
    expect(alternative.completed_count).to eq(0)
  end

  it "should know if it is the control of an experiment" do
    expect(alternative.control?).to be_truthy
    expect(alternative2.control?).to be_falsey
  end

  describe "unfinished_count" do
    it "should be difference between participant and completed counts" do
      alternative.increment_participation
      expect(alternative.unfinished_count).to eq(alternative.participant_count)
    end

    it "should return the correct unfinished_count" do
      alternative.participant_count = 10
      alternative.set_completed_count(4, goal1)
      alternative.set_completed_count(3, goal2)
      alternative.set_completed_count(2)
      expect(alternative.unfinished_count).to eq(1)
    end
  end

  describe "conversion rate" do
    it "should be 0 if there are no conversions" do
      expect(alternative.completed_count).to eq(0)
      expect(alternative.conversion_rate).to eq(0)
    end

    it "calculate conversion rate" do
      expect(alternative).to receive(:participant_count).exactly(6).times.and_return(10)

      expect(alternative).to receive(:completed_count).and_return(4)
      expect(alternative.conversion_rate).to eq(0.4)

      expect(alternative).to receive(:completed_count).with(goal1).and_return(5)
      expect(alternative.conversion_rate(goal1)).to eq(0.5)

      expect(alternative).to receive(:completed_count).with(goal2).and_return(6)
      expect(alternative.conversion_rate(goal2)).to eq(0.6)
    end
  end

  describe "probability winner" do
    before do
      experiment.calc_winning_alternatives
    end

    it "should have a probability of being the winning alternative (p_winner)" do
      expect(alternative.p_winner).not_to be_nil
    end

    it "should have a probability of being the winner for each goal" do
      expect(alternative.p_winner(goal1)).not_to be_nil
    end

    it "should be possible to set the p_winner" do
      alternative.set_p_winner(0.5)
      expect(alternative.p_winner).to eq(0.5)
    end

    it "should be possible to set the p_winner for each goal" do
      alternative.set_p_winner(0.5, goal1)
      expect(alternative.p_winner(goal1)).to eq(0.5)
    end
  end

  describe "z score" do
    it "should return an error string when the control has 0 people" do
      expect(alternative2.z_score).to eq("Needs 30+ participants.")
      expect(alternative2.z_score(goal1)).to eq("Needs 30+ participants.")
      expect(alternative2.z_score(goal2)).to eq("Needs 30+ participants.")
    end

    it "should return an error string when the data is skewed or incomplete as per the np > 5 test" do
      control = experiment.control
      control.participant_count = 100
      control.set_completed_count(50)

      alternative2.participant_count = 50
      alternative2.set_completed_count(1)

      expect(alternative2.z_score).to eq("Needs 5+ conversions.")
    end

    it "should return a float for a z_score given proper data" do
      control = experiment.control
      control.participant_count = 120
      control.set_completed_count(20)

      alternative2.participant_count = 100
      alternative2.set_completed_count(25)

      expect(alternative2.z_score).to be_kind_of(Float)
      expect(alternative2.z_score).to_not eq(0)
    end

    it "should correctly calculate a z_score given proper data" do
      control = experiment.control
      control.participant_count = 126
      control.set_completed_count(89)

      alternative2.participant_count = 142
      alternative2.set_completed_count(119)

      expect(alternative2.z_score.round(2)).to eq(2.58)
    end

    it "should be N/A for the control" do
      control = experiment.control
      expect(control.z_score).to eq("N/A")
      expect(control.z_score(goal1)).to eq("N/A")
      expect(control.z_score(goal2)).to eq("N/A")
    end

    it "should not blow up for Conversion Rates > 1" do
      control = experiment.control
      control.participant_count = 3474
      control.set_completed_count(4244)

      alternative2.participant_count = 3434
      alternative2.set_completed_count(4358)

      expect { control.z_score }.not_to raise_error
      expect { alternative2.z_score }.not_to raise_error
    end
  end

  describe "extra_info" do
    it "reads saved value of recorded_info in redis" do
      saved_recorded_info = { "key_1" => 1, "key_2" => "2" }
      Split.redis.hset "#{alternative.experiment_name}:#{alternative.name}", "recorded_info", saved_recorded_info.to_json
      extra_info = alternative.extra_info
      expect(extra_info).to eql(saved_recorded_info)
    end
  end

  describe "record_extra_info" do
    it "saves key" do
      alternative.record_extra_info("signup", 1)
      expect(alternative.extra_info["signup"]).to eql(1)
    end

    it "adds value to saved key's value second argument is number" do
      alternative.record_extra_info("signup", 1)
      alternative.record_extra_info("signup", 2)
      expect(alternative.extra_info["signup"]).to eql(3)
    end

    it "sets saved's key value to the second argument if it's a string" do
      alternative.record_extra_info("signup", "Value 1")
      expect(alternative.extra_info["signup"]).to eql("Value 1")
      alternative.record_extra_info("signup", "Value 2")
      expect(alternative.extra_info["signup"]).to eql("Value 2")
    end
  end
end
<MSG> Alternative.create doesn't make much sense now
<DFF>
@@ -94,11 +94,6 @@ describe Split::Alternative do
     end
   end
 
-  it "should return an existing alternative" do
-    alternative = Split::Alternative.create('Basket', 'basket_text')
-    Split::Alternative.find('Basket', 'basket_text').name.should eql('Basket')
-  end
-
   describe 'z score' do
     it 'should be zero when the control has no conversions' do
       experiment = Split::Experiment.find_or_create('link_color', 'blue', 'red')
0 | Alternative.create doesn't make much sense now | 5 | .rb | rb | mit | splitrb/split | 10071792
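The expectation `z_score.round(2) == 2.58` in the spec above can be verified by hand. Assuming the usual two-proportion z-score with unpooled variances (the form that reproduces the spec's number), with control rate p_1 = 89/126 ≈ 0.7063 over n_1 = 126 participants and alternative rate p_2 = 119/142 ≈ 0.8380 over n_2 = 142:

z = \frac{p_2 - p_1}{\sqrt{\dfrac{p_1(1 - p_1)}{n_1} + \dfrac{p_2(1 - p_2)}{n_2}}}
  = \frac{0.8380 - 0.7063}{\sqrt{\dfrac{0.7063 \cdot 0.2937}{126} + \dfrac{0.8380 \cdot 0.1620}{142}}}
  \approx \frac{0.1317}{0.0510}
  \approx 2.58

which matches the spec's expected value after rounding to two decimal places.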
    0
    Alternative.create doesn't make much sense now
    5
    .rb
    rb
    mit
    splitrb/split
    10071793
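The diff above deletes the `Split::Alternative.create` entry point, so construction and persistence become separate, explicit steps. A minimal sketch of the surviving pattern, using only calls that appear in the spec above (`new`, `save`, `find`) and assuming a reachable Redis with the `split` gem loaded:

require "split"

# `new` only builds the in-memory object; nothing is written to Redis yet.
alternative = Split::Alternative.new("Basket", "basket_text")

# Persisting is an explicit step; afterwards `find` can see it.
alternative.save

found = Split::Alternative.find("Basket", "basket_text")
puts found.name # => "Basket"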
<NME> helper.rb
<BEF> # frozen_string_literal: true

module Split
  module Helper
    OVERRIDE_PARAM_NAME = "ab_test"

    module_function

    def ab_test(metric_descriptor, control = nil, *alternatives)
      begin
        experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)
        alternative = if Split.configuration.enabled && !exclude_visitor?
          experiment.save
          raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?
          trial = Trial.new(user: ab_user, experiment: experiment,
                            override: override_alternative(experiment.name), exclude: exclude_visitor?,
                            disabled: split_generically_disabled?)
          alt = trial.choose!(self)
          alt ? alt.name : nil
        else
          control_variable(experiment.control)
        end
      rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e
        raise(e) unless Split.configuration.db_failover
        Split.configuration.db_failover_on_db_error.call(e)

        if Split.configuration.db_failover_allow_parameter_override
          alternative = override_alternative(experiment.name) if override_present?(experiment.name)
          alternative = control_variable(experiment.control) if split_generically_disabled?
        end
      ensure
        alternative ||= control_variable(experiment.control)
      end

      if block_given?
        metadata = experiment.metadata[alternative] if experiment.metadata
        yield(alternative, metadata || {})
      else
        alternative
      end
    end

    def reset!(experiment)
      ab_user.delete(experiment.key)
    end

    def finish_experiment(experiment, options = { reset: true })
      return false if active_experiments[experiment.name].nil?
      return true if experiment.has_winner?
      should_reset = experiment.resettable? && options[:reset]
      if ab_user[experiment.finished_key] && !should_reset
        true
      else
        alternative_name = ab_user[experiment.key]
        trial = Trial.new(
          user: ab_user,
          experiment: experiment,
          alternative: alternative_name,
          goals: options[:goals],
        )

        trial.complete!(self)

        if should_reset
          reset!(experiment)
        else
          ab_user[experiment.finished_key] = true
        end
      end
    end

    def ab_finished(metric_descriptor, options = { reset: true })
      return if exclude_visitor? || Split.configuration.disabled?
      metric_descriptor, goals = normalize_metric(metric_descriptor)
      experiments = Metric.possible_experiments(metric_descriptor)

      if experiments.any?
        experiments.each do |experiment|
          next if override_present?(experiment.key)
          finish_experiment(experiment, options.merge(goals: goals))
        end
      end
    rescue => e
      raise unless Split.configuration.db_failover
      Split.configuration.db_failover_on_db_error.call(e)
    end

    def ab_record_extra_info(metric_descriptor, key, value = 1)
      return if exclude_visitor? || Split.configuration.disabled?
      metric_descriptor, goals = normalize_metric(metric_descriptor)
      experiments = Metric.possible_experiments(metric_descriptor)

      if experiments.any?
        experiments.each do |experiment|
          alternative_name = ab_user[experiment.key]

          if alternative_name
            alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }
            alternative.record_extra_info(key, value) if alternative
          end
        end
      end
    rescue => e
      raise unless Split.configuration.db_failover
      Split.configuration.db_failover_on_db_error.call(e)
    end

    def ab_active_experiments
      ab_user.active_experiments
    rescue => e
      raise unless Split.configuration.db_failover
      Split.configuration.db_failover_on_db_error.call(e)
    end

    def override_present?(experiment_name)
      override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)
    end

    def override_alternative(experiment_name)
      override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)
    end

    def override_alternative_by_params(experiment_name)
      defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]
    end

    def override_alternative_by_cookies(experiment_name)
      return unless defined?(request)

      if request.cookies && request.cookies.key?("split_override")
        experiments = JSON.parse(request.cookies["split_override"]) rescue {}
        experiments[experiment_name]
      end
    end

    def split_generically_disabled?
      defined?(params) && params["SPLIT_DISABLE"]
    end

    def ab_user
      @ab_user ||= User.new(self)
    end

    def exclude_visitor?
      defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?)
    end

    def is_robot?
      defined?(request) && request.user_agent =~ Split.configuration.robot_regex
    end

    def is_preview?
      defined?(request) && defined?(request.headers) && request.headers["x-purpose"] == "preview"
    end

    def is_ignored_ip_address?
      return false if Split.configuration.ignore_ip_addresses.empty?

      Split.configuration.ignore_ip_addresses.each do |ip|
        return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))
      end
      false
    end

    def active_experiments
      ab_user.active_experiments
    end

    def normalize_metric(metric_descriptor)
      if Hash === metric_descriptor
        experiment_name = metric_descriptor.keys.first
        goals = Array(metric_descriptor.values.first)
      else
        experiment_name = metric_descriptor
        goals = []
      end
      return experiment_name, goals
    end

    def control_variable(control)
      Hash === control ? control.keys.first.to_s : control.to_s
    end
  end
end
<MSG> Merge pull request #592 from splitrb/fix-unused-variable-warnings

Fix unused variable warnings
<DFF> @@ -80,7 +80,7 @@ module Split
 
     def ab_record_extra_info(metric_descriptor, key, value = 1)
       return if exclude_visitor? || Split.configuration.disabled?
-      metric_descriptor, goals = normalize_metric(metric_descriptor)
+      metric_descriptor, _ = normalize_metric(metric_descriptor)
       experiments = Metric.possible_experiments(metric_descriptor)
 
       if experiments.any?
    1
    Merge pull request #592 from splitrb/fix-unused-variable-warnings
    1
    .rb
    rb
    mit
    splitrb/split
    10071795
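The `goals` binding in `ab_record_extra_info` was assigned but never read, which trips Ruby's "assigned but unused variable" warning when warnings are enabled. Binding the unwanted half of a multiple assignment to `_` is the standard idiom for a deliberately discarded value. A standalone sketch of the same pattern, modeled on `normalize_metric` above but independent of Split:

# Returns a [name, goals] pair, as normalize_metric does.
def normalize(descriptor)
  if descriptor.is_a?(Hash)
    [descriptor.keys.first, Array(descriptor.values.first)]
  else
    [descriptor, []]
  end
end

# Callers that only need the name bind the rest to `_`,
# which `ruby -W` treats as intentionally unused.
name, _ = normalize({ "link_color" => ["purchase", "refund"] })
puts name # => "link_color"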
<NME> split.gemspec
<BEF> # -*- encoding: utf-8 -*-
# frozen_string_literal: true

$:.push File.expand_path("../lib", __FILE__)
require "split/version"

Gem::Specification.new do |s|
  s.name        = "split"
  s.version     = Split::VERSION
  s.platform    = Gem::Platform::RUBY
  s.authors     = ["Andrew Nesbitt"]
  s.licenses    = ["MIT"]
  s.email       = ["[email protected]"]
  s.homepage    = "https://github.com/splitrb/split"
  s.summary     = "Rack based split testing framework"

  s.metadata    = {
    "homepage_uri"     => "https://github.com/splitrb/split",
    "changelog_uri"    => "https://github.com/splitrb/split/blob/main/CHANGELOG.md",
    "source_code_uri"  => "https://github.com/splitrb/split",
    "bug_tracker_uri"  => "https://github.com/splitrb/split/issues",
    "wiki_uri"         => "https://github.com/splitrb/split/wiki",
    "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby"
  }

  s.required_ruby_version     = ">= 2.5.0"
  s.required_rubygems_version = ">= 2.0.0"

  s.files         = `git ls-files`.split("\n")
  s.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
  s.require_paths = ["lib"]

  s.add_dependency 'redis', '>= 4.2'
  s.add_dependency 'sinatra', '>= 1.2.6'
  s.add_dependency 'rubystats', '>= 0.3.0'

  s.add_development_dependency 'bundler', '>= 1.17'
  s.add_development_dependency "simplecov", "~> 0.15"
  s.add_development_dependency "rack-test", "~> 2.0"
  s.add_development_dependency "rake", "~> 13"
  s.add_development_dependency "rspec", "~> 3.7"
  s.add_development_dependency "pry", "~> 0.10"
  s.add_development_dependency "rails", ">= 5.0"
end
<MSG> Merge pull request #635 from splitrb/sinatra-has-left-the-building

Remove Sinatra Dependency for Split Dashboard
<DFF> @@ -31,7 +31,6 @@ Gem::Specification.new do |s|
   s.require_paths = ["lib"]
 
   s.add_dependency 'redis', '>= 4.2'
-  s.add_dependency 'sinatra', '>= 1.2.6'
   s.add_dependency 'rubystats', '>= 0.3.0'
 
   s.add_development_dependency 'bundler', '>= 1.17'
    0
    Merge pull request #635 from splitrb/sinatra-has-left-the-building
    1
    .gemspec
    gemspec
    mit
    splitrb/split
    10071798
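After this change the gem's only runtime dependencies are `redis` and `rubystats`. The distinction the diff relies on: `add_dependency` entries are resolved for every application that bundles the gem, while `add_development_dependency` entries are installed only when working on the gem itself. A minimal illustrative gemspec making that split explicit (the gem name and file list are hypothetical, not from this repository):

# example.gemspec -- illustrative only
Gem::Specification.new do |s|
  s.name    = "example"
  s.version = "0.1.0"
  s.summary = "Shows runtime vs. development dependencies"
  s.authors = ["Example Author"]
  s.files   = ["lib/example.rb"]

  # Pulled in automatically by every consumer of the gem.
  s.add_dependency "redis", ">= 4.2"

  # Installed only by `bundle install` inside this gem's own repo.
  s.add_development_dependency "rspec", "~> 3.7"
end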
<NME> Rakefile
<BEF> require 'bundler/gem_tasks'
require 'rspec/core/rake_task'

RSpec::Core::RakeTask.new("spec")

task default: :spec
<MSG> Added rake shebang
<DFF> @@ -1,3 +1,4 @@
+#!/usr/bin/env rake
 require 'bundler/gem_tasks'
 require 'rspec/core/rake_task'
    1
    Added rake shebang
    0
    Rakefile
    mit
    splitrb/split
    10071801
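The shebang makes the Rakefile directly executable once it has the execute bit set, without changing how `rake` itself loads it. A complete minimal Rakefile with the same first line; after `chmod +x Rakefile` it can be invoked as `./Rakefile spec` as well as `rake spec`:

#!/usr/bin/env rake
require "rspec/core/rake_task"

# Defines the `spec` task and makes it the default,
# so a bare `rake` (or `./Rakefile`) runs the test suite.
RSpec::Core::RakeTask.new("spec")

task default: :spec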
<NME> TODO
<BEF> PyPI feature replication
========================

* Make it possible to register users via distutils. There should be a
  setting to turn this feature on/off for private PyPIs. [taken-by: sverrej]
* Maybe add a permission "can upload new release", so more than one user
  can change the same project.
* Should a project have co-owners?
* Documentation upload
* Ratings
* Random Monty Python quotes :-)
* Comments :-)

Post-PyPI
=========

* PEP-381: Mirroring infrastructure for PyPI [taken-by: jezdez]
* API to submit test reports for smoke test bots. Like CPAN Testers.
  Platform/version/matrix etc.
* Different listings: Author listings, classifier listings, etc.
* Search metadata
* Automatic generation of Sphinx for modules (so you can view them directly
  on pypi, like CPAN), Module listing etc.
* Listing of special files: README, LICENSE, Changefile/Changes, TODO, MANIFEST.
* Dependency graphs.
* Package file browser (like CPAN)

Documentation
=============

* Write a tutorial on how to set up the server, registering projects, and
  how to upload releases.
<MSG> more TODO tasks
<DFF> @@ -7,3 +7,7 @@
 * Maybe add a permission "can upload new release", so more than one user
   can change the same project.
 * Should a project have co-owners?
+  - One possible solution:
+    http://github.com/initcrash/django-object-permissions/tree
+* Script to populate classifiers from
+  http://pypi.python.org/pypi?%3Aaction=list_classifiers
    4
    more TODO tasks
    0
    TODO
    bsd-3-clause
    ask/chishop
    10071804
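The classifier item added at the end of the diff points at a plain-text endpoint that returns one classifier per line, so the population script is essentially fetch-and-split. A throwaway sketch in Ruby (Ruby to match the other examples in this document; the URL is the one the TODO item cites, and whether it still resolves without redirects is an assumption):

require "net/http"
require "uri"

# The endpoint named in the TODO item; it serves one classifier per line.
uri = URI("http://pypi.python.org/pypi?%3Aaction=list_classifiers")
body = Net::HTTP.get(uri)

# One classifier per non-empty line, e.g. "Development Status :: 4 - Beta".
classifiers = body.lines.map(&:strip).reject(&:empty?)
puts classifiers.first(5)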
<NME> engine.rb
<BEF> ADDFILE
<MSG> extracting Rails specifics into engine subclass
<DFF> @@ -0,0 +1,8 @@
+module Split
+  class Engine < ::Rails::Engine
+    initializer "split" do |app|
+      ActionController::Base.send :include, Split::Helper
+      ActionController::Base.helper Split::Helper
+    end
+  end
+end
\ No newline at end of file
    8
    extracting Rails specifics into engine subclass
    0
    .rb
    rb
    mit
    splitrb/split
    10071805
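The initializer above references `ActionController::Base` directly, which forces Action Controller to load at boot. A commonly used alternative defers the include with `ActiveSupport.on_load`, so the mixin happens only when the class is actually loaded. This sketch is an assumption about a possible refactor (and assumes a Rails version that runs the `:action_controller_base` load hook), not code from this repository:

module Split
  class Engine < ::Rails::Engine
    initializer "split.helper" do
      # Runs when ActionController::Base is loaded;
      # inside the block, `self` is that class.
      ActiveSupport.on_load(:action_controller_base) do
        include Split::Helper
        helper Split::Helper
      end
    end
  end
end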
    <NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is 
present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| 
config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end describe 'ab_finished' do before(:each) do @experiment_name = 'link_color' @alternatives = ['blue', 'red'] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it 'should increment the counter for 
the completed alternative' do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, {:reset => false}) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it 'should not increment the counter if reset is false and the experiment has been already finished' do 2.times { ab_finished(@experiment_name, {:reset => false}) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it 'should not increment the counter for an experiment that the user is not participating in' do ab_test('button_size', 'small', 'big') # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect(lambda { ab_finished('button_size') }).not_to change { Split::Alternative.new('small', 'button_size').completed_count } end it 'should not increment the counter for an ended experiment' do e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big') e.winner = 'small' a = ab_test('button_size', 'small', 'big') expect(a).to eq('small') expect(lambda { ab_finished('button_size') }).not_to change { Split::Alternative.new(a, 'button_size').completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, {:reset => false}) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should do nothing where the experiment was not started by this user" do ab_user = nil expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be 
    context "for an experiment that the user does not participate in" do
      before do
        Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt")
      end

      it "should not raise an exception" do
        expect { ab_finished(:not_started_experiment) }.not_to raise_exception
      end

      it "should not change the user state when reset is false" do
        expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
      end

      it "should not change the user state when reset is true" do
        expect(self).not_to receive(:reset!)
        ab_finished(:not_started_experiment)
      end

      it "should not increment the completed counter" do
        ab_finished(:not_started_experiment)
        expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0)
        expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0)
      end
    end
  end

  context "finished with config" do
    it "passes reset option" do
      Split.configuration.experiments = {
        my_experiment: {
          alternatives: ["one", "two"],
          resettable: false,
        }
      }
      alternative = ab_test(:my_experiment)
      experiment = Split::ExperimentCatalog.find :my_experiment

      ab_finished :my_experiment

      expect(ab_user[experiment.key]).to eq(alternative)
      expect(ab_user[experiment.finished_key]).to eq(true)
    end
  end

  context "finished with metric name" do
    before { Split.configuration.experiments = {} }
    before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }

    def should_finish_experiment(experiment_name, should_finish = true)
      alts = Split.configuration.experiments[experiment_name][:alternatives]
      experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)
      alt_name = ab_user[experiment.key] = alts.first
      alt = double("alternative")
      expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)
      expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)
      if should_finish
        expect(alt).to receive(:increment_completion).at_most(1).times
      else
        expect(alt).not_to receive(:increment_completion)
      end
    end

    it "completes the test" do
      Split.configuration.experiments[:my_experiment] = {
        alternatives: [ "control_opt", "other_opt" ],
        metric: :my_metric
      }
      should_finish_experiment :my_experiment
      ab_finished :my_metric
    end

    it "completes all relevant tests" do
      Split.configuration.experiments = {
        exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric },
        exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric },
        exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric },
      }
      should_finish_experiment :exp_1
      should_finish_experiment :exp_2, false
      should_finish_experiment :exp_3
      ab_finished :my_metric
    end

    it "passes reset option" do
      Split.configuration.experiments = {
        my_exp: {
          alternatives: ["one", "two"],
          metric: :my_metric,
          resettable: false,
        }
      }
      alternative_name = ab_test(:my_exp)
      exp = Split::ExperimentCatalog.find :my_exp

      ab_finished :my_metric
      expect(ab_user[exp.key]).to eq(alternative_name)
      expect(ab_user[exp.finished_key]).to be_truthy
    end
    it "passes through options" do
      Split.configuration.experiments = {
        my_exp: {
          alternatives: ["one", "two"],
          metric: :my_metric,
        }
      }
      alternative_name = ab_test(:my_exp)
      exp = Split::ExperimentCatalog.find :my_exp

      ab_finished :my_metric, reset: false
      expect(ab_user[exp.key]).to eq(alternative_name)
      expect(ab_user[exp.finished_key]).to be_truthy
    end
  end

  describe "conversions" do
    it "should return a conversion rate for an alternative" do
      alternative_name = ab_test("link_color", "blue", "red")

      previous_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(previous_conversion_rate).to eq(0.0)

      ab_finished("link_color")

      new_conversion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate
      expect(new_conversion_rate).to eq(1.0)
    end
  end

  describe "active experiments" do
    it "should show an active test" do
      alternative = ab_test("def", "4", "5", "6")
      expect(active_experiments.count).to eq 1
      expect(active_experiments.first[0]).to eq "def"
      expect(active_experiments.first[1]).to eq alternative
    end

    it "should show a finished test" do
      alternative = ab_test("def", "4", "5", "6")
      ab_finished("def", { reset: false })
      expect(active_experiments.count).to eq 1
      expect(active_experiments.first[0]).to eq "def"
      expect(active_experiments.first[1]).to eq alternative
    end

    it "should show an active test when an experiment is on a later version" do
      experiment.reset
      expect(experiment.version).to eq(1)
      ab_test("link_color", "blue", "red")
      expect(active_experiments.count).to eq 1
      expect(active_experiments.first[0]).to eq "link_color"
    end

    it "should show versioned tests properly" do
      10.times { experiment.reset }

      alternative = ab_test(experiment.name, "blue", "red")
      ab_finished(experiment.name, reset: false)

      expect(experiment.version).to eq(10)
      expect(active_experiments.count).to eq 1
      expect(active_experiments).to eq({ "link_color" => alternative })
    end

    it "should show multiple tests" do
      Split.configure do |config|
        config.allow_multiple_experiments = true
      end
      alternative = ab_test("def", "4", "5", "6")
      another_alternative = ab_test("ghi", "7", "8", "9")
      expect(active_experiments.count).to eq 2
      expect(active_experiments["def"]).to eq alternative
      expect(active_experiments["ghi"]).to eq another_alternative
    end

    it "should not show tests with winners" do
      Split.configure do |config|
        config.allow_multiple_experiments = true
      end
      e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6")
      e.winner = "4"
      ab_test("def", "4", "5", "6")
      another_alternative = ab_test("ghi", "7", "8", "9")
      expect(active_experiments.count).to eq 1
      expect(active_experiments.first[0]).to eq "ghi"
      expect(active_experiments.first[1]).to eq another_alternative
    end
  end
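  # Illustrative note (not part of the original spec): as the assertions above
  # imply, active_experiments returns a Hash keyed by experiment name, e.g.
  #
  #   active_experiments  # => { "link_color" => "blue" }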
completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do 
    it "should save the version of the experiment to the session" do
      experiment.reset
      expect(experiment.version).to eq(1)

      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(alternative_name)
    end

    it "should load the experiment even if the version is not 0" do
      experiment.reset
      expect(experiment.version).to eq(1)

      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(alternative_name)

      return_alternative_name = ab_test("link_color", "blue", "red")
      expect(return_alternative_name).to eq(alternative_name)
    end

    it "should reset the session of a user on an older version of the experiment" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)

      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(1)

      experiment.reset
      expect(experiment.version).to eq(1)

      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(0)

      new_alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(new_alternative_name)

      new_alternative = Split::Alternative.new(new_alternative_name, "link_color")
      expect(new_alternative.participant_count).to eq(1)
    end

    it "should cleanup old versions of experiments from the session" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)

      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(1)

      experiment.reset
      expect(experiment.version).to eq(1)

      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.participant_count).to eq(0)

      new_alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color:1"]).to eq(new_alternative_name)
    end

    it "should only count completion of users on the current version" do
      alternative_name = ab_test("link_color", "blue", "red")
      expect(ab_user["link_color"]).to eq(alternative_name)
      Split::Alternative.new(alternative_name, "link_color")

      experiment.reset
      expect(experiment.version).to eq(1)

      ab_finished("link_color")
      alternative = Split::Alternative.new(alternative_name, "link_color")
      expect(alternative.completed_count).to eq(0)
    end
  end

  context "when redis is not available" do
    before(:each) do
      expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)
    end

    context "and db_failover config option is turned off" do
      before(:each) do
        Split.configure do |config|
          config.db_failover = false
        end
      end

      describe "ab_test" do
        it "should raise an exception" do
          expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED)
        end
      end

      describe "finished" do
        it "should raise an exception" do
          expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED)
        end
      end

      describe "disable split testing" do
        before(:each) do
          Split.configure do |config|
            config.enabled = false
          end
        end

        it "should not attempt to connect to redis" do
          expect { ab_test("link_color", "blue", "red") }.not_to raise_error
        end

        it "should return control variable" do
          expect(ab_test("link_color", "blue", "red")).to eq("blue")
          expect { ab_finished("link_color") }.not_to raise_error
        end
      end
    end

    context "and db_failover config option is turned on" do
      before(:each) do
        Split.configure do |config|
          config.db_failover = true
        end
      end

      describe "ab_test" do
        it "should not raise an exception" do
          expect { ab_test("link_color", "blue", "red") }.not_to raise_error
        end
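        # Illustrative sketch (not part of the original spec): a hypothetical
        # initializer wiring up the failover behaviour exercised in this
        # context; the warn call is an assumption.
        #
        #   Split.configure do |config|
        #     config.db_failover = true
        #     config.db_failover_on_db_error = proc { |error| warn(error.message) }
        #   end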
error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple 
alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => 
["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Check if the experiment about to finish is active for the user <DFF> @@ -296,98 +296,126 @@ describe Split::Helper do end describe 'ab_finished' do - before(:each) do - @experiment_name = 'link_color' - @alternatives = ['blue', 'red'] - @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) - @alternative_name = ab_test(@experiment_name, *@alternatives) - @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - end + context 'for an experiment that the user participates in' do + before(:each) do + @experiment_name = 'link_color' + @alternatives = ['blue', 'red'] + @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) + @alternative_name = ab_test(@experiment_name, *@alternatives) + @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + end - it 'should increment the counter for the completed alternative' do - ab_finished(@experiment_name) - new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - expect(new_completion_count).to eq(@previous_completion_count + 1) - end + it 'should increment the counter for the completed alternative' do + ab_finished(@experiment_name) + new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + expect(new_completion_count).to eq(@previous_completion_count + 1) + end - it "should set experiment's finished key if reset is false" do - ab_finished(@experiment_name, {:reset => false}) - expect(ab_user[@experiment.key]).to eq(@alternative_name) - expect(ab_user[@experiment.finished_key]).to eq(true) - end + it "should set experiment's finished key if reset is false" do + ab_finished(@experiment_name, {:reset => false}) + expect(ab_user[@experiment.key]).to eq(@alternative_name) + expect(ab_user[@experiment.finished_key]).to eq(true) + end - it 'should not increment the counter if reset is false and the experiment has been already finished' do - 2.times { ab_finished(@experiment_name, {:reset => false}) } - new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - expect(new_completion_count).to eq(@previous_completion_count + 1) - end + it 'should not increment the counter if reset is false and the experiment has been already finished' do + 2.times { ab_finished(@experiment_name, {:reset => false}) } + new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + expect(new_completion_count).to eq(@previous_completion_count + 1) + end - it 'should not increment the counter for an experiment that the user is not participating in' do - ab_test('button_size', 'small', 'big') + it 'should not increment the counter for an ended experiment' do + e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big') + e.winner = 'small' + a = ab_test('button_size', 'small', 'big') + expect(a).to eq('small') + expect(lambda { + ab_finished('button_size') + }).not_to change { Split::Alternative.new(a, 'button_size').completed_count } + end - # So, user should be participating in the link_color experiment and - # receive the control for button_size. 
-      # receive the control for button_size. As the user is not participating in
-      # the button size experiment, finishing it should not increase the
-      # completion count for that alternative.
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
-    end
+      it "should clear out the user's participation from their session" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it 'should not increment the counter for an ended experiment' do
-      e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big')
-      e.winner = 'small'
-      a = ab_test('button_size', 'small', 'big')
-      expect(a).to eq('small')
-      expect(lambda {
-        ab_finished('button_size')
-      }).not_to change { Split::Alternative.new(a, 'button_size').completed_count }
-    end
+      it "should not clear out the users session if reset is false" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name, {:reset => false})
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        expect(ab_user[@experiment.finished_key]).to eq(true)
+      end
 
-    it "should clear out the user's participation from their session" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+      it "should reset the users session when experiment is not versioned" do
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it "should not clear out the users session if reset is false" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name, {:reset => false})
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      expect(ab_user[@experiment.finished_key]).to eq(true)
-    end
+      it "should reset the users session when experiment is versioned" do
+        @experiment.increment_version
+        @alternative_name = ab_test(@experiment_name, *@alternatives)
 
-    it "should reset the users session when experiment is not versioned" do
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
-    end
+        expect(ab_user[@experiment.key]).to eq(@alternative_name)
+        ab_finished(@experiment_name)
+        expect(ab_user.keys).to be_empty
+      end
 
-    it "should reset the users session when experiment is versioned" do
-      @experiment.increment_version
-      @alternative_name = ab_test(@experiment_name, *@alternatives)
+      context "when on_trial_complete is set" do
+        before { Split.configuration.on_trial_complete = :some_method }
+        it "should call the method" do
+          expect(self).to receive(:some_method)
+          ab_finished(@experiment_name)
+        end
 
-      expect(ab_user[@experiment.key]).to eq(@alternative_name)
-      ab_finished(@experiment_name)
-      expect(ab_user.keys).to be_empty
+        it "should not call the method without alternative" do
+          ab_user[@experiment.key] = nil
+          expect(self).not_to receive(:some_method)
+          ab_finished(@experiment_name)
+        end
+      end
+    end
-    it "should do nothing where the experiment was not started by this user" do
-      ab_user = nil
-      expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception
+    context 'for an experiment that the user is excluded from' do
+      before do
+        alternative = ab_test('link_color', 'blue', 'red')
+        expect(Split::Alternative.new(alternative, 'link_color').participant_count).to eq(1)
+        alternative = ab_test('button_size', 'small', 'big')
+        expect(Split::Alternative.new(alternative, 'button_size').participant_count).to eq(0)
+      end
+
+      it 'should not increment the completed counter' do
+        # So, user should be participating in the link_color experiment and
+        # receive the control for button_size. As the user is not participating in
+        # the button size experiment, finishing it should not increase the
+        # completion count for that alternative.
+        expect(lambda {
+          ab_finished('button_size')
+        }).not_to change { Split::Alternative.new('small', 'button_size').completed_count }
+      end
     end
 
-    context "when on_trial_complete is set" do
-      before { Split.configuration.on_trial_complete = :some_method }
-      it "should call the method" do
-        expect(self).to receive(:some_method)
-        ab_finished(@experiment_name)
+    context 'for an experiment that the user does not participate in' do
+      before do
+        Split::ExperimentCatalog.find_or_create(:not_started_experiment, 'control', 'alt')
+      end
+      it 'should not raise an exception' do
+        expect { ab_finished(:not_started_experiment) }.not_to raise_exception
      end
 
-      it "should not call the method without alternative" do
-        ab_user[@experiment.key] = nil
-        expect(self).not_to receive(:some_method)
-        ab_finished(@experiment_name)
+      it 'should not change the user state when reset is false' do
+        expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])
+      end
+
+      it 'should not change the user state when reset is true' do
+        expect(self).not_to receive(:reset!)
+        ab_finished(:not_started_experiment)
+      end
+
+      it 'should not increment the completed counter' do
+        ab_finished(:not_started_experiment)
+        expect(Split::Alternative.new('control', :not_started_experiment).completed_count).to eq(0)
+        expect(Split::Alternative.new('alt', :not_started_experiment).completed_count).to eq(0)
      end
    end
  end
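The commit message above states the behavioural intent of this diff: ab_finished
should first check whether the experiment about to finish is actually active for
the current user, and do nothing otherwise. A minimal sketch of such a guard,
assuming a helper shaped like ab_finished and reusing normalize_metric from the
specs; the key-matching logic here is an illustration, not the gem's actual
implementation:

  def ab_finished(metric_descriptor, options = { reset: true })
    experiment_name, _goals = normalize_metric(metric_descriptor)
    # Hypothetical guard: bail out unless the user holds an assignment for
    # this experiment, under either a plain or a versioned session key
    # (e.g. "link_color" or "link_color:1").
    active = ab_user.keys.any? { |key| key.split(":").first == experiment_name.to_s }
    return unless active
    # ...record the completion against the user's alternative as before...
  end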
["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Check if the experiment about to finish is active for the user <DFF> @@ -296,98 +296,126 @@ describe Split::Helper do end describe 'ab_finished' do - before(:each) do - @experiment_name = 'link_color' - @alternatives = ['blue', 'red'] - @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) - @alternative_name = ab_test(@experiment_name, *@alternatives) - @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - end + context 'for an experiment that the user participates in' do + before(:each) do + @experiment_name = 'link_color' + @alternatives = ['blue', 'red'] + @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) + @alternative_name = ab_test(@experiment_name, *@alternatives) + @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + end - it 'should increment the counter for the completed alternative' do - ab_finished(@experiment_name) - new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - expect(new_completion_count).to eq(@previous_completion_count + 1) - end + it 'should increment the counter for the completed alternative' do + ab_finished(@experiment_name) + new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + expect(new_completion_count).to eq(@previous_completion_count + 1) + end - it "should set experiment's finished key if reset is false" do - ab_finished(@experiment_name, {:reset => false}) - expect(ab_user[@experiment.key]).to eq(@alternative_name) - expect(ab_user[@experiment.finished_key]).to eq(true) - end + it "should set experiment's finished key if reset is false" do + ab_finished(@experiment_name, {:reset => false}) + expect(ab_user[@experiment.key]).to eq(@alternative_name) + expect(ab_user[@experiment.finished_key]).to eq(true) + end - it 'should not increment the counter if reset is false and the experiment has been already finished' do - 2.times { ab_finished(@experiment_name, {:reset => false}) } - new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - expect(new_completion_count).to eq(@previous_completion_count + 1) - end + it 'should not increment the counter if reset is false and the experiment has been already finished' do + 2.times { ab_finished(@experiment_name, {:reset => false}) } + new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + expect(new_completion_count).to eq(@previous_completion_count + 1) + end - it 'should not increment the counter for an experiment that the user is not participating in' do - ab_test('button_size', 'small', 'big') + it 'should not increment the counter for an ended experiment' do + e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big') + e.winner = 'small' + a = ab_test('button_size', 'small', 'big') + expect(a).to eq('small') + expect(lambda { + ab_finished('button_size') + }).not_to change { Split::Alternative.new(a, 'button_size').completed_count } + end - # So, user should be participating in the link_color experiment and - # receive the control for button_size. 
As the user is not participating in - # the button size experiment, finishing it should not increase the - # completion count for that alternative. - expect(lambda { - ab_finished('button_size') - }).not_to change { Split::Alternative.new('small', 'button_size').completed_count } - end + it "should clear out the user's participation from their session" do + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name) + expect(ab_user.keys).to be_empty + end - it 'should not increment the counter for an ended experiment' do - e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big') - e.winner = 'small' - a = ab_test('button_size', 'small', 'big') - expect(a).to eq('small') - expect(lambda { - ab_finished('button_size') - }).not_to change { Split::Alternative.new(a, 'button_size').completed_count } - end + it "should not clear out the users session if reset is false" do + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name, {:reset => false}) + expect(ab_user[@experiment.key]).to eq(@alternative_name) + expect(ab_user[@experiment.finished_key]).to eq(true) + end - it "should clear out the user's participation from their session" do - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name) - expect(ab_user.keys).to be_empty - end + it "should reset the users session when experiment is not versioned" do + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name) + expect(ab_user.keys).to be_empty + end - it "should not clear out the users session if reset is false" do - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name, {:reset => false}) - expect(ab_user[@experiment.key]).to eq(@alternative_name) - expect(ab_user[@experiment.finished_key]).to eq(true) - end + it "should reset the users session when experiment is versioned" do + @experiment.increment_version + @alternative_name = ab_test(@experiment_name, *@alternatives) - it "should reset the users session when experiment is not versioned" do - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name) - expect(ab_user.keys).to be_empty - end + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name) + expect(ab_user.keys).to be_empty + end - it "should reset the users session when experiment is versioned" do - @experiment.increment_version - @alternative_name = ab_test(@experiment_name, *@alternatives) + context "when on_trial_complete is set" do + before { Split.configuration.on_trial_complete = :some_method } + it "should call the method" do + expect(self).to receive(:some_method) + ab_finished(@experiment_name) + end - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name) - expect(ab_user.keys).to be_empty + it "should not call the method without alternative" do + ab_user[@experiment.key] = nil + expect(self).not_to receive(:some_method) + ab_finished(@experiment_name) + end + end end - it "should do nothing where the experiment was not started by this user" do - ab_user = nil - expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception + context 'for an experiment that the user is excluded from' do + before do + alternative = ab_test('link_color', 'blue', 'red') + expect(Split::Alternative.new(alternative, 'link_color').participant_count).to eq(1) + alternative = ab_test('button_size', 'small', 'big') + 
expect(Split::Alternative.new(alternative, 'button_size').participant_count).to eq(0) + end + + it 'should not increment the completed counter' do + # So, user should be participating in the link_color experiment and + # receive the control for button_size. As the user is not participating in + # the button size experiment, finishing it should not increase the + # completion count for that alternative. + expect(lambda { + ab_finished('button_size') + }).not_to change { Split::Alternative.new('small', 'button_size').completed_count } + end end - context "when on_trial_complete is set" do - before { Split.configuration.on_trial_complete = :some_method } - it "should call the method" do - expect(self).to receive(:some_method) - ab_finished(@experiment_name) + context 'for an experiment that the user does not participate in' do + before do + Split::ExperimentCatalog.find_or_create(:not_started_experiment, 'control', 'alt') + end + it 'should not raise an exception' do + expect { ab_finished(:not_started_experiment) }.not_to raise_exception end - it "should not call the method without alternative" do - ab_user[@experiment.key] = nil - expect(self).not_to receive(:some_method) - ab_finished(@experiment_name) + it 'should not change the user state when reset is false' do + expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys}.from([]) + end + + it 'should not change the user state when reset is true' do + expect(self).not_to receive(:reset!) + ab_finished(:not_started_experiment) + end + + it 'should not increment the completed counter' do + ab_finished(:not_started_experiment) + expect(Split::Alternative.new('control', :not_started_experiment).completed_count).to eq(0) + expect(Split::Alternative.new('alt', :not_started_experiment).completed_count).to eq(0) end end end
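The diff above narrows ab_finished: a completion is recorded only when the user holds an active assignment for that experiment, and finishing a never-started experiment neither raises nor mutates user state. A minimal plain-Ruby sketch of that guard (FinishGuard and its method names are invented for illustration; this is not Split's implementation):

# Toy model of the guard the commit message describes.
class FinishGuard
  attr_reader :completed

  def initialize(assignments)
    @assignments = assignments # e.g. { "link_color" => "blue" }
    @completed   = Hash.new(0) # [experiment, alternative] => completion count
  end

  # Count a completion only when the user has an active assignment
  # for this experiment; otherwise leave all state untouched.
  def finish(experiment_name)
    alternative = @assignments[experiment_name.to_s]
    return false if alternative.nil?

    @completed[[experiment_name.to_s, alternative]] += 1
    true
  end
end

guard = FinishGuard.new("link_color" => "blue")
guard.finish("link_color")             # => true, counted once
guard.finish(:not_started_experiment)  # => false, no state change

The new "does not participate in" context in the diff asserts exactly these two behaviours against the real helper.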
    103
    Check if the experiment about to finish is active for the user
    75
    .rb
    rb
    mit
    splitrb/split
    10071809
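The preloaded-config specs in the file above also pin down how mixed weighted and unweighted alternatives normalize: an explicit percent becomes a fractional weight, and alternatives without one split the remainder evenly so the weights sum to 1.0. A sketch of that normalization under those assumptions (normalize_weights is a hypothetical helper, not part of Split's API):

# Turn a mixed list of alternatives into [name, weight] pairs.
# Explicit percents are honored; the rest share what is left over.
def normalize_weights(alternatives)
  explicit = alternatives.select { |a| a.is_a?(Hash) && a[:percent] }
  implicit = alternatives - explicit
  used  = explicit.sum { |a| a[:percent] } / 100.0
  share = implicit.empty? ? 0.0 : (1.0 - used) / implicit.size

  alternatives.map do |a|
    if a.is_a?(Hash) && a[:percent]
      [a[:name], a[:percent] / 100.0]
    else
      [a.is_a?(Hash) ? a[:name] : a, share]
    end
  end
end

normalize_weights([
  { name: "control_opt", percent: 34 },
  "second_opt",
  { name: "third_opt", percent: 23 },
  "fourth_opt",
])
# => [["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]

This reproduces the expectation in the "accepts probability on some alternatives" spec: 0.34 + 0.23 leaves 0.43, split evenly as 0.215 each.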
    <NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is 
present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| 
config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end describe 'ab_finished' do before(:each) do @experiment_name = 'link_color' @alternatives = ['blue', 'red'] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it 'should increment the counter for 
the completed alternative' do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, {:reset => false}) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it 'should not increment the counter if reset is false and the experiment has been already finished' do 2.times { ab_finished(@experiment_name, {:reset => false}) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it 'should not increment the counter for an experiment that the user is not participating in' do ab_test('button_size', 'small', 'big') # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect(lambda { ab_finished('button_size') }).not_to change { Split::Alternative.new('small', 'button_size').completed_count } end it 'should not increment the counter for an ended experiment' do e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big') e.winner = 'small' a = ab_test('button_size', 'small', 'big') expect(a).to eq('small') expect(lambda { ab_finished('button_size') }).not_to change { Split::Alternative.new(a, 'button_size').completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, {:reset => false}) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should do nothing where the experiment was not started by this user" do ab_user = nil expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be 
participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = 
Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the 
completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do 
experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with 
error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple 
alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => 
["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Check if the experiment about to finish is active for the user <DFF> @@ -296,98 +296,126 @@ describe Split::Helper do end describe 'ab_finished' do - before(:each) do - @experiment_name = 'link_color' - @alternatives = ['blue', 'red'] - @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) - @alternative_name = ab_test(@experiment_name, *@alternatives) - @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - end + context 'for an experiment that the user participates in' do + before(:each) do + @experiment_name = 'link_color' + @alternatives = ['blue', 'red'] + @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) + @alternative_name = ab_test(@experiment_name, *@alternatives) + @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + end - it 'should increment the counter for the completed alternative' do - ab_finished(@experiment_name) - new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - expect(new_completion_count).to eq(@previous_completion_count + 1) - end + it 'should increment the counter for the completed alternative' do + ab_finished(@experiment_name) + new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + expect(new_completion_count).to eq(@previous_completion_count + 1) + end - it "should set experiment's finished key if reset is false" do - ab_finished(@experiment_name, {:reset => false}) - expect(ab_user[@experiment.key]).to eq(@alternative_name) - expect(ab_user[@experiment.finished_key]).to eq(true) - end + it "should set experiment's finished key if reset is false" do + ab_finished(@experiment_name, {:reset => false}) + expect(ab_user[@experiment.key]).to eq(@alternative_name) + expect(ab_user[@experiment.finished_key]).to eq(true) + end - it 'should not increment the counter if reset is false and the experiment has been already finished' do - 2.times { ab_finished(@experiment_name, {:reset => false}) } - new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count - expect(new_completion_count).to eq(@previous_completion_count + 1) - end + it 'should not increment the counter if reset is false and the experiment has been already finished' do + 2.times { ab_finished(@experiment_name, {:reset => false}) } + new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count + expect(new_completion_count).to eq(@previous_completion_count + 1) + end - it 'should not increment the counter for an experiment that the user is not participating in' do - ab_test('button_size', 'small', 'big') + it 'should not increment the counter for an ended experiment' do + e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big') + e.winner = 'small' + a = ab_test('button_size', 'small', 'big') + expect(a).to eq('small') + expect(lambda { + ab_finished('button_size') + }).not_to change { Split::Alternative.new(a, 'button_size').completed_count } + end - # So, user should be participating in the link_color experiment and - # receive the control for button_size. 
As the user is not participating in - # the button size experiment, finishing it should not increase the - # completion count for that alternative. - expect(lambda { - ab_finished('button_size') - }).not_to change { Split::Alternative.new('small', 'button_size').completed_count } - end + it "should clear out the user's participation from their session" do + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name) + expect(ab_user.keys).to be_empty + end - it 'should not increment the counter for an ended experiment' do - e = Split::ExperimentCatalog.find_or_create('button_size', 'small', 'big') - e.winner = 'small' - a = ab_test('button_size', 'small', 'big') - expect(a).to eq('small') - expect(lambda { - ab_finished('button_size') - }).not_to change { Split::Alternative.new(a, 'button_size').completed_count } - end + it "should not clear out the users session if reset is false" do + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name, {:reset => false}) + expect(ab_user[@experiment.key]).to eq(@alternative_name) + expect(ab_user[@experiment.finished_key]).to eq(true) + end - it "should clear out the user's participation from their session" do - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name) - expect(ab_user.keys).to be_empty - end + it "should reset the users session when experiment is not versioned" do + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name) + expect(ab_user.keys).to be_empty + end - it "should not clear out the users session if reset is false" do - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name, {:reset => false}) - expect(ab_user[@experiment.key]).to eq(@alternative_name) - expect(ab_user[@experiment.finished_key]).to eq(true) - end + it "should reset the users session when experiment is versioned" do + @experiment.increment_version + @alternative_name = ab_test(@experiment_name, *@alternatives) - it "should reset the users session when experiment is not versioned" do - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name) - expect(ab_user.keys).to be_empty - end + expect(ab_user[@experiment.key]).to eq(@alternative_name) + ab_finished(@experiment_name) + expect(ab_user.keys).to be_empty + end - it "should reset the users session when experiment is versioned" do - @experiment.increment_version - @alternative_name = ab_test(@experiment_name, *@alternatives) + context "when on_trial_complete is set" do + before { Split.configuration.on_trial_complete = :some_method } + it "should call the method" do + expect(self).to receive(:some_method) + ab_finished(@experiment_name) + end - expect(ab_user[@experiment.key]).to eq(@alternative_name) - ab_finished(@experiment_name) - expect(ab_user.keys).to be_empty + it "should not call the method without alternative" do + ab_user[@experiment.key] = nil + expect(self).not_to receive(:some_method) + ab_finished(@experiment_name) + end + end end - it "should do nothing where the experiment was not started by this user" do - ab_user = nil - expect(lambda { ab_finished('some_experiment_not_started_by_the_user') }).not_to raise_exception + context 'for an experiment that the user is excluded from' do + before do + alternative = ab_test('link_color', 'blue', 'red') + expect(Split::Alternative.new(alternative, 'link_color').participant_count).to eq(1) + alternative = ab_test('button_size', 'small', 'big') + 
expect(Split::Alternative.new(alternative, 'button_size').participant_count).to eq(0) + end + + it 'should not increment the completed counter' do + # So, user should be participating in the link_color experiment and + # receive the control for button_size. As the user is not participating in + # the button size experiment, finishing it should not increase the + # completion count for that alternative. + expect(lambda { + ab_finished('button_size') + }).not_to change { Split::Alternative.new('small', 'button_size').completed_count } + end end - context "when on_trial_complete is set" do - before { Split.configuration.on_trial_complete = :some_method } - it "should call the method" do - expect(self).to receive(:some_method) - ab_finished(@experiment_name) + context 'for an experiment that the user does not participate in' do + before do + Split::ExperimentCatalog.find_or_create(:not_started_experiment, 'control', 'alt') + end + it 'should not raise an exception' do + expect { ab_finished(:not_started_experiment) }.not_to raise_exception end - it "should not call the method without alternative" do - ab_user[@experiment.key] = nil - expect(self).not_to receive(:some_method) - ab_finished(@experiment_name) + it 'should not change the user state when reset is false' do + expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys}.from([]) + end + + it 'should not change the user state when reset is true' do + expect(self).not_to receive(:reset!) + ab_finished(:not_started_experiment) + end + + it 'should not increment the completed counter' do + ab_finished(:not_started_experiment) + expect(Split::Alternative.new('control', :not_started_experiment).completed_count).to eq(0) + expect(Split::Alternative.new('alt', :not_started_experiment).completed_count).to eq(0) end end end
    <NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end it "should have alternatives" do expect(experiment.alternatives.length).to be 2 end it "should have alternatives with correct names" do expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"]) end it "should be resettable by default" do expect(experiment.resettable).to be_truthy end it "should save to redis" do experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" 
do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it "should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the 
on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end it "should call the on_before_experiment_delete hook" do expect(Split.configuration.on_before_experiment_delete).to receive(:call) experiment.delete end it "should reset the start time if the experiment should be manually started" do Split.configuration.start_manually = true experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it "should keep has_winner state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do 
experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end describe "changing an existing experiment" do def same_but_different_alternative Split::ExperimentCatalog.find_or_create("link_color", "blue", "yellow", "orange") end it "should reset an experiment if it is loaded with different alternatives" do experiment.save blue.participant_count = 5 same_experiment = same_but_different_alternative expect(same_experiment.alternatives.map(&:name)).to eq(["blue", "yellow", "orange"]) expect(blue.participant_count).to eq(0) end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { 
Split::ExperimentCatalog.find_or_create("link_color", :blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals Split::ExperimentCatalog.clear_cache expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Added `cache_catalog` config <DFF> @@ -530,7 +530,6 @@ describe Split::Experiment do it "should reset an experiment if it is loaded with different goals" do same_but_different_goals - 
Split::ExperimentCatalog.clear_cache expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end
<NME> AUTHORS
<BEF>
Ask Solem <[email protected]>
Rune Halvorsen <[email protected]>
Russell Sim <[email protected]>
Brian Rosner <[email protected]>
Hugo Lopes Tavares <[email protected]>
Sverre Johansen <[email protected]>
Bo Shi <[email protected]>
Carl Meyer <[email protected]>
Vinícius das Chagas Silva <[email protected]>
Stefan Foulis <[email protected]>
Michael Richardson <[email protected]>
Halldór Rúnarsson <[email protected]>
Brent Tubbs <[email protected]>
David Cramer <[email protected]>
<MSG> Added Stefan Foulis to AUTHORS
<DFF>
@@ -7,3 +7,4 @@ Sverre Johansen <[email protected]>
 Bo Shi <[email protected]>
 Carl Meyer <[email protected]>
 Vinícius das Chagas Silva <[email protected]>
+Stefan Foulis <[email protected]>
<NME> README.md
<BEF>
# [Split](https://libraries.io/rubygems/split)

[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)
![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)
[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)
[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)
[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)

> 📈 The Rack Based A/B testing framework

https://libraries.io/rubygems/split

Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.

Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.

Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.

## Install

### Requirements

Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).

Split uses Redis as a datastore. Split only supports Redis 4.0 or greater.

If you're on OS X, Homebrew is the simplest way to install Redis:

```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```

You now have a Redis daemon running on port `6379`.

### Setup

```bash
gem install split
```

#### Rails

Adding `gem 'split'` to your Gemfile will autoload it when rails starts up. As long as you've configured Redis, it will 'just work'.

#### Sinatra

To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:

```ruby
require 'split'

class MySinatraApp < Sinatra::Base
  enable :sessions
  helpers Split::Helper

  get '/' do
    ...
  end
end
```

## Usage

To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.

`ab_test` returns one of the alternatives; if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic.

`ab_finished` is used to mark a completion of an experiment, or conversion.

Example: View

```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
  <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```

Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  ab_finished(:new_user_free_points)
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).
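Putting the two calls together, a minimal sketch of the enrol-then-convert flow in a Sinatra app might look like the following; the class, routes, experiment name and alternatives are illustrative, only `ab_test` and `ab_finished` come from Split itself:

```ruby
require 'sinatra/base'
require 'split'

class SignupApp < Sinatra::Base
  enable :sessions
  helpers Split::Helper

  get '/signup' do
    # Enrols the visitor, or returns the alternative they were already given.
    headline = ab_test(:signup_headline, 'Join now', 'Start your free trial')
    "<h1>#{headline}</h1>"
  end

  post '/signups' do
    # ... create the account, then record the conversion ...
    ab_finished(:signup_headline)
    redirect '/welcome'
  end
end
```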
The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will show the new alternative to visitors only 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the URL.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a URL such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count toward the results, unless you set the `store_override` configuration option (sketched at the end of this section).

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.
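The `store_override` behaviour mentioned above is switched on in the usual configure block; a minimal sketch:

```ruby
Split.configure do |config|
  # Persist overridden alternatives (e.g. ?ab_test[button_color]=red)
  # into the user's assignment and count them in the results.
  config.store_override = true
end
```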
### RSpec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default, new A/B tests will be active right after deployment. If you would like to start a new test some time after the deploy, you can do so by setting the `start_manually` configuration option to `true` (see the sketch at the end of this section). With this option, tests won't start right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized.

### Reset after completion

When a user completes a test, their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or if it is over and a winner has been chosen. This allows a user to enroll in any new experiment when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default, Split automatically resets an experiment whenever it detects that the experiment's configuration has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you need to change something, like the variants' names or the metadata about an experiment, without resetting everything.
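Both of these options go in the usual configure block; a minimal sketch combining them:

```ruby
Split.configure do |config|
  config.start_manually = true # new experiments wait for the dashboard's Start button
  config.reset_manually = true # config changes no longer reset collected data
end
```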
### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, you can instead set the `allow_multiple_experiments` config option to `'control'`:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, every experiment you define must have an alternative named 'control'. The user can then participate in multiple experiments as long as they belong to the 'control' alternative in each one. As soon as the user belongs to an alternative named something other than 'control', they may not participate in any more experiments; calling `ab_test(<other experiments>)` will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using cookie persistence, Split stores data in an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set `persistence_cookie_length` in the configuration (in seconds):

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example:

    { "experiment_name" => "variant_a" }

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:

* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets a TTL on the user key (if a user is in multiple experiments, the most recent update resets the TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
  lookup_by: -> (context) { context.send(:current_user).try(:id) },
  expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
    logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
    logged_in_adapter: redis_adapter,
    logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

A custom adapter needs to implement the same API as the built-in adapters; you then point the `persistence` option at your class:

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```
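For reference, the built-in adapters are constructed with the request context and expose a small hash-like interface (`[]`, `[]=`, `delete`, `keys`). The following is a minimal sketch of that shape, inferred from the built-in adapters rather than a documented contract; it uses a volatile in-memory store and a hypothetical `current_user_id` method on the context, so treat it as illustrative only:

```ruby
class MemoryAdapter
  # Class-level store; data is lost on restart. A real adapter would
  # use a durable backend, as the Redis and cookie adapters do.
  STORE = Hash.new { |hash, key| hash[key] = {} }

  def initialize(context)
    # `current_user_id` is a hypothetical method on your controller context.
    @user_store = STORE[context.current_user_id]
  end

  def [](key)
    @user_store[key]
  end

  def []=(key, value)
    @user_store[key] = value
  end

  def delete(key)
    @user_store.delete(key)
  end

  def keys
    @user_store.keys
  end
end
```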
### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.

For example:

```ruby
Split.configure do |config|
  config.on_trial = :log_trial # run on every trial
  config.on_trial_choose = :log_trial_choose # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

```ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

```ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

```ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # before experiment reset or deleted
  config.on_before_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_before_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # after an experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) {
    # Do something on winner choose
  }
end
```

## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2, you can mount this inside your app using `Rack::URLMap` in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"      => Your::App.new,
  "/split" => Split::Dashboard.new
```

However, if you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then add this to config/routes.rb:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your Split initializer file):
```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```

You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate! # authenticate if not already
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  # config.start_manually = false ## new tests will have to be started manually from the admin panel. default false
  # config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```

### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash.
This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    :my_second_experiment => {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => {"text" => "Have a fantastic day"},
        "b" => {"text" => "Don't get hit by a bus"}
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <small><%= meta['text'] %></small>
<% end %>
```

The keys used in metadata should be strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new `Metric` object:

```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like this:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel.
THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  :button_color_experiment => {
    :alternatives => ["blue", "green"],
    :combined_experiments => ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`
* In Sinatra, include the CombinedExperimentsHelper:

```ruby
helpers Split::CombinedExperimentsHelper
```

### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (a proc), for example to log these errors via `Rails.logger`.

### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, repeated fetches of fairly static data can put significant load on Redis. Enabling caching will reduce this load:

```ruby
Split.configuration.cache = true
```

This currently caches:

- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.
```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(:experiment => experiment)

# run trial
trial.choose!

# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum-participant alternatives (i.e. starting a new "block"), the algorithm will choose a random alternative from those minimum-participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
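The interface a custom algorithm must satisfy is not spelled out here, but judging from the built-in algorithms, a class responding to a `choose_alternative(experiment)` class method that returns one of `experiment.alternatives` should slot in. Treat the following as a sketch under that assumed interface (the toy policy is essentially what `BlockRandomization` does, minus the random tie-breaking):

```ruby
module Split
  module Algorithms
    # Toy policy: always pick the alternative with the fewest participants.
    # Purely illustrative of the shape of an algorithm class; the interface
    # is inferred from the built-in algorithms, not from documented API.
    class FewestParticipants
      def self.choose_alternative(experiment)
        experiment.alternatives.min_by(&:participant_count)
      end
    end
  end
end

# Select it globally, as with the built-in algorithms:
Split.configure do |config|
  config.algorithm = Split::Algorithms::FewestParticipants
end
```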
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Merge pull request #321 from rdunlop/master Update Warden Authentication Example to include :delete. <DFF> @@ -317,7 +317,7 @@ end You can even use Devise or any other Warden-based authentication method to authorize users. 
Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby -match "/split" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do +match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin?
    1
    Merge pull request #321 from rdunlop/master
    1
    .md
    md
    mit
    splitrb/split
    10071815
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! 
# authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm, and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    my_second_experiment: {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => { "text" => "Have a fantastic day" },
        "b" => { "text" => "Don't get hit by a bus" }
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <small><%= meta['text'] %></small>
<% end %>
```

The keys used in metadata should be strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new `Metric` object:

```ruby
metric = Split::Metric.new(name: "my_metric")
metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({ link_color: ["purchase", "refund"] }, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be passed as a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test whether listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test whether button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.
**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  button_color_experiment: {
    alternatives: ["blue", "green"],
    combined_experiments: ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`.
* In Sinatra, include the CombinedExperimentsHelper:

  ```
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, you can enable the `db_failover` config option so that `ab_test` and `ab_finished` will not crash in case of a database failure. In that case, `ab_test` always delivers alternative A (the first one).

It's also possible to set a `db_failover_on_db_error` callback (a proc), for example to log these errors via `Rails.logger`.
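Putting the two failover options together, a minimal initializer might look like this (both settings appear in the Configuration section above):

```ruby
Split.configure do |config|
  # Serve the first alternative instead of raising when Redis is down.
  config.db_failover = true
  # Log the underlying Redis error for later inspection.
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```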
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app, we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load:

```ruby
Split.configuration.cache = true
```

This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(experiment: experiment)

# run trial
trial.choose!

# get the result; returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum-participant alternatives (i.e. starting a new "block"), the algorithm will choose a random alternative from those minimum-participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
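To set the algorithm for a single experiment instead, use the `algorithm` key in the experiments hash, as shown in the Experiment configuration section above:

```ruby
Split.configure do |config|
  config.experiments = {
    my_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::Whiplash' # applies to this experiment only
    }
  }
end
```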
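For a custom algorithm, the built-in algorithms respond to a class-level `choose_alternative(experiment)` method; assuming that same interface (a sketch, not a definitive contract; check the built-in algorithm classes for the exact API), a trivial custom strategy might look like:

```ruby
# A purely random strategy that ignores past performance, shown only to
# illustrate the expected interface: receive the experiment, return one
# of its alternatives.
class RandomChoice
  def self.choose_alternative(experiment)
    experiment.alternatives.sample
  end
end

Split.configure do |config|
  config.algorithm = RandomChoice
end
```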
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a>

## Sponsors

Become a sponsor and get your logo on our README on GitHub with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)]

<a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a>

## Contribute

Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors.

### Development

The source code is hosted at [GitHub](https://github.com/splitrb/split).

Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).

You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).

### Tests

Run the tests like this:

    # Start a Redis server in another tab.
    redis-server

    bundle
    rake spec

### A Note on Patches and Pull Requests

* Fork the project.
* Make your feature addition or bug fix.
* Add tests for it. This is important so I don't break it in a future version unintentionally.
* Add documentation if necessary.
* Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)
* Send a pull request. Bonus points for topic branches.

### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.

## Copyright

[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby -match "/split" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do +match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin?
    1
    Merge pull request #321 from rdunlop/master
    1
    .md
    md
    mit
    splitrb/split
    10071816
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! 
# authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. 
THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. 
```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Merge pull request #321 from rdunlop/master Update Warden Authentication Example to include :delete. <DFF> @@ -317,7 +317,7 @@ end You can even use Devise or any other Warden-based authentication method to authorize users. 
Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby -match "/split" => Split::Dashboard, anchor: false, via: [:get, :post], constraints: -> (request) do +match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin?
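Reassembled from the hunk above, the README's example now routes all three verbs through the same Warden constraint. A sketch of the full replacement block for `config/routes.rb` (the closing `end` is implied by the diff's context; per the PR title, `:delete` is added presumably because some dashboard actions, such as deleting an experiment, arrive as DELETE requests):

```ruby
# config/routes.rb — the Warden-protected dashboard mount after this patch.
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate!  # authenticate if not already
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```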
    1
    Merge pull request #321 from rdunlop/master
    1
    .md
    md
    mit
    splitrb/split
    10071817
    <NME> dashboard_helpers_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/dashboard/helpers" include Split::DashboardHelpers describe Split::DashboardHelpers do confidence_level(Complex(2e-18, -0.03)).should eql('No Change') end it "should consider a z-score of 1.96 < z < 2.57 as 95% confident" do confidence_level(2.12).should eql('95% confidence') end it "should consider a z-score of -1.96 > z > -2.57 as 95% confident" do confidence_level(-2.12).should eql('95% confidence') end end end it "should consider a z-score of 1.96 <= z < 2.58 as 95% confident" do expect(confidence_level(1.96)).to eq("95% confidence") expect(confidence_level(2.00)).to eq("95% confidence") end it "should consider a z-score of z >= 2.58 as 99% confident" do expect(confidence_level(2.58)).to eq("99% confidence") expect(confidence_level(3.00)).to eq("99% confidence") end describe "#round" do it "can round number strings" do expect(round("3.1415")).to eq BigDecimal("3.14") end it "can round number strings for precsion" do expect(round("3.1415", 1)).to eq BigDecimal("3.1") end it "can handle invalid number strings" do expect(round("N/A")).to be_zero end end end end <MSG> Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve. We are not interested in negative z numbers. The null hypothesis is that the control is not performing worse. We only have to be confident it's better to disprove the null hypothese, which means we only care about the positive tail of the normal distribution. <DFF> @@ -9,12 +9,9 @@ describe Split::DashboardHelpers do confidence_level(Complex(2e-18, -0.03)).should eql('No Change') end - it "should consider a z-score of 1.96 < z < 2.57 as 95% confident" do - confidence_level(2.12).should eql('95% confidence') + it "should consider a z-score of 1.645 < z < 1.96 as 95% confident" do + confidence_level(1.80).should eql('95% confidence') end - it "should consider a z-score of -1.96 > z > -2.57 as 95% confident" do - confidence_level(-2.12).should eql('95% confidence') - end end end
    2
    Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve.
    5
    .rb
    rb
    mit
    splitrb/split
    10071818
    <NME> dashboard_helpers_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/dashboard/helpers" include Split::DashboardHelpers describe Split::DashboardHelpers do confidence_level(Complex(2e-18, -0.03)).should eql('No Change') end it "should consider a z-score of 1.96 < z < 2.57 as 95% confident" do confidence_level(2.12).should eql('95% confidence') end it "should consider a z-score of -1.96 > z > -2.57 as 95% confident" do confidence_level(-2.12).should eql('95% confidence') end end end it "should consider a z-score of 1.96 <= z < 2.58 as 95% confident" do expect(confidence_level(1.96)).to eq("95% confidence") expect(confidence_level(2.00)).to eq("95% confidence") end it "should consider a z-score of z >= 2.58 as 99% confident" do expect(confidence_level(2.58)).to eq("99% confidence") expect(confidence_level(3.00)).to eq("99% confidence") end describe "#round" do it "can round number strings" do expect(round("3.1415")).to eq BigDecimal("3.14") end it "can round number strings for precsion" do expect(round("3.1415", 1)).to eq BigDecimal("3.1") end it "can handle invalid number strings" do expect(round("N/A")).to be_zero end end end end <MSG> Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve. We are not interested in negative z numbers. The null hypothesis is that the control is not performing worse. We only have to be confident it's better to disprove the null hypothese, which means we only care about the positive tail of the normal distribution. <DFF> @@ -9,12 +9,9 @@ describe Split::DashboardHelpers do confidence_level(Complex(2e-18, -0.03)).should eql('No Change') end - it "should consider a z-score of 1.96 < z < 2.57 as 95% confident" do - confidence_level(2.12).should eql('95% confidence') + it "should consider a z-score of 1.645 < z < 1.96 as 95% confident" do + confidence_level(1.80).should eql('95% confidence') end - it "should consider a z-score of -1.96 > z > -2.57 as 95% confident" do - confidence_level(-2.12).should eql('95% confidence') - end end end
    2
    Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve.
    5
    .rb
    rb
    mit
    splitrb/split
    10071819
    <NME> dashboard_helpers_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/dashboard/helpers" include Split::DashboardHelpers describe Split::DashboardHelpers do confidence_level(Complex(2e-18, -0.03)).should eql('No Change') end it "should consider a z-score of 1.96 < z < 2.57 as 95% confident" do confidence_level(2.12).should eql('95% confidence') end it "should consider a z-score of -1.96 > z > -2.57 as 95% confident" do confidence_level(-2.12).should eql('95% confidence') end end end it "should consider a z-score of 1.96 <= z < 2.58 as 95% confident" do expect(confidence_level(1.96)).to eq("95% confidence") expect(confidence_level(2.00)).to eq("95% confidence") end it "should consider a z-score of z >= 2.58 as 99% confident" do expect(confidence_level(2.58)).to eq("99% confidence") expect(confidence_level(3.00)).to eq("99% confidence") end describe "#round" do it "can round number strings" do expect(round("3.1415")).to eq BigDecimal("3.14") end it "can round number strings for precsion" do expect(round("3.1415", 1)).to eq BigDecimal("3.1") end it "can handle invalid number strings" do expect(round("N/A")).to be_zero end end end end <MSG> Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve. We are not interested in negative z numbers. The null hypothesis is that the control is not performing worse. We only have to be confident it's better to disprove the null hypothese, which means we only care about the positive tail of the normal distribution. <DFF> @@ -9,12 +9,9 @@ describe Split::DashboardHelpers do confidence_level(Complex(2e-18, -0.03)).should eql('No Change') end - it "should consider a z-score of 1.96 < z < 2.57 as 95% confident" do - confidence_level(2.12).should eql('95% confidence') + it "should consider a z-score of 1.645 < z < 1.96 as 95% confident" do + confidence_level(1.80).should eql('95% confidence') end - it "should consider a z-score of -1.96 > z > -2.57 as 95% confident" do - confidence_level(-2.12).should eql('95% confidence') - end end end
    2
    Since we only care if the difference is greater than zero we only need a z-score of 1.65, corresponding to the positive half of the normal curve.
    5
    .rb
    rb
    mit
    splitrb/split
    10071820
    <NME> goals_collection_spec.rb <BEF> ADDFILE <MSG> Introduce GoalsCollection and refactor Towards a more object-oriented approach, i introduced the GoalsCollection class and refactored the code so that the overall complexity of Experiment#save is reduced. <DFF> @@ -0,0 +1,80 @@ +require 'spec_helper' +require 'split/goals_collection' +require 'time' + +describe Split::GoalsCollection do + let(:experiment_name) { 'experiment_name' } + + describe 'initialization' do + let(:goals_collection) { + Split::GoalsCollection.new('experiment_name', ['goal1', 'goal2']) + } + + it "should have an experiment_name" do + expect(goals_collection.instance_variable_get(:@experiment_name)). + to eq('experiment_name') + end + + it "should have a list of goals" do + expect(goals_collection.instance_variable_get(:@goals)). + to eq(['goal1', 'goal2']) + end + end + + describe "#validate!" do + it "should't raise ArgumentError if @goals is nil?" do + goals_collection = Split::GoalsCollection.new('experiment_name') + expect { goals_collection.validate! }.not_to raise_error(ArgumentError) + end + + it "should raise ArgumentError if @goals is not an Array" do + goals_collection = Split::GoalsCollection. + new('experiment_name', 'not an array') + expect { goals_collection.validate! }.to raise_error(ArgumentError) + end + + it "should't raise ArgumentError if @goals is an array" do + goals_collection = Split::GoalsCollection. + new('experiment_name', ['an array']) + expect { goals_collection.validate! }.not_to raise_error(ArgumentError) + end + end + + describe "#delete" do + let(:goals_key) { "#{experiment_name}:goals" } + + it "should delete goals from redis" do + goals_collection = Split::GoalsCollection.new(experiment_name, ['goal1']) + goals_collection.save + + goals_collection.delete + expect(Split.redis.exists(goals_key)).to be false + end + end + + describe "#save" do + let(:goals_key) { "#{experiment_name}:goals" } + + it "should return false if @goals is nil" do + goals_collection = Split::GoalsCollection. + new(experiment_name, nil) + + expect(goals_collection.save).to be false + end + + it "should save goals to redis if @goals is valid" do + goals = ['valid goal 1', 'valid goal 2'] + collection = Split::GoalsCollection.new(experiment_name, goals) + collection.save + + expect(Split.redis.lrange(goals_key, 0, -1)).to eq goals + end + + it "should return @goals if @goals is valid" do + goals_collection = Split::GoalsCollection. + new(experiment_name, ['valid goal']) + + expect(goals_collection.save).to eq(['valid goal']) + end + end +end
    80
    Introduce GoalsCollection and refactor
    0
    .rb
    rb
    mit
    splitrb/split
    10071821
    <NME> goals_collection_spec.rb <BEF> ADDFILE <MSG> Introduce GoalsCollection and refactor Towards a more object-oriented approach, i introduced the GoalsCollection class and refactored the code so that the overall complexity of Experiment#save is reduced. <DFF> @@ -0,0 +1,80 @@ +require 'spec_helper' +require 'split/goals_collection' +require 'time' + +describe Split::GoalsCollection do + let(:experiment_name) { 'experiment_name' } + + describe 'initialization' do + let(:goals_collection) { + Split::GoalsCollection.new('experiment_name', ['goal1', 'goal2']) + } + + it "should have an experiment_name" do + expect(goals_collection.instance_variable_get(:@experiment_name)). + to eq('experiment_name') + end + + it "should have a list of goals" do + expect(goals_collection.instance_variable_get(:@goals)). + to eq(['goal1', 'goal2']) + end + end + + describe "#validate!" do + it "should't raise ArgumentError if @goals is nil?" do + goals_collection = Split::GoalsCollection.new('experiment_name') + expect { goals_collection.validate! }.not_to raise_error(ArgumentError) + end + + it "should raise ArgumentError if @goals is not an Array" do + goals_collection = Split::GoalsCollection. + new('experiment_name', 'not an array') + expect { goals_collection.validate! }.to raise_error(ArgumentError) + end + + it "should't raise ArgumentError if @goals is an array" do + goals_collection = Split::GoalsCollection. + new('experiment_name', ['an array']) + expect { goals_collection.validate! }.not_to raise_error(ArgumentError) + end + end + + describe "#delete" do + let(:goals_key) { "#{experiment_name}:goals" } + + it "should delete goals from redis" do + goals_collection = Split::GoalsCollection.new(experiment_name, ['goal1']) + goals_collection.save + + goals_collection.delete + expect(Split.redis.exists(goals_key)).to be false + end + end + + describe "#save" do + let(:goals_key) { "#{experiment_name}:goals" } + + it "should return false if @goals is nil" do + goals_collection = Split::GoalsCollection. + new(experiment_name, nil) + + expect(goals_collection.save).to be false + end + + it "should save goals to redis if @goals is valid" do + goals = ['valid goal 1', 'valid goal 2'] + collection = Split::GoalsCollection.new(experiment_name, goals) + collection.save + + expect(Split.redis.lrange(goals_key, 0, -1)).to eq goals + end + + it "should return @goals if @goals is valid" do + goals_collection = Split::GoalsCollection. + new(experiment_name, ['valid goal']) + + expect(goals_collection.save).to eq(['valid goal']) + end + end +end
    80
    Introduce GoalsCollection and refactor
    0
    .rb
    rb
    mit
    splitrb/split
    10071822
    <NME> goals_collection_spec.rb <BEF> ADDFILE <MSG> Introduce GoalsCollection and refactor Towards a more object-oriented approach, i introduced the GoalsCollection class and refactored the code so that the overall complexity of Experiment#save is reduced. <DFF> @@ -0,0 +1,80 @@ +require 'spec_helper' +require 'split/goals_collection' +require 'time' + +describe Split::GoalsCollection do + let(:experiment_name) { 'experiment_name' } + + describe 'initialization' do + let(:goals_collection) { + Split::GoalsCollection.new('experiment_name', ['goal1', 'goal2']) + } + + it "should have an experiment_name" do + expect(goals_collection.instance_variable_get(:@experiment_name)). + to eq('experiment_name') + end + + it "should have a list of goals" do + expect(goals_collection.instance_variable_get(:@goals)). + to eq(['goal1', 'goal2']) + end + end + + describe "#validate!" do + it "should't raise ArgumentError if @goals is nil?" do + goals_collection = Split::GoalsCollection.new('experiment_name') + expect { goals_collection.validate! }.not_to raise_error(ArgumentError) + end + + it "should raise ArgumentError if @goals is not an Array" do + goals_collection = Split::GoalsCollection. + new('experiment_name', 'not an array') + expect { goals_collection.validate! }.to raise_error(ArgumentError) + end + + it "should't raise ArgumentError if @goals is an array" do + goals_collection = Split::GoalsCollection. + new('experiment_name', ['an array']) + expect { goals_collection.validate! }.not_to raise_error(ArgumentError) + end + end + + describe "#delete" do + let(:goals_key) { "#{experiment_name}:goals" } + + it "should delete goals from redis" do + goals_collection = Split::GoalsCollection.new(experiment_name, ['goal1']) + goals_collection.save + + goals_collection.delete + expect(Split.redis.exists(goals_key)).to be false + end + end + + describe "#save" do + let(:goals_key) { "#{experiment_name}:goals" } + + it "should return false if @goals is nil" do + goals_collection = Split::GoalsCollection. + new(experiment_name, nil) + + expect(goals_collection.save).to be false + end + + it "should save goals to redis if @goals is valid" do + goals = ['valid goal 1', 'valid goal 2'] + collection = Split::GoalsCollection.new(experiment_name, goals) + collection.save + + expect(Split.redis.lrange(goals_key, 0, -1)).to eq goals + end + + it "should return @goals if @goals is valid" do + goals_collection = Split::GoalsCollection. + new(experiment_name, ['valid goal']) + + expect(goals_collection.save).to eq(['valid goal']) + end + end +end
    80
    Introduce GoalsCollection and refactor
    0
    .rb
    rb
    mit
    splitrb/split
    10071823
    <NME> jquery.meow.js <BEF> (function ($, window) { 'use strict'; // Meow queue var default_meow_area, meows = { queue: {}, add: function (meow) { this.queue[meow.timestamp] = meow; }, get: function (timestamp) { return this.queue[timestamp]; }, remove: function (timestamp) { delete this.queue[timestamp]; }, size: function () { var timestamp, size = 0; for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; } }, // Meow constructor Meow = function (options) { var that = this; this.timestamp = new Date().getTime(); // used to identify this meow and timeout this.hovered = false; // whether mouse is over or not if (typeof default_meow_area === 'undefined' && typeof options.container === 'undefined') { default_meow_area = $(window.document.createElement('div')) .attr({'id': ((new Date()).getTime()), 'class': 'meows'}); $('body').prepend(default_meow_area); } if (meows.size() <= 0) { if (typeof options.beforeCreateFirst === 'function') { options.beforeCreateFirst.call(that); } } if (typeof options.container === 'string') { this.container = $(options.container); } else { this.container = default_meow_area; } if (typeof options.title === 'string') { this.title = options.title; } if (typeof options.message === 'string') { this.message = options.message; } else if (options.message instanceof $) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else { this.message = options.message.text(); } if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') { this.title = options.message.attr('title'); } } if (typeof options.icon === 'string') { this.icon = options.icon; } if (options.sticky) { this.duration = Infinity; } else { this.duration = options.duration || 5000; } // Call callback if it's defined (this = meow object) if (typeof options.beforeCreate === 'function') { options.beforeCreate.call(that); } // Add the meow to the meow area this.container.append($(window.document.createElement('div')) .attr('id', 'meow-' + this.timestamp.toString()) .addClass('meow') .html($(window.document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); this.manifest = $('#meow-' + this.timestamp.toString()); // Add title if it's defined if (typeof this.title === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('h1')).text(this.title) ); } // Add icon if it's defined if (typeof that.icon === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('div')).addClass('icon').html( $(window.document.createElement('img')).attr('src', this.icon) ) ); } // Add close button if the meow isn't uncloseable // TODO: this close button needs to be much prettier if (options.closeable !== false) { this.manifest.find('.inner').prepend( $(window.document.createElement('a')) .addClass('close') .html('&times;') .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); }) ); } this.manifest.bind('mouseenter mouseleave', function (event) { if (event.type === 'mouseleave') { that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out if (that.timestamp + that.duration <= new Date().getTime()) { that.destroy(); } } else { that.hovered = true; that.manifest.addClass('hover'); } }); // Add a timeout if the duration isn't Infinity if (this.duration !== Infinity) { this.timeout = window.setTimeout(function () { // Make sure this meow hasn't 
already been destroyed if (typeof meows.get(that.timestamp) !== 'undefined') { // Call callback if it's defined (this = meow DOM element) if (typeof options.onTimeout === 'function') { options.onTimeout.call(that.manifest); } // Don't destroy if user is hovering over meow that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out if (that.timestamp + that.duration <= new Date.getTime()) { that.destroy(); } } else { this.destroy = function () { if (that.destroyed !== true) { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); that.destroyed = true; meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { if (default_meow_area instanceof $) { default_meow_area.remove(); default_meow_area = undefined; } if (typeof options.afterDestroyLast === 'function') { options.afterDestroyLast.call(null); } } }); }); } }; }; $.fn.meow = function (args) { var meow = new Meow(args); meows.add(meow); return meow; }; $.meow = $.fn.meow; }(jQuery, window)); <MSG> fixed line 152. You were right Chris. Forgot some parens <DFF> @@ -149,7 +149,7 @@ that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out - if (that.timestamp + that.duration <= new Date.getTime()) { + if (that.timestamp + that.duration <= new Date().getTime()) { that.destroy(); } } else {
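The one-character bug here is precedence: in JavaScript, member access binds tighter than `new` without an argument list, so `new Date.getTime()` parses as `new (Date.getTime)()`. Since `getTime` lives on `Date.prototype` rather than on the `Date` constructor itself, `Date.getTime` is `undefined` and the timeout callback threw a `TypeError` instead of destroying the meow. The corrected `new Date().getTime()` constructs a `Date` first and then reads the timestamp, matching the already-correct comparison in the mouseleave handler earlier in the file.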
    1
    fixed line 152. You were right Chris. Forgot some parens
    1
    .js
    meow
    mit
    zacstewart/Meow
    10071824
    <NME> jquery.meow.js <BEF> (function ($, window) { 'use strict'; // Meow queue var default_meow_area, meows = { queue: {}, add: function (meow) { this.queue[meow.timestamp] = meow; }, get: function (timestamp) { return this.queue[timestamp]; }, remove: function (timestamp) { delete this.queue[timestamp]; }, size: function () { var timestamp, size = 0; for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; } }, // Meow constructor Meow = function (options) { var that = this; this.timestamp = new Date().getTime(); // used to identify this meow and timeout this.hovered = false; // whether mouse is over or not if (typeof default_meow_area === 'undefined' && typeof options.container === 'undefined') { default_meow_area = $(window.document.createElement('div')) .attr({'id': ((new Date()).getTime()), 'class': 'meows'}); $('body').prepend(default_meow_area); } if (meows.size() <= 0) { if (typeof options.beforeCreateFirst === 'function') { options.beforeCreateFirst.call(that); } } if (typeof options.container === 'string') { this.container = $(options.container); } else { this.container = default_meow_area; } if (typeof options.title === 'string') { this.title = options.title; } if (typeof options.message === 'string') { this.message = options.message; } else if (options.message instanceof $) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else { this.message = options.message.text(); } if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') { this.title = options.message.attr('title'); } } if (typeof options.icon === 'string') { this.icon = options.icon; } if (options.sticky) { this.duration = Infinity; } else { this.duration = options.duration || 5000; } // Call callback if it's defined (this = meow object) if (typeof options.beforeCreate === 'function') { options.beforeCreate.call(that); } // Add the meow to the meow area this.container.append($(window.document.createElement('div')) .attr('id', 'meow-' + this.timestamp.toString()) .addClass('meow') .html($(window.document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); this.manifest = $('#meow-' + this.timestamp.toString()); // Add title if it's defined if (typeof this.title === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('h1')).text(this.title) ); } // Add icon if it's defined if (typeof that.icon === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('div')).addClass('icon').html( $(window.document.createElement('img')).attr('src', this.icon) ) ); } // Add close button if the meow isn't uncloseable // TODO: this close button needs to be much prettier if (options.closeable !== false) { this.manifest.find('.inner').prepend( $(window.document.createElement('a')) .addClass('close') .html('&times;') .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); }) ); } this.manifest.bind('mouseenter mouseleave', function (event) { if (event.type === 'mouseleave') { that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out if (that.timestamp + that.duration <= new Date().getTime()) { that.destroy(); } } else { that.hovered = true; that.manifest.addClass('hover'); } }); // Add a timeout if the duration isn't Infinity if (this.duration !== Infinity) { this.timeout = window.setTimeout(function () { // Make sure this meow hasn't 
already been destroyed if (typeof meows.get(that.timestamp) !== 'undefined') { // Call callback if it's defined (this = meow DOM element) if (typeof options.onTimeout === 'function') { options.onTimeout.call(that.manifest); } // Don't destroy if user is hovering over meow that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out if (that.timestamp + that.duration <= new Date.getTime()) { that.destroy(); } } else { this.destroy = function () { if (that.destroyed !== true) { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); that.destroyed = true; meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { if (default_meow_area instanceof $) { default_meow_area.remove(); default_meow_area = undefined; } if (typeof options.afterDestroyLast === 'function') { options.afterDestroyLast.call(null); } } }); }); } }; }; $.fn.meow = function (args) { var meow = new Meow(args); meows.add(meow); return meow; }; $.meow = $.fn.meow; }(jQuery, window)); <MSG> fixed line 152. You were right Chris. Forgot some parens <DFF> @@ -149,7 +149,7 @@ that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out - if (that.timestamp + that.duration <= new Date.getTime()) { + if (that.timestamp + that.duration <= new Date().getTime()) { that.destroy(); } } else {
    1
    fixed line 152. You were right Chris. Forgot some parens
    1
    .js
    meow
    mit
    zacstewart/Meow
    10071825
    <NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' s.add_development_dependency 'bundler', '~> 1.14' s.add_development_dependency 'simplecov', '~> 0.15' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12' s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Merge pull request #545 from splitrb/fix-travis-build Update travis config and add Ruby 2.6.0 <DFF> @@ -34,7 +34,7 @@ Gem::Specification.new do |s| s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' - s.add_development_dependency 'bundler', '~> 1.14' + s.add_development_dependency 'bundler', '>= 1.17' s.add_development_dependency 'simplecov', '~> 0.15' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12'
    1
    Merge pull request #545 from splitrb/fix-travis-build
    1
    .gemspec
    gemspec
    mit
    splitrb/split
    10071826
    <NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' s.add_development_dependency 'bundler', '~> 1.14' s.add_development_dependency 'simplecov', '~> 0.15' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12' s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Merge pull request #545 from splitrb/fix-travis-build Update travis config and add Ruby 2.6.0 <DFF> @@ -34,7 +34,7 @@ Gem::Specification.new do |s| s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' - s.add_development_dependency 'bundler', '~> 1.14' + s.add_development_dependency 'bundler', '>= 1.17' s.add_development_dependency 'simplecov', '~> 0.15' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12'
    1
    Merge pull request #545 from splitrb/fix-travis-build
    1
    .gemspec
    gemspec
    mit
    splitrb/split
    10071827
    <NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' s.add_development_dependency 'bundler', '~> 1.14' s.add_development_dependency 'simplecov', '~> 0.15' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12' s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Merge pull request #545 from splitrb/fix-travis-build Update travis config and add Ruby 2.6.0 <DFF> @@ -34,7 +34,7 @@ Gem::Specification.new do |s| s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' - s.add_development_dependency 'bundler', '~> 1.14' + s.add_development_dependency 'bundler', '>= 1.17' s.add_development_dependency 'simplecov', '~> 0.15' s.add_development_dependency 'rack-test', '~> 0.6' s.add_development_dependency 'rake', '~> 12'
    1
    Merge pull request #545 from splitrb/fix-travis-build
    1
    .gemspec
    gemspec
    mit
    splitrb/split
    10071828
    <NME> parser.ts <BEF> import { strictEqual as equal, throws } from 'assert'; import parser from '../src/parser'; import tokenizer from '../src/tokenizer'; import stringify from './assets/stringify'; import { ParserOptions } from '../src'; const parse = (abbr: string, options?: ParserOptions) => parser(tokenizer(abbr), options); const str = (abbr: string, options?: ParserOptions) => stringify(parse(abbr, options)); describe('Parser', () => { it('basic abbreviations', () => { equal(str('p'), '<p></p>'); equal(str('p{text}'), '<p>text</p>'); equal(str('div.width1\\/2'), '<div class=width1/2></div>'); equal(str('#sample*3'), '<?*3 id=sample></?>'); equal(str('a>b'), '<a><b></b></a>'); equal(str('a+b'), '<a></a><b></b>'); equal(str('a+b>c+d'), '<a></a><b><c></c><d></d></b>'); equal(str('a+b'), '<a></a><b></b>'); equal(str('a+b>c+d'), '<a></a><b><c></c><d></d></b>'); equal(str('a>b>c+e'), '<a><b><c></c><e></e></b></a>'); equal(str('a>b>c^d'), '<a><b><c></c></b><d></d></a>'); equal(str('a>b>c^^^^d'), '<a><b><c></c></b></a><d></d>'); equal(str('a:b>c'), '<a:b><c></c></a:b>'); equal(str('ul.nav[title="foo"]'), '<ul class=nav title="foo"></ul>'); }); it('groups', () => { equal(str('a>(b>c)+d'), '<a>(<b><c></c></b>)<d></d></a>'); equal(str('(a>b)+(c>d)'), '(<a><b></b></a>)(<c><d></d></c>)'); equal(str('a>((b>c)(d>e))f'), '<a>((<b><c></c></b>)(<d><e></e></d>))<f></f></a>'); equal(str('a>((((b>c))))+d'), '<a>((((<b><c></c></b>))))<d></d></a>'); equal(str('a>(((b>c))*4)+d'), '<a>(((<b><c></c></b>))*4)<d></d></a>'); equal(str('(div>dl>(dt+dd)*2)'), '(<div><dl>(<dt></dt><dd></dd>)*2</dl></div>)'); equal(str('a>()'), '<a>()</a>'); }); it('attributes', () => { equal(str('[].foo'), '<? class=foo></?>'); equal(str('[a]'), '<? a></?>'); equal(str('[a b c [d]]'), '<? a b c [d]></?>'); equal(str('[a=b]'), '<? a=b></?>'); equal(str('[a=b c= d=e]'), '<? a=b c d=e></?>'); equal(str('[a=b.c d=тест]'), '<? a=b.c d=тест></?>'); equal(str('[[a]=b (c)=d]'), '<? [a]=b (c)=d></?>'); // Quoted attribute values equal(str('[a="b"]'), '<? a="b"></?>'); equal(str('[a="b" c=\'d\' e=""]'), '<? a="b" c=\'d\' e=""></?>'); equal(str('[[a]="b" (c)=\'d\']'), '<? [a]="b" (c)=\'d\'></?>'); // Mixed quoted equal(str('[a="foo\'bar" b=\'foo"bar\' c="foo\\\"bar"]'), '<? a="foo\'bar" b=\'foo"bar\' c="foo"bar"></?>'); // Boolean & implied attributes equal(str('[a. b.]'), '<? a. b.></?>'); equal(str('[!a !b.]'), '<? !a !b.></?>'); // Default values equal(str('["a.b"]'), '<? ?="a.b"></?>'); equal(str('[\'a.b\' "c=d" foo=bar "./test.html"]'), '<? ?=\'a.b\' ?="c=d" foo=bar ?="./test.html"></?>'); // Expressions as values equal(str('[foo={1 + 2} bar={fn(1, "foo")}]'), '<? foo={1 + 2} bar={fn(1, "foo")}></?>'); // Tabstops as unquoted values equal(str('[name=${1} value=${2:test}]'), '<? name=${1} value=${2:test}></?>'); }); it('malformed attributes', () => { equal(str('[a'), '<? a></?>'); equal(str('[a={foo]'), '<? a={foo]></?>'); throws(() => str('[a="foo]'), /Unclosed quote/); throws(() => str('[a=b=c]'), /Unexpected "Operator" token/); }); it('elements', () => { equal(str('div'), '<div></div>'); equal(str('div.foo'), '<div class=foo></div>'); equal(str('div#foo'), '<div id=foo></div>'); equal(str('div#foo.bar'), '<div id=foo class=bar></div>'); equal(str('div.foo#bar'), '<div class=foo id=bar></div>'); equal(str('div.foo.bar.baz'), '<div class=foo class=bar class=baz></div>'); equal(str('.foo'), '<? class=foo></?>'); equal(str('#foo'), '<? id=foo></?>'); equal(str('.foo_bar'), '<? class=foo_bar></?>'); equal(str('#foo.bar'), '<? 
id=foo class=bar></?>'); // Attribute shorthands equal(str('.'), '<? class></?>'); equal(str('#'), '<? id></?>'); equal(str('#.'), '<? id class></?>'); equal(str('.#.'), '<? class id class></?>'); equal(str('.a..'), '<? class=a class></?>'); // Elements with attributes equal(str('div[foo=bar]'), '<div foo=bar></div>'); equal(str('div.a[b=c]'), '<div class=a b=c></div>'); equal(str('div[b=c].a'), '<div b=c class=a></div>'); equal(str('div[a=b][c="d"]'), '<div a=b c="d"></div>'); equal(str('[b=c]'), '<? b=c></?>'); equal(str('.a[b=c]'), '<? class=a b=c></?>'); equal(str('[b=c].a#d'), '<? b=c class=a id=d></?>'); equal(str('[b=c]a'), '<? b=c></?><a></a>', 'Do not consume node name after attribute set'); // Element with text equal(str('div{foo}'), '<div>foo</div>'); equal(str('{foo}'), '<?>foo</?>'); // Mixed equal(str('div.foo{bar}'), '<div class=foo>bar</div>'); equal(str('.foo{bar}#baz'), '<? class=foo id=baz>bar</?>'); equal(str('.foo[b=c]{bar}'), '<? class=foo b=c>bar</?>'); // Repeated element equal(str('div.foo*3'), '<div*3 class=foo></div>'); equal(str('.foo*'), '<?* class=foo></?>'); equal(str('.a[b=c]*10'), '<?*10 class=a b=c></?>'); equal(str('.a*10[b=c]'), '<?*10 class=a b=c></?>'); equal(str('.a*10{text}'), '<?*10 class=a>text</?>'); // Self-closing element equal(str('div/'), '<div />'); equal(str('.foo/'), '<? class=foo />'); equal(str('.foo[bar]/'), '<? class=foo bar />'); equal(str('.foo/*3'), '<?*3 class=foo />'); equal(str('.foo*3/'), '<?*3 class=foo />'); throws(() => parse('/'), /Unexpected character/); }); it('JSX', () => { const opt = { jsx: true }; equal(str('foo.bar', opt), '<foo class=bar></foo>'); equal(str('Foo.bar', opt), '<Foo class=bar></Foo>'); equal(str('Foo.Bar', opt), '<Foo.Bar></Foo.Bar>'); equal(str('Foo.', opt), '<Foo class></Foo>'); equal(str('Foo.Bar.baz', opt), '<Foo.Bar class=baz></Foo.Bar>'); equal(str('Foo.Bar.Baz', opt), '<Foo.Bar.Baz></Foo.Bar.Baz>'); equal(str('.{theme.class}', opt), '<? class=theme.class></?>'); equal(str('#{id}', opt), '<? id=id></?>'); equal(str('Foo.{theme.class}', opt), '<Foo class=theme.class></Foo>'); }); it('errors', () => { throws(() => parse('str?'), /Unexpected character at 4/); throws(() => parse('foo,bar'), /Unexpected character at 4/); equal(str('foo\\,bar'), '<foo,bar></foo,bar>'); equal(str('foo\\'), '<foo></foo>'); }); it('missing braces', () => { // Do not throw errors on missing closing braces equal(str('div[title="test"'), '<div title="test"></div>'); equal(str('div(foo'), '<div></div>(<foo></foo>)'); equal(str('div{foo'), '<div>foo</div>'); }); }); <MSG> Ensure dots in attributes are supported Fixes #562 <DFF> @@ -14,6 +14,9 @@ describe('Parser', () => { equal(str('div.width1\\/2'), '<div class=width1/2></div>'); equal(str('#sample*3'), '<?*3 id=sample></?>'); + // https://github.com/emmetio/emmet/issues/562 + equal(str('li[repeat.for="todo of todoList"]'), '<li repeat.for="todo of todoList"></li>', 'Dots in attribute names'); + equal(str('a>b'), '<a><b></b></a>'); equal(str('a+b'), '<a></a><b></b>'); equal(str('a+b>c+d'), '<a></a><b><c></c><d></d></b>');
    3
    Ensure dots in attributes are supported
    0
    .ts
    ts
    mit
    emmetio/emmet
    10071829
    <NME> parser.ts <BEF> import { strictEqual as equal, throws } from 'assert'; import parser from '../src/parser'; import tokenizer from '../src/tokenizer'; import stringify from './assets/stringify'; import { ParserOptions } from '../src'; const parse = (abbr: string, options?: ParserOptions) => parser(tokenizer(abbr), options); const str = (abbr: string, options?: ParserOptions) => stringify(parse(abbr, options)); describe('Parser', () => { it('basic abbreviations', () => { equal(str('p'), '<p></p>'); equal(str('p{text}'), '<p>text</p>'); equal(str('div.width1\\/2'), '<div class=width1/2></div>'); equal(str('#sample*3'), '<?*3 id=sample></?>'); equal(str('a>b'), '<a><b></b></a>'); equal(str('a+b'), '<a></a><b></b>'); equal(str('a+b>c+d'), '<a></a><b><c></c><d></d></b>'); equal(str('a+b'), '<a></a><b></b>'); equal(str('a+b>c+d'), '<a></a><b><c></c><d></d></b>'); equal(str('a>b>c+e'), '<a><b><c></c><e></e></b></a>'); equal(str('a>b>c^d'), '<a><b><c></c></b><d></d></a>'); equal(str('a>b>c^^^^d'), '<a><b><c></c></b></a><d></d>'); equal(str('a:b>c'), '<a:b><c></c></a:b>'); equal(str('ul.nav[title="foo"]'), '<ul class=nav title="foo"></ul>'); }); it('groups', () => { equal(str('a>(b>c)+d'), '<a>(<b><c></c></b>)<d></d></a>'); equal(str('(a>b)+(c>d)'), '(<a><b></b></a>)(<c><d></d></c>)'); equal(str('a>((b>c)(d>e))f'), '<a>((<b><c></c></b>)(<d><e></e></d>))<f></f></a>'); equal(str('a>((((b>c))))+d'), '<a>((((<b><c></c></b>))))<d></d></a>'); equal(str('a>(((b>c))*4)+d'), '<a>(((<b><c></c></b>))*4)<d></d></a>'); equal(str('(div>dl>(dt+dd)*2)'), '(<div><dl>(<dt></dt><dd></dd>)*2</dl></div>)'); equal(str('a>()'), '<a>()</a>'); }); it('attributes', () => { equal(str('[].foo'), '<? class=foo></?>'); equal(str('[a]'), '<? a></?>'); equal(str('[a b c [d]]'), '<? a b c [d]></?>'); equal(str('[a=b]'), '<? a=b></?>'); equal(str('[a=b c= d=e]'), '<? a=b c d=e></?>'); equal(str('[a=b.c d=тест]'), '<? a=b.c d=тест></?>'); equal(str('[[a]=b (c)=d]'), '<? [a]=b (c)=d></?>'); // Quoted attribute values equal(str('[a="b"]'), '<? a="b"></?>'); equal(str('[a="b" c=\'d\' e=""]'), '<? a="b" c=\'d\' e=""></?>'); equal(str('[[a]="b" (c)=\'d\']'), '<? [a]="b" (c)=\'d\'></?>'); // Mixed quoted equal(str('[a="foo\'bar" b=\'foo"bar\' c="foo\\\"bar"]'), '<? a="foo\'bar" b=\'foo"bar\' c="foo"bar"></?>'); // Boolean & implied attributes equal(str('[a. b.]'), '<? a. b.></?>'); equal(str('[!a !b.]'), '<? !a !b.></?>'); // Default values equal(str('["a.b"]'), '<? ?="a.b"></?>'); equal(str('[\'a.b\' "c=d" foo=bar "./test.html"]'), '<? ?=\'a.b\' ?="c=d" foo=bar ?="./test.html"></?>'); // Expressions as values equal(str('[foo={1 + 2} bar={fn(1, "foo")}]'), '<? foo={1 + 2} bar={fn(1, "foo")}></?>'); // Tabstops as unquoted values equal(str('[name=${1} value=${2:test}]'), '<? name=${1} value=${2:test}></?>'); }); it('malformed attributes', () => { equal(str('[a'), '<? a></?>'); equal(str('[a={foo]'), '<? a={foo]></?>'); throws(() => str('[a="foo]'), /Unclosed quote/); throws(() => str('[a=b=c]'), /Unexpected "Operator" token/); }); it('elements', () => { equal(str('div'), '<div></div>'); equal(str('div.foo'), '<div class=foo></div>'); equal(str('div#foo'), '<div id=foo></div>'); equal(str('div#foo.bar'), '<div id=foo class=bar></div>'); equal(str('div.foo#bar'), '<div class=foo id=bar></div>'); equal(str('div.foo.bar.baz'), '<div class=foo class=bar class=baz></div>'); equal(str('.foo'), '<? class=foo></?>'); equal(str('#foo'), '<? id=foo></?>'); equal(str('.foo_bar'), '<? class=foo_bar></?>'); equal(str('#foo.bar'), '<? 
id=foo class=bar></?>'); // Attribute shorthands equal(str('.'), '<? class></?>'); equal(str('#'), '<? id></?>'); equal(str('#.'), '<? id class></?>'); equal(str('.#.'), '<? class id class></?>'); equal(str('.a..'), '<? class=a class></?>'); // Elements with attributes equal(str('div[foo=bar]'), '<div foo=bar></div>'); equal(str('div.a[b=c]'), '<div class=a b=c></div>'); equal(str('div[b=c].a'), '<div b=c class=a></div>'); equal(str('div[a=b][c="d"]'), '<div a=b c="d"></div>'); equal(str('[b=c]'), '<? b=c></?>'); equal(str('.a[b=c]'), '<? class=a b=c></?>'); equal(str('[b=c].a#d'), '<? b=c class=a id=d></?>'); equal(str('[b=c]a'), '<? b=c></?><a></a>', 'Do not consume node name after attribute set'); // Element with text equal(str('div{foo}'), '<div>foo</div>'); equal(str('{foo}'), '<?>foo</?>'); // Mixed equal(str('div.foo{bar}'), '<div class=foo>bar</div>'); equal(str('.foo{bar}#baz'), '<? class=foo id=baz>bar</?>'); equal(str('.foo[b=c]{bar}'), '<? class=foo b=c>bar</?>'); // Repeated element equal(str('div.foo*3'), '<div*3 class=foo></div>'); equal(str('.foo*'), '<?* class=foo></?>'); equal(str('.a[b=c]*10'), '<?*10 class=a b=c></?>'); equal(str('.a*10[b=c]'), '<?*10 class=a b=c></?>'); equal(str('.a*10{text}'), '<?*10 class=a>text</?>'); // Self-closing element equal(str('div/'), '<div />'); equal(str('.foo/'), '<? class=foo />'); equal(str('.foo[bar]/'), '<? class=foo bar />'); equal(str('.foo/*3'), '<?*3 class=foo />'); equal(str('.foo*3/'), '<?*3 class=foo />'); throws(() => parse('/'), /Unexpected character/); }); it('JSX', () => { const opt = { jsx: true }; equal(str('foo.bar', opt), '<foo class=bar></foo>'); equal(str('Foo.bar', opt), '<Foo class=bar></Foo>'); equal(str('Foo.Bar', opt), '<Foo.Bar></Foo.Bar>'); equal(str('Foo.', opt), '<Foo class></Foo>'); equal(str('Foo.Bar.baz', opt), '<Foo.Bar class=baz></Foo.Bar>'); equal(str('Foo.Bar.Baz', opt), '<Foo.Bar.Baz></Foo.Bar.Baz>'); equal(str('.{theme.class}', opt), '<? class=theme.class></?>'); equal(str('#{id}', opt), '<? id=id></?>'); equal(str('Foo.{theme.class}', opt), '<Foo class=theme.class></Foo>'); }); it('errors', () => { throws(() => parse('str?'), /Unexpected character at 4/); throws(() => parse('foo,bar'), /Unexpected character at 4/); equal(str('foo\\,bar'), '<foo,bar></foo,bar>'); equal(str('foo\\'), '<foo></foo>'); }); it('missing braces', () => { // Do not throw errors on missing closing braces equal(str('div[title="test"'), '<div title="test"></div>'); equal(str('div(foo'), '<div></div>(<foo></foo>)'); equal(str('div{foo'), '<div>foo</div>'); }); }); <MSG> Ensure dots in attributes are supported Fixes #562 <DFF> @@ -14,6 +14,9 @@ describe('Parser', () => { equal(str('div.width1\\/2'), '<div class=width1/2></div>'); equal(str('#sample*3'), '<?*3 id=sample></?>'); + // https://github.com/emmetio/emmet/issues/562 + equal(str('li[repeat.for="todo of todoList"]'), '<li repeat.for="todo of todoList"></li>', 'Dots in attribute names'); + equal(str('a>b'), '<a><b></b></a>'); equal(str('a+b'), '<a></a><b></b>'); equal(str('a+b>c+d'), '<a></a><b><c></c><d></d></b>');
    3
    Ensure dots in attributes are supported
    0
    .ts
    ts
    mit
    emmetio/emmet
    10071830
    <NME> transducers.js <BEF> var transducers = /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) /******/ return installedModules[moduleId].exports; /******/ /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ exports: {}, /******/ id: moduleId, /******/ loaded: false /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.loaded = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(0); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ /***/ function(module, exports, __webpack_require__) { // basic protocol helpers var symbolExists = typeof Symbol !== 'undefined'; var protocols = { iterator: symbolExists ? Symbol.iterator : '@@iterator', transformer: symbolExists ? Symbol('transformer') : '@@transformer' }; function throwProtocolError(name, coll) { throw new Error("don't know how to " + name + " collection: " + coll); } function fulfillsProtocol(obj, name) { if(name === 'iterator') { // Accept ill-formed iterators that don't conform to the // protocol by accepting just next() return obj[protocols.iterator] || obj.next; } return obj[protocols[name]]; } function getProtocolProperty(obj, name) { return obj[protocols[name]]; } function iterator(coll) { var iter = getProtocolProperty(coll, 'iterator'); if(iter) { return iter.call(coll); } else if(coll.next) { // Basic duck typing to accept an ill-formed iterator that doesn't // conform to the iterator protocol (all iterators should have the // @@iterator method and return themselves, but some engines don't // have that on generators like older v8) return coll; } else if(isArray(coll)) { return new ArrayIterator(coll); } else if(isObject(coll)) { return new ObjectIterator(coll); } } function ArrayIterator(arr) { this.arr = arr; this.index = 0; } ArrayIterator.prototype.next = function() { if(this.index < this.arr.length) { return { value: this.arr[this.index++], done: false }; } return { done: true } }; function ObjectIterator(obj) { this.obj = obj; this.keys = Object.keys(obj); this.index = 0; } ObjectIterator.prototype.next = function() { if(this.index < this.keys.length) { var k = this.keys[this.index++]; return { value: [k, this.obj[k]], done: false }; } return { done: true } }; // helpers var toString = Object.prototype.toString; var isArray = typeof Array.isArray === 'function' ? 
Array.isArray : function(obj) { return toString.call(obj) == '[object Array]'; }; function isFunction(x) { return typeof x === 'function'; } function isObject(x) { return x instanceof Object && Object.getPrototypeOf(x) === Object.getPrototypeOf({}); } function isNumber(x) { return typeof x === 'number'; } function Reduced(val) { this.val = val; } function reduce(coll, xform, init) { if(isArray(coll)) { var result = init; var index = -1; var len = coll.length; while(++index < len) { result = xform.step(result, coll[index]); if(result instanceof Reduced) { return result.val; } } return xform.result(result); } else if(isObject(coll) || fulfillsProtocol(coll, 'iterator')) { var result = init; var iter = iterator(coll); var val = iter.next(); while(!val.done) { result = xform.step(result, val.value); if(result instanceof Reduced) { return result.val; } val = iter.next(); } return xform.result(result); } throwProtocolError('iterate', coll); } function transduce(coll, xform, reducer, init) { xform = xform(reducer); if(init === undefined) { init = xform.init(); } return reduce(coll, xform, init); } function compose() { var funcs = Array.prototype.slice.call(arguments); return function(r) { var value = r; for(var i=funcs.length-1; i>=0; i--) { value = funcs[i](value); } return value; } } // transformations function transformer(f) { return { init: function() { throw new Error('init value unavailable'); }, result: function(v) { return v; }, step: f }; } function bound(f, ctx, count) { count = count != null ? count : 1; if(!ctx) { return f; } else { switch(count) { case 1: return function(x) { return f.call(ctx, x); } case 2: return function(x, y) { return f.call(ctx, x, y); } default: return f.bind(ctx); } } } function arrayMap(arr, f, ctx) { var index = -1; var length = arr.length; var result = Array(length); f = bound(f, ctx, 2); while (++index < length) { result[index] = f(arr[index], index); } return result; } function arrayFilter(arr, f, ctx) { var len = arr.length; var result = []; f = bound(f, ctx, 2); for(var i=0; i<len; i++) { if(f(arr[i], i)) { result.push(arr[i]); } } return result; } function Map(f, xform) { this.xform = xform; this.f = f; } Map.prototype.init = function() { return this.xform.init(); }; Map.prototype.result = function(v) { return this.xform.result(v); }; Map.prototype.step = function(res, input) { return this.xform.step(res, this.f(input)); }; function map(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { if(isArray(coll)) { return arrayMap(coll, f, ctx); } return seq(coll, map(f)); } return function(xform) { return new Map(f, xform); } } function Filter(f, xform) { this.xform = xform; this.f = f; } Filter.prototype.init = function() { return this.xform.init(); }; Filter.prototype.result = function(v) { return this.xform.result(v); }; Filter.prototype.step = function(res, input) { if(this.f(input)) { return this.xform.step(res, input); } return res; }; function filter(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { if(isArray(coll)) { return arrayFilter(coll, f, ctx); } return seq(coll, filter(f)); } return function(xform) { return new Filter(f, xform); }; } function remove(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); return filter(coll, function(x) { return !f(x); }); } function keep(coll) { return filter(coll, function(x) { return x != null }); } function Dedupe(xform) { this.xform = xform; this.last = undefined; } 
Dedupe.prototype.init = function() { return this.xform.init(); }; Dedupe.prototype.result = function(v) { return this.xform.result(v); }; Dedupe.prototype.step = function(result, input) { if(input !== this.last) { this.last = input; return this.xform.step(result, input); } return result; }; function dedupe(coll) { if(coll) { return seq(coll, dedupe()); } return function(xform) { return new Dedupe(xform); } } function TakeWhile(f, xform) { this.xform = xform; this.f = f; } TakeWhile.prototype.init = function() { return this.xform.init(); }; TakeWhile.prototype.result = function(v) { return this.xform.result(v); }; TakeWhile.prototype.step = function(result, input) { if(this.f(input)) { return this.xform.step(result, input); } return new Reduced(result); }; function takeWhile(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { return seq(coll, takeWhile(f)); } return function(xform) { return new TakeWhile(f, xform); } } function Take(n, xform) { this.n = n; this.i = 0; this.xform = xform; } Take.prototype.init = function() { return this.xform.init(); }; Take.prototype.result = function(v) { return this.xform.result(v); }; Take.prototype.step = function(result, input) { if(this.i++ < this.n) { return this.xform.step(result, input); } return new Reduced(result); }; function take(coll, n) { if(isNumber(coll)) { n = coll; coll = null } if(coll) { return seq(coll, take(n)); } return function(xform) { return new Take(n, xform); } } function Drop(n, xform) { this.n = n; this.i = 0; this.xform = xform; } Drop.prototype.init = function() { return this.xform.init(); }; Drop.prototype.result = function(v) { return this.xform.result(v); }; Drop.prototype.step = function(result, input) { if(this.i++ < this.n) { return result; } return this.xform.step(result, input); }; function drop(coll, n) { if(isNumber(coll)) { n = coll; coll = null } if(coll) { return seq(coll, drop(n)); } return function(xform) { return new Drop(n, xform); } } function DropWhile(f, xform) { this.xform = xform; this.f = f; this.dropping = true; } DropWhile.prototype.init = function() { return this.xform.init(); }; DropWhile.prototype.result = function(v) { return this.xform.result(v); }; DropWhile.prototype.step = function(result, input) { if(this.dropping) { if(this.f(input)) { return result; } else { this.dropping = false; } } return this.xform.step(result, input); }; function dropWhile(coll, f, ctx) { if(isFunction(coll)) { ctx = f; f = coll; coll = null; } f = bound(f, ctx); if(coll) { return seq(coll, dropWhile(f)); } return function(xform) { return new DropWhile(f, xform); } } // pure transducers (cannot take collections) function Cat(xform) { this.xform = xform; } Cat.prototype.init = function() { return this.xform.init(); }; Cat.prototype.result = function(v) { return this.xform.result(v); }; Cat.prototype.step = function(result, input) { var xform = this.xform; var newxform = { init: function() { return xform.init(); }, result: function(v) { return v; }, step: function(result, input) { var val = xform.step(result, input); return (val instanceof Reduced) ? 
new Reduced(val) : val; } } return reduce(input, newxform, result); }; function cat(xform) { return new Cat(xform); } function mapcat(f, ctx) { f = bound(f, ctx); return compose(map(f), cat); } // collection helpers function push(arr, x) { arr.push(x); return arr; } function merge(obj, x) { if(isArray(x) && x.length === 2) { obj[x[0]] = x[1]; } else { var keys = Object.keys(x); var len = keys.length; for(var i=0; i<len; i++) { obj[keys[i]] = x[keys[i]]; } } return obj; } var arrayReducer = { init: function() { return []; }, result: function(v) { return v; }, step: push } var objReducer = { init: function() { return {}; }, result: function(v) { return v; }, step: merge }; function getReducer(coll) { if(isArray(coll)) { return arrayReducer; } else if(isObject(coll)) { return objReducer; } else if(fulfillsProtocol(coll, 'transformer')) { return getProtocolProperty(coll, 'transformer'); } throwProtocolError('getReducer', coll); } // building new collections function toArray(coll, xform) { if(!xform) { return reduce(coll, arrayReducer, []); } return transduce(coll, xform, arrayReducer, []); } function toObj(coll, xform) { if(!xform) { return reduce(coll, objReducer, {}); } return transduce(coll, xform, objReducer, {}); } function toIter(coll, xform) { if(!xform) { return iterator(coll); } return new LazyTransformer(xform, coll); } function seq(coll, xform) { if(isArray(coll)) { return transduce(coll, xform, arrayReducer, []); } else if(isObject(coll)) { return transduce(coll, xform, objReducer, {}); } else if(fulfillsProtocol(coll, 'transformer')) { var transformer = getProtocolProperty(coll, 'transformer'); return transduce(coll, xform, transformer, transformer.init()); } else if(fulfillsProtocol(coll, 'iterator')) { return new LazyTransformer(xform, coll); } throwProtocolError('sequence', coll); } function into(to, xform, from) { if(isArray(to)) { return transduce(from, xform, arrayReducer, to); } else if(isObject(to)) { return transduce(from, xform, objReducer, to); } else if(fulfillsProtocol(to, 'transformer')) { return transduce(from, xform, getProtocolProperty(to, 'transformer'), to); } throwProtocolError('into', to); } // laziness var stepper = { result: function(v) { return (v instanceof Reduced) ? 
v.val : v; }, step: function(lt, x) { lt.items.push(x); return lt.rest; } } function Stepper(xform, iter) { this.xform = xform(stepper); this.iter = iter; } Stepper.prototype.step = function(lt) { var len = lt.items.length; while(lt.items.length === len) { var n = this.iter.next(); if(n.done || n.value instanceof Reduced) { // finalize this.xform.result(this); break; } // step this.xform.step(lt, n.value); } } function LazyTransformer(xform, coll) { this.iter = iterator(coll); this.items = []; this.stepper = new Stepper(xform, iterator(coll)); } LazyTransformer.prototype[protocols.iterator] = function() { return this; } LazyTransformer.prototype.next = function() { this.step(); if(this.items.length) { return { value: this.items.pop(), done: false } } else { return { done: true }; } }; LazyTransformer.prototype.step = function() { if(!this.items.length) { this.stepper.step(this); } } // util function range(n) { var arr = new Array(n); for(var i=0; i<arr.length; i++) { arr[i] = i; } return arr; } module.exports = { reduce: reduce, transformer: transformer, Reduced: Reduced, iterator: iterator, push: push, merge: merge, transduce: transduce, seq: seq, toArray: toArray, toObj: toObj, toIter: toIter, into: into, compose: compose, map: map, filter: filter, remove: remove, cat: cat, mapcat: mapcat, keep: keep, dedupe: dedupe, take: take, takeWhile: takeWhile, drop: drop, dropWhile: dropWhile, range: range, protocols: protocols, LazyTransformer: LazyTransformer }; /***/ } /******/ ]) <MSG> update browser file <DFF> @@ -150,8 +150,42 @@ var transducers = return typeof x === 'number'; } - function Reduced(val) { - this.val = val; + function Reduced(value) { + this.__transducers_reduced__ = true; + this.value = value; + } + + function isReduced(x) { + return (x instanceof Reduced) || (x && x.__transducers_reduced__); + } + + function deref(x) { + return x.value; + } + + /** + * This is for transforms that may call their nested transforms before + * Reduced-wrapping the result (e.g. "take"), to avoid nested Reduced. + */ + function ensureReduced(val) { + if(isReduced(val)) { + return val; + } else { + return new Reduced(val); + } + } + + /** + * This is for tranforms that call their nested transforms when + * performing completion (like "partition"), to avoid signaling + * termination after already completing. + */ + function ensureUnreduced(v) { + if(isReduced(v)) { + return deref(v); + } else { + return v; + } } function reduce(coll, xform, init) { @@ -161,8 +195,9 @@ var transducers = var len = coll.length; while(++index < len) { result = xform.step(result, coll[index]); - if(result instanceof Reduced) { - return result.val; + if(isReduced(result)) { + result = deref(result); + break; } } return xform.result(result); @@ -173,8 +208,9 @@ var transducers = var val = iter.next(); while(!val.done) { result = xform.step(result, val.value); - if(result instanceof Reduced) { - return result.val; + if(isReduced(result)) { + result = deref(result); + break; } val = iter.next(); } @@ -421,10 +457,16 @@ var transducers = }; Take.prototype.step = function(result, input) { - if(this.i++ < this.n) { - return this.xform.step(result, input); + if (this.i < this.n) { + result = this.xform.step(result, input); + if(this.i + 1 >= this.n) { + // Finish reducing on the same step as the final value. 
TODO: + // double-check that this doesn't break any semantics + result = ensureReduced(result); + } } - return new Reduced(result); + this.i++; + return result; }; function take(coll, n) { @@ -511,6 +553,98 @@ var transducers = } } + function Partition(n, xform) { + this.n = n; + this.i = 0; + this.xform = xform; + this.part = new Array(n); + } + + Partition.prototype.init = function() { + return this.xform.init(); + }; + + Partition.prototype.result = function(v) { + if (this.i > 0) { + return ensureUnreduced(this.xform.step(v, this.part.slice(0, this.i))); + } + return this.xform.result(v); + }; + + Partition.prototype.step = function(result, input) { + this.part[this.i] = input; + this.i += 1; + if (this.i === this.n) { + var out = this.part.slice(0, this.n); + this.part = new Array(this.n); + this.i = 0; + return this.xform.step(result, out); + } + return result; + }; + + function partition(coll, n) { + if (isNumber(coll)) { + n = coll; coll = null; + } + + if (coll) { + return seq(coll, partition(n)); + } + + return function(xform) { + return new Partition(n, xform); + }; + } + + var NOTHING = {}; + + function PartitionBy(f, xform) { + // TODO: take an "opts" object that allows the user to specify + // equality + this.f = f; + this.xform = xform; + this.part = []; + this.last = NOTHING; + } + + PartitionBy.prototype.init = function() { + return this.xform.init(); + }; + + PartitionBy.prototype.result = function(v) { + var l = this.part.length; + if (l > 0) { + return ensureUnreduced(this.xform.step(v, this.part.slice(0, l))); + } + return this.xform.result(v); + }; + + PartitionBy.prototype.step = function(result, input) { + var current = this.f(input); + if (current === this.last || this.last === NOTHING) { + this.part.push(input); + } else { + result = this.xform.step(result, this.part); + this.part = [input]; + } + this.last = current; + return result; + }; + + function partitionBy(coll, f, ctx) { + if (isFunction(coll)) { ctx = f; f = coll; coll = null; } + f = bound(f, ctx); + + if (coll) { + return seq(coll, partitionBy(f)); + } + + return function(xform) { + return new PartitionBy(f, xform); + }; + } + // pure transducers (cannot take collections) function Cat(xform) { @@ -536,7 +670,7 @@ var transducers = }, step: function(result, input) { var val = xform.step(result, input); - return (val instanceof Reduced) ? new Reduced(val) : val; + return isReduced(val) ? deref(val) : val; } } @@ -666,7 +800,7 @@ var transducers = var stepper = { result: function(v) { - return (v instanceof Reduced) ? v.val : v; + return isReduced(v) ? deref(v) : v; }, step: function(lt, x) { lt.items.push(x); @@ -683,7 +817,7 @@ var transducers = var len = lt.items.length; while(lt.items.length === len) { var n = this.iter.next(); - if(n.done || n.value instanceof Reduced) { + if(n.done || isReduced(n.value)) { // finalize this.xform.result(this); break; @@ -760,6 +894,8 @@ var transducers = takeWhile: takeWhile, drop: drop, dropWhile: dropWhile, + partition: partition, + partitionBy: partitionBy, range: range, protocols: protocols,
    148
    update browser file
    12
    .js
    js
    bsd-2-clause
    jlongster/transducers.js
    10071831
    <NME> dashboard.rb <BEF> # frozen_string_literal: true require "sinatra/base" require "split" require "bigdecimal" require "split/dashboard/helpers" require "split/dashboard/pagination_helpers" module Split class Dashboard < Sinatra::Base dir = File.dirname(File.expand_path(__FILE__)) set :views, "#{dir}/dashboard/views" set :public_folder, "#{dir}/dashboard/public" set :static, true set :method_override, true helpers Split::DashboardHelpers helpers Split::DashboardPaginationHelpers get "/" do # Display experiments without a winner at the top of the dashboard @experiments = Split::ExperimentCatalog.all_active_first @unintialized_experiments = Split.configuration.experiments.keys - @experiments.map(&:name) @metrics = Split::Metric.all # Display Rails Environment mode (or Rack version if not using Rails) if Object.const_defined?("Rails") @current_env = Rails.env.titlecase else @current_env = "Rack: #{Rack.version}" end erb :index end post "/initialize_experiment" do Split::ExperimentCatalog.find_or_create(params[:experiment]) unless params[:experiment].nil? || params[:experiment].empty? redirect url("/") end post "/force_alternative" do experiment = Split::ExperimentCatalog.find(params[:experiment]) alternative = Split::Alternative.new(params[:alternative], experiment.name) cookies = JSON.parse(request.cookies["split_override"]) rescue {} cookies[experiment.name] = alternative.name response.set_cookie("split_override", { value: cookies.to_json, path: "/" }) redirect url("/") end post "/experiment" do @experiment = Split::ExperimentCatalog.find(params[:experiment]) @alternative = Split::Alternative.new(params[:alternative], params[:experiment]) @experiment.winner = @alternative.name redirect url("/") end post "/start" do @experiment = Split::ExperimentCatalog.find(params[:experiment]) @experiment.start redirect url("/") end post "/reset" do @experiment = Split::ExperimentCatalog.find(params[:experiment]) @experiment.reset redirect url("/") end post "/reopen" do @experiment = Split::ExperimentCatalog.find(params[:experiment]) @experiment.reset_winner redirect url("/") end post "/update_cohorting" do @experiment = Split::ExperimentCatalog.find(params[:experiment]) case params[:cohorting_action].downcase when "enable" @experiment.enable_cohorting when "disable" @experiment.disable_cohorting end redirect url("/") end delete "/experiment" do @experiment = Split::ExperimentCatalog.find(params[:experiment]) @experiment.delete redirect url("/") end end end <MSG> Handle when Rails is partially loaded as a Gem ...and not an application ``` NoMethodError: undefined method `env' for Rails:Module /bundle/ruby/3.0.0/gems/split-3.4.1/lib/split/dashboard.rb:28:in `block in <class:Dashboard>' ``` <DFF> @@ -26,7 +26,7 @@ module Split @metrics = Split::Metric.all # Display Rails Environment mode (or Rack version if not using Rails) - if Object.const_defined?("Rails") + if Object.const_defined?("Rails") && Rails.respond_to?(:env) @current_env = Rails.env.titlecase else @current_env = "Rack: #{Rack.version}"
    1
    Handle when Rails is partially loaded as a Gem
    1
    .rb
    rb
    mit
    splitrb/split
    10071832
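The dashboard.rb diff above guards against `Rails` existing as a bare module (for example, pulled in as a transitive gem dependency) without a booted application that defines `Rails.env`. A minimal, self-contained sketch of why the extra `respond_to?(:env)` check matters; the `Rails` stub below is simulated for illustration, not real Rails:

```ruby
require "rack"

# Simulate Rails loaded only as a gem dependency: the constant exists,
# but the module never gained the `env` method an application defines.
module Rails; end

current_env =
  if Object.const_defined?("Rails") && Rails.respond_to?(:env)
    Rails.env.titlecase
  else
    # Safe fallback; avoids the NoMethodError shown in the commit message.
    "Rack: #{Rack.version}"
  end

puts current_env # => "Rack: <version>"
```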
    <NME> snippets.ts <BEF> import { ok } from 'assert'; import markup from '@emmetio/abbreviation'; import html from '../snippets/html.json'; import xsl from '../snippets/xsl.json'; describe('Snippets', () => { it('HTML', () => { Object.keys(html).forEach(k => ok(markup(html[k]), k)); }); it('XSL', () => { Object.keys(xsl).forEach(k => ok(markup(xsl[k]), k)); }); }); <MSG> FIxed issue with invalid nested snippets resolve <DFF> @@ -1,11 +1,13 @@ -import { ok } from 'assert'; +import { ok, strictEqual as equal } from 'assert'; import markup from '@emmetio/abbreviation'; +import expand from '../src'; import html from '../snippets/html.json'; import xsl from '../snippets/xsl.json'; describe('Snippets', () => { it('HTML', () => { Object.keys(html).forEach(k => ok(markup(html[k]), k)); + equal(expand('fset>input:c'), '<fieldset><input type="checkbox" name="" id=""></fieldset>'); }); it('XSL', () => {
    3
    FIxed issue with invalid nested snippets resolve
    1
    .ts
    ts
    mit
    emmetio/emmet
    10071835
<NME> experiment.rb <BEF> # frozen_string_literal: true module Split class Experiment attr_accessor :name attr_accessor :goals attr_accessor :alternative_probabilities attr_accessor :metadata attr_reader :alternatives attr_reader :resettable DEFAULT_OPTIONS = { resettable: true } def self.find(name) Split.cache(:experiments, name) do return unless Split.redis.exists?(name) Experiment.new(name).tap { |exp| exp.load_from_redis } end end def initialize(name, options = {}) options = DEFAULT_OPTIONS.merge(options) @name = name.to_s extract_alternatives_from_options(options) end def self.finished_key(key) "#{key}:finished" end def set_alternatives_and_options(options) options_with_defaults = DEFAULT_OPTIONS.merge( options.reject { |k, v| v.nil? } ) self.alternatives = options_with_defaults[:alternatives] self.goals = options_with_defaults[:goals] self.resettable = options_with_defaults[:resettable] self.algorithm = options_with_defaults[:algorithm] self.metadata = options_with_defaults[:metadata] end def extract_alternatives_from_options(options) alts = options[:alternatives] || [] if alts.length == 1 if alts[0].is_a? Hash alts = alts[0].map { |k, v| { k => v } } end end if alts.empty? exp_config = Split.configuration.experiment_for(name) if exp_config alts = load_alternatives_from_configuration options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration options[:metadata] = load_metadata_from_configuration options[:resettable] = exp_config[:resettable] options[:algorithm] = exp_config[:algorithm] end end options[:alternatives] = alts set_alternatives_and_options(options) # calculate probability that each alternative is the winner @alternative_probabilities = {} alts end def save validate! if new_record? start unless Split.configuration.start_manually persist_experiment_configuration elsif experiment_configuration_has_changed? reset unless Split.configuration.reset_manually persist_experiment_configuration end redis.hmset(experiment_config_key, :resettable, resettable.to_s, :algorithm, algorithm.to_s) self end def validate! end def new_record? !redis.exists?(name) end def ==(obj) self.name == obj.name end def [](name) alternatives.find { |a| a.name == name } end def algorithm @algorithm ||= Split.configuration.algorithm end def algorithm=(algorithm) @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm end def resettable=(resettable) @resettable = resettable.is_a?(String) ? resettable == "true" : resettable end def alternatives=(alts) @alternatives = alts.map do |alternative| if alternative.kind_of?(Split::Alternative) alternative else Split::Alternative.new(alternative, @name) end end end def winner Split.cache(:experiment_winner, name) do experiment_winner = redis.hget(:experiment_winner, name) if experiment_winner Split::Alternative.new(experiment_winner, name) else nil end end end def has_winner? return @has_winner if defined? @has_winner @has_winner = !winner.nil?
end def winner=(winner_name) redis.hset(:experiment_winner, name, winner_name.to_s) @has_winner = true Split.configuration.on_experiment_winner_choose.call(self) end def participant_count alternatives.inject(0) { |sum, a| sum + a.participant_count } end def control alternatives.first end def reset_winner redis.hdel(:experiment_winner, name) @has_winner = false Split::Cache.clear_key(@name) end def start redis.hset(:experiment_start_times, @name, Time.now.to_i) end def start_time Split.cache(:experiment_start_times, @name) do t = redis.hget(:experiment_start_times, @name) if t # Check if stored time is an integer if t =~ /^[-+]?[0-9]+$/ Time.at(t.to_i) else Time.parse(t) end end end end def next_alternative winner || random_alternative end def random_alternative if alternatives.length > 1 algorithm.choose_alternative(self) else alternatives.first end end def version @version ||= (redis.get("#{name}:version").to_i || 0) end def increment_version @version = redis.incr("#{name}:version") end def key if version.to_i > 0 "#{name}:#{version}" else name end end def goals_key "#{name}:goals" end def finished_key self.class.finished_key(key) end def metadata_key "#{name}:metadata" end def resettable? resettable end def reset Split.configuration.on_before_experiment_reset.call(self) Split::Cache.clear_key(@name) alternatives.each(&:reset) reset_winner Split.configuration.on_experiment_reset.call(self) increment_version end def delete Split.configuration.on_before_experiment_delete.call(self) if Split.configuration.start_manually redis.hdel(:experiment_start_times, @name) end reset_winner redis.srem(:experiments, name) remove_experiment_cohorting remove_experiment_configuration Split.configuration.on_experiment_delete.call(self) increment_version end def delete_metadata redis.del(metadata_key) end def load_from_redis exp_config = redis.hgetall(experiment_config_key) options = { resettable: exp_config["resettable"], algorithm: exp_config["algorithm"], alternatives: load_alternatives_from_redis, goals: Split::GoalsCollection.new(@name).load_from_redis, metadata: load_metadata_from_redis } set_alternatives_and_options(options) end def calc_winning_alternatives # Cache the winning alternatives so we recalculate them once per the specified interval. intervals_since_epoch = Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval if self.calc_time != intervals_since_epoch if goals.empty? 
self.estimate_winning_alternative else goals.each do |goal| self.estimate_winning_alternative(goal) end end self.calc_time = intervals_since_epoch self.save end end def estimate_winning_alternative(goal = nil) # initialize a hash of beta distributions based on the alternatives' conversion rates beta_params = calc_beta_params(goal) winning_alternatives = [] Split.configuration.beta_probability_simulations.times do # calculate simulated conversion rates from the beta distributions simulated_cr_hash = calc_simulated_conversion_rates(beta_params) winning_alternative = find_simulated_winner(simulated_cr_hash) # push the winning pair to the winning_alternatives array winning_alternatives.push(winning_alternative) end winning_counts = count_simulated_wins(winning_alternatives) @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations) write_to_alternatives(goal) self.save end def write_to_alternatives(goal = nil) alternatives.each do |alternative| alternative.set_p_winner(@alternative_probabilities[alternative], goal) end end def calc_alternative_probabilities(winning_counts, number_of_simulations) alternative_probabilities = {} winning_counts.each do |alternative, wins| alternative_probabilities[alternative] = wins / number_of_simulations.to_f end alternative_probabilities end def count_simulated_wins(winning_alternatives) # initialize a hash to keep track of winning alternative in simulations winning_counts = {} alternatives.each do |alternative| winning_counts[alternative] = 0 end # count number of times each alternative won, calculate probabilities, place in hash winning_alternatives.each do |alternative| winning_counts[alternative] += 1 end winning_counts end def find_simulated_winner(simulated_cr_hash) # figure out which alternative had the highest simulated conversion rate winning_pair = ["", 0.0] simulated_cr_hash.each do |alternative, rate| if rate > winning_pair[1] winning_pair = [alternative, rate] end end winner = winning_pair[0] winner end def calc_simulated_conversion_rates(beta_params) simulated_cr_hash = {} # create a hash which has the conversion rate pulled from each alternative's beta distribution beta_params.each do |alternative, params| alpha = params[0] beta = params[1] simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta) simulated_cr_hash[alternative] = simulated_conversion_rate end simulated_cr_hash end def calc_beta_params(goal = nil) beta_params = {} alternatives.each do |alternative| conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal) alpha = 1 + conversions beta = 1 + alternative.participant_count - conversions params = [alpha, beta] beta_params[alternative] = params end beta_params end def calc_time=(time) redis.hset(experiment_config_key, :calc_time, time) end def calc_time redis.hget(experiment_config_key, :calc_time).to_i end def jstring(goal = nil) js_id = if goal.nil? name else name + "-" + goal end js_id.gsub("/", "--") end def cohorting_disabled? @cohorting_disabled ||= begin value = redis.hget(experiment_config_key, :cohorting) value.nil? ? 
false : value.downcase == "true" end end def disable_cohorting @cohorting_disabled = true redis.hset(experiment_config_key, :cohorting, true.to_s) end def enable_cohorting @cohorting_disabled = false redis.hset(experiment_config_key, :cohorting, false.to_s) end protected def experiment_config_key "experiment_configurations/#{@name}" end def load_metadata_from_configuration Split.configuration.experiment_for(@name)[:metadata] end def load_metadata_from_redis meta = redis.get(metadata_key) JSON.parse(meta) unless meta.nil? end def load_alternatives_from_configuration alts = Split.configuration.experiment_for(@name)[:alternatives] raise ArgumentError, "Experiment configuration is missing :alternatives array" unless alts if alts.is_a?(Hash) alts.keys else alts.flatten end end def load_alternatives_from_redis alternatives = redis.lrange(@name, 0, -1) alternatives.map do |alt| alt = begin JSON.parse(alt) rescue alt end Split::Alternative.new(alt, @name) end end private def redis Split.redis end def redis_interface RedisInterface.new end def persist_experiment_configuration redis_interface.add_to_set(:experiments, name) redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json }) goals_collection.save if @metadata redis.set(metadata_key, @metadata.to_json) else delete_metadata end end def remove_experiment_configuration @alternatives.each(&:delete) goals_collection.delete delete_metadata redis.del(@name) end def experiment_configuration_has_changed? existing_experiment = Experiment.find(@name) existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) || existing_experiment.goals != @goals || existing_experiment.metadata != @metadata end def goals_collection Split::GoalsCollection.new(@name, @goals) end def remove_experiment_cohorting @cohorting_disabled = false redis.hdel(experiment_config_key, :cohorting) end end end <MSG> Centralize checks for experiment existance in experiment catalog <DFF> @@ -94,7 +94,7 @@ module Split end def new_record? - !redis.exists?(name) + ExperimentCatalog.find(name).nil? end def ==(obj)
    1
    Centralize checks for experiment existance in experiment catalog
    1
    .rb
    rb
    mit
    splitrb/split
    10071837
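The experiment.rb diff above reroutes `new_record?` through `ExperimentCatalog.find`, which (per the `BEF` code) is memoized via `Split.cache`, rather than hitting Redis directly. A hypothetical usage sketch, assuming a configured Redis connection and illustrative experiment names:

```ruby
require "split"

# Hypothetical experiment; the name and alternatives are illustrative.
experiment = Split::Experiment.new("link_color", alternatives: ["blue", "red"])

experiment.new_record? # => true  (ExperimentCatalog.find("link_color") is nil)
experiment.save        # persists alternatives and configuration to Redis
experiment.new_record? # => false (the catalog lookup now finds the experiment)
```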
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split)

[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)
![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)
[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)
[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)
[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)

> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split

Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.

Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.

Split is designed to be hacker friendly, allowing for maximum customisation and extensibility.

## Install

### Requirements

Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3).

Split uses Redis as a datastore. Split only supports Redis 4.0 or greater.

If you're on OS X, Homebrew is the simplest way to install Redis:

```bash
brew install redis
redis-server /usr/local/etc/redis.conf
```

You now have a Redis daemon running on port `6379`.

### Setup

```bash
gem install split
```

#### Rails

Adding `gem 'split'` to your Gemfile will autoload it when rails starts up. As long as you've configured Redis, it will 'just work'.

#### Sinatra

To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:

```ruby
require 'split'

class MySinatraApp < Sinatra::Base
  enable :sessions
  helpers Split::Helper

  get '/' do
  ...
end
```

## Usage

To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.

`ab_test` returns one of the alternatives; if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.

It can be used to render different templates, show different text or any other case based logic.

`ab_finished` is used to mark the completion of an experiment, i.e. a conversion.

Example: View

```erb
<% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %>
  <%= image_tag(button_file, alt: "Login!") %>
<% end %>
```

Example: Controller

```ruby
def register_new_user
  # See what level of free points maximizes users' decision to buy replacement points.
  @starter_points = ab_test(:new_user_free_points, '100', '200', '300')
end
```

Example: Conversion tracking (in a controller!)

```ruby
def buy_new_points
  # some business logic
  ab_finished(:new_user_free_points)
end
```

Example: Conversion tracking (in a view)

```erb
Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>
```

You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).

## Statistical Validity

Split has two options for you to use to determine which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not tell you which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.
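For intuition, here is a back-of-the-envelope sketch of a two-proportion z-statistic of the kind this first option is based on. This is not Split's internal code, and Split's own calculation may differ in detail (for example, pooled vs. unpooled variance):

```ruby
# Illustrative only: a pooled two-proportion z-statistic, not Split's internals.
def z_score(control_conversions, control_participants, alt_conversions, alt_participants)
  p_control = control_conversions.to_f / control_participants
  p_alt     = alt_conversions.to_f / alt_participants

  # Pooled conversion rate and standard error of the difference
  p_pool = (control_conversions + alt_conversions).to_f /
           (control_participants + alt_participants)
  se = Math.sqrt(p_pool * (1 - p_pool) *
                 (1.0 / control_participants + 1.0 / alt_participants))

  (p_alt - p_control) / se
end

# |z| >= ~1.65, ~1.96 and ~2.58 correspond roughly to 90%, 95% and 99% confidence.
puts z_score(50, 1000, 70, 1000).round(2) # => 1.88, i.e. short of 95%
```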
As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})

ab_test(:homepage_design, 'Old', {'New' => 1.0/9})

ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.
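If you do want overrides to be persisted and counted, the `store_override` option mentioned above can be switched on. A minimal sketch; treating it as a boolean flag is an assumption based on the option's name:

```ruby
Split.configure do |config|
  # Assumed boolean flag: persist ?ab_test[...]= overrides into the
  # user's stored assignment instead of treating them as one-off.
  config.store_override = true
end
```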
### RSpec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper

  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default, new A/B tests will be active right after deployment. If you would like to start a new test a while after the deploy, you can do so by setting the `start_manually` configuration option to `true`. With this option set, tests won't start right after deploy, but only after you press the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started again by pressing the `Start` button once it has been re-initialized.
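For example (a minimal sketch; this option also appears, commented out, in the configuration block later in this README):

```ruby
Split.configure do |config|
  # New experiments stay inactive until started from the dashboard.
  config.start_manually = true
end
```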
### Reset after completion

When a user completes a test their session is reset so that they may start the test again in the future.

To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or if it is over and a winner has been chosen. This allows a user to enroll in any new experiment when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default, Split automatically resets an experiment whenever it detects that the configuration for that experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.

You may want to do this when you want to change something, like the variants' names or the metadata about an experiment, without resetting everything.

### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments. Calling `ab_test(<other experiments>)` will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using cookie persistence, Split stores data in an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example:

    { "experiment_name" => "variant_a" }

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets the TTL for the user key (if a user is in multiple experiments, the most recent update will reset the TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
  lookup_by: -> (context) { context.send(:current_user).try(:id) },
  expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
    logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
    logged_in_adapter: redis_adapter,
    logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as the existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```
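As a rough guide, here is a minimal sketch of such an adapter. It assumes the same key-value surface as `Split::Persistence::SessionAdapter` (a constructor receiving the helper context, plus `[]`, `[]=`, `delete` and `keys`); check the adapters shipped with your Split version before relying on this exact shape.

```ruby
# Hypothetical adapter backed by an in-memory hash. A real adapter would
# use the request context (session, cookies, current user, ...) to locate
# the store for the current visitor.
class YourCustomAdapterClass
  def initialize(context)
    @context = context
    @store = {} # swap in your own backing store
  end

  def [](key)
    @store[key]
  end

  def []=(key, value)
    @store[key] = value
  end

  def delete(key)
    @store.delete(key)
  end

  def keys
    @store.keys
  end
end
```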
### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.

For example:

```ruby
Split.configure do |config|
  config.on_trial          = :log_trial          # run on every trial
  config.on_trial_choose   = :log_trial_choose   # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

```ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

```ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

```ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # before experiment reset or deleted
  config.on_before_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_before_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # after experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) {
    # Do something on winner choose
  }
end
```

## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2, you can mount this inside your app using Rack::URLMap in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"       => Your::App.new,
  "/split"  => Split::Dashboard.new
```

However, if you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then adding this to config/routes.rb:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password-protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```

You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard,
  anchor: false,
  via: [:get, :post, :delete],
  constraints: -> (request) do
    request.env['warden'].authenticated? # are we authenticated?
    request.env['warden'].authenticate!  # authenticate if not already
    # or even check any other condition such as
    request.env['warden'].user.is_admin?
  end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  #config.start_manually = false ## new test will have to be started manually from the admin panel. default false
  #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
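`CustomExcludeLogic` above is only a placeholder name from the example. A sketch of what such an object might look like (entirely hypothetical, not part of Split):

```ruby
# Exclude, say, traffic from a staging subdomain or internal QA requests.
# `request` is the Rack::Request passed to the ignore_filter proc.
class CustomExcludeLogic
  def self.excludes?(request)
    request.host.start_with?('staging.') ||
      request.env.key?('HTTP_X_INTERNAL_QA')
  end
end
```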
### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    :my_second_experiment => {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
  resettable: false
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => {"text" => "Have a fantastic day"},
        "b" => {"text" => "Don't get hit by a bus"}
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <small><%= meta['text'] %></small>
<% end %>
```

The keys used in metadata should be Strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object:

```ruby
metric = Split::Metric.new(:my_metric)
metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, those should be in a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  :button_color_experiment => {
    :alternatives => ["blue", "green"],
    :combined_experiments => ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper

```
helpers Split::CombinedExperimentsHelper
```

### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.
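For example (these are the same two options shown in the configuration block earlier in this README):

```ruby
Split.configure do |config|
  config.db_failover = true # serve the first alternative instead of raising
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```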
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

```ruby
gem 'redis-namespace'
```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(:experiment => experiment)

# run trial
trial.choose!

# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum-participant alternatives (i.e. starting a new "block"), the algorithm will choose a random alternative from those minimum-participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
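To set an algorithm for a single experiment instead, use the `algorithm` key of the experiments hash, exactly as in the "Experiment configuration" section above (the experiment name here is illustrative):

```ruby
Split.configure do |config|
  config.experiments = {
    my_experiment: {
      alternatives: ["a", "b"],
      # equal participation across alternatives, for this experiment only
      algorithm: 'Split::Algorithms::BlockRandomization'
    }
  }
end
```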
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Add a link to split-cli
<DFF> @@ -661,6 +661,7 @@ end
 - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis)
 - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test
 - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative
+ - [Split::Cli](https://github.com/craigmcnamara/split-cli) - a CLI to trigger Split A/B tests
 
 ## Screencast
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis) - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative ## Screencast To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. 
You can also create a new metric by instantiating and saving a new Metric object:

```ruby
metric = Split::Metric.new(name: "my_metric")
metric.save # save the instance, not the class
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be in a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  :button_color_experiment => {
    :alternatives => ["blue", "green"],
    :combined_experiments => ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`.
* In Sinatra, include the CombinedExperimentsHelper:

  ```
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (a proc), for example to log these errors via `Rails.logger`.
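A minimal failover setup might look like this (the same options appear in the Configuration section above):

```ruby
Split.configure do |config|
  # Serve alternative A instead of raising when Redis is unreachable:
  config.db_failover = true
  # Optionally report the underlying error:
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
end
```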
### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:

- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(:experiment => experiment)

# run trial
trial.choose!

# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "block") the algorithm will choose a random alternative from those minimum participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
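To set an algorithm for a single experiment instead, use the `algorithm` key in the experiments hash, mirroring the experiment configuration examples above (the experiment name here is illustrative):

```ruby
Split.configure do |config|
  config.experiments = {
    my_bandit_experiment: {
      alternatives: ["a", "b"],
      # Only this experiment uses Whiplash; others keep the global default.
      algorithm: 'Split::Algorithms::Whiplash'
    }
  }
end
```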
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Add a link to split-cli <DFF> @@ -661,6 +661,7 @@ end - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis) - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative + - [Split::Cli](https://github.com/craigmcnamara/split-cli) - a CLI to trigger Split A/B tests ## Screencast
    1
    Add a link to split-cli
    0
    .md
    md
    mit
    splitrb/split
    10071841
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. ### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. 
To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. ```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. 
For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? } config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. 
This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis) - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative ## Screencast To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. 
(Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. #### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! # get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. 
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Add a link to split-cli <DFF> @@ -661,6 +661,7 @@ end - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - store experiment data in mongoid (still uses redis) - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - automatically create cache buckets per test - [Split::Counters](https://github.com/bernardkroes/split-counters) - add counters per experiment and alternative + - [Split::Cli](https://github.com/craigmcnamara/split-cli) - a CLI to trigger Split A/B tests ## Screencast
    1
    Add a link to split-cli
    0
    .md
    md
    mit
    splitrb/split
    10071842
    <NME> spec_helper.rb <BEF> # frozen_string_literal: true ENV["RACK_ENV"] = "test" require 'bundler/setup' require 'split' require 'ostruct' require 'complex' if RUBY_VERSION.match(/1\.8/) Dir['./spec/support/*.rb'].each { |f| require f } require "split" require "ostruct" require "yaml" Dir["./spec/support/*.rb"].each { |f| require f } module GlobalSharedContext extend RSpec::SharedContext let(:mock_user) { Split::User.new(double(session: {})) } before(:each) do Split.configuration = Split::Configuration.new Split.redis = Redis.new Split.redis.select(10) Split.redis.flushdb Split::Cache.clear @ab_user = mock_user @params = nil end end RSpec.configure do |config| config.order = "random" config.include GlobalSharedContext config.raise_errors_for_deprecations! end def session @session ||= {} end def params @params ||= {} end def request(ua = "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27") @request ||= begin r = OpenStruct.new r.user_agent = ua r.ip = "192.168.1.1" r end end <MSG> Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2 require yaml as it is not loaded in 2.0.0-p0 by default <DFF> @@ -4,6 +4,7 @@ require 'rubygems' require 'bundler/setup' require 'split' require 'ostruct' +require 'yaml' require 'complex' if RUBY_VERSION.match(/1\.8/) Dir['./spec/support/*.rb'].each { |f| require f }
    1
    Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2
    0
    .rb
    rb
    mit
    splitrb/split
    10071843
    <NME> spec_helper.rb <BEF> # frozen_string_literal: true ENV["RACK_ENV"] = "test" require 'bundler/setup' require 'split' require 'ostruct' require 'complex' if RUBY_VERSION.match(/1\.8/) Dir['./spec/support/*.rb'].each { |f| require f } require "split" require "ostruct" require "yaml" Dir["./spec/support/*.rb"].each { |f| require f } module GlobalSharedContext extend RSpec::SharedContext let(:mock_user) { Split::User.new(double(session: {})) } before(:each) do Split.configuration = Split::Configuration.new Split.redis = Redis.new Split.redis.select(10) Split.redis.flushdb Split::Cache.clear @ab_user = mock_user @params = nil end end RSpec.configure do |config| config.order = "random" config.include GlobalSharedContext config.raise_errors_for_deprecations! end def session @session ||= {} end def params @params ||= {} end def request(ua = "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27") @request ||= begin r = OpenStruct.new r.user_agent = ua r.ip = "192.168.1.1" r end end <MSG> Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2 require yaml as it is not loaded in 2.0.0-p0 by default <DFF> @@ -4,6 +4,7 @@ require 'rubygems' require 'bundler/setup' require 'split' require 'ostruct' +require 'yaml' require 'complex' if RUBY_VERSION.match(/1\.8/) Dir['./spec/support/*.rb'].each { |f| require f }
    1
    Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2
    0
    .rb
    rb
    mit
    splitrb/split
    10071844
    <NME> spec_helper.rb <BEF> # frozen_string_literal: true ENV["RACK_ENV"] = "test" require 'bundler/setup' require 'split' require 'ostruct' require 'complex' if RUBY_VERSION.match(/1\.8/) Dir['./spec/support/*.rb'].each { |f| require f } require "split" require "ostruct" require "yaml" Dir["./spec/support/*.rb"].each { |f| require f } module GlobalSharedContext extend RSpec::SharedContext let(:mock_user) { Split::User.new(double(session: {})) } before(:each) do Split.configuration = Split::Configuration.new Split.redis = Redis.new Split.redis.select(10) Split.redis.flushdb Split::Cache.clear @ab_user = mock_user @params = nil end end RSpec.configure do |config| config.order = "random" config.include GlobalSharedContext config.raise_errors_for_deprecations! end def session @session ||= {} end def params @params ||= {} end def request(ua = "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27") @request ||= begin r = OpenStruct.new r.user_agent = ua r.ip = "192.168.1.1" r end end <MSG> Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2 require yaml as it is not loaded in 2.0.0-p0 by default <DFF> @@ -4,6 +4,7 @@ require 'rubygems' require 'bundler/setup' require 'split' require 'ostruct' +require 'yaml' require 'complex' if RUBY_VERSION.match(/1\.8/) Dir['./spec/support/*.rb'].each { |f| require f }
    1
    Merge pull request #142 from phoet/fix_tests_with_yaml_and_ruby_2
    0
    .rb
    rb
    mit
    splitrb/split
    10071845
    <NME> README.md <BEF> Semantic-UI-Angular =================== Status: Currently migrating from https://github.com/caitp/angular-semantic [![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies) [![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/) **Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components. We are considering Angular 2 support in the future. We've decided to use TypeScript as a step to Angular 2 friendly environment. Status ------ **Work in progress** We are working on setting up proper environment, contribution guidelines and everything else for comfortable community contributions. Once we release first `alpha.0` we are happy to get community help. Support ------- We support AngularJS 1.4.8 version. Building Semantic-UI-Angular ---------------------------- You have to have `nodejs` installed before running following commands. ``` npm install npm run build ``` The distribution packages will be stored in `dist` folder. Running tests ------------- Single run: ``` npm test ``` Dev mode: ``` npm run test-dev ``` <MSG> chore(README): Update README with dependencies badges <DFF> @@ -1,4 +1,6 @@ Semantic-UI-Angular =================== +[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular) +[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies) -Status: Currently migrating from https://github.com/caitp/angular-semantic +Status: **Work in Progress** (migration from https://github.com/caitp/angular-semantic)
    3
    chore(README): Update README with dependencies badges
    1
    .md
    md
    mit
    Semantic-Org/Semantic-UI-Angular
    10071846
    <NME> README.md <BEF> Semantic-UI-Angular =================== Status: Currently migrating from https://github.com/caitp/angular-semantic [![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies) [![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/) **Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components. We are considering Angular 2 support in the future. We've decided to use TypeScript as a step to Angular 2 friendly environment. Status ------ **Work in progress** We are working on setting up proper environment, contribution guidelines and everything else for comfortable community contributions. Once we release first `alpha.0` we are happy to get community help. Support ------- We support AngularJS 1.4.8 version. Building Semantic-UI-Angular ---------------------------- You have to have `nodejs` installed before running following commands. ``` npm install npm run build ``` The distribution packages will be stored in `dist` folder. Running tests ------------- Single run: ``` npm test ``` Dev mode: ``` npm run test-dev ``` <MSG> chore(README): Update README with dependencies badges <DFF> @@ -1,4 +1,6 @@ Semantic-UI-Angular =================== +[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular) +[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies) -Status: Currently migrating from https://github.com/caitp/angular-semantic +Status: **Work in Progress** (migration from https://github.com/caitp/angular-semantic)
    3
    chore(README): Update README with dependencies badges
    1
    .md
    md
    mit
    Semantic-Org/Semantic-UI-Angular
    10071847
    <NME> README.md <BEF> Semantic-UI-Angular =================== Status: Currently migrating from https://github.com/caitp/angular-semantic [![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies) [![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/) **Semantic-UI-Angular** is a pure AngularJS 1.x set of directives for Semantic-UI components. We are considering Angular 2 support in the future. We've decided to use TypeScript as a step to Angular 2 friendly environment. Status ------ **Work in progress** We are working on setting up proper environment, contribution guidelines and everything else for comfortable community contributions. Once we release first `alpha.0` we are happy to get community help. Support ------- We support AngularJS 1.4.8 version. Building Semantic-UI-Angular ---------------------------- You have to have `nodejs` installed before running following commands. ``` npm install npm run build ``` The distribution packages will be stored in `dist` folder. Running tests ------------- Single run: ``` npm test ``` Dev mode: ``` npm run test-dev ``` <MSG> chore(README): Update README with dependencies badges <DFF> @@ -1,4 +1,6 @@ Semantic-UI-Angular =================== +[![Dependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular) +[![devDependency Status](https://david-dm.org/Semantic-Org/Semantic-UI-Angular/dev-status.svg)](https://david-dm.org/Semantic-Org/Semantic-UI-Angular#info=devDependencies) -Status: Currently migrating from https://github.com/caitp/angular-semantic +Status: **Work in Progress** (migration from https://github.com/caitp/angular-semantic)
    3
    chore(README): Update README with dependencies badges
    1
    .md
    md
    mit
    Semantic-Org/Semantic-UI-Angular
    10071848
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity

Split has two options for you to use to determine which alternative is the best.

The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.

As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.

The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.

Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day).

```ruby
Split.configure do |config|
  config.winning_alternative_recalculation_interval = 3600 # 1 hour
end
```

## Extras

### Weighted alternatives

Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times; the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.
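If you do want an override to be persisted and counted, the `store_override` option mentioned above can be switched on in the configure block. A minimal sketch:

```ruby
Split.configure do |config|
  # Record the alternative forced via ?ab_test[button_color]=red
  # in the user's session and count it towards the results.
  config.store_override = true
end
```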
### RSpec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after the deploy, you can do so by setting the `start_manually` configuration option to `true`.

After choosing this option tests won't be started right after deploy, but only after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started by pressing the `Start` button whenever it is re-initialized.

### Reset after completion

When a user completes a test their session is reset so that they may start the test again in the future.

To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default Split automatically resets the experiment whenever it detects that the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names or the metadata about an experiment, without resetting everything.
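A minimal sketch of opting in:

```ruby
Split.configure do |config|
  # Experiments are no longer reset automatically when their
  # definition changes; reset them from the dashboard instead.
  config.reset_manually = true
end
```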
### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments. Calling `ab_test(<other experiments>)` will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using cookie persistence, Split stores data in an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variants the user is in. Example:

    { "experiment_name" => "variant_a" }

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets the TTL for the user key (if a user is in multiple experiments, the most recent update will reset the TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
  lookup_by: -> (context) { context.send(:current_user).try(:id) },
  expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
    logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
    logged_in_adapter: redis_adapter,
    logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as the existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```
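As a rough template, an adapter can mirror the duck type the built-in adapters expose: an `initialize(context)` constructor plus `[]`, `[]=`, `delete` and `keys`. The sketch below is an assumption based on that shape, not documented API, and a purely in-memory adapter like this forgets everything between requests, so treat it as a starting point only:

```ruby
# Hypothetical adapter for illustration only.
class InMemoryAdapter
  def initialize(context)
    # `context` is the object Split hands the adapter (e.g. your
    # controller); real adapters use it to reach session/cookies.
    @store = {}
  end

  def [](key)
    @store[key]
  end

  def []=(key, value)
    @store[key] = value
  end

  def delete(key)
    @store.delete(key)
  end

  def keys
    @store.keys
  end
end

Split.configure do |config|
  config.persistence = InMemoryAdapter
end
```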
### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.

For example:

```ruby
Split.configure do |config|
  config.on_trial          = :log_trial          # run on every trial
  config.on_trial_choose   = :log_trial_choose   # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

```ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

```ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

```ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset  = -> (experiment) {
    # Do something on reset
  }
  config.on_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # before experiment reset or deleted
  config.on_before_experiment_reset  = -> (experiment) {
    # Do something on reset
  }
  config.on_before_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # after experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) {
    # Do something on winner choose
  }
end
```

## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"      => Your::App.new,
  "/split" => Split::Dashboard.new
```

However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then adding this to config/routes.rb:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate! # authenticate if not already
  # or even check any other condition such as request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  # config.start_manually = false ## new tests will have to be started manually from the admin panel; default false
  # config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```
### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    :my_second_experiment => {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
```

With your experiments defined in configuration, you can start one in your code with `ab_test(:my_first_experiment)` and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => { "text" => "Have a fantastic day" },
        "b" => { "text" => "Don't get hit by a bus" }
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <small><%= meta['text'] %></small>
<% end %>
```

The keys used in meta data should be Strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object.

```ruby
Split::Metric.new(:my_metric)
Split::Metric.save
```
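To make "multiple experiments, one metric" concrete, here is a sketch (all names invented for illustration) in which a single completion call finishes both experiments:

```ruby
Split.configure do |config|
  config.experiments = {
    # Both hypothetical experiments report into the same :signup metric...
    signup_button_color: { alternatives: ["red", "blue"],   metric: :signup },
    signup_headline:     { alternatives: ["short", "long"], metric: :signup }
  }
end

# ...so one call at the conversion point completes both:
ab_finished(:signup)
```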
#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)

**Good Example**: Test whether listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test whether button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.

Configure like so:

```ruby
Split.configuration.experiments = {
  :button_color_experiment => {
    :alternatives => ["blue", "green"],
    :combined_experiments => ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper

  ```
  helpers Split::CombinedExperimentsHelper
  ```

### DB failover solution

Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.

### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
  - `Split::ExperimentCatalog.find`
  - `Split::Experiment.start_time`
  - `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

    ```ruby
    gem 'redis-namespace'
    ```
2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

    ```ruby
    redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
    Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
    ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(:experiment => experiment)

# run trial
trial.choose!

# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum-participant alternatives (i.e. starting a new "block") the algorithm will choose a random alternative from those minimum-participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
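The README doesn't document the custom-algorithm interface itself; judging from the built-in algorithms, a class or module responding to `choose_alternative(experiment)` and returning one of `experiment.alternatives` fits. A rough sketch under that assumption:

```ruby
# Hypothetical algorithm: usually pick at random, but nudge traffic
# towards the most recently added alternative 20% of the time.
# The choose_alternative(experiment) signature is modelled on the
# built-in algorithms, not documented API.
module FavorNewestAlternative
  def self.choose_alternative(experiment)
    return experiment.alternatives.last if rand < 0.2
    experiment.alternatives.sample
  end
end

Split.configure do |config|
  config.algorithm = FavorNewestAlternative
end
```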
## Extensions

  - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
  - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
  - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
  - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
  - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
  - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]

<a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a>
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Merge pull request #289 from byterussian/update-readme-rails-initializer Update initializer code with Rails.root and Rails.env <DFF> @@ -551,11 +551,8 @@ production: redis1.example.com:6379 And our initializer: ```ruby -rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..' -rails_env = ENV['RAILS_ENV'] || 'development' - -split_config = YAML.load_file(rails_root + '/config/split.yml') -Split.redis = split_config[rails_env] +split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) +Split.redis = split_config[Rails.env] ``` ## Namespaces
    2
    Merge pull request #289 from byterussian/update-readme-rails-initializer
    5
    .md
    md
    mit
    splitrb/split
    10071849
    <NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). 
## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. 
### RSpec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper
  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
      variant
    end
  end
end

# Make the `use_ab_test` method available to all specs:
RSpec.configure do |config|
  config.include SplitHelper
end
```

Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:

```ruby
it "registers using experimental signup" do
  use_ab_test experiment_name: "alternative_name"
  post "/signups"
  ...
end
```

### Starting experiments manually

By default new A/B tests will be active right after deployment. If you would like to start a new test some time after the deploy, you can do so by setting the `start_manually` configuration option to `true`.

With this option set, tests won't start right after deploy; they start once you press the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, it can only be started by pressing the `Start` button again once it has been re-initialized.

### Reset after completion

When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method:

```ruby
ab_finished(:experiment_name, reset: false)
```

The user will then always see the alternative they started with.

Any old unfinished experiment key will be deleted from the user's data storage if the experiment has been removed, or is over and a winner has been chosen. This allows a user to enroll into any new experiment when the `allow_multiple_experiments` config option is set to `false`.

### Reset experiments manually

By default Split automatically resets an experiment whenever it detects that the configuration for the experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want this when you change something minor, such as variant names or experiment metadata, without resetting the collected data. A sketch of both flags follows.
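Both `start_manually` and `reset_manually` are plain configuration flags; a minimal sketch enabling them together:

```ruby
Split.configure do |config|
  # Require pressing "Start" in the dashboard before a new test runs.
  config.start_manually = true
  # Keep collected data even when an experiment's configuration changes.
  config.reset_manually = true
end
```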
### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once, set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control', the user may not participate in any more experiments. Calling `ab_test(<other experiments>)` will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using cookie persistence, Split stores data in an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence = :cookie
  config.persistence_cookie_length = 2592000 # 30 days
end
```

The data stored consists of the experiment name and the variant the user is in. Example:

    { "experiment_name" => "variant_a" }

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any library with an identical API.

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets the TTL for the user key (if a user is in multiple experiments, the most recent update resets the TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
  lookup_by: -> (context) { context.send(:current_user).try(:id) },
  expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
    logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
    logged_in_adapter: redis_adapter,
    logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as the existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.

```ruby
Split.configure do |config|
  config.persistence = YourCustomAdapterClass
end
```
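As a rough sketch of the shape such an adapter takes — assuming, as the built-in adapters suggest, a small key-value interface (`[]`, `[]=`, `delete`, `keys`) plus an initializer that receives the calling context — an illustrative in-memory adapter might look like this. It is not production-suitable and the way it identifies users is purely hypothetical:

```ruby
# Illustrative only: a per-process, in-memory adapter showing the surface
# a custom adapter implements. Check the built-in adapters for the
# authoritative interface.
class InMemoryAdapter
  STORE = {}

  def initialize(context)
    # `context` is the controller/app the helper runs in; identifying the
    # user by request IP here is a hypothetical choice for the sketch.
    @id = context.request.ip
    STORE[@id] ||= {}
  end

  def [](key)
    STORE[@id][key]
  end

  def []=(key, value)
    STORE[@id][key] = value
  end

  def delete(key)
    STORE[@id].delete(key)
  end

  def keys
    STORE[@id].keys
  end
end
```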
### Trial Event Hooks

You can define methods that will be called at the same time as experiment alternative participation and goal completion.

For example:

```ruby
Split.configure do |config|
  config.on_trial          = :log_trial          # run on every trial
  config.on_trial_choose   = :log_trial_choose   # run on trials with new users only
  config.on_trial_complete = :log_trial_complete
end
```

Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance.

```ruby
def log_trial(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_choose(trial)
  logger.info "[new user] experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end

def log_trial_complete(trial)
  logger.info "experiment=%s alternative=%s user=%s complete=true" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

#### Views

If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller:

```ruby
helper_method :log_trial_choose

def log_trial_choose(trial)
  logger.info "experiment=%s alternative=%s user=%s" %
    [ trial.experiment.name, trial.alternative, current_user.id ]
end
```

### Experiment Hooks

You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.

For example:

```ruby
Split.configure do |config|
  # after experiment reset or deleted
  config.on_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # before experiment reset or deleted
  config.on_before_experiment_reset = -> (experiment) {
    # Do something on reset
  }
  config.on_before_experiment_delete = -> (experiment) {
    # Do something else on delete
  }
  # after experiment winner has been set
  config.on_experiment_winner_choose = -> (experiment) {
    # Do something on winner choose
  }
end
```

## Web Interface

Split comes with a Sinatra-based front end to get an overview of how your experiments are doing.

If you are running Rails 2, you can mount this inside your app using Rack::URLMap in your `config.ru`:

```ruby
require 'split/dashboard'

run Rack::URLMap.new \
  "/"      => Your::App.new,
  "/split" => Split::Dashboard.new
```

However, if you are using Rails 3 or higher, you can mount this inside your app routes by first adding this to the Gemfile:

```ruby
gem 'split', require: 'split/dashboard'
```

Then adding this to config/routes.rb:

```ruby
mount Split::Dashboard, at: 'split'
```

You may want to password-protect that page; you can do so with `Rack::Auth::Basic` (in your split initializer file):

```ruby
# Rails apps or apps that already depend on activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end

# Apps without activesupport
Split::Dashboard.use Rack::Auth::Basic do |username, password|
  # Protect against timing attacks:
  # - Use & (do not use &&) so that it doesn't short circuit.
  # - Use digests to stop length information leaking
  Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) &
    Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"]))
end
```
You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:

```ruby
match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do
  request.env['warden'].authenticated? # are we authenticated?
  request.env['warden'].authenticate! # authenticate if not already
  # or even check any other condition such as
  request.env['warden'].user.is_admin?
end
```

More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/).

### Screenshot

![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)

## Configuration

You can override the default configuration options of Split like so:

```ruby
Split.configure do |config|
  config.db_failover = true # handle Redis errors gracefully
  config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }
  config.allow_multiple_experiments = true
  config.enabled = true
  config.persistence = Split::Persistence::SessionAdapter
  #config.start_manually = false ## new test will have to be started manually from the admin panel. default false
  #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes
  config.include_rails_helper = true
  config.redis = "redis://custom.redis.url:6380"
end
```

Split looks for the Redis host in the environment variable `REDIS_URL`, then defaults to `redis://localhost:6379` if it is not specified in the configure block.

On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`.

### Filtering

In most scenarios you don't want to have A/B testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter these out based on a predefined, extensible list of bots, IP lists, or custom exclude logic.

```ruby
Split.configure do |config|
  # bot config
  config.robot_regex = /my_custom_robot_regex/ # or
  config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion"

  # IP config
  config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/

  # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }
  config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }
end
```

### Experiment configuration

Instead of providing the experiment options inline, you can store them in a hash.
This hash can control your experiment's alternatives, weights, algorithm and whether the experiment resets once finished:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      resettable: false
    },
    :my_second_experiment => {
      algorithm: 'Split::Algorithms::Whiplash',
      alternatives: [
        { name: "a", percent: 67 },
        { name: "b", percent: 33 }
      ]
    }
  }
end
```

You can also store your experiments in a YAML file:

```ruby
Split.configure do |config|
  config.experiments = YAML.load_file "config/experiments.yml"
end
```

You can then define the YAML file like:

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
my_second_experiment:
  alternatives:
    - name: a
      percent: 67
    - name: b
      percent: 33
```

This simplifies the calls from your code:

```ruby
ab_test(:my_first_experiment)
```

and:

```ruby
ab_finished(:my_first_experiment)
```

You can also add metadata for each experiment, which is very useful when you need more than an alternative name to change behaviour:

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metadata: {
        "a" => {"text" => "Have a fantastic day"},
        "b" => {"text" => "Don't get hit by a bus"}
      }
    }
  }
end
```

```yaml
my_first_experiment:
  alternatives:
    - a
    - b
  metadata:
    a:
      text: "Have a fantastic day"
    b:
      text: "Don't get hit by a bus"
```

This allows for some advanced experiment configuration using methods like:

```ruby
trial.alternative.name # => "a"
trial.metadata['text'] # => "Have a fantastic day"
```

or in views:

```erb
<% ab_test("my_first_experiment") do |alternative, meta| %>
  <%= alternative %>
  <small><%= meta['text'] %></small>
<% end %>
```

The keys used in metadata should be strings.

#### Metrics

You might wish to track generic metrics, such as conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option.

```ruby
Split.configure do |config|
  config.experiments = {
    my_first_experiment: {
      alternatives: ["a", "b"],
      metric: :my_metric
    }
  }
end
```

Your code may then track a completion using the metric instead of the experiment name:

```ruby
ab_finished(:my_metric)
```

You can also create a new metric by instantiating and saving a new Metric object:

```ruby
metric = Split::Metric.new(name: :my_metric)
metric.save
```

#### Goals

You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this:

```ruby
ab_test({link_color: ["purchase", "refund"]}, "red", "blue")
```

or you can define them in a configuration file:

```ruby
Split.configure do |config|
  config.experiments = {
    link_color: {
      alternatives: ["red", "blue"],
      goals: ["purchase", "refund"]
    }
  }
end
```

To complete a goal conversion, you do it like:

```ruby
ab_finished(link_color: "purchase")
```

Note that if you pass additional options, they should be in a separate hash:

```ruby
ab_finished({ link_color: "purchase" }, reset: false)
```

**NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)
**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion").

**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.

**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK.

#### Combined Experiments

If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so:

```ruby
Split.configuration.experiments = {
  :button_color_experiment => {
    :alternatives => ["blue", "green"],
    :combined_experiments => ["button_color_on_signup", "button_color_on_login"]
  }
}
```

Starting the combined test starts all combined experiments:

```ruby
ab_combined_test(:button_color_experiment)
```

Finish each combined test as normal:

```ruby
ab_finished(:button_color_on_login)
ab_finished(:button_color_on_signup)
```

**Additional Configuration**:

* Be sure to enable `allow_multiple_experiments`
* In Sinatra include the CombinedExperimentsHelper:

```ruby
helpers Split::CombinedExperimentsHelper
```

### DB failover solution

Because Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case.

It's also possible to set a `db_failover_on_db_error` callback (proc), for example to log these errors via Rails.logger.

### Redis

You may want to change the Redis host and port Split connects to, or set various other options at startup.

Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection.

String: `Split.redis = 'redis://localhost:6379'`

Redis: `Split.redis = $redis`

For our Rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately.

Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
rails_env = ENV['RAILS_ENV'] || 'development'

split_config = YAML.load_file(rails_root + '/config/split.yml')
Split.redis = split_config[rails_env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

   ```ruby
   gem 'redis-namespace'
   ```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possibly in an initializer):

   ```ruby
   redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
   Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
   ```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions.

Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')
# create a new trial
trial = Split::Trial.new(:experiment => experiment)
# run trial
trial.choose!
# get the result, returns either red or blue
trial.alternative.name

# if the goal has been achieved, increment the successful completions for this alternative.
if goal_achieved?
  trial.complete!
end
```

## Algorithms

By default, Split ships with `Split::Algorithms::WeightedSample`, which randomly selects from the possible alternatives for a traditional A/B test. It is possible to specify static weights to favor certain alternatives.

`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed.

`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "block"), the algorithm will choose a random alternative from those minimum participant alternatives.

Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per-experiment basis using the experiments hash of the configuration file.

To change the algorithm globally for all experiments, use the following in your initializer:

```ruby
Split.configure do |config|
  config.algorithm = Split::Algorithms::Whiplash
end
```
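To set the algorithm per experiment instead, reuse the `algorithm` key of the experiments hash shown in the experiment-configuration section above. A minimal sketch (the experiment name is illustrative):

```ruby
Split.configure do |config|
  config.experiments = {
    # Only this experiment uses the bandit algorithm; others keep the default.
    my_experiment: {
      alternatives: ["a", "b"],
      algorithm: 'Split::Algorithms::Whiplash'
    }
  }
end
```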
## Extensions

- [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.
- [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.
- [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).
- [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.
- [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.

## Screencast

Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)

## Blogposts

* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)
* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)

## Backers

Support us with a monthly donation and help us continue our activities.
[[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img 
src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img 
src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Merge pull request #289 from byterussian/update-readme-rails-initializer

Update initializer code with Rails.root and Rails.env
<DFF> @@ -551,11 +551,8 @@ production: redis1.example.com:6379
 And our initializer:
 
 ```ruby
-rails_root = ENV['RAILS_ROOT'] || File.dirname(__FILE__) + '/../..'
-rails_env = ENV['RAILS_ENV'] || 'development'
-
-split_config = YAML.load_file(rails_root + '/config/split.yml')
-Split.redis = split_config[rails_env]
+split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
+Split.redis = split_config[Rails.env]
 ```
 
 ## Namespaces